Nginx+Tomcat configured with static-dynamic separation and load balancing
- 2020-05-12 06:48:06
- OfStack
Since the company has been using Nginx, I was a little curious about it when I first came into contact with it, so I studied it.
The version I use under windows is nginx-1.8.1:
1. Start Nginx
Double-click nginx.exe in the nginx-1.8.1 folder. When there are two nginx processes in the task manager, the startup has succeeded!
2. Nginx common commands
nginx -s stop — force shutdown; nginx -s quit — graceful shutdown; nginx -s reload — after changing the configuration file, restart the nginx worker processes so the configuration takes effect; nginx -s reopen — reopen the log files
3. Nginx configuration
The following configuration consolidates information found online; I note it down here in case I forget.
#Nginx The users and groups used
#user nobody;
# Number of worker processes (usually equal to the number of CPU cores, or 2x the CPU count)
worker_processes 1;
# The error log resides in the path
#error_log logs/error.log;
#error_log logs/error.log notice;
#error_log logs/error.log info;
# The specified pid Storage file
#pid logs/nginx.pid;
events {
# Network I/O model: epoll is recommended on Linux, kqueue on FreeBSD
#use epoll;
# The epoll model improves performance; not needed on Windows
#use epoll;
# Maximum number of connections allowed
worker_connections 1024;
}
http {
# Extension and file type mapping table
include mime.types;
# The default type
default_type application/octet-stream;
# Define the log format
#log_format main '$remote_addr - $remote_user [$time_local] "$request" '
# '$status $body_bytes_sent "$http_referer" '
# '"$http_user_agent" "$http_x_forwarded_for"';
#access_log logs/access.log main;
# Enable kernel zero-copy (sendfile) mode and keep it on for the best possible I/O efficiency
sendfile on;
#tcp_nopush on;
#keepalive_timeout 0;
# HTTP/1.1 supports persistent (keep-alive) connections
# Lowering the keep-alive time of each connection can, to some extent, increase the number of connections that can be served, so this value may be reduced as appropriate
keepalive_timeout 65;
# Enable gzip compression settings to effectively reduce network traffic
gzip on;
gzip_min_length 1k; # minimum file size to compress: 1KB
gzip_buffers 4 16k;
gzip_http_version 1.0;
gzip_comp_level 2;
gzip_types text/plain application/x-javascript text/css application/xml;
gzip_vary on;
# Static file cache
# Maximum number of cached entries; entries not accessed within the inactive period are removed
open_file_cache max=655350 inactive=20s;
# Verify the cache expiration interval
open_file_cache_valid 30s;
# The minimum number of times a document is used during the validity period
open_file_cache_min_uses 2;
#xbq add
#upstream load balancing: configure the server addresses and ports to be polled here; max_fails is the number of allowed failed requests, default 1.
#weight is the polling weight; different weight assignments can be used to balance the load across the servers.
upstream hostname {
server 127.0.0.1:9000 max_fails=0 weight=2;
server 127.0.0.1:9001 max_fails=0 weight=2;
}
server {
listen 8181;
server_name localhost;
#charset koi8-r;
#access_log logs/host.access.log main;
root /img; # create a new img folder under the nginx-1.8.1 directory for static resources
location / {
#root html;
#index index.html index.htm;
#xbq add
proxy_pass http://hostname;
# The following 3 directives allow redefining or adding request header fields that are passed on to the proxied server
# The request header Host information
proxy_set_header Host $host;
# Real client IP
proxy_set_header X-Real-IP $remote_addr;
# Proxy routing information; note that taking the client IP from here carries security risks
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
# Real user access protocol
proxy_set_header X-Forwarded-Proto $scheme;
# The default value is "default".
# When the back-end Tomcat responds with a 302, the host in the Location header is http://192.168.1.62:8080,
# because the request Tomcat received was sent by nginx, and the URL of the request nginx initiated has host http://192.168.1.62:8080.
# With "default", nginx automatically replaces the host part of the Location response header with the host the current user requested.
# Many online tutorials set this value to "off", which disables the substitution,
# so after the user's browser receives the 302 it jumps to http://192.168.1.62:8080, exposing the back-end server directly to the browser.
# So do not change this configuration unnecessarily unless you really need to
proxy_redirect default;
client_max_body_size 10m; # Maximum number of bytes per file allowed for client request
client_body_buffer_size 128k; # The buffer agent buffers the maximum number of bytes requested by the client
proxy_connect_timeout 90; #nginx The connection timeout with the back-end server
proxy_read_timeout 90; # Back end server response time after successful connection
proxy_buffer_size 4k; # size of the buffer the proxy server (nginx) uses to hold the user's header information
proxy_buffers 6 32k; # if the average page is under 32k, set proxy_buffers like this
proxy_busy_buffers_size 64k; # buffer size under high load (proxy_buffers*2)
proxy_temp_file_write_size 64k; # when buffered data exceeds this size, it is written to a temporary file while being transferred from the upstream server
}
#xbq add
# Configure Nginx static-dynamic separation: static pages are read directly from /usr/nginxStaticFile (the Nginx publish directory).
location ~\.(gif|jpg|jpeg|png|css|js|php)$ {
#expires defines how long the user's browser caches static content (7 days here); if the static pages are not updated often, this can be set longer, which saves bandwidth and relieves pressure on the server
root E:/staticResource;
expires 7d;
}
#xbq add
# Enable the nginx status monitoring page
location /nginxstatus {
stub_status on;
access_log on;
}
#error_page 404 /404.html;
# redirect server error pages to the static page /50x.html
#
error_page 500 502 503 504 /50x.html;
location = /50x.html {
root html;
}
# proxy the PHP scripts to Apache listening on 127.0.0.1:80
#
#location ~ \.php$ {
# proxy_pass http://127.0.0.1;
#}
# pass the PHP scripts to FastCGI server listening on 127.0.0.1:9000
#
#location ~ \.php$ {
# root html;
# fastcgi_pass 127.0.0.1:9000;
# fastcgi_index index.php;
# fastcgi_param SCRIPT_FILENAME /scripts$fastcgi_script_name;
# include fastcgi_params;
#}
# deny access to .htaccess files, if Apache's document root
# concurs with nginx's one
#
#location ~ /\.ht {
# deny all;
#}
}
# another virtual host using mix of IP-, name-, and port-based configuration
#
#server {
# listen 8000;
# listen somename:8080;
# server_name somename alias another.alias;
# location / {
# root html;
# index index.html index.htm;
# }
#}
# HTTPS server
#
#server {
# listen 443 ssl;
# server_name localhost;
# ssl_certificate cert.pem;
# ssl_certificate_key cert.key;
# ssl_session_cache shared:SSL:1m;
# ssl_session_timeout 5m;
# ssl_ciphers HIGH:!aNULL:!MD5;
# ssl_prefer_server_ciphers on;
# location / {
# root html;
# index index.html index.htm;
# }
#}
}