Overview

The socket option SO_LINGER:

struct linger {
    int l_onoff;     /* Linger active */
    int l_linger;    /* How long to linger for */
};
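For reference, here is a minimal sketch (not from the original article) of how the option is applied with setsockopt(2); the helper name and the descriptor fd are illustrative:

#include <stdio.h>
#include <sys/socket.h>

/* force an RST on close(): buffered data is discarded instead of lingering */
static int
set_linger_rst(int fd)
{
    struct linger  lg;

    lg.l_onoff = 1;     /* enable SO_LINGER */
    lg.l_linger = 0;    /* zero timeout: close() aborts the connection with an RST */

    if (setsockopt(fd, SOL_SOCKET, SO_LINGER, &lg, sizeof(lg)) == -1) {
        perror("setsockopt(SO_LINGER)");
        return -1;
    }

    return 0;
}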
Part 1: close()

Case 1: l_onoff = 0;
This is the default SO_LINGER setting for close().
Data remaining in the send buffer, along with the FIN, continues to be delivered to the client.
If the receive buffer still holds unread data, however, an RST packet is sent to the client instead; on receiving it, the client discards everything in its own receive buffer.

Case 2: l_onoff = 1; l_linger = 0;
All data in both the send buffer and the receive buffer is discarded, and an RST packet is sent to the client.

Case 3: l_onoff = 1; l_linger != 0;
close() blocks (on a blocking socket) until the buffered data has been sent and acknowledged, or until the l_linger timeout expires, whichever comes first.
When close() runs with its default setting, i.e. case 1, problems can arise, and pipelining makes them especially likely:
- The client sends 10 requests.
- The server can only process 8 of them.
- The server returns 8 responses.
- The server calls close().
- Response data still in the server's send buffer continues to be delivered to the client, unaffected.
- The client's receive buffer still holds response data, but because the server's receive buffer was not empty (the two unprocessed requests were never read), close() sent an RST packet; if the client receives the RST before reading those responses, it discards the contents of its receive buffer.
- The client has thus lost some of the responses.
Part 2: shutdown()

shutdown(sockfd, SHUT_RD)
Closes the read side of sockfd: data in the receive buffer is discarded, and no further reads are possible.
shutdown(sockfd, SHUT_WR)
Closes the write side of sockfd: no further writes are possible.
shutdown(sockfd, SHUT_RDWR)
Equivalent to calling shutdown(sockfd, SHUT_RD) and shutdown(sockfd, SHUT_WR) once each.
The difference between close() and shutdown() is that close() only decrements the descriptor's reference count, and the socket is released only once the count drops to 0, whereas shutdown() affects the socket's communication for every process that shares it.
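To illustrate the half-close this enables, here is a minimal sketch (assumed code, not from the original article): the caller stops writing with SHUT_WR, which still flushes buffered data and sends a FIN, then keeps reading until the peer closes its side:

#include <sys/socket.h>
#include <unistd.h>

/* half-close: send our FIN but keep draining the peer's data until EOF */
static void
half_close_and_drain(int fd)
{
    char     buf[4096];
    ssize_t  n;

    shutdown(fd, SHUT_WR);                /* no more writes; FIN is sent */

    while ((n = read(fd, buf, sizeof(buf))) > 0) {
        /* consume (or discard) whatever the peer still sends */
    }

    close(fd);                            /* drop our reference to the fd */
}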
Part 3: The ideal scheme

When the server finishes serving a client, two points need to be considered:
1. The server must not send an RST too early, to avoid the client losing data.
2. There must be some way to ensure the data reached the client and was received correctly; the server needs to do some verification.

Combining the SO_LINGER option with close(), shutdown(), and read() yields the scheme below (a sketch follows the list):
1) Set the SO_LINGER option with l_onoff non-zero and l_linger = 0. (This way, when step 5 runs, an RST packet is sent straight to the client.)
2) Call shutdown(sock_fd, SHUT_WR). (No more data can be written to the send buffer; what is already there is progressively delivered to the client.)
3) Arm a timeout timer, say t seconds. (Wait for the client to finish receiving the data and return its FIN.)
4) Call read(sock_fd) and block until it reads EOF or is interrupted by the timer. (If the client has not returned a FIN by the time the timer fires, assume it has finished receiving the data, so the RST can be sent.)
5) Call close(sock_fd), or let the process exit with exit(0). (An RST is sent straight to the client, replacing the normal TCP four-way handshake.)
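A minimal sketch of these five steps (assumed code: it uses SO_RCVTIMEO as the step-3 timer, whereas nginx uses its own event timers, and the function name and buffer size are illustrative):

#include <sys/socket.h>
#include <sys/time.h>
#include <unistd.h>

/* graceful server-side close: drain the client, then reset */
static void
lingering_close(int fd, time_t t)
{
    struct linger   lg = { 1, 0 };        /* step 1: make close() send an RST */
    struct timeval  tv = { t, 0 };        /* step 3: t-second read timeout */
    char            buf[4096];
    ssize_t         n;

    setsockopt(fd, SOL_SOCKET, SO_LINGER, &lg, sizeof(lg));

    shutdown(fd, SHUT_WR);                /* step 2: flush data and send FIN */

    setsockopt(fd, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv));

    do {                                  /* step 4: wait for the client's FIN */
        n = read(fd, buf, sizeof(buf));   /* 0 on EOF, -1 on timeout/error */
    } while (n > 0);

    close(fd);                            /* step 5: RST instead of the normal
                                             four-way handshake */
}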
nginx uses a strategy much like the five-step plan above with its clients, except that step 1 is skipped: SO_LINGER is set only once nginx judges that the request has timed out, i.e. when step 4 times out. In effect, step 1 is moved after step 4: on timeout an RST is sent directly; otherwise the connection is closed normally.
This behaviour is controlled by three directives:
lingering_close
lingering_time
lingering_timeout

1) lingering_close can be set to on, off, or always.
lingering_close off: the strategy above is not used; close() is called directly.
lingering_close always: the strategy is always used.
lingering_close on: the strategy is used only in some situations, for example when there may still be unread data from the client (see the conditions in the code below).

2) lingering_time
Once lingering_close is enabled, this setting matters when large files are uploaded. When a request's Content-Length exceeds the client_max_body_size limit, nginx immediately sends the client a 413 (Request Entity Too Large) response. Many clients, however, ignore the 413 and keep uploading the HTTP body regardless; once the time set by lingering_time has elapsed, nginx closes the connection whether or not the client is still uploading.

3) lingering_timeout
Once lingering_close is in effect, nginx checks, before closing the connection, whether any data sent by the client has reached the server. If nothing becomes readable within lingering_timeout, the connection is closed directly; otherwise, the data pending on the connection's buffer must be read and discarded before the connection is closed.
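For reference, a minimal configuration sketch with the three directives at their documented defaults (they may be set at http, server, or location level):

http {
    lingering_close   on;     # off | on | always
    lingering_time    30s;    # cap on the total lingering period
    lingering_timeout 5s;     # max wait for each further read
}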
Detailed code analysis:
static void
ngx_http_finalize_connection(ngx_http_request_t *r)
{
    ngx_http_core_loc_conf_t  *clcf;

    clcf = ngx_http_get_module_loc_conf(r, ngx_http_core_module);

    if (r->main->count != 1) {

        if (r->discard_body) {
            r->read_event_handler = ngx_http_discarded_request_body_handler;
            ngx_add_timer(r->connection->read, clcf->lingering_timeout);

            if (r->lingering_time == 0) {
                r->lingering_time = ngx_time()
                                      + (time_t) (clcf->lingering_time / 1000);
            }
        }

        ngx_http_close_request(r, 0);
        return;
    }

    /* first check for the keepalive case and hand over to keepalive handling */

    if (!ngx_terminate
        && !ngx_exiting
        && r->keepalive
        && clcf->keepalive_timeout > 0)
    {
        ngx_http_set_keepalive(r);
        return;
    }

    /*
     * if "lingering_close always" is configured, or "lingering_close on"
     * is configured and certain conditions hold, then shutdown first,
     * wait, and only then close
     */

    if (clcf->lingering_close == NGX_HTTP_LINGERING_ALWAYS
        || (clcf->lingering_close == NGX_HTTP_LINGERING_ON
            && (r->lingering_close
                || r->header_in->pos < r->header_in->last
                || r->connection->read->ready)))
    {
        ngx_http_set_lingering_close(r);
        return;
    }

    /* otherwise close directly, leaving SO_LINGER at its default */

    ngx_http_close_request(r, 0);
}
lingering_close defaults to on, so for the common case of short-lived HTTP connections the request ends up in ngx_http_set_lingering_close(r).
static void
ngx_http_set_lingering_close(ngx_http_request_t *r)
{
    ngx_event_t               *rev, *wev;
    ngx_connection_t          *c;
    ngx_http_core_loc_conf_t  *clcf;

    c = r->connection;

    clcf = ngx_http_get_module_loc_conf(r, ngx_http_core_module);

    rev = c->read;
    rev->handler = ngx_http_lingering_close_handler;

    r->lingering_time = ngx_time() + (time_t) (clcf->lingering_time / 1000);
    ngx_add_timer(rev, clcf->lingering_timeout);

    if (ngx_handle_read_event(rev, 0) != NGX_OK) {
        ngx_http_close_request(r, 0);
        return;
    }

    wev = c->write;
    wev->handler = ngx_http_empty_handler;

    if (wev->active && (ngx_event_flags & NGX_USE_LEVEL_EVENT)) {
        if (ngx_del_event(wev, NGX_WRITE_EVENT, 0) != NGX_OK) {
            ngx_http_close_request(r, 0);
            return;
        }
    }

    if (ngx_shutdown_socket(c->fd, NGX_WRITE_SHUTDOWN) == -1) {
        ngx_connection_error(c, ngx_socket_errno,
                             ngx_shutdown_socket_n " failed");
        ngx_http_close_request(r, 0);
        return;
    }

    if (rev->ready) {
        ngx_http_lingering_close_handler(rev);
    }
}
static void
ngx_http_lingering_close_handler(ngx_event_t *rev)
{
    ssize_t                    n;
    ngx_msec_t                 timer;
    ngx_connection_t          *c;
    ngx_http_request_t        *r;
    ngx_http_core_loc_conf_t  *clcf;
    u_char                     buffer[NGX_HTTP_LINGERING_BUFFER_SIZE];

    c = rev->data;
    r = c->data;

    ngx_log_debug0(NGX_LOG_DEBUG_HTTP, c->log, 0,
                   "http lingering close handler");

    if (rev->timedout) {
        ngx_http_close_request(r, 0);
        return;
    }

    timer = (ngx_msec_t) (r->lingering_time - ngx_time());
    if (timer <= 0) {
        ngx_http_close_request(r, 0);
        return;
    }

    do {
        n = c->recv(c, buffer, NGX_HTTP_LINGERING_BUFFER_SIZE);

        ngx_log_debug1(NGX_LOG_DEBUG_HTTP, c->log, 0, "lingering read: %d", n);

        if (n == NGX_ERROR || n == 0) {
            ngx_http_close_request(r, 0);
            return;
        }

    } while (rev->ready);

    if (ngx_handle_read_event(rev, 0) != NGX_OK) {
        ngx_http_close_request(r, 0);
        return;
    }

    clcf = ngx_http_get_module_loc_conf(r, ngx_http_core_module);

    timer *= 1000;

    if (timer > clcf->lingering_timeout) {
        timer = clcf->lingering_timeout;
    }

    ngx_add_timer(rev, timer);
}
static void
ngx_http_close_request(ngx_http_request_t *r, ngx_int_t rc)
{
    ngx_connection_t  *c;

    r = r->main;
    c = r->connection;

    ngx_log_debug2(NGX_LOG_DEBUG_HTTP, c->log, 0,
                   "http request count:%d blk:%d", r->count, r->blocked);

    if (r->count == 0) {
        ngx_log_error(NGX_LOG_ALERT, c->log, 0, "http request count is zero");
    }

    r->count--;

    if (r->count || r->blocked) {
        return;
    }

    ngx_http_free_request(r, rc);
    ngx_http_close_connection(c);
}
static void
ngx_http_free_request(ngx_http_request_t *r, ngx_int_t rc)
{
    ngx_log_t                 *log;
    struct linger              linger;
    ngx_http_cleanup_t        *cln;
    ngx_http_log_ctx_t        *ctx;
    ngx_http_core_loc_conf_t  *clcf;

    log = r->connection->log;

    ngx_log_debug0(NGX_LOG_DEBUG_HTTP, log, 0, "http close request");

    if (r->pool == NULL) {
        ngx_log_error(NGX_LOG_ALERT, log, 0, "http request already closed");
        return;
    }

    for (cln = r->cleanup; cln; cln = cln->next) {
        if (cln->handler) {
            cln->handler(cln->data);
        }
    }

#if (NGX_STAT_STUB)

    if (r->stat_reading) {
        (void) ngx_atomic_fetch_add(ngx_stat_reading, -1);
    }

    if (r->stat_writing) {
        (void) ngx_atomic_fetch_add(ngx_stat_writing, -1);
    }

#endif

    if (rc > 0 && (r->headers_out.status == 0 || r->connection->sent == 0)) {
        r->headers_out.status = rc;
    }

    log->action = "logging request";

    ngx_http_log_request(r);

    log->action = "closing request";

    if (r->connection->timedout) {
        clcf = ngx_http_get_module_loc_conf(r, ngx_http_core_module);

        if (clcf->reset_timedout_connection) {
            linger.l_onoff = 1;
            linger.l_linger = 0;

            if (setsockopt(r->connection->fd, SOL_SOCKET, SO_LINGER,
                           (const void *) &linger, sizeof(struct linger)) == -1)
            {
                ngx_log_error(NGX_LOG_ALERT, log, ngx_socket_errno,
                              "setsockopt(SO_LINGER) failed");
            }
        }
    }

    /* the various request strings were allocated from r->pool */

    ctx = log->data;
    ctx->request = NULL;

    r->request_line.len = 0;

    r->connection->destroyed = 1;

    ngx_destroy_pool(r->pool);
}
static void
ngx_http_close_connection(ngx_connection_t *c)
{
    ngx_pool_t  *pool;

    ngx_log_debug1(NGX_LOG_DEBUG_HTTP, c->log, 0,
                   "close http connection: %d", c->fd);

#if (NGX_HTTP_SSL)

    if (c->ssl) {
        if (ngx_ssl_shutdown(c) == NGX_AGAIN) {
            c->ssl->handler = ngx_http_close_connection;
            return;
        }
    }

#endif

#if (NGX_STAT_STUB)
    (void) ngx_atomic_fetch_add(ngx_stat_active, -1);
#endif

    c->destroyed = 1;

    pool = c->pool;

    ngx_close_connection(c);

    ngx_destroy_pool(pool);
}
void
ngx_close_connection(ngx_connection_t *c)
{
    ngx_err_t     err;
    ngx_uint_t    log_error, level;
    ngx_socket_t  fd;

    if (c->fd == -1) {
        ngx_log_error(NGX_LOG_ALERT, c->log, 0, "connection already closed");
        return;
    }

    if (c->read->timer_set) {
        ngx_del_timer(c->read);
    }

    if (c->write->timer_set) {
        ngx_del_timer(c->write);
    }

    if (ngx_del_conn) {
        ngx_del_conn(c, NGX_CLOSE_EVENT);

    } else {
        if (c->read->active || c->read->disabled) {
            ngx_del_event(c->read, NGX_READ_EVENT, NGX_CLOSE_EVENT);
        }

        if (c->write->active || c->write->disabled) {
            ngx_del_event(c->write, NGX_WRITE_EVENT, NGX_CLOSE_EVENT);
        }
    }

#if (NGX_THREADS)

    /*
     * we have to clean the connection information before the closing
     * because another thread may reopen the same file descriptor
     * before we clean the connection
     */

    ngx_mutex_lock(ngx_posted_events_mutex);

    if (c->read->prev) {
        ngx_delete_posted_event(c->read);
    }

    if (c->write->prev) {
        ngx_delete_posted_event(c->write);
    }

    c->read->closed = 1;
    c->write->closed = 1;

    if (c->single_connection) {
        ngx_unlock(&c->lock);
        c->read->locked = 0;
        c->write->locked = 0;
    }

    ngx_mutex_unlock(ngx_posted_events_mutex);

#else

    if (c->read->prev) {
        ngx_delete_posted_event(c->read);
    }

    if (c->write->prev) {
        ngx_delete_posted_event(c->write);
    }

    c->read->closed = 1;
    c->write->closed = 1;

#endif

    ngx_reusable_connection(c, 0);

    log_error = c->log_error;

    ngx_free_connection(c);

    fd = c->fd;
    c->fd = (ngx_socket_t) -1;

    if (ngx_close_socket(fd) == -1) {

        err = ngx_socket_errno;

        if (err == NGX_ECONNRESET || err == NGX_ENOTCONN) {

            switch (log_error) {

            case NGX_ERROR_INFO:
                level = NGX_LOG_INFO;
                break;

            case NGX_ERROR_ERR:
                level = NGX_LOG_ERR;
                break;

            default:
                level = NGX_LOG_CRIT;
            }

        } else {
            level = NGX_LOG_CRIT;
        }

        /* we use ngx_cycle->log because c->log was in c->pool */

        ngx_log_error(level, ngx_cycle->log, err,
                      ngx_close_socket_n " %d failed", fd);
    }
}