nginx-cp: update to 1.26.0

This commit is contained in:
Raven 2024-04-25 17:09:07 +06:00
parent c772974094
commit 035bc54cd2
6 changed files with 276 additions and 1579 deletions

View File

@ -1,54 +0,0 @@
From 284a0c73771e3a2c57af6e74d96d9a6878b2e7b4 Mon Sep 17 00:00:00 2001
From: Maxim Dounin <mdounin@mdounin.ru>
Date: Tue, 17 Oct 2023 02:39:38 +0300
Subject: [PATCH] Core: fixed memory leak on configuration reload with PCRE2.
In ngx_regex_cleanup() allocator wasn't configured when calling
pcre2_compile_context_free() and pcre2_match_data_free(), resulting
in no ngx_free() call and leaked memory. Fix is to ensure that allocator
is configured for global allocations, so that ngx_free() is actually
called to free memory.
Additionally, ngx_regex_compile_context was cleared in
ngx_regex_module_init(). It should be either not cleared, so it will
be freed by ngx_regex_cleanup(), or properly freed. Fix is to
not clear it, so ngx_regex_cleanup() will be able to free it.
Reported by ZhenZhong Wu,
https://mailman.nginx.org/pipermail/nginx-devel/2023-September/3Z5FIKUDRN2WBSL3JWTZJ7SXDA6YIWPB.html
---
src/core/ngx_regex.c | 7 ++++---
1 file changed, 4 insertions(+), 3 deletions(-)
diff --git a/src/core/ngx_regex.c b/src/core/ngx_regex.c
index 91381f4994..5b13c5db38 100644
--- a/src/core/ngx_regex.c
+++ b/src/core/ngx_regex.c
@@ -600,6 +600,8 @@ ngx_regex_cleanup(void *data)
* the new cycle, these will be re-allocated.
*/
+ ngx_regex_malloc_init(NULL);
+
if (ngx_regex_compile_context) {
pcre2_compile_context_free(ngx_regex_compile_context);
ngx_regex_compile_context = NULL;
@@ -611,6 +613,8 @@ ngx_regex_cleanup(void *data)
ngx_regex_match_data_size = 0;
}
+ ngx_regex_malloc_done();
+
#endif
}
@@ -706,9 +710,6 @@ ngx_regex_module_init(ngx_cycle_t *cycle)
ngx_regex_malloc_done();
ngx_regex_studies = NULL;
-#if (NGX_PCRE2)
- ngx_regex_compile_context = NULL;
-#endif
return NGX_OK;
}

View File

@ -1,70 +0,0 @@
# HG changeset patch
# User Maxim Dounin <mdounin@mdounin.ru>
# Date 1696940019 -10800
# Node ID cdda286c0f1b4b10f30d4eb6a63fefb9b8708ecc
# Parent 3db945fda515014d220151046d02f3960bcfca0a
HTTP/2: per-iteration stream handling limit.
To ensure that attempts to flood servers with many streams are detected
early, a limit of no more than 2 * max_concurrent_streams new streams per one
event loop iteration was introduced. This limit is applied even if
max_concurrent_streams is not yet reached - for example, if corresponding
streams are handled synchronously or reset.
Further, refused streams are now limited to maximum of max_concurrent_streams
and 100, similarly to priority_limit initial value, providing some tolerance
to clients trying to open several streams at the connection start, yet
low tolerance to flooding attempts.
diff -r 3db945fda515 -r cdda286c0f1b src/http/v2/ngx_http_v2.c
--- a/src/http/v2/ngx_http_v2.c Fri Sep 22 19:23:57 2023 +0400
+++ b/src/http/v2/ngx_http_v2.c Tue Oct 10 15:13:39 2023 +0300
@@ -347,6 +347,7 @@
ngx_log_debug0(NGX_LOG_DEBUG_HTTP, c->log, 0, "http2 read handler");
h2c->blocked = 1;
+ h2c->new_streams = 0;
if (c->close) {
c->close = 0;
@@ -1284,6 +1285,14 @@
goto rst_stream;
}
+ if (h2c->new_streams++ >= 2 * h2scf->concurrent_streams) {
+ ngx_log_error(NGX_LOG_INFO, h2c->connection->log, 0,
+ "client sent too many streams at once");
+
+ status = NGX_HTTP_V2_REFUSED_STREAM;
+ goto rst_stream;
+ }
+
if (!h2c->settings_ack
&& !(h2c->state.flags & NGX_HTTP_V2_END_STREAM_FLAG)
&& h2scf->preread_size < NGX_HTTP_V2_DEFAULT_WINDOW)
@@ -1349,6 +1358,12 @@
rst_stream:
+ if (h2c->refused_streams++ > ngx_max(h2scf->concurrent_streams, 100)) {
+ ngx_log_error(NGX_LOG_INFO, h2c->connection->log, 0,
+ "client sent too many refused streams");
+ return ngx_http_v2_connection_error(h2c, NGX_HTTP_V2_NO_ERROR);
+ }
+
if (ngx_http_v2_send_rst_stream(h2c, h2c->state.sid, status) != NGX_OK) {
return ngx_http_v2_connection_error(h2c, NGX_HTTP_V2_INTERNAL_ERROR);
}
diff -r 3db945fda515 -r cdda286c0f1b src/http/v2/ngx_http_v2.h
--- a/src/http/v2/ngx_http_v2.h Fri Sep 22 19:23:57 2023 +0400
+++ b/src/http/v2/ngx_http_v2.h Tue Oct 10 15:13:39 2023 +0300
@@ -172,6 +172,8 @@
ngx_uint_t processing;
ngx_uint_t frames;
ngx_uint_t idle;
+ ngx_uint_t new_streams;
+ ngx_uint_t refused_streams;
ngx_uint_t priority_limit;
ngx_uint_t pushing;

View File

@ -1,119 +0,0 @@
From c93cb45ae30760b7cd4ce2d9e053a36449d4e233 Mon Sep 17 00:00:00 2001
From: Maxim Dounin <mdounin@mdounin.ru>
Date: Wed, 18 Oct 2023 04:30:11 +0300
Subject: [PATCH] Core: changed ngx_queue_sort() to use merge sort.
This improves nginx startup times significantly when using very large number
of locations due to computational complexity of the sorting algorithm being
used: insertion sort is O(n*n) on average, while merge sort is O(n*log(n)).
In particular, in a test configuration with 20k locations total startup
time is reduced from 8 seconds to 0.9 seconds.
Prodded by Yusuke Nojima,
https://mailman.nginx.org/pipermail/nginx-devel/2023-September/NUL3Y2FPPFSHMPTFTL65KXSXNTX3NQMK.html
---
src/core/ngx_queue.c | 52 +++++++++++++++++++++++++++++++++-----------
src/core/ngx_queue.h | 3 +++
2 files changed, 42 insertions(+), 13 deletions(-)
diff --git a/src/core/ngx_queue.c b/src/core/ngx_queue.c
index 3cacaf3a88..3d1d589884 100644
--- a/src/core/ngx_queue.c
+++ b/src/core/ngx_queue.c
@@ -9,6 +9,10 @@
#include <ngx_core.h>
+static void ngx_queue_merge(ngx_queue_t *queue, ngx_queue_t *tail,
+ ngx_int_t (*cmp)(const ngx_queue_t *, const ngx_queue_t *));
+
+
/*
* find the middle queue element if the queue has odd number of elements
* or the first element of the queue's second part otherwise
@@ -45,13 +49,13 @@ ngx_queue_middle(ngx_queue_t *queue)
}
-/* the stable insertion sort */
+/* the stable merge sort */
void
ngx_queue_sort(ngx_queue_t *queue,
ngx_int_t (*cmp)(const ngx_queue_t *, const ngx_queue_t *))
{
- ngx_queue_t *q, *prev, *next;
+ ngx_queue_t *q, tail;
q = ngx_queue_head(queue);
@@ -59,22 +63,44 @@ ngx_queue_sort(ngx_queue_t *queue,
return;
}
- for (q = ngx_queue_next(q); q != ngx_queue_sentinel(queue); q = next) {
+ q = ngx_queue_middle(queue);
+
+ ngx_queue_split(queue, q, &tail);
+
+ ngx_queue_sort(queue, cmp);
+ ngx_queue_sort(&tail, cmp);
+
+ ngx_queue_merge(queue, &tail, cmp);
+}
+
- prev = ngx_queue_prev(q);
- next = ngx_queue_next(q);
+static void
+ngx_queue_merge(ngx_queue_t *queue, ngx_queue_t *tail,
+ ngx_int_t (*cmp)(const ngx_queue_t *, const ngx_queue_t *))
+{
+ ngx_queue_t *q1, *q2;
- ngx_queue_remove(q);
+ q1 = ngx_queue_head(queue);
+ q2 = ngx_queue_head(tail);
- do {
- if (cmp(prev, q) <= 0) {
- break;
- }
+ for ( ;; ) {
+ if (q1 == ngx_queue_sentinel(queue)) {
+ ngx_queue_add(queue, tail);
+ break;
+ }
- prev = ngx_queue_prev(prev);
+ if (q2 == ngx_queue_sentinel(tail)) {
+ break;
+ }
+
+ if (cmp(q1, q2) <= 0) {
+ q1 = ngx_queue_next(q1);
+ continue;
+ }
- } while (prev != ngx_queue_sentinel(queue));
+ ngx_queue_remove(q2);
+ ngx_queue_insert_before(q1, q2);
- ngx_queue_insert_after(prev, q);
+ q2 = ngx_queue_head(tail);
}
}
diff --git a/src/core/ngx_queue.h b/src/core/ngx_queue.h
index 038bf12113..0f82f173e4 100644
--- a/src/core/ngx_queue.h
+++ b/src/core/ngx_queue.h
@@ -47,6 +47,9 @@ struct ngx_queue_s {
(h)->prev = x
+#define ngx_queue_insert_before ngx_queue_insert_tail
+
+
#define ngx_queue_head(h) \
(h)->next

View File

@ -18,6 +18,7 @@
%bcond_without brotli
%bcond_without fancyindex
%bcond_without vts
%bcond_with http3
%if 0%{?rhel} >= 8
%bcond_with gperftools
@ -42,8 +43,8 @@
Name: nginx-cp
Epoch: 1
Version: 1.24.0
Release: 5%{?dist}
Version: 1.26.0
Release: 1%{?dist}
Summary: A high performance web server and reverse proxy server
Group: System Environment/Daemons
@ -97,8 +98,8 @@ Patch0: nginx-auto-cc-gcc.patch
Patch1: 0002-fix-PIDFile-handling.patch
%if %{with certmin_patches}
# 2 in 1 patch
Patch3: nginx__cloudflare_1.19+.patch
# Dynamic TLS records patch
Patch3: nginx_dynamic_tls_records.patch
%endif
%if %{with brotli}
@ -109,11 +110,6 @@ Patch4: ngx_brotli-config.patch
Patch5: nginx-1.24-lua-mod-lowering-luajit-alert-severity.patch
%endif
Patch6: CVE-2023-44487.patch
Patch7: c93cb45ae30760b7cd4ce2d9e053a36449d4e233.patch
Patch8: 284a0c73771e3a2c57af6e74d96d9a6878b2e7b4.patch
BuildRequires: make binutils
BuildRequires: gcc >= 4.8
@ -121,11 +117,12 @@ BuildRequires: gcc >= 4.8
%if %{with gperftools}
BuildRequires: gperftools-devel
%endif
%if 0%{?rhel} <= 8
BuildRequires: openssl3-devel >= %{openssl_ver}
Requires: openssl3-libs >= %{openssl_ver}
%if %{with http3}
BuildRequires: pkgconfig(openssl) >= 3.8
%else
BuildRequires: openssl-devel
BuildRequires: pkgconfig(openssl) >= %{openssl_ver}
BuildRequires: pkgconfig(openssl) < 3.5
%endif
BuildRequires: pcre2-devel
@ -238,7 +235,7 @@ modules and features:
- ngx_http_geoip2_module
- ngx_http_fancyindex_module
- ngx_http_lua_module
includes dynamic-ssl-record-size, http2_hpack patches from certminmod
includes dynamic-ssl-record-size patch from certminmod
TFO support enabled
@ -288,12 +285,6 @@ tar -xf %{SOURCE304}
%patch5 -p1 -b .luajit
%endif
%patch6 -p1 -b .CVE-2023-44487
%patch7 -p1 -b .ngx_queue_sort
%if %{without lua}
%patch8 -p1 -b .pcre2-memleak
%endif
tar -xf %{SOURCE305}
%if %{with fancyindex}
@ -327,12 +318,6 @@ sed -i '1 i\LUAJIT_LIB=%{_libdir}' lua-nginx-module-%{luamod_ver}/config
%endif
%build
# %if 0%{?rhel} <= 7
# # Build with gcc8
# %enable_devtoolset8
# %endif
# nginx does not utilize a standard configure script. It has its own
# and the standard configure options cause the nginx configure script
# to error out. This is also the reason for the DESTDIR environment
@ -341,9 +326,13 @@ sed -i '1 i\LUAJIT_LIB=%{_libdir}' lua-nginx-module-%{luamod_ver}/config
ngx_ldflags="$RPM_LD_FLAGS -Wl,-E"
ngx_cflags="%{optflags} $(pcre2-config --cflags) -DTCP_FASTOPEN=23 -fPIC"
%if %{with http3}
ngx_cflags="-I%{ssl_prefix}/include/libressl $ngx_cflags"
%else
%if 0%{?rhel} <= 8
ngx_cflags="-I%{ssl_prefix}/include/openssl3 $ngx_cflags"
%endif
%endif
%if %{with lua}
ngx_cflags="-DNGX_LUA_ABORT_AT_PANIC $ngx_cflags"
@ -377,8 +366,8 @@ export DESTDIR=%{buildroot}
--with-threads \
--with-http_ssl_module \
--with-http_v2_module \
%if %{with certmin_patches}
--with-http_v2_hpack_enc \
%if %{with http3}
--with-http_v3_module \
%endif
--with-http_realip_module \
--with-http_addition_module \
@ -681,6 +670,11 @@ fi
%endif
%changelog
* Wed Apr 24 2024 Raven <raven@sysadmins.ws> - 1.26.0-1
- new stable version
- get rid of http2 HPACK
- add http3 build ability
* Mon Apr 8 2024 Raven <raven@sysadmins.ws> - 1.24.0-5
- upgrade lua-nginx-module to 0.10.26

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,254 @@
What we do now:
We use a static record size of 4K. This gives a good balance of latency and
throughput.
Optimize latency:
By initially sending small (1 TCP segment) sized records, we are able to avoid
HoL blocking of the first byte. This means TTFB is sometimes lower by a whole
RTT.
Optimizing throughput:
By sending increasingly larger records later in the connection, when HoL is not
a problem, we reduce the overhead of TLS record (29 bytes per record with
GCM/CHACHA-POLY).
Logic:
Start each connection with small records (1369 byte default, change with
ssl_dyn_rec_size_lo). After a given number of records (40, change with
ssl_dyn_rec_threshold) start sending larger records (4229, ssl_dyn_rec_size_hi).
Eventually after the same number of records, start sending the largest records
(ssl_buffer_size).
In case the connection idles for a given amount of time (1s,
ssl_dyn_rec_timeout), the process repeats itself (i.e. begin sending small
records again).
diff --color -uNr a/src/event/ngx_event_openssl.c b/src/event/ngx_event_openssl.c
--- a/src/event/ngx_event_openssl.c 2023-06-13 23:08:10.000000000 +0800
+++ b/src/event/ngx_event_openssl.c 2023-06-14 15:43:05.834243714 +0800
@@ -1674,6 +1674,7 @@
sc->buffer = ((flags & NGX_SSL_BUFFER) != 0);
sc->buffer_size = ssl->buffer_size;
+ sc->dyn_rec = ssl->dyn_rec;
sc->session_ctx = ssl->ctx;
@@ -2645,6 +2646,41 @@
for ( ;; ) {
+ /* Dynamic record resizing:
+ We want the initial records to fit into one TCP segment
+ so we don't get TCP HoL blocking due to TCP Slow Start.
+ A connection always starts with small records, but after
+ a given amount of records sent, we make the records larger
+ to reduce header overhead.
+ After a connection has idled for a given timeout, begin
+ the process from the start. The actual parameters are
+ configurable. If dyn_rec_timeout is 0, we assume dyn_rec is off. */
+
+ if (c->ssl->dyn_rec.timeout > 0 ) {
+
+ if (ngx_current_msec - c->ssl->dyn_rec_last_write >
+ c->ssl->dyn_rec.timeout)
+ {
+ buf->end = buf->start + c->ssl->dyn_rec.size_lo;
+ c->ssl->dyn_rec_records_sent = 0;
+
+ } else {
+ if (c->ssl->dyn_rec_records_sent >
+ c->ssl->dyn_rec.threshold * 2)
+ {
+ buf->end = buf->start + c->ssl->buffer_size;
+
+ } else if (c->ssl->dyn_rec_records_sent >
+ c->ssl->dyn_rec.threshold)
+ {
+ buf->end = buf->start + c->ssl->dyn_rec.size_hi;
+
+ } else {
+ buf->end = buf->start + c->ssl->dyn_rec.size_lo;
+ }
+ }
+ }
+
while (in && buf->last < buf->end && send < limit) {
if (in->buf->last_buf || in->buf->flush) {
flush = 1;
@@ -2784,6 +2820,9 @@
if (n > 0) {
+ c->ssl->dyn_rec_records_sent++;
+ c->ssl->dyn_rec_last_write = ngx_current_msec;
+
if (c->ssl->saved_read_handler) {
c->read->handler = c->ssl->saved_read_handler;
diff --color -uNr a/src/event/ngx_event_openssl.h b/src/event/ngx_event_openssl.h
--- a/src/event/ngx_event_openssl.h 2023-06-13 23:08:10.000000000 +0800
+++ b/src/event/ngx_event_openssl.h 2023-06-14 15:43:05.834243714 +0800
@@ -86,10 +86,19 @@
typedef struct ngx_ssl_ocsp_s ngx_ssl_ocsp_t;
+typedef struct {
+ ngx_msec_t timeout;
+ ngx_uint_t threshold;
+ size_t size_lo;
+ size_t size_hi;
+} ngx_ssl_dyn_rec_t;
+
+
struct ngx_ssl_s {
SSL_CTX *ctx;
ngx_log_t *log;
size_t buffer_size;
+ ngx_ssl_dyn_rec_t dyn_rec;
};
@@ -128,6 +137,10 @@
unsigned in_ocsp:1;
unsigned early_preread:1;
unsigned write_blocked:1;
+
+ ngx_ssl_dyn_rec_t dyn_rec;
+ ngx_msec_t dyn_rec_last_write;
+ ngx_uint_t dyn_rec_records_sent;
};
@@ -137,7 +150,7 @@
#define NGX_SSL_DFLT_BUILTIN_SCACHE -5
-#define NGX_SSL_MAX_SESSION_SIZE 4096
+#define NGX_SSL_MAX_SESSION_SIZE 16384
typedef struct ngx_ssl_sess_id_s ngx_ssl_sess_id_t;
diff --color -uNr a/src/http/modules/ngx_http_ssl_module.c b/src/http/modules/ngx_http_ssl_module.c
--- a/src/http/modules/ngx_http_ssl_module.c 2023-06-13 23:08:10.000000000 +0800
+++ b/src/http/modules/ngx_http_ssl_module.c 2023-06-14 15:43:05.834243714 +0800
@@ -290,6 +290,41 @@
offsetof(ngx_http_ssl_srv_conf_t, reject_handshake),
NULL },
+ { ngx_string("ssl_dyn_rec_enable"),
+ NGX_HTTP_MAIN_CONF|NGX_HTTP_SRV_CONF|NGX_CONF_FLAG,
+ ngx_conf_set_flag_slot,
+ NGX_HTTP_SRV_CONF_OFFSET,
+ offsetof(ngx_http_ssl_srv_conf_t, dyn_rec_enable),
+ NULL },
+
+ { ngx_string("ssl_dyn_rec_timeout"),
+ NGX_HTTP_MAIN_CONF|NGX_HTTP_SRV_CONF|NGX_CONF_FLAG,
+ ngx_conf_set_msec_slot,
+ NGX_HTTP_SRV_CONF_OFFSET,
+ offsetof(ngx_http_ssl_srv_conf_t, dyn_rec_timeout),
+ NULL },
+
+ { ngx_string("ssl_dyn_rec_size_lo"),
+ NGX_HTTP_MAIN_CONF|NGX_HTTP_SRV_CONF|NGX_CONF_FLAG,
+ ngx_conf_set_size_slot,
+ NGX_HTTP_SRV_CONF_OFFSET,
+ offsetof(ngx_http_ssl_srv_conf_t, dyn_rec_size_lo),
+ NULL },
+
+ { ngx_string("ssl_dyn_rec_size_hi"),
+ NGX_HTTP_MAIN_CONF|NGX_HTTP_SRV_CONF|NGX_CONF_FLAG,
+ ngx_conf_set_size_slot,
+ NGX_HTTP_SRV_CONF_OFFSET,
+ offsetof(ngx_http_ssl_srv_conf_t, dyn_rec_size_hi),
+ NULL },
+
+ { ngx_string("ssl_dyn_rec_threshold"),
+ NGX_HTTP_MAIN_CONF|NGX_HTTP_SRV_CONF|NGX_CONF_FLAG,
+ ngx_conf_set_num_slot,
+ NGX_HTTP_SRV_CONF_OFFSET,
+ offsetof(ngx_http_ssl_srv_conf_t, dyn_rec_threshold),
+ NULL },
+
ngx_null_command
};
@@ -629,6 +664,11 @@
sscf->ocsp_cache_zone = NGX_CONF_UNSET_PTR;
sscf->stapling = NGX_CONF_UNSET;
sscf->stapling_verify = NGX_CONF_UNSET;
+ sscf->dyn_rec_enable = NGX_CONF_UNSET;
+ sscf->dyn_rec_timeout = NGX_CONF_UNSET_MSEC;
+ sscf->dyn_rec_size_lo = NGX_CONF_UNSET_SIZE;
+ sscf->dyn_rec_size_hi = NGX_CONF_UNSET_SIZE;
+ sscf->dyn_rec_threshold = NGX_CONF_UNSET_UINT;
return sscf;
}
@@ -694,6 +734,20 @@
ngx_conf_merge_str_value(conf->stapling_responder,
prev->stapling_responder, "");
+ ngx_conf_merge_value(conf->dyn_rec_enable, prev->dyn_rec_enable, 0);
+ ngx_conf_merge_msec_value(conf->dyn_rec_timeout, prev->dyn_rec_timeout,
+ 1000);
+ /* Default sizes for the dynamic record sizes are defined to fit maximal
+ TLS + IPv6 overhead in a single TCP segment for lo and 3 segments for hi:
+ 1369 = 1500 - 40 (IP) - 20 (TCP) - 10 (Time) - 61 (Max TLS overhead) */
+ ngx_conf_merge_size_value(conf->dyn_rec_size_lo, prev->dyn_rec_size_lo,
+ 1369);
+ /* 4229 = (1500 - 40 - 20 - 10) * 3 - 61 */
+ ngx_conf_merge_size_value(conf->dyn_rec_size_hi, prev->dyn_rec_size_hi,
+ 4229);
+ ngx_conf_merge_uint_value(conf->dyn_rec_threshold, prev->dyn_rec_threshold,
+ 40);
+
conf->ssl.log = cf->log;
if (conf->certificates) {
@@ -890,6 +944,28 @@
return NGX_CONF_ERROR;
}
+ if (conf->dyn_rec_enable) {
+ conf->ssl.dyn_rec.timeout = conf->dyn_rec_timeout;
+ conf->ssl.dyn_rec.threshold = conf->dyn_rec_threshold;
+
+ if (conf->buffer_size > conf->dyn_rec_size_lo) {
+ conf->ssl.dyn_rec.size_lo = conf->dyn_rec_size_lo;
+
+ } else {
+ conf->ssl.dyn_rec.size_lo = conf->buffer_size;
+ }
+
+ if (conf->buffer_size > conf->dyn_rec_size_hi) {
+ conf->ssl.dyn_rec.size_hi = conf->dyn_rec_size_hi;
+
+ } else {
+ conf->ssl.dyn_rec.size_hi = conf->buffer_size;
+ }
+
+ } else {
+ conf->ssl.dyn_rec.timeout = 0;
+ }
+
return NGX_CONF_OK;
}
diff --color -uNr a/src/http/modules/ngx_http_ssl_module.h b/src/http/modules/ngx_http_ssl_module.h
--- a/src/http/modules/ngx_http_ssl_module.h 2023-06-13 23:08:10.000000000 +0800
+++ b/src/http/modules/ngx_http_ssl_module.h 2023-06-14 15:43:38.264102815 +0800
@@ -62,6 +62,12 @@
ngx_flag_t stapling_verify;
ngx_str_t stapling_file;
ngx_str_t stapling_responder;
+
+ ngx_flag_t dyn_rec_enable;
+ ngx_msec_t dyn_rec_timeout;
+ size_t dyn_rec_size_lo;
+ size_t dyn_rec_size_hi;
+ ngx_uint_t dyn_rec_threshold;
} ngx_http_ssl_srv_conf_t;