From 25a875b60d99fdfcd33eacfdc14f9adb75b631f0 Mon Sep 17 00:00:00 2001 From: Gustavo Lopes Date: Mon, 17 Feb 2025 11:24:33 +0000 Subject: [PATCH 01/18] WAF: block responses --- cmake/deps/nginx-module.cmake | 2 + src/datadog_conf.h | 8 + src/datadog_context.cpp | 19 +- src/ngx_http_datadog_module.cpp | 22 + src/security/blocking.cpp | 47 +- src/security/buffer_pool.h | 61 ++ src/security/context.cpp | 888 +++++++++++++++++-- src/security/context.h | 53 +- src/security/library.cpp | 13 + src/security/library.h | 2 + test/cases/case.py | 31 +- test/cases/orchestration.py | 33 +- test/cases/sec_blocking/cert/example.com.crt | 10 + test/cases/sec_blocking/cert/example.com.key | 8 + test/cases/sec_blocking/conf/http.conf | 12 +- test/cases/sec_blocking/conf/waf.json | 51 ++ test/cases/sec_blocking/test_sec_blocking.py | 58 +- test/services/client/Dockerfile | 13 +- test/services/client/install-tools.sh | 2 +- 19 files changed, 1204 insertions(+), 129 deletions(-) create mode 100644 src/security/buffer_pool.h create mode 100644 test/cases/sec_blocking/cert/example.com.crt create mode 100644 test/cases/sec_blocking/cert/example.com.key diff --git a/cmake/deps/nginx-module.cmake b/cmake/deps/nginx-module.cmake index f2374d67..8cd3b214 100644 --- a/cmake/deps/nginx-module.cmake +++ b/cmake/deps/nginx-module.cmake @@ -66,6 +66,8 @@ target_include_directories(nginx_module ${nginx_SOURCE_DIR}/src/os/unix ${nginx_SOURCE_DIR}/objs ${nginx_SOURCE_DIR}/src/core + ${nginx_SOURCE_DIR}/src/stream + ${nginx_SOURCE_DIR}/src/http/v2 ${nginx_SOURCE_DIR}/src/event/modules) # vim: set et sw=2 ts=2: diff --git a/src/datadog_conf.h b/src/datadog_conf.h index b8c472a4..b8b090bd 100644 --- a/src/datadog_conf.h +++ b/src/datadog_conf.h @@ -138,6 +138,8 @@ struct datadog_main_conf_t { // DD_APPSEC_OBFUSCATION_PARAMETER_VALUE_REGEXP ngx_str_t appsec_obfuscation_value_regex = ngx_null_string; + std::size_t appsec_max_saved_output_data{NGX_CONF_UNSET_SIZE}; + // TODO: missing settings and their 
functionality // DD_TRACE_CLIENT_IP_RESOLVER_ENABLED (whether to collect headers and run the // client ip resolution. Also requires AppSec to be enabled or @@ -225,7 +227,13 @@ struct datadog_loc_conf_t { allow_sampling_delegation_in_subrequests_directive; #ifdef WITH_WAF + // the thread pool used to run the WAF on ngx_thread_pool_t *waf_pool{nullptr}; + + // whether temp file buffers received by the module output filter should be + // split into memory files. This can work around bugs in downstream filters + // than can't handle these buffers. Defaults to false. + ngx_flag_t waf_output_transform_temp{NGX_CONF_UNSET}; #endif #ifdef WITH_RUM diff --git a/src/datadog_context.cpp b/src/datadog_context.cpp index 35029b8f..cf58d5bd 100644 --- a/src/datadog_context.cpp +++ b/src/datadog_context.cpp @@ -21,7 +21,8 @@ DatadogContext::DatadogContext(ngx_http_request_t *request, ngx_http_core_loc_conf_t *core_loc_conf, datadog_loc_conf_t *loc_conf) #ifdef WITH_WAF - : sec_ctx_{security::Context::maybe_create()} + : sec_ctx_{security::Context::maybe_create( + *loc_conf, security::Library::max_saved_output_data())} #endif { if (loc_conf->enable_tracing) { @@ -82,9 +83,12 @@ ngx_int_t DatadogContext::on_header_filter(ngx_http_request_t *request) { return ngx_http_next_header_filter(request); } +#if defined(WITH_RUM) || defined(WITH_WAF) + RequestTracing *trace{}; +#endif #ifdef WITH_RUM if (loc_conf->rum_enable) { - auto *trace = find_trace(request); + trace = find_trace(request); if (trace != nullptr) { auto rum_span = trace->active_span().create_child(); rum_span.set_name("rum_sdk_injection.on_header"); @@ -97,6 +101,17 @@ ngx_int_t DatadogContext::on_header_filter(ngx_http_request_t *request) { rum_ctx_.on_header_filter(request, loc_conf, ngx_http_next_header_filter); } } +#elif WITH_WAF + if (sec_ctx_) { + trace = find_trace(request); + } +#endif + +#ifdef WITH_WAF + if (sec_ctx_ && trace) { + dd::Span &span = trace->active_span(); + return sec_ctx_->header_filter(*request, 
span); + } #endif return ngx_http_next_header_filter(request); diff --git a/src/ngx_http_datadog_module.cpp b/src/ngx_http_datadog_module.cpp index 3c9281ee..136f2aa8 100644 --- a/src/ngx_http_datadog_module.cpp +++ b/src/ngx_http_datadog_module.cpp @@ -375,6 +375,24 @@ static ngx_command_t datadog_commands[] = { offsetof(datadog_main_conf_t, appsec_obfuscation_value_regex), nullptr, }, + + { + ngx_string("datadog_appsec_max_saved_output_data"), + NGX_HTTP_MAIN_CONF|NGX_CONF_TAKE1, + ngx_conf_set_size_slot, + NGX_HTTP_MAIN_CONF_OFFSET, + offsetof(datadog_main_conf_t, appsec_obfuscation_value_regex), + nullptr, + }, + + { + ngx_string("datadog_appsec_output_transform_temp"), + NGX_HTTP_MAIN_CONF|NGX_HTTP_SRV_CONF|NGX_HTTP_LOC_CONF|NGX_CONF_TAKE1, + ngx_conf_set_flag_slot, + NGX_HTTP_LOC_CONF_OFFSET, + offsetof(datadog_loc_conf_t, waf_output_transform_temp), + nullptr, + }, #endif DATADOG_RUM_DIRECTIVES ngx_null_command @@ -849,6 +867,10 @@ static char *merge_datadog_loc_conf(ngx_conf_t *cf, void *parent, if (conf->waf_pool == nullptr) { conf->waf_pool = prev->waf_pool; } + + if (conf->waf_output_transform_temp == NGX_CONF_UNSET) { + conf->waf_output_transform_temp = prev->waf_output_transform_temp; + } #endif #ifdef WITH_RUM diff --git a/src/security/blocking.cpp b/src/security/blocking.cpp index 69e14bdf..2625c4aa 100644 --- a/src/security/blocking.cpp +++ b/src/security/blocking.cpp @@ -8,6 +8,7 @@ #include "util.h" extern "C" { +#include #include } @@ -314,9 +315,20 @@ void BlockingService::block(BlockSpecification spec, ngx_http_request_t &req) { req.header_only = 1; } - ngx_http_discard_request_body(&req); - - // TODO: clear all current headers? 
+ if (ngx_http_discard_request_body(&req) != NGX_OK) { + req.keepalive = 0; + } + ngx_http_clean_header(&req); + + // ngx_http_filter_finalize_request neutralizes other filters with: + // void *ctx = req.ctx[ngx_http_datadog_module.ctx_index]; + // ngx_memzero(req.ctx, sizeof(void *) * ngx_http_max_module); + // req.ctx[ngx_http_datadog_module.ctx_index] = ctx; + // req.filter_finalize = 1; + // This would prevent our blocking response from being changed by filters. + // However, at least the chunked body filter segfaults if its context is 0'ed. + // filter_finalize = 1 has its own problems, causing the connection->error to + // be set to 1. req.headers_out.status = resp.status; req.headers_out.content_type = BlockResponse::content_type_header(resp.ct); @@ -325,14 +337,18 @@ void BlockingService::block(BlockSpecification spec, ngx_http_request_t &req) { if (!resp.location.empty()) { push_header(req, "Location"sv, resp.location); } - if (templ) { - req.headers_out.content_length_n = static_cast(templ->len); - } else { - req.headers_out.content_length_n = 0; - } - - // TODO: bypass header filters? - auto res = ngx_http_send_header(&req); + // Filters may change the response, invalidating this length + // if (templ) { + // req.headers_out.content_length_n = static_cast(templ->len); + // } else { + // req.headers_out.content_length_n = 0; + // } + req.keepalive = 0; + req.lingering_close = 0; + + ngx_int_t res = ngx_http_send_header(&req); + ngx_log_debug1(NGX_LOG_DEBUG, req.connection->log, 0, + "Status %d returned by ngx_http_send_header", res); if (res == NGX_ERROR || res > NGX_OK || req.header_only) { ngx_http_finalize_request(&req, res); return; @@ -352,9 +368,16 @@ void BlockingService::block(BlockSpecification spec, ngx_http_request_t &req) { ngx_chain_t out{}; out.buf = b; - // TODO: bypass and call ngx_http_write_filter? 
ngx_http_output_filter(&req, &out); + + auto count = req.count; ngx_http_finalize_request(&req, NGX_DONE); + + // o/wise http/3 doesn't close the connection (via ngx_http_writer > + // ngx_http_finalize_request) + if (count - 1 > 0) { + ngx_post_event(req.connection->write, &ngx_posted_events); + } } BlockingService::BlockingService( diff --git a/src/security/buffer_pool.h b/src/security/buffer_pool.h new file mode 100644 index 00000000..27f9e3ab --- /dev/null +++ b/src/security/buffer_pool.h @@ -0,0 +1,61 @@ +#pragma once + +extern "C" { +#include +} +#include +#include +#include + +template +class BufferPool { + static inline auto tag = reinterpret_cast(Tag); + + public: + void update_chains(ngx_pool_t &pool, ngx_chain_t *out) noexcept { + ngx_chain_t *out_copy = out; + ngx_chain_update_chains(&pool, &free_, &busy_, &out_copy, tag); + } + + std::optional get_buffer(ngx_pool_t &pool) noexcept { + if (free_) { + ngx_chain_t *res = free_; + free_ = res->next; + res->next = nullptr; + + res->buf->recycled = 1; + res->buf->pos = res->buf->start; + res->buf->last = res->buf->start; + res->buf->flush = 0; + res->buf->sync = 0; + res->buf->last_buf = 0; + res->buf->last_in_chain = 0; + return {res}; + } + if (allocated_ >= NBuffers) { + return std::nullopt; + } + + ngx_buf_t *buf = ngx_create_temp_buf(&pool, BufferSize); + if (!buf) { + return std::nullopt; + } + auto chain = ngx_alloc_chain_link(&pool); + if (!chain) { + ngx_free_chain(&pool, chain); + return std::nullopt; + } + buf->tag = tag; + chain->buf = buf; + chain->next = nullptr; + allocated_++; + return {chain}; + } + + ngx_chain_t *busy() const { return busy_; } + + private: + ngx_chain_t *free_{}; + ngx_chain_t *busy_{}; + std::size_t allocated_{}; +}; diff --git a/src/security/context.cpp b/src/security/context.cpp index 7408d2f0..ab1be9ea 100644 --- a/src/security/context.cpp +++ b/src/security/context.cpp @@ -1,10 +1,6 @@ #include "context.h" -#include -#include -#include -#include - +#include #include 
#include #include @@ -25,9 +21,18 @@ #include "util.h" extern "C" { +#include +#include +#include +#include +#include #include #include +#include +#include +#include #include +#include #include #include } @@ -45,6 +50,43 @@ namespace { namespace dnsec = datadog::nginx::security; +std::size_t chain_length(ngx_chain_t const *ch) { + std::size_t len = 0; + for (ngx_chain_t const *cl = ch; cl; cl = cl->next) { + len++; + } + return len; +} +std::size_t chain_size(ngx_chain_t const *ch) { + std::size_t size = 0; + for (ngx_chain_t const *cl = ch; cl; cl = cl->next) { + size += ngx_buf_size(cl->buf); + } + return size; +} +std::size_t has_special(ngx_chain_t const *ch) { + for (ngx_chain_t const *cl = ch; cl; cl = cl->next) { + return ngx_buf_special(ch->buf); + } + return false; +} +std::size_t has_last(ngx_chain_t const *ch) { + for (ngx_chain_t const *cl = ch; cl; cl = cl->next) { + if (ch->buf->last) { + return true; + } + } + return false; +} +bool has_file(ngx_chain_t const *ch) { + for (ngx_chain_t const *cl = ch; cl; cl = cl->next) { + if (!ngx_buf_in_memory(cl->buf) && cl->buf->in_file) { + return true; + } + } + return false; +} + class JsonWriter : public rapidjson::Writer { using rapidjson::Writer::Writer; @@ -173,12 +215,21 @@ Context::Context(std::shared_ptr handle) stage_->store(stage::START, std::memory_order_relaxed); } -std::unique_ptr Context::maybe_create() { +std::unique_ptr Context::maybe_create( + datadog_loc_conf_t &loc_conf, + std::optional max_saved_output_data) { std::shared_ptr handle = Library::get_handle(); if (!handle) { return {}; } - return std::unique_ptr{new Context{std::move(handle)}}; + auto res = std::unique_ptr{new Context{std::move(handle)}}; + if (max_saved_output_data) { + res->max_saved_output_data_ = *max_saved_output_data; + } + if (loc_conf.waf_output_transform_temp != NGX_CONF_UNSET) { + res->output_transform_temp_ = loc_conf.waf_output_transform_temp; + } + return res; } template @@ -212,7 +263,7 @@ class PolTaskCtx { } 
bool submit(ngx_thread_pool_t *pool) noexcept { - replace_handlers(); + as_self().replace_handlers(); req_.main->count++; @@ -221,9 +272,9 @@ class PolTaskCtx { "failed to post task %p", &get_task()); req_.main->count--; - restore_handlers(); + as_self().restore_handlers(); - static_cast(this)->~Self(); + as_self().~Self(); return false; } @@ -255,7 +306,7 @@ class PolTaskCtx { try { ngx_log_debug(NGX_LOG_DEBUG_HTTP, req_log(), 0, "before task %p main", this); - block_spec_ = static_cast(this)->do_handle(*tp_log); + block_spec_ = as_self().do_handle(*tp_log); ngx_log_debug(NGX_LOG_DEBUG_HTTP, req_log(), 0, "after task %p main", this); ran_on_thread_.store(true, std::memory_order_release); @@ -269,7 +320,7 @@ class PolTaskCtx { } // define in subclasses - // std::optional do_handle(ngx_log_t &log) {} + std::optional do_handle(ngx_log_t &log) = delete; // runs on the main thread static void completion_handler(ngx_event_t *evt) noexcept { @@ -278,7 +329,7 @@ class PolTaskCtx { } void completion_handler_impl() noexcept { - restore_handlers(); + as_self().restore_handlers(); auto count = req_.main->count; ngx_log_debug(NGX_LOG_DEBUG_HTTP, req_log(), 0, @@ -295,7 +346,7 @@ class PolTaskCtx { req_.main->count--; ngx_log_debug1(NGX_LOG_DEBUG_HTTP, req_log(), 0, "calling complete on task %p", this); - static_cast(this)->complete(); + as_self().complete(); } } else { ngx_log_debug1( @@ -306,11 +357,11 @@ class PolTaskCtx { ngx_http_finalize_request(&req_, NGX_DONE); } - static_cast(this)->~Self(); + as_self().~Self(); } // define in subclasses - // void complete() noexcept {} + void complete() noexcept = delete; void replace_handlers() noexcept { req_.read_event_handler = ngx_http_block_reading; @@ -322,18 +373,20 @@ class PolTaskCtx { req_.read_event_handler = prev_read_evt_handler_; } else { ngx_log_error(NGX_LOG_ERR, req_log(), 0, - "unexpected read event handler %p", + "unexpected read event handler %p; not restoring", req_.read_event_handler); } if 
(req_.write_event_handler == PolTaskCtx::empty_write_handler) { req_.write_event_handler = prev_write_evt_handler_; } else { ngx_log_error(NGX_LOG_ERR, req_log(), 0, - "unexpected write event handler %p", + "unexpected write event handler %p; not restoring", req_.write_event_handler); } } + Self &as_self() { return *static_cast(this); } + static void empty_write_handler(ngx_http_request_t *req) { ngx_log_debug0(NGX_LOG_DEBUG_HTTP, req->connection->log, 0, "task wait empty handler"); @@ -649,6 +702,14 @@ std::optional Context::run_waf_start( return block_spec; } +ngx_int_t Context::header_filter(ngx_http_request_t &request, + dd::Span &span) noexcept { + return catch_exceptions( + "header_filter"sv, request, + [&]() { return Context::do_header_filter(request, span); }, + static_cast(NGX_ERROR)); +} + ngx_int_t Context::request_body_filter(ngx_http_request_t &request, ngx_chain_t *chain, dd::Span &span) noexcept { @@ -715,20 +776,77 @@ class PolFinalWafCtx : public PolTaskCtx { } void complete() noexcept { + bool const ran = ran_on_thread_.load(std::memory_order_acquire); + ngx_log_debug2(NGX_LOG_DEBUG_HTTP, req_.connection->log, 0, + "completion handler of waf req final task (ran: %s, " + "blocked: %s): start", + ran ? "true" : "false", block_spec_ ? 
"true" : "false"); + + req_.header_sent = 0; + + if (ran && block_spec_) { + span_.set_tag("appsec.blocked"sv, "true"sv); + ctx_.waf_final_done(req_, true); + auto *service = BlockingService::get_instance(); + assert(service != nullptr); + try { + // service->block calls finalize_request NGX_DONE + req_.main->count++; + ngx_log_debug(NGX_LOG_DEBUG_HTTP, req_.connection->log, 0, + "incremented request refcount to %d before sending " + "blocking response", + req_.main->count); + service->block(*block_spec_, req_); + } catch (const std::exception &e) { + ngx_log_error(NGX_LOG_ERR, req_.connection->log, 0, + "failed to block request: %s", e.what()); + ngx_http_finalize_request(&req_, NGX_DONE); + } + } else { + ctx_.waf_final_done(req_, false); + ngx_log_debug(NGX_LOG_DEBUG_HTTP, req_.connection->log, 0, + "not blocking after final waf run; triggering write event " + "on connection"); + ngx_post_event(req_.connection->write, &ngx_posted_events); + } + ngx_log_debug0(NGX_LOG_DEBUG_HTTP, req_.connection->log, 0, - "completion of final waf task"); + "completion handler of waf req final task: finish"); + } + + ngx_event_handler_pt orig_conn_write_handler_{}; + void replace_handlers() noexcept { + auto &handler = req_.connection->write->handler; + orig_conn_write_handler_ = handler; + + handler = ngx_http_empty_handler; + } + + void restore_handlers() noexcept { + req_.connection->write->handler = orig_conn_write_handler_; } friend PolTaskCtx; + + public: + bool submit(ngx_thread_pool_t *pool) noexcept { + bool submitted = + static_cast *>(this)->submit(pool); + if (submitted) { + req_.header_sent = 1; // skip/alert when attempting to sent headers + } + return submitted; + } }; ngx_int_t Context::do_request_body_filter(ngx_http_request_t &request, ngx_chain_t *in, dd::Span &span) { auto st = stage_->load(std::memory_order_acquire); - ngx_log_debug3( - NGX_LOG_DEBUG_HTTP, request.connection->log, 0, - "request body filter %s in chain. Current data: %lu, Stage: %d", - in ? 
"with" : "without", filter_ctx_.out_total, st); + ngx_log_debug4(NGX_LOG_DEBUG_HTTP, request.connection->log, 0, + "waf request body filter %s in chain. accumulated=%uz, " + "copied=%uz, Stage: %d", + in ? "with" : "without", filter_ctx_.out_total, + filter_ctx_.copied_total, st); if (st == stage::AFTER_BEGIN_WAF) { ngx_log_debug1(NGX_LOG_DEBUG_HTTP, request.connection->log, 0, @@ -760,7 +878,7 @@ ngx_int_t Context::do_request_body_filter(ngx_http_request_t &request, // we're guaranteed to be called again synchronously, so we shouldn't // call the WAF at this point - if (buffer_chain(*request.pool, in, true) != NGX_OK) { + if (buffer_chain(filter_ctx_, *request.pool, in, true) != NGX_OK) { return NGX_HTTP_INTERNAL_SERVER_ERROR; } @@ -777,7 +895,7 @@ ngx_int_t Context::do_request_body_filter(ngx_http_request_t &request, bool run_waf = is_last || new_size >= kMaxFilterData; if (run_waf) { // do not consume the buffer so that this filter is not called again - if (buffer_chain(*request.pool, in, false) != NGX_OK) { + if (buffer_chain(filter_ctx_, *request.pool, in, false) != NGX_OK) { return NGX_HTTP_INTERNAL_SERVER_ERROR; } @@ -811,7 +929,7 @@ ngx_int_t Context::do_request_body_filter(ngx_http_request_t &request, goto pass_downstream; } } else { // !run_waf; we need more data - if (buffer_chain(*request.pool, in, true) != NGX_OK) { + if (buffer_chain(filter_ctx_, *request.pool, in, true) != NGX_OK) { return NGX_HTTP_INTERNAL_SERVER_ERROR; } } @@ -821,7 +939,7 @@ ngx_int_t Context::do_request_body_filter(ngx_http_request_t &request, ngx_log_debug1(NGX_LOG_DEBUG_HTTP, request.connection->log, 0, "first filter call after WAF ended, req refcount=%d", request.main->count); - if (buffer_chain(*request.pool, in, false) != NGX_OK) { + if (buffer_chain(filter_ctx_, *request.pool, in, false) != NGX_OK) { return NGX_HTTP_INTERNAL_SERVER_ERROR; } @@ -831,14 +949,6 @@ ngx_int_t Context::do_request_body_filter(ngx_http_request_t &request, filter_ctx_.clear(*request.pool); - for 
(auto *cl = filter_ctx_.out; cl;) { - auto *ln = cl; - cl = cl->next; - ngx_free_chain(request.pool, ln); - } - filter_ctx_.out = nullptr; - filter_ctx_.out_last = &filter_ctx_.out; - return rc; } @@ -852,7 +962,7 @@ ngx_int_t Context::do_request_body_filter(ngx_http_request_t &request, // if we are called, it's a bit troubling, because the write that happens // in the buffered chain in the next statement is not synchronized with // the read that happens in the WAF thread. - if (buffer_chain(*request.pool, in, false) != NGX_OK) { + if (buffer_chain(filter_ctx_, *request.pool, in, false) != NGX_OK) { return NGX_HTTP_INTERNAL_SERVER_ERROR; } } @@ -870,8 +980,32 @@ ngx_int_t Context::do_request_body_filter(ngx_http_request_t &request, return ngx_http_next_request_body_filter(&request, nullptr); } -ngx_int_t Context::buffer_chain(ngx_pool_t &pool, ngx_chain_t *in, - bool consume) { +ngx_int_t Context::buffer_chain(FilterCtx &filter_ctx, ngx_pool_t &pool, + ngx_chain_t const *in, bool consume) noexcept { + ngx_log_debug( + NGX_LOG_DEBUG_HTTP, pool.log, 0, + "buffer_chain: in=%p, chain_len=%uz, chain_size=%uz, consume=%d", in, + chain_length(in), chain_size(in), consume); + if (pool.log->log_level >= NGX_LOG_DEBUG) { + for (auto *in_ch = in; in_ch; in_ch = in_ch->next) { + const auto &buf = *in_ch->buf; + ngx_log_error( + NGX_LOG_DEBUG, pool.log, 0, + "buffer_chain link: " + "t:%d m: %d mmap: %d, r:%d f:%d fl:%d lb=%d s:%d %p %p-%p %p %O-%O", + buf.temporary, buf.memory, buf.mmap, buf.recycled, buf.in_file, + buf.flush, buf.last_buf, buf.sync, buf.start, buf.pos, buf.last, + buf.file, buf.file_pos, buf.file_last); + } + } + + if (in && filter_ctx.found_last) { + ngx_log_error( + NGX_LOG_NOTICE, pool.log, 0, + "given buffer after having already received one with ->last_buf"); + return NGX_ERROR; + } + for (auto *in_ch = in; in_ch; in_ch = in_ch->next) { ngx_chain_t *new_ch = ngx_alloc_chain_link(&pool); // uninitialized if (!new_ch) { @@ -879,53 +1013,310 @@ ngx_int_t 
Context::buffer_chain(ngx_pool_t &pool, ngx_chain_t *in, } auto *buf = in_ch->buf; - auto size = buf->last - buf->pos; - if (consume || buf->recycled) { // copy the buffer and consume the original - ngx_buf_t *new_buf = ngx_create_temp_buf(&pool, size); - if (!new_buf) { - return NGX_ERROR; + size_t size; + if (consume) { // copy the buffer and consume the original + ngx_buf_t *new_buf; + if (!buf->in_file) { + size = buf->last - buf->pos; + + if (size > 0) { + new_buf = ngx_create_temp_buf(&pool, size); + if (!new_buf) { + return NGX_ERROR; + } + new_buf->last = ngx_copy(new_buf->pos, buf->pos, size); + buf->pos = buf->last; // consume + filter_ctx.copied_total += size; + } else { + // special buffer + if (!ngx_buf_special(buf)) { + ngx_log_error( + NGX_LOG_NOTICE, pool.log, 0, + "unexpected empty non-special buffer in buffer_chain"); + } + new_buf = static_cast(ngx_calloc_buf(&pool)); + if (!new_buf) { + return NGX_ERROR; + } + new_buf->flush = buf->flush; + new_buf->sync = buf->sync; + } + } else { + // file buffers (or mixed memory/file buffers) + new_buf = static_cast(ngx_calloc_buf(&pool)); + if (!new_buf) { + return NGX_ERROR; + } + new_buf->in_file = 1; + new_buf->file = buf->file; + new_buf->file_pos = buf->file_pos; + new_buf->file_last = buf->file_last; + + buf->file_pos = buf->file_last; // consume + + size = new_buf->file_last - new_buf->file_pos; } - new_buf->tag = &ngx_http_datadog_module; - - new_buf->last = ngx_copy(new_buf->pos, buf->pos, size); - buf->pos = buf->last; new_buf->last_buf = buf->last_buf; - new_buf->tag = buf->tag; - + new_buf->tag = reinterpret_cast(kBufferTag); new_ch->buf = new_buf; - } else { + } else { // do not consume + size = ngx_buf_size(buf); new_ch->buf = buf; } new_ch->next = nullptr; - filter_ctx_.out_total += size; + filter_ctx.out_total += size; if (buf->last_buf) { - filter_ctx_.found_last = true; + filter_ctx.found_last = true; } - *filter_ctx_.out_last = new_ch; - filter_ctx_.out_last = &new_ch->next; + 
*filter_ctx.out_latest = new_ch; + filter_ctx.out_latest = &new_ch->next; } return NGX_OK; } +ngx_int_t Context::buffer_header_output(ngx_pool_t &pool, + ngx_chain_t *chain) noexcept { + ngx_log_debug(NGX_LOG_DEBUG_HTTP, pool.log, 0, + "buffer_header_output: saved_(len,size)=(%uz,%uz), " + "in_(len,size)=(%uz,%uz)", + chain_length(header_filter_ctx_.out), + chain_size(header_filter_ctx_.out), chain_length(chain), + chain_size(chain)); + + ngx_int_t res = buffer_chain(header_filter_ctx_, pool, chain, true); + + ngx_log_debug(NGX_LOG_DEBUG_HTTP, pool.log, 0, + "buffer_header_output: buffer_chain output was %d, " + "saved_(len,size)=(%uz,%uz)", + res, chain_length(header_filter_ctx_.out), + chain_size(header_filter_ctx_.out)); + + return res; +} + +ngx_int_t Context::send_buffered_header(ngx_http_request_t &request) noexcept { + ngx_log_debug(NGX_LOG_DEBUG_HTTP, request.connection->log, 0, + "send_buffered_header: buffered_(len,size)=(%uz,%uz)", + chain_length(header_filter_ctx_.out), + chain_size(header_filter_ctx_.out)); + + if (!request.stream) { + ngx_int_t rc = ngx_http_write_filter(&request, header_filter_ctx_.out); + header_filter_ctx_.clear(*request.pool); + return rc; + } + + // http/2 + ngx_connection_t &c = *request.stream->connection->connection; + ngx_chain_t *rem_chain = c.send_chain(&c, header_filter_ctx_.out, 0); + if (rem_chain == NGX_CHAIN_ERROR) { + ngx_log_error(NGX_LOG_NOTICE, c.log, 0, + "send_buffered_header: send_chain failed"); + return NGX_ERROR; + } + + ngx_log_debug(NGX_LOG_DEBUG_HTTP, c.log, 0, + "send_buffered_header: remaining chain_(len,size)=(%uz,%uz)", + chain_length(rem_chain), chain_size(rem_chain)); + for (ngx_chain_t *cl = header_filter_ctx_.out; cl && cl != rem_chain;) { + ngx_chain_t *ln = cl; + cl = cl->next; + ngx_free_chain(c.pool, ln); + } + header_filter_ctx_.replace_out(rem_chain); + if (rem_chain == nullptr) { + return NGX_OK; + } + return NGX_AGAIN; +} + void Context::FilterCtx::clear(ngx_pool_t &pool) noexcept { for 
(ngx_chain_t *cl = out; cl;) { ngx_chain_t *ln = cl; cl = cl->next; ngx_free_chain(&pool, ln); } + out = nullptr; - out_last = &out; + out_latest = &out; + out_total = 0; + copied_total = 0; + // found_last retained } -ngx_int_t Context::do_output_body_filter(ngx_http_request_t &request, - ngx_chain_t *chain, dd::Span &span) { +void Context::FilterCtx::replace_out(ngx_chain_t *new_out) noexcept { + out = new_out; + copied_total = 0; + out_total = 0; + ngx_chain_t **lastp = &out; + for (; *lastp; lastp = &(*lastp)->next) { + out_total += ngx_buf_size((*lastp)->buf); + } + out_latest = lastp; +} + +namespace { +class Http1TemporarySendChain { + public: + static Http1TemporarySendChain instance; + void activate(Context &ctx, ngx_http_request_t &request) noexcept { + current_ctx_ = &ctx; + prev_send_chain_ = request.connection->send_chain; + request.connection->send_chain = send_chain_save; + } + + void deactivate(ngx_http_request_t &request) noexcept { + auto *ctx = instance.current_ctx_; + assert(ctx != nullptr); + + if (request.out) { + // there is uncommitted data in the output chain; send_chain() was not + // called; ngx_http_write_filter() chose not to do it + ngx_log_debug(NGX_LOG_DEBUG_HTTP, request.connection->log, 0, + "temporary send_chain: uncommitted data in output chain " + "(%uz bytes); adding it to the header output buffer", + chain_size(request.out)); + ctx->buffer_header_output(*request.pool, request.out); + for (auto *cl = request.out; cl;) { + auto *next = cl->next; + ngx_free_chain(request.pool, cl); + cl = next; + } + request.out = nullptr; + request.connection->buffered &= ~NGX_HTTP_WRITE_BUFFERED; + } + + current_ctx_ = nullptr; + request.connection->send_chain = prev_send_chain_; + } + + private: + static ngx_chain_t *send_chain_save(ngx_connection_t *c, ngx_chain_t *in, + off_t limit) { + (void)limit; + + ngx_http_request_t *req = static_cast(c->data); + assert(req != nullptr); + + auto *ctx = instance.current_ctx_; + assert(ctx != nullptr); + 
if (ctx->buffer_header_output(*req->pool, in) != NGX_OK) { + return NGX_CHAIN_ERROR; + } + + return nullptr; + } + + Context *current_ctx_; + ngx_send_chain_pt prev_send_chain_; +}; +Http1TemporarySendChain Http1TemporarySendChain::instance; + +class Http2TemporarySendChain { + public: + static Http2TemporarySendChain instance; + void activate(Context &ctx, ngx_http_request_t &request) noexcept { + current_ctx_ = &ctx; + pool_ = request.pool; + + ngx_http_v2_connection_t &h2c = *request.stream->connection; + // test forceful NGX_AGAIN: h2c.connection->write->ready = 0; + ngx_send_chain_pt &stream_sc = h2c.connection->send_chain; + prev_send_chain_ = stream_sc; + + // ngx_http_v2_header_filter calls ngx_http_v2_queue_blocked_frame + // This puts the header frame either at the end of the chain (the first + // frame to go on the wire) or just before the first blocked frame (in + // wire order, just after the last blocked frame). + ngx_http_v2_out_frame_t **frame_ip; // insertion point + for (frame_ip = &h2c.last_out; *frame_ip; frame_ip = &(*frame_ip)->next) { + if ((*frame_ip)->blocked || (*frame_ip)->stream == nullptr) { + break; + } + } + frame_ip_ = frame_ip; + frame_ip_value_ = *frame_ip; + stream_sc = stream_send_chain_save; + } + + void deactivate(ngx_http_request_t &request) noexcept { + auto *ctx = instance.current_ctx_; + assert(ctx != nullptr); + + // we either flushed all the frames (because our replacement send_chain + // consumes all the buffers), or we flushed nothing + // (ngx_http_v2_send_output_queue returns NGX_AGAIN or error before + // calling send_chain). 
Consequently, if last_out is set, then frame_ip_ + // points to a valid frame (i.e., we're in the case where nothing was + // flushed) + if (*frame_ip_ != frame_ip_value_) { + // in this case, *frame_ip has to point to the header frame + assert(*frame_ip_ != nullptr); + ngx_http_v2_out_frame_t &header_frame = **frame_ip_; + assert(header_frame.first->buf->pos[3] == NGX_HTTP_V2_HEADERS_FRAME); + ngx_log_debug(NGX_LOG_DEBUG_HTTP, request.connection->log, 0, + "temporary send_chain: found header frame in uncommitted " + "h2c connection data frame_(len,size)=(%uz,%uz bytes); " + "adding it to the header output buffer and unqueuing it", + chain_length(header_frame.first), + chain_size(header_frame.first)); + ctx->buffer_header_output(*request.pool, header_frame.first); + ngx_http_v2_connection_t *h2c = request.stream->connection; + // calls ngx_http_v2_data_frame_handler: + header_frame.handler(h2c, &header_frame); + *frame_ip_ = header_frame.next; + // test forceful NGX_AGAIN: h2c.connection->write->ready = 1; + } + + current_ctx_ = nullptr; + pool_ = nullptr; + ngx_http_v2_connection_t &h2c = *request.stream->connection; + h2c.connection->send_chain = prev_send_chain_; + frame_ip_ = nullptr; + frame_ip_value_ = nullptr; + } + + private: + static ngx_chain_t *stream_send_chain_save(ngx_connection_t *c, + ngx_chain_t *in, off_t limit) { + (void)limit; + + ngx_http_request_t *req = static_cast(c->data); + assert(req != nullptr); + + Context *ctx = instance.current_ctx_; + assert(ctx != nullptr); + if (ctx->buffer_header_output(*instance.pool_, in) != NGX_OK) { + return NGX_CHAIN_ERROR; + } + + return nullptr; + } + + Context *current_ctx_; + ngx_pool_t *pool_; + ngx_send_chain_pt prev_send_chain_; + ngx_http_v2_out_frame_t **frame_ip_; + ngx_http_v2_out_frame_t *frame_ip_value_; +}; +Http2TemporarySendChain Http2TemporarySendChain::instance; +} // namespace + +ngx_int_t Context::do_header_filter(ngx_http_request_t &request, + dd::Span &span) { auto st = 
stage_->load(std::memory_order_acquire); + ngx_log_debug4(NGX_LOG_DEBUG_HTTP, request.connection->log, 0, + "waf header filter in stage %d, header_sent=%d, " + "buf_header_data_(len,size)=(%uz,%uz)", + st, request.header_sent, chain_length(header_filter_ctx_.out), + chain_size(header_filter_ctx_.out)); + if (st != stage::AFTER_BEGIN_WAF && st != stage::AFTER_ON_REQ_WAF) { - return ngx_http_next_output_body_filter(&request, chain); + return ngx_http_next_header_filter(&request); } PolFinalWafCtx &task_ctx = PolFinalWafCtx::create(request, *this, span); @@ -933,31 +1324,320 @@ ngx_int_t Context::do_output_body_filter(ngx_http_request_t &request, auto *conf = static_cast( ngx_http_get_module_loc_conf(&request, ngx_http_datadog_module)); - transition_to_stage(stage::BEFORE_RUN_WAF_END); + transition_to_stage(stage::PENDING_WAF_END); + + ngx_log_debug(NGX_LOG_DEBUG_HTTP, request.connection->log, 0, + "waf header filter: waf end task; replacing send_chain handler " + "and invoking the next header filter"); + + ngx_int_t rc; + if (request.stream) { + // http/2 + Http2TemporarySendChain::instance.activate(*this, request); + rc = ngx_http_next_header_filter(&request); + Http2TemporarySendChain::instance.deactivate(request); + } else { + // http/1.x or http/3 + Http1TemporarySendChain::instance.activate(*this, request); + rc = ngx_http_next_header_filter(&request); + Http1TemporarySendChain::instance.deactivate(request); + } + if (rc != NGX_OK) { + ngx_log_error(NGX_LOG_ERR, request.connection->log, 0, + "waf header filter: downstream filters returned %d", rc); + transition_to_stage(stage::AFTER_RUN_WAF_END); + return rc; + } else { + ngx_log_debug(NGX_LOG_DEBUG_HTTP, request.connection->log, 0, + "waf header filter: downstream filters returned NGX_OK; " + "attempting to submit WAF task"); + } if (task_ctx.submit(conf->waf_pool)) { + return NGX_OK; + } else { + ngx_log_error(NGX_LOG_NOTICE, request.connection->log, 0, + "failed to post waf end task; sending down the header 
data " + "immediately"); + transition_to_stage(stage::AFTER_RUN_WAF_END); + ngx_int_t rc = ngx_http_write_filter(&request, header_filter_ctx_.out); + header_filter_ctx_.clear(*request.pool); + ngx_log_debug(NGX_LOG_DEBUG_HTTP, request.connection->log, 0, + "waf header filter: ngx_http_write_filter returned %d", rc); + return rc; + } +} + +ngx_int_t Context::do_output_body_filter(ngx_http_request_t &request, + ngx_chain_t *const in, + dd::Span &span) { + auto st = stage_->load(std::memory_order_acquire); + ngx_log_debug( + NGX_LOG_DEBUG_HTTP, request.connection->log, 0, + "waf output body filter: in stage %d, header_sent=%d, " + "header_(len_sized)=(%uz,%uz), out_(len,size,copied)=(%uz,%uz,%uz), " + "in_chain_(len,size)=(%uz,%uz), l:%d, s:%d", + st, request.header_sent, chain_length(header_filter_ctx_.out), + chain_size(header_filter_ctx_.out), chain_length(out_filter_ctx_.out), + chain_size(out_filter_ctx_.out), out_filter_ctx_.copied_total, + chain_length(in), chain_size(in), has_last(in), has_special(in)); + + const bool buffering = st == stage::PENDING_WAF_END; + if (buffering) { + request.buffered |= 0x08; + + bool consume; + if (out_filter_ctx_.copied_total >= max_saved_output_data_) { + ngx_log_debug(NGX_LOG_DEBUG_HTTP, request.connection->log, 0, + "waf output body filter: too much copied data (%uz bytes " + ">= %uz), avoiding consuming more", + out_filter_ctx_.out_total, max_saved_output_data_); + consume = false; + } else { + consume = true; + } + + if (buffer_chain(out_filter_ctx_, *request.pool, in, consume) != NGX_OK) { + return NGX_HTTP_INTERNAL_SERVER_ERROR; + } + + ngx_log_debug2(NGX_LOG_DEBUG_HTTP, request.connection->log, 0, + "waf output body filter: there are now %uz bytes of " + "accumulated output data (%uz copied)", + out_filter_ctx_.out_total, out_filter_ctx_.copied_total); + + return consume ? 
NGX_OK : NGX_AGAIN; + } + + // !buffering + request.buffered &= ~0x08; + + if (st == stage::WAF_END_BLOCK_COMMIT) { + // commit of body of blocking response + assert(request.header_sent == 1); + ngx_log_debug1(NGX_LOG_DEBUG_HTTP, request.connection->log, 0, + "waf output body filter: discarding %uz bytes of " + "accumulated data and passing down blocking response data", + out_filter_ctx_.out_total); + header_filter_ctx_.clear(*request.pool); + out_filter_ctx_.clear(*request.pool); + + transition_to_stage(stage::AFTER_RUN_WAF_END); + return ngx_http_next_output_body_filter(&request, in); + } + + // otherwise we sent down the buffered data + whatever we got + + if (header_filter_ctx_.out) { + // if we have header data, send it first. Bypass the body filter chain by + // invoking ngx_http_write_filter() directly + ngx_log_debug(NGX_LOG_DEBUG_HTTP, request.connection->log, 0, + "waf output body filter: sending down buffered header data"); + ngx_int_t rc = send_buffered_header(request); + if (rc == NGX_AGAIN) { + ngx_log_debug(NGX_LOG_DEBUG_HTTP, request.connection->log, 0, + "waf output body filter: send_buffered_header returned " + "NGX_AGAIN after sending down buffered header data; " + "returning NGX_AGAIN"); + request.buffered |= 0x08; + return NGX_AGAIN; + } + if (rc != NGX_OK) { + ngx_log_debug(NGX_LOG_DEBUG_HTTP, request.connection->log, 0, + "waf output body filter: ngx_http_write_filter returned %d " + "after sending down buffered header data; returning", + rc); + return rc; + } + // else continue + } + + if (out_filter_ctx_.out && !output_transform_temp_) { + // if we have data accumulated, add in to it (without consuming) and send it + // downstream. 
We add it all together to avoid having to do two calls + // downstream (plus handle the case where the first call doesn't return + // NGX_OK) + if (buffer_chain(out_filter_ctx_, *request.pool, in, false) != NGX_OK) { + return NGX_HTTP_INTERNAL_SERVER_ERROR; + } + + ngx_log_debug(NGX_LOG_DEBUG_HTTP, request.connection->log, 0, + "waf output body filter: sending down buffered out + in " + "chain, accum=%uz, copied=%uz, lb=%d", + out_filter_ctx_.out_total, out_filter_ctx_.copied_total, + out_filter_ctx_.found_last); + + auto rc = ngx_http_next_output_body_filter(&request, out_filter_ctx_.out); + ngx_log_debug(NGX_LOG_DEBUG_HTTP, request.connection->log, 0, + "waf output body filter: downstream filter returned %d", rc); + out_filter_ctx_.clear(*request.pool); + return rc; + } else if (output_transform_temp_) { + // if we're to transfrom temp buffers into memory buffers: + // * if we don't have data accumulated and we don't have a file, we can pass + // the in chain directly downstream + // * if we have data accumulated, we add in to the accumulated data, and + // split the chain at the first file buffer. We read as much of the file + // as we can given the amount of memory buffers we're willing to allocate, + // append it to the first part (before the file buffer), and send it + // downstream. The second part contains the unread part of the file buffer + // (if any), plus the subsequent buffers (if any). If the downstream + // filters, do not return NGX_OK, or if we have busy buffers (the + // downstream filters did not fully consume the data we passed them), we + // return, saving the second part for the next filter call. Otherwise, + // we loop, reading more of the file buffer and sending it downstream, + // until we either get an empty second part, have busy buffers, or + // downstream doesn't return NGX_OK. 
+ + if (!out_filter_ctx_.out && !has_file(in)) { + goto passthru; // fast path + } + + if (buffer_chain(out_filter_ctx_, *request.pool, in, false) != NGX_OK) { + return NGX_HTTP_INTERNAL_SERVER_ERROR; + } + + again: + auto [first_part, second_part] = + modify_chain_at_temp(*request.pool, out_filter_ctx_.out); + ngx_log_debug(NGX_LOG_DEBUG_HTTP, request.connection->log, 0, + "waf output body filter: chain split " + "first_part_chain_(len,size)=(%uz,%uz), " + "second_part_chain_(len,size)=(%uz, %uz)", + chain_length(first_part), chain_size(first_part), + chain_length(second_part), chain_size(second_part)); + + if (!first_part && !second_part) { + return NGX_HTTP_INTERNAL_SERVER_ERROR; + } + + if (second_part) { + request.buffered |= 0x08; + } else { + request.buffered &= ~0x08; + } + out_filter_ctx_.replace_out(second_part); + + auto rc = ngx_http_next_output_body_filter(&request, first_part); + // see nginx development guide on ngx_chain_update_chains + buffer_pool_.update_chains(*request.pool, first_part); + if (rc != NGX_OK) { + ngx_log_debug(NGX_LOG_DEBUG_HTTP, request.connection->log, 0, + "waf output body filter: output body filter returned %d; " + "busy_(len,size)=(%uz,%uz), returning", + rc, chain_length(buffer_pool_.busy()), + chain_size(buffer_pool_.busy())); + return rc; + } + if (buffer_pool_.busy()) { + ngx_log_debug( + NGX_LOG_DEBUG_HTTP, request.connection->log, 0, + "waf output body filter: buffer pool has busy buffers; " + "out_(len,size)=(%uz,%uz), busy_(len,size)=(%uz,%uz), " + "returning NGX_AGAIN", + chain_length(out_filter_ctx_.out), chain_size(out_filter_ctx_.out), + chain_length(buffer_pool_.busy()), chain_size(buffer_pool_.busy())); + + // we need to be flushed, so we can flush downstream too + return NGX_AGAIN; + } + + if (out_filter_ctx_.out) { + ngx_log_debug( + NGX_LOG_DEBUG_HTTP, request.connection->log, 0, + "waf output body filter: downstream returned NGX_OK and we " + "still have data to send down and no busy buffers; looping"); + 
goto again; + } + + ngx_log_debug(NGX_LOG_DEBUG_HTTP, request.connection->log, 0, + "waf output body filter: downstream returned NGX_OK and we " + "have neither busy buffers nor further data to send down; " + "returning NGX_OK"); + return NGX_OK; + } else { + passthru: + // just pass in chain through + ngx_log_debug(NGX_LOG_DEBUG_HTTP, request.connection->log, 0, + "waf output body filter: sending down in chain directly"); + auto rc = ngx_http_next_output_body_filter(&request, in); ngx_log_debug(NGX_LOG_DEBUG_HTTP, request.connection->log, 0, - "posted waf end task"); - } - - // blocking not supported - // I think supporting this would involve registering a body filter that - // buffers the original request output while awaiting a response from the WAF - // (see the postpone filter). The reason for this is that the there is no way - // to suspend the request from the header filter. If we return something other - // than NGX_OK from it, the caller of ngx_http_send_header() won't try to send - // the body. - - // If we want to implement this in the future, this is the (untested) idea: we - // need to suppress sending the header from our header filter, return NGX_OK, - // enable caching the body from a body filter while the WAF is running, and - // once we get a response from the WAF: a) if we got blocking response, - // discard the buffered data and send our blocking response (headers - // included), or b) otherwise, invoke the next header filter to write the - // original header, send the cached body data, discard it and disable caching - // body data. 
- - return ngx_http_next_output_body_filter(&request, chain); + "waf_output_body_filter: downstream filter returned %d", rc); + return rc; + } +} + +std::pair Context::modify_chain_at_temp( + ngx_pool_t &pool, ngx_chain_t *in) noexcept { + ngx_chain_t *last_temp = nullptr; + ngx_chain_t *c_file_buf = nullptr; // first file buf + for (ngx_chain_t *c = in; c; c = c->next) { + const ngx_buf_t *b = c->buf; + if (!ngx_buf_in_memory(b) && b->in_file) { + c_file_buf = c; + break; + } + last_temp = c; + } + + if (!c_file_buf) { + return {in, nullptr}; + } + + ngx_chain_t *first_part; + ngx_chain_t *second_part{}; + ngx_chain_t **first_part_tail{}; + + if (last_temp) { + first_part = in; + first_part_tail = &last_temp->next; + } else { + first_part_tail = &first_part; + } + *first_part_tail = nullptr; + + ngx_buf_t &file_buf = *c_file_buf->buf; + off_t &file_pos = file_buf.file_pos; + const auto file_completely_read = [&file_pos, + file_last = file_buf.file_last]() -> bool { + return file_pos >= file_last; + }; + while (true) { // loop while we have buffers and !file_completely_read + std::optional maybe_buffer = buffer_pool_.get_buffer(pool); + if (!maybe_buffer) { + break; + } + ngx_buf_t &b = *(*maybe_buffer)->buf; + ssize_t amount_read = ngx_read_file( + file_buf.file, b.pos, + std::min( + static_cast(file_buf.file_last - file_buf.file_pos), + b.end - b.pos), + file_buf.file_pos); + if (amount_read <= 0) { + return {nullptr, nullptr}; + } + file_pos += amount_read; + b.last = b.pos + amount_read; + *first_part_tail = *maybe_buffer; + first_part_tail = &(*first_part_tail)->next; + + if (file_completely_read()) { // file completely read + b.flush = file_buf.flush; + b.last_buf = file_buf.last_buf; + b.last_in_chain = file_buf.last_in_chain; + break; + } + } + + if (file_completely_read()) { + second_part = c_file_buf->next; + ngx_free_chain(&pool, c_file_buf); + } else { + second_part = c_file_buf; + } + + return {first_part, second_part}; } std::optional 
Context::run_waf_req_post( @@ -1008,10 +1688,23 @@ void Context::waf_req_post_done(ngx_http_request_t &request, bool blocked) { } } +void Context::waf_final_done(ngx_http_request_t &request, bool blocked) { + bool res = checked_transition_to_stage( + stage::PENDING_WAF_END, + blocked ? stage::WAF_END_BLOCK_COMMIT : stage::AFTER_RUN_WAF_END); + + if (!res) { + ngx_log_error(NGX_LOG_ERR, request.connection->log, 0, + "call to waf_final_done without current stage being " + "BEFORE_RUN_WAF_END"); + return; + } +} + std::optional Context::run_waf_end( ngx_http_request_t &request, dd::Span &span) { auto st = stage_->load(std::memory_order_acquire); - if (st != stage::BEFORE_RUN_WAF_END) { + if (st != stage::PENDING_WAF_END) { return std::nullopt; } @@ -1026,9 +1719,14 @@ std::optional Context::run_waf_end( ddwaf_result_free(&result); } - transition_to_stage(stage::AFTER_RUN_WAF_END); + std::optional block_spec; + ddwaf_map_obj actions_arr{result.actions}; + if (!actions_arr.empty()) { + ActionsResult actions_res{actions_arr}; + block_spec = resolve_block_spec(actions_arr, *request.connection->log); + } - return std::nullopt; // we don't support blocking in the final waf run + return block_spec; } void Context::on_main_log_request(ngx_http_request_t &request, @@ -1062,3 +1760,27 @@ void Context::report_matches(ngx_http_request_t &request, dd::Span &span) { } } // namespace datadog::nginx::security + +extern "C" bool contains_str(ngx_chain_t *in, const char *str) { + for (ngx_chain_t *cl = in; cl; cl = cl->next) { + if (cl->buf->in_file) { + return true; + } + std::string_view sv{reinterpret_cast(cl->buf->pos), + static_cast(cl->buf->last - cl->buf->pos)}; + if (sv.find(str) != std::string_view::npos) { + return true; + } + } + return false; +} + +extern "C" bool is_long(ngx_chain_t *in) { + uint64_t i = 0; + for (ngx_chain_t *cl = in; cl; cl = cl->next) { + if (i++ > 20) { + return true; + } + } + return false; +} diff --git a/src/security/context.h 
b/src/security/context.h index fa46876e..6e0d98d9 100644 --- a/src/security/context.h +++ b/src/security/context.h @@ -8,12 +8,10 @@ #include #include #include -#include #include "../dd.h" #include "blocking.h" -#include "collection.h" -#include "ddwaf_obj.h" +#include "buffer_pool.h" #include "library.h" #include "util.h" @@ -27,6 +25,8 @@ extern "C" { namespace datadog::nginx::security { +static constexpr uintptr_t kBufferTag = 0xD47AD06; + struct DdwafResultFreeFunctor { void operator()(ddwaf_result &res) { ddwaf_result_free(&res); } }; @@ -56,13 +56,17 @@ class Context { public: // returns a new context or an empty unique_ptr if the waf is not active - static std::unique_ptr maybe_create(); + static std::unique_ptr maybe_create( + datadog_loc_conf_t &loc_conf, + std::optional max_saved_output_data); ngx_int_t request_body_filter(ngx_http_request_t &request, ngx_chain_t *chain, dd::Span &span) noexcept; bool on_request_start(ngx_http_request_t &request, dd::Span &span) noexcept; + ngx_int_t header_filter(ngx_http_request_t &request, dd::Span &span) noexcept; + ngx_int_t output_body_filter(ngx_http_request_t &request, ngx_chain_t *chain, dd::Span &span) noexcept; @@ -78,6 +82,8 @@ class Context { void waf_req_post_done(ngx_http_request_t &request, bool blocked); + void waf_final_done(ngx_http_request_t &request, bool blocked); + std::optional run_waf_end(ngx_http_request_t &request, dd::Span &span); @@ -85,13 +91,13 @@ class Context { bool do_on_request_start(ngx_http_request_t &request, dd::Span &span); ngx_int_t do_request_body_filter(ngx_http_request_t &request, ngx_chain_t *chain, dd::Span &span); + ngx_int_t do_header_filter(ngx_http_request_t &request, dd::Span &span); ngx_int_t do_output_body_filter(ngx_http_request_t &request, ngx_chain_t *chain, dd::Span &span); void do_on_main_log_request(ngx_http_request_t &request, dd::Span &span); bool has_matches() const noexcept; void report_matches(ngx_http_request_t &request, dd::Span &span); - ngx_int_t 
buffer_chain(ngx_pool_t &pool, ngx_chain_t *in, bool consume); enum class stage { /* Set on on_request_start (NGX_HTTP_ACCESS_PHASE) if there's no thread @@ -155,10 +161,13 @@ class Context { * submit the WAF final run. * Incoming transitions: AFTER_BEGIN_WAF → BEFORE_RUN_WAF_END * AFTER_ON_REQ_WAF → BEFORE_RUN_WAF_END */ - BEFORE_RUN_WAF_END, + PENDING_WAF_END, + + WAF_END_BLOCK_COMMIT, - /* Set on the thread of the final WAF run, after the WAF has run. - * Incoming transitions: BEFORE_RUN_WAF_END → AFTER_RUN_WAF_END */ + /* Set on the thread of the final WAF run, after the WAF has run, or + * directly on the main thread, if we could not submit the WAF final task. + * Incoming transitions: PENDING_WAF_END → AFTER_RUN_WAF_END */ AFTER_RUN_WAF_END, // possible final states: @@ -183,17 +192,37 @@ class Context { OwnedDdwafContext ctx_{nullptr}; DdwafMemres memres_; - static inline constexpr auto kMaxFilterData = 40 * 1024; + static inline constexpr std::size_t kMaxFilterData = 40 * 1024; + static inline constexpr std::size_t kDefaultMaxSavedOutputData = 256 * 1024; + std::size_t max_saved_output_data_{kDefaultMaxSavedOutputData}; + bool output_transform_temp_{false}; struct FilterCtx { - ngx_chain_t *out; // the buffered request body - ngx_chain_t **out_last{&out}; + ngx_chain_t *out; // the buffered request or response body + ngx_chain_t **out_latest{&out}; std::size_t out_total; + std::size_t copied_total; bool found_last; void clear(ngx_pool_t &pool) noexcept; + void replace_out(ngx_chain_t *new_out) noexcept; }; - FilterCtx filter_ctx_{}; + FilterCtx filter_ctx_{}; // for request body + FilterCtx header_filter_ctx_{}; // for the header data + FilterCtx out_filter_ctx_{}; // for response body + // these 5 buffers are in addition to those used to buffer data while the WAF + // in running + BufferPool<5, 16384, kBufferTag> buffer_pool_; + + std::pair modify_chain_at_temp( + ngx_pool_t &pool, ngx_chain_t *in) noexcept; + + static ngx_int_t buffer_chain(FilterCtx 
&filter_ctx, ngx_pool_t &pool, + ngx_chain_t const *in, bool consume) noexcept; + + public: + ngx_int_t buffer_header_output(ngx_pool_t &pool, ngx_chain_t *chain) noexcept; + ngx_int_t send_buffered_header(ngx_http_request_t &request) noexcept; }; } // namespace datadog::nginx::security diff --git a/src/security/library.cpp b/src/security/library.cpp index 522bde42..7aa2f932 100644 --- a/src/security/library.cpp +++ b/src/security/library.cpp @@ -285,6 +285,10 @@ class FinalizedConfigSettings { return obfuscation_value_regex_; }; + std::optional get_max_saved_output_data() const { + return appsec_max_saved_output_data_; + } + private: // NOLINTNEXTLINE(readability-identifier-naming) using ev_t = std::vector; @@ -324,6 +328,7 @@ class FinalizedConfigSettings { ngx_uint_t waf_timeout_usec_; std::string obfuscation_key_regex_; std::string obfuscation_value_regex_; + std::optional appsec_max_saved_output_data_; }; FinalizedConfigSettings::FinalizedConfigSettings( @@ -401,6 +406,11 @@ FinalizedConfigSettings::FinalizedConfigSettings( evs, "DD_APPSEC_OBFUSCATION_PARAMETER_VALUE_REGEXP"sv) .value_or(std::string{kDefaultObfuscationValueRegex}); } + + if (ngx_conf.appsec_max_saved_output_data != NGX_CONF_UNSET_SIZE) { + appsec_max_saved_output_data_.emplace( + ngx_conf.appsec_max_saved_output_data); + } } std::optional FinalizedConfigSettings::get_env_bool( @@ -607,4 +617,7 @@ std::vector Library::environment_variable_names() { "DD_APPSEC_OBFUSCATION_PARAMETER_VALUE_REGEXP"sv}; } +std::optional Library::max_saved_output_data() { + return config_settings_->get_max_saved_output_data(); +}; } // namespace datadog::nginx::security diff --git a/src/security/library.h b/src/security/library.h index cdbcd494..68414f8b 100644 --- a/src/security/library.h +++ b/src/security/library.h @@ -43,6 +43,8 @@ class Library { static std::vector environment_variable_names(); + static std::optional max_saved_output_data(); + protected: static void set_handle(OwnedDdwafHandle &&handle); diff 
--git a/test/cases/case.py b/test/cases/case.py index 0902311b..9ac52318 100644 --- a/test/cases/case.py +++ b/test/cases/case.py @@ -1,12 +1,11 @@ """Boilerplate for test cases""" -from . import orchestration - import os -import sys import time import unittest +from . import orchestration + class TestCase(unittest.TestCase): """Provide boilerplate for test cases. @@ -47,6 +46,23 @@ def setUpClass(cls): or openresty_value == "NO" or openresty_value == "") + @staticmethod + def compare_versions(v1, v2): + try: + parts1 = [int(x) for x in v1.split('.')] + parts2 = [int(x) for x in v2.split('.')] + except ValueError as e: + raise ValueError("Invalid version(s)") from e + + max_len = max(len(parts1), len(parts2)) + parts1.extend([0] * (max_len - len(parts1))) + parts2.extend([0] * (max_len - len(parts2))) + + for p1, p2 in zip(parts1, parts2): + if p1 != p2: + return p1 - p2 + return 0 + def setUp(self): if (type(self).waf_disabled and hasattr(type(self), "requires_waf") and type(self).requires_waf): @@ -61,6 +77,15 @@ def setUp(self): and type(self).requires_openresty): self.skipTest("OpenResty is disabled") + if hasattr(type(self), "min_nginx_version"): + min_version = type(self).min_nginx_version + with orchestration.singleton() as orch: + actual_version = orch.nginx_version() + if TestCase.compare_versions(actual_version, min_version) < 0: + self.skipTest( + f"requires nginx >= {min_version}, got {actual_version}" + ) + context = self.orch_context = orchestration.singleton() self.orch = context.__enter__() self.begin = time.monotonic() diff --git a/test/cases/orchestration.py b/test/cases/orchestration.py index 08109059..e946ec3e 100644 --- a/test/cases/orchestration.py +++ b/test/cases/orchestration.py @@ -318,7 +318,7 @@ def docker_compose_services(): return result.stdout.split() -def curl(url, headers, stderr=None, method="GET", body=None): +def curl(url, headers, stderr=None, method="GET", body=None, http_version=1): def header_args(): if isinstance(headers, 
dict): @@ -330,6 +330,15 @@ def header_args(): yield "--header" yield f"{name}: {value}" + if http_version == 1: + version_arg = "--http1.1" + elif http_version == 2: + version_arg = "--http2-prior-knowledge" + elif http_version == 3: + version_arg = "--http3-only" + else: + raise Exception(f"Unknown HTTP version: {http_version}") + # "curljson.sh" is a script that lives in the "client" docker compose # service. It's a wrapper around "curl" that outputs a JSON object of # information on the first line, and outputs a JSON string of the response @@ -353,6 +362,8 @@ def header_args(): "curljson.sh", f"-X{method}", *header_args(), + '-k', + version_arg, *body_args, url, ) @@ -526,22 +537,36 @@ def create_coverage_tarball(): if result.returncode != 0: raise Exception("Failed to create tarball") + @staticmethod + def nginx_version(): + result = subprocess.run(docker_compose_command("exec", "--", "nginx", + "nginx", "-v"), + capture_output=True, + text=True, + check=True) + match = re.search(r'nginx/([\d.]+)', result.stderr) + return match.group(1) if match else None + def send_nginx_http_request(self, path, port=80, headers={}, method="GET", - req_body=None): + req_body=None, + http_version=1, + tls=False): """Send a "GET " request to nginx, and return the resulting HTTP status code and response body as a tuple `(status, body)`. 
""" - url = f"http://nginx:{port}{path}" + protocol = "https" if tls else "http" + url = f"{protocol}://nginx:{port}{path}" print("fetching", url, file=self.verbose, flush=True) fields, headers, body = curl(url, headers, body=req_body, stderr=self.verbose, - method=method) + method=method, + http_version=http_version) return fields["response_code"], headers, body def setup_remote_config_payload(self, payload): diff --git a/test/cases/sec_blocking/cert/example.com.crt b/test/cases/sec_blocking/cert/example.com.crt new file mode 100644 index 00000000..2faf3703 --- /dev/null +++ b/test/cases/sec_blocking/cert/example.com.crt @@ -0,0 +1,10 @@ +-----BEGIN CERTIFICATE----- +MIIBVDCB+6ADAgECAhRBWmi1zB5PlJ+zCnhSa4SoyJJ5NzAKBggqhkjOPQQDAjAZ +MRcwFQYDVQQDDA5sb2NhbGhvc3Q6ODQ0MzAeFw0yNTAyMDgwMDM5NTBaFw0zNTAy +MDYwMDM5NTBaMBkxFzAVBgNVBAMMDmxvY2FsaG9zdDo4NDQzMFkwEwYHKoZIzj0C +AQYIKoZIzj0DAQcDQgAEIWLm5F6KOmkNpbHk/uFKPjx6cIwBPSpvCeRu3kMaxb3g +i1SfpvDAefJGZf2y2fQrAvtdst7KaKB/csUIi0Yds6MhMB8wHQYDVR0OBBYEFMZ9 +gqyVoJYDk3fwprm7bNSFFGWeMAoGCCqGSM49BAMCA0gAMEUCID5+i6BXUYd9OqdA +M1stEKV3fJQfNfXvpDz26O0qjHbmAiEAzVmgBojppHfeAMhouT1sBLyGw3mXX45X +dAFjdVM1LSY= +-----END CERTIFICATE----- \ No newline at end of file diff --git a/test/cases/sec_blocking/cert/example.com.key b/test/cases/sec_blocking/cert/example.com.key new file mode 100644 index 00000000..3d877f0a --- /dev/null +++ b/test/cases/sec_blocking/cert/example.com.key @@ -0,0 +1,8 @@ +-----BEGIN EC PARAMETERS----- +BggqhkjOPQMBBw== +-----END EC PARAMETERS----- +-----BEGIN EC PRIVATE KEY----- +MHcCAQEEICOa5EKqwwWcbASPGZgOPiEi42jpoWUuCfbAcPHigGGSoAoGCCqGSM49 +AwEHoUQDQgAEIWLm5F6KOmkNpbHk/uFKPjx6cIwBPSpvCeRu3kMaxb3gi1SfpvDA +efJGZf2y2fQrAvtdst7KaKB/csUIi0Ydsw== +-----END EC PRIVATE KEY----- \ No newline at end of file diff --git a/test/cases/sec_blocking/conf/http.conf b/test/cases/sec_blocking/conf/http.conf index 7a49158c..9aa89f9f 100644 --- a/test/cases/sec_blocking/conf/http.conf +++ b/test/cases/sec_blocking/conf/http.conf @@ 
-20,7 +20,12 @@ http { client_max_body_size 10m; server { - listen 80; + listen 80; + listen 443 quic reuseport; + listen 443 ssl; + http2 on; + ssl_certificate /tmp/example.com.crt; + ssl_certificate_key /tmp/example.com.key; location /http { # This test assumes that auto-propagation is working. We'll request @@ -28,6 +33,11 @@ http { # resulting spans sent to the agent are marked as errors. proxy_pass http://http:8080; } + + location /resp_header_blocked { + add_header 'foo' 'block me' always; + proxy_pass http://http:8080; + } } } diff --git a/test/cases/sec_blocking/conf/waf.json b/test/cases/sec_blocking/conf/waf.json index e1970630..b72b0f1d 100644 --- a/test/cases/sec_blocking/conf/waf.json +++ b/test/cases/sec_blocking/conf/waf.json @@ -209,6 +209,57 @@ "on_match": [ "redirect_bad_status" ] + }, + { + "id": "block_response_header", + "name": "Block based on response header", + "tags": { + "type": "security_scanner", + "category": "attack_attempt" + }, + "conditions": [ + { + "parameters": { + "inputs": [ + { + "address": "server.response.headers.no_cookies" + } + ], + "regex": "^block me$" + }, + "operator": "match_regex" + } + ], + "on_match": [ + "block_501" + ] + }, + { + "id": "410", + "name": "Match 410 responses", + "tags": { + "type": "security_scanner", + "category": "attack_attempt" + }, + "conditions": [ + { + "parameters": { + "inputs": [ + { + "address": "server.response.status" + } + ], + "regex": "410" + }, + "operator": "match_regex" + } + ], + "transformers": [ + "values_only" + ], + "on_match": [ + "block_501" + ] } ] } diff --git a/test/cases/sec_blocking/test_sec_blocking.py b/test/cases/sec_blocking/test_sec_blocking.py index 5549f114..5ab75b08 100644 --- a/test/cases/sec_blocking/test_sec_blocking.py +++ b/test/cases/sec_blocking/test_sec_blocking.py @@ -8,6 +8,7 @@ class TestSecBlocking(case.TestCase): config_setup_done = False requires_waf = True + min_nginx_version = '1.26.0' def setUp(self): super().setUp() @@ -20,6 +21,14 @@ def 
setUp(self): conf_path = Path(__file__).parent / './conf/http.conf' conf_text = conf_path.read_text() + crt_path = Path(__file__).parent / './cert/example.com.crt' + crt_text = crt_path.read_text() + self.orch.nginx_replace_file('/tmp/example.com.crt', crt_text) + + key_path = Path(__file__).parent / './cert/example.com.key' + key_text = key_path.read_text() + self.orch.nginx_replace_file('/tmp/example.com.key', key_text) + status, log_lines = self.orch.nginx_replace_config( conf_text, conf_path.name) self.assertEqual(0, status, log_lines) @@ -29,6 +38,10 @@ def setUp(self): # Consume any previous logging from the agent. self.orch.sync_service('agent') + @staticmethod + def convert_headers(headers): + return {k.lower(): v for k, v in dict(headers).items()} + def run_with_ua(self, user_agent, accept): headers = {'User-Agent': user_agent, 'Accept': accept} status, headers, body = self.orch.send_nginx_http_request( @@ -36,9 +49,8 @@ def run_with_ua(self, user_agent, accept): self.orch.reload_nginx() log_lines = self.orch.sync_service('agent') - headers = dict(headers) - headers = {k.lower(): v for k, v in headers.items()} - return status, headers, body, log_lines + return status, TestSecBlocking.convert_headers( + headers), body, log_lines def run_with_body(self, content_type, req_body): status, headers, body = self.orch.send_nginx_http_request( @@ -53,7 +65,7 @@ def run_with_body(self, content_type, req_body): headers = {k.lower(): v for k, v in headers.items()} return status, headers, body, log_lines - def assert_has_report(self, log_lines): + def assert_has_report(self, log_lines, exp_match='block'): traces = [ json.loads(line) for line in log_lines if line.startswith('[[{') ] @@ -72,7 +84,7 @@ def predicate(x): appsec_rep = json.loads(trace[0][0]['meta']['_dd.appsec.json']) self.assertEqual(appsec_rep['triggers'][0]['rule']['on_match'][0], - 'block') + exp_match) def test_default_action(self): status, headers, body, log_lines = self.run_with_ua( @@ -149,3 +161,39 
@@ def test_block_body_json_long(self): '{"a": "block_default", "b": "' + ('a' * 1024 * 1024)) self.assertEqual(status, 403) self.assert_has_report(log_lines) + + def block_on_status(self, http_version): + if http_version != 3: + status, headers, body = self.orch.send_nginx_http_request( + '/http/status/410', http_version=http_version) + else: + status, headers, body = self.orch.send_nginx_http_request( + '/http/status/410', tls=True, port=443, http_version=3) + self.orch.reload_nginx() + log_lines = self.orch.sync_service('agent') + self.assertEqual(501, status) + headers = TestSecBlocking.convert_headers(headers) + self.assertEqual(headers['content-type'], 'application/json') + self.assertRegex(body, r'"title":"You\'ve been blocked') + self.assert_has_report(log_lines, 'block_501') + + def test_block_on_status_http11(self): + self.block_on_status(1) + + def test_block_on_status_http2(self): + self.block_on_status(2) + + def test_block_on_status_http3(self): + self.block_on_status(3) + + def test_block_on_response_header(self): + nginx_version = self.orch.nginx_version() + status, headers, body = self.orch.send_nginx_http_request( + '/resp_header_blocked') + self.orch.reload_nginx() + log_lines = self.orch.sync_service('agent') + self.assertEqual(501, status) + headers = TestSecBlocking.convert_headers(headers) + self.assertEqual(headers['content-type'], 'application/json') + self.assertRegex(body, r'"title":"You\'ve been blocked') + self.assert_has_report(log_lines, 'block_501') diff --git a/test/services/client/Dockerfile b/test/services/client/Dockerfile index f8daff48..53c618c5 100644 --- a/test/services/client/Dockerfile +++ b/test/services/client/Dockerfile @@ -1,9 +1,10 @@ -from alpine:3.19 +FROM alpine/curl-http3 -copy install-tools.sh /tmp/install-tools.sh -run chmod +x /tmp/install-tools.sh && /tmp/install-tools.sh +COPY install-tools.sh /tmp/install-tools.sh +USER 0 +RUN chmod +x /tmp/install-tools.sh && /tmp/install-tools.sh -copy curljson.sh 
/usr/local/bin/curljson.sh +COPY curljson.sh /usr/local/bin/curljson.sh -copy sigwait.sh /usr/local/bin/sigwait.sh -entrypoint ["/usr/local/bin/sigwait.sh", "INT", "TERM"] +COPY sigwait.sh /usr/local/bin/sigwait.sh +ENTRYPOINT ["/usr/local/bin/sigwait.sh", "INT", "TERM"] diff --git a/test/services/client/install-tools.sh b/test/services/client/install-tools.sh index 5b178d20..621d4561 100644 --- a/test/services/client/install-tools.sh +++ b/test/services/client/install-tools.sh @@ -11,7 +11,7 @@ case "$(uname -m)" in esac apk update -apk add wget tar curl jq +apk add wget tar jq # grpcurl is a self-contained binary (Go program) GRPCURL_TAR="grpcurl_1.8.6_linux_${ARCH}.tar.gz" From 2820009e1f199c6f8a4a160c455c14908faa619a Mon Sep 17 00:00:00 2001 From: Gustavo Lopes Date: Wed, 19 Feb 2025 16:54:10 +0000 Subject: [PATCH 02/18] Remove temp file conversion Looks unnecessary once the full header filter chain is run. --- src/datadog_conf.h | 5 - src/ngx_http_datadog_module.cpp | 13 --- src/security/buffer_pool.h | 61 ----------- src/security/context.cpp | 172 +------------------------------- src/security/context.h | 8 -- 5 files changed, 2 insertions(+), 257 deletions(-) delete mode 100644 src/security/buffer_pool.h diff --git a/src/datadog_conf.h b/src/datadog_conf.h index b8b090bd..66ac4e59 100644 --- a/src/datadog_conf.h +++ b/src/datadog_conf.h @@ -229,11 +229,6 @@ struct datadog_loc_conf_t { #ifdef WITH_WAF // the thread pool used to run the WAF on ngx_thread_pool_t *waf_pool{nullptr}; - - // whether temp file buffers received by the module output filter should be - // split into memory files. This can work around bugs in downstream filters - // than can't handle these buffers. Defaults to false.
- ngx_flag_t waf_output_transform_temp{NGX_CONF_UNSET}; #endif #ifdef WITH_RUM diff --git a/src/ngx_http_datadog_module.cpp b/src/ngx_http_datadog_module.cpp index 136f2aa8..acd846f8 100644 --- a/src/ngx_http_datadog_module.cpp +++ b/src/ngx_http_datadog_module.cpp @@ -384,15 +384,6 @@ static ngx_command_t datadog_commands[] = { offsetof(datadog_main_conf_t, appsec_obfuscation_value_regex), nullptr, }, - - { - ngx_string("datadog_appsec_output_transform_temp"), - NGX_HTTP_MAIN_CONF|NGX_HTTP_SRV_CONF|NGX_HTTP_LOC_CONF|NGX_CONF_TAKE1, - ngx_conf_set_flag_slot, - NGX_HTTP_LOC_CONF_OFFSET, - offsetof(datadog_loc_conf_t, waf_output_transform_temp), - nullptr, - }, #endif DATADOG_RUM_DIRECTIVES ngx_null_command @@ -867,10 +858,6 @@ static char *merge_datadog_loc_conf(ngx_conf_t *cf, void *parent, if (conf->waf_pool == nullptr) { conf->waf_pool = prev->waf_pool; } - - if (conf->waf_output_transform_temp == NGX_CONF_UNSET) { - conf->waf_output_transform_temp = prev->waf_output_transform_temp; - } #endif #ifdef WITH_RUM diff --git a/src/security/buffer_pool.h b/src/security/buffer_pool.h deleted file mode 100644 index 27f9e3ab..00000000 --- a/src/security/buffer_pool.h +++ /dev/null @@ -1,61 +0,0 @@ -#pragma once - -extern "C" { -#include -} -#include -#include -#include - -template -class BufferPool { - static inline auto tag = reinterpret_cast(Tag); - - public: - void update_chains(ngx_pool_t &pool, ngx_chain_t *out) noexcept { - ngx_chain_t *out_copy = out; - ngx_chain_update_chains(&pool, &free_, &busy_, &out_copy, tag); - } - - std::optional get_buffer(ngx_pool_t &pool) noexcept { - if (free_) { - ngx_chain_t *res = free_; - free_ = res->next; - res->next = nullptr; - - res->buf->recycled = 1; - res->buf->pos = res->buf->start; - res->buf->last = res->buf->start; - res->buf->flush = 0; - res->buf->sync = 0; - res->buf->last_buf = 0; - res->buf->last_in_chain = 0; - return {res}; - } - if (allocated_ >= NBuffers) { - return std::nullopt; - } - - ngx_buf_t *buf = 
ngx_create_temp_buf(&pool, BufferSize); - if (!buf) { - return std::nullopt; - } - auto chain = ngx_alloc_chain_link(&pool); - if (!chain) { - ngx_free_chain(&pool, chain); - return std::nullopt; - } - buf->tag = tag; - chain->buf = buf; - chain->next = nullptr; - allocated_++; - return {chain}; - } - - ngx_chain_t *busy() const { return busy_; } - - private: - ngx_chain_t *free_{}; - ngx_chain_t *busy_{}; - std::size_t allocated_{}; -}; diff --git a/src/security/context.cpp b/src/security/context.cpp index ab1be9ea..2f077f43 100644 --- a/src/security/context.cpp +++ b/src/security/context.cpp @@ -78,14 +78,6 @@ std::size_t has_last(ngx_chain_t const *ch) { } return false; } -bool has_file(ngx_chain_t const *ch) { - for (ngx_chain_t const *cl = ch; cl; cl = cl->next) { - if (!ngx_buf_in_memory(cl->buf) && cl->buf->in_file) { - return true; - } - } - return false; -} class JsonWriter : public rapidjson::Writer { using rapidjson::Writer::Writer; @@ -226,9 +218,6 @@ std::unique_ptr Context::maybe_create( if (max_saved_output_data) { res->max_saved_output_data_ = *max_saved_output_data; } - if (loc_conf.waf_output_transform_temp != NGX_CONF_UNSET) { - res->output_transform_temp_ = loc_conf.waf_output_transform_temp; - } return res; } @@ -1426,7 +1415,7 @@ ngx_int_t Context::do_output_body_filter(ngx_http_request_t &request, return ngx_http_next_output_body_filter(&request, in); } - // otherwise we sent down the buffered data + whatever we got + // otherwise we send down the buffered data + whatever we got if (header_filter_ctx_.out) { // if we have header data, send it first. Bypass the body filter chain by @@ -1452,7 +1441,7 @@ ngx_int_t Context::do_output_body_filter(ngx_http_request_t &request, // else continue } - if (out_filter_ctx_.out && !output_transform_temp_) { + if (out_filter_ctx_.out) { // if we have data accumulated, add in to it (without consuming) and send it // downstream. 
We add it all together to avoid having to do two calls // downstream (plus handle the case where the first call doesn't return @@ -1472,91 +1461,7 @@ ngx_int_t Context::do_output_body_filter(ngx_http_request_t &request, "waf output body filter: downstream filter returned %d", rc); out_filter_ctx_.clear(*request.pool); return rc; - } else if (output_transform_temp_) { - // if we're to transfrom temp buffers into memory buffers: - // * if we don't have data accumulated and we don't have a file, we can pass - // the in chain directly downstream - // * if we have data accumulated, we add in to the accumulated data, and - // split the chain at the first file buffer. We read as much of the file - // as we can given the amount of memory buffers we're willing to allocate, - // append it to the first part (before the file buffer), and send it - // downstream. The second part contains the unread part of the file buffer - // (if any), plus the subsequent buffers (if any). If the downstream - // filters, do not return NGX_OK, or if we have busy buffers (the - // downstream filters did not fully consume the data we passed them), we - // return, saving the second part for the next filter call. Otherwise, - // we loop, reading more of the file buffer and sending it downstream, - // until we either get an empty second part, have busy buffers, or - // downstream doesn't return NGX_OK. 
- - if (!out_filter_ctx_.out && !has_file(in)) { - goto passthru; // fast path - } - - if (buffer_chain(out_filter_ctx_, *request.pool, in, false) != NGX_OK) { - return NGX_HTTP_INTERNAL_SERVER_ERROR; - } - - again: - auto [first_part, second_part] = - modify_chain_at_temp(*request.pool, out_filter_ctx_.out); - ngx_log_debug(NGX_LOG_DEBUG_HTTP, request.connection->log, 0, - "waf output body filter: chain split " - "first_part_chain_(len,size)=(%uz,%uz), " - "second_part_chain_(len,size)=(%uz, %uz)", - chain_length(first_part), chain_size(first_part), - chain_length(second_part), chain_size(second_part)); - - if (!first_part && !second_part) { - return NGX_HTTP_INTERNAL_SERVER_ERROR; - } - - if (second_part) { - request.buffered |= 0x08; - } else { - request.buffered &= ~0x08; - } - out_filter_ctx_.replace_out(second_part); - - auto rc = ngx_http_next_output_body_filter(&request, first_part); - // see nginx development guide on ngx_chain_update_chains - buffer_pool_.update_chains(*request.pool, first_part); - if (rc != NGX_OK) { - ngx_log_debug(NGX_LOG_DEBUG_HTTP, request.connection->log, 0, - "waf output body filter: output body filter returned %d; " - "busy_(len,size)=(%uz,%uz), returning", - rc, chain_length(buffer_pool_.busy()), - chain_size(buffer_pool_.busy())); - return rc; - } - if (buffer_pool_.busy()) { - ngx_log_debug( - NGX_LOG_DEBUG_HTTP, request.connection->log, 0, - "waf output body filter: buffer pool has busy buffers; " - "out_(len,size)=(%uz,%uz), busy_(len,size)=(%uz,%uz), " - "returning NGX_AGAIN", - chain_length(out_filter_ctx_.out), chain_size(out_filter_ctx_.out), - chain_length(buffer_pool_.busy()), chain_size(buffer_pool_.busy())); - - // we need to be flushed, so we can flush downstream too - return NGX_AGAIN; - } - - if (out_filter_ctx_.out) { - ngx_log_debug( - NGX_LOG_DEBUG_HTTP, request.connection->log, 0, - "waf output body filter: downstream returned NGX_OK and we " - "still have data to send down and no busy buffers; looping"); - 
goto again; - } - - ngx_log_debug(NGX_LOG_DEBUG_HTTP, request.connection->log, 0, - "waf output body filter: downstream returned NGX_OK and we " - "have neither busy buffers nor further data to send down; " - "returning NGX_OK"); - return NGX_OK; } else { - passthru: // just pass in chain through ngx_log_debug(NGX_LOG_DEBUG_HTTP, request.connection->log, 0, "waf output body filter: sending down in chain directly"); @@ -1567,79 +1472,6 @@ ngx_int_t Context::do_output_body_filter(ngx_http_request_t &request, } } -std::pair Context::modify_chain_at_temp( - ngx_pool_t &pool, ngx_chain_t *in) noexcept { - ngx_chain_t *last_temp = nullptr; - ngx_chain_t *c_file_buf = nullptr; // first file buf - for (ngx_chain_t *c = in; c; c = c->next) { - const ngx_buf_t *b = c->buf; - if (!ngx_buf_in_memory(b) && b->in_file) { - c_file_buf = c; - break; - } - last_temp = c; - } - - if (!c_file_buf) { - return {in, nullptr}; - } - - ngx_chain_t *first_part; - ngx_chain_t *second_part{}; - ngx_chain_t **first_part_tail{}; - - if (last_temp) { - first_part = in; - first_part_tail = &last_temp->next; - } else { - first_part_tail = &first_part; - } - *first_part_tail = nullptr; - - ngx_buf_t &file_buf = *c_file_buf->buf; - off_t &file_pos = file_buf.file_pos; - const auto file_completely_read = [&file_pos, - file_last = file_buf.file_last]() -> bool { - return file_pos >= file_last; - }; - while (true) { // loop while we have buffers and !file_completely_read - std::optional maybe_buffer = buffer_pool_.get_buffer(pool); - if (!maybe_buffer) { - break; - } - ngx_buf_t &b = *(*maybe_buffer)->buf; - ssize_t amount_read = ngx_read_file( - file_buf.file, b.pos, - std::min( - static_cast(file_buf.file_last - file_buf.file_pos), - b.end - b.pos), - file_buf.file_pos); - if (amount_read <= 0) { - return {nullptr, nullptr}; - } - file_pos += amount_read; - b.last = b.pos + amount_read; - *first_part_tail = *maybe_buffer; - first_part_tail = &(*first_part_tail)->next; - - if (file_completely_read()) 
{ // file completely read - b.flush = file_buf.flush; - b.last_buf = file_buf.last_buf; - b.last_in_chain = file_buf.last_in_chain; - break; - } - } - - if (file_completely_read()) { - second_part = c_file_buf->next; - ngx_free_chain(&pool, c_file_buf); - } else { - second_part = c_file_buf; - } - - return {first_part, second_part}; -} - std::optional Context::run_waf_req_post( ngx_http_request_t &request, dd::Span &span) { ddwaf_obj input; diff --git a/src/security/context.h b/src/security/context.h index 6e0d98d9..fd350caa 100644 --- a/src/security/context.h +++ b/src/security/context.h @@ -11,7 +11,6 @@ #include "../dd.h" #include "blocking.h" -#include "buffer_pool.h" #include "library.h" #include "util.h" @@ -195,7 +194,6 @@ class Context { static inline constexpr std::size_t kMaxFilterData = 40 * 1024; static inline constexpr std::size_t kDefaultMaxSavedOutputData = 256 * 1024; std::size_t max_saved_output_data_{kDefaultMaxSavedOutputData}; - bool output_transform_temp_{false}; struct FilterCtx { ngx_chain_t *out; // the buffered request or response body @@ -210,12 +208,6 @@ class Context { FilterCtx filter_ctx_{}; // for request body FilterCtx header_filter_ctx_{}; // for the header data FilterCtx out_filter_ctx_{}; // for response body - // these 5 buffers are in addition to those used to buffer data while the WAF - // in running - BufferPool<5, 16384, kBufferTag> buffer_pool_; - - std::pair modify_chain_at_temp( - ngx_pool_t &pool, ngx_chain_t *in) noexcept; static ngx_int_t buffer_chain(FilterCtx &filter_ctx, ngx_pool_t &pool, ngx_chain_t const *in, bool consume) noexcept; From 3ea7ba0d4004e14c1e897923deae072b582cec7d Mon Sep 17 00:00:00 2001 From: Gustavo Lopes Date: Fri, 21 Feb 2025 10:23:27 +0000 Subject: [PATCH 03/18] Fix out-of-bounds read in ngx_header_writer.h --- src/ngx_header_writer.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/ngx_header_writer.h b/src/ngx_header_writer.h index bbf44ed9..e0adc1b1 100644 --- 
a/src/ngx_header_writer.h +++ b/src/ngx_header_writer.h @@ -73,7 +73,8 @@ class NgxHeaderWriter : public datadog::tracing::DictWriter { } if (key.size() != h[i].key.len || - ngx_strcasecmp((u_char *)key.data(), h[i].key.data) != 0) { + ngx_strncasecmp((u_char *)key.data(), h[i].key.data, key.size()) != + 0) { continue; } From 579a70a28d57da41c5c60bf25c6eccbda66f6aad Mon Sep 17 00:00:00 2001 From: Gustavo Lopes Date: Fri, 21 Feb 2025 10:26:39 +0000 Subject: [PATCH 04/18] Fix blocking paths Fixes refcount management and use-after-frees. Prefers to call ngx_http_finalize_request() with NGX_OK rather than NGX_DONE so that filter finalization, subrequest post actions, lingering close, and keepalive logic are respected. --- src/security/blocking.cpp | 32 +++++----- src/security/blocking.h | 2 +- src/security/context.cpp | 84 ++++++++++++++++++++------- test/cases/sec_blocking/conf/waf.json | 27 +++++++++ 4 files changed, 106 insertions(+), 39 deletions(-) diff --git a/src/security/blocking.cpp b/src/security/blocking.cpp index 2625c4aa..3e1696ea 100644 --- a/src/security/blocking.cpp +++ b/src/security/blocking.cpp @@ -304,7 +304,7 @@ void BlockingService::initialize(std::optional templ_html, new BlockingService(templ_html, templ_json)); } -void BlockingService::block(BlockSpecification spec, ngx_http_request_t &req) { +ngx_int_t BlockingService::block(BlockSpecification spec, ngx_http_request_t &req) { BlockResponse const resp = BlockResponse::resolve_content_type(spec, req); ngx_str_t *templ{}; if (resp.ct == BlockResponse::ContentType::HTML) { @@ -318,6 +318,8 @@ void BlockingService::block(BlockSpecification spec, ngx_http_request_t &req) { if (ngx_http_discard_request_body(&req) != NGX_OK) { req.keepalive = 0; } + + req.header_sent = 0; ngx_http_clean_header(&req); // ngx_http_filter_finalize_request neutralizes other filters with: @@ -343,21 +345,23 @@ void BlockingService::block(BlockSpecification spec, ngx_http_request_t &req) { // } else { // 
req.headers_out.content_length_n = 0; // } + if (req.header_only) { + req.headers_out.content_length_n = 0; + req.chunked = 0; + } req.keepalive = 0; req.lingering_close = 0; ngx_int_t res = ngx_http_send_header(&req); ngx_log_debug1(NGX_LOG_DEBUG, req.connection->log, 0, - "Status %d returned by ngx_http_send_header", res); - if (res == NGX_ERROR || res > NGX_OK || req.header_only) { - ngx_http_finalize_request(&req, res); - return; + "block(): status %d returned by ngx_http_send_header", res); + if (res != NGX_OK || req.header_only) { + return res; } ngx_buf_t *b = static_cast(ngx_calloc_buf(req.pool)); if (b == nullptr) { - ngx_http_finalize_request(&req, NGX_ERROR); - return; + return NGX_ERROR; } b->pos = templ->data; @@ -368,16 +372,10 @@ void BlockingService::block(BlockSpecification spec, ngx_http_request_t &req) { ngx_chain_t out{}; out.buf = b; - ngx_http_output_filter(&req, &out); - - auto count = req.count; - ngx_http_finalize_request(&req, NGX_DONE); - - // o/wise http/3 doesn't close the connection (via ngx_http_writer > - // ngx_http_finalize_request) - if (count - 1 > 0) { - ngx_post_event(req.connection->write, &ngx_posted_events); - } + res = ngx_http_output_filter(&req, &out); + ngx_log_debug(NGX_LOG_DEBUG, req.connection->log, 0, + "block(): status %d returned by ngx_http_output_filter", res); + return res; } BlockingService::BlockingService( diff --git a/src/security/blocking.h b/src/security/blocking.h index b50eae76..a7c4f9de 100644 --- a/src/security/blocking.h +++ b/src/security/blocking.h @@ -37,7 +37,7 @@ class BlockingService { static BlockingService *get_instance() { return instance.get(); } - void block(BlockSpecification spec, ngx_http_request_t &req); + [[nodiscard]] ngx_int_t block(BlockSpecification spec, ngx_http_request_t &req); private: BlockingService(std::optional templ_html_path, diff --git a/src/security/context.cpp b/src/security/context.cpp index 2f077f43..5e91c378 100644 --- a/src/security/context.cpp +++ 
b/src/security/context.cpp @@ -1,6 +1,5 @@ #include "context.h" -#include #include #include #include @@ -29,6 +28,7 @@ extern "C" { #include #include #include +#include #include #include #include @@ -296,6 +296,8 @@ class PolTaskCtx { ngx_log_debug(NGX_LOG_DEBUG_HTTP, req_log(), 0, "before task %p main", this); block_spec_ = as_self().do_handle(*tp_log); + // test long libddwaf call + // ::usleep(2000000); ngx_log_debug(NGX_LOG_DEBUG_HTTP, req_log(), 0, "after task %p main", this); ran_on_thread_.store(true, std::memory_order_release); @@ -336,6 +338,7 @@ class PolTaskCtx { ngx_log_debug1(NGX_LOG_DEBUG_HTTP, req_log(), 0, "calling complete on task %p", this); as_self().complete(); + // req_ may be invalid at this point } } else { ngx_log_debug1( @@ -414,20 +417,32 @@ class Pol1stWafCtx : public PolTaskCtx { auto *service = BlockingService::get_instance(); assert(service != nullptr); + ngx_int_t rc; try { - service->block(*block_spec_, req_); + rc = service->block(*block_spec_, req_); + + ngx_log_debug(NGX_LOG_DEBUG_HTTP, req_.connection->log, 0, + "completion handler of waf start task: sent blocking " + "response (rc: %d, c: %d)", + rc, req_.main->count); } catch (const std::exception &e) { ngx_log_error(NGX_LOG_ERR, req_.connection->log, 0, "failed to block request: %s", e.what()); - ngx_http_finalize_request(&req_, NGX_DONE); + rc = NGX_ERROR; } + + ngx_log_debug(NGX_LOG_DEBUG_HTTP, req_.connection->log, 0, + "completion handler of waf start task: finish: calling " + "ngx_http_finalize_request with %d", + rc); + ngx_http_finalize_request(&req_, rc); + // the request may have been destroyed at this point } else { req_.phase_handler++; // move past us ngx_post_event(req_.connection->write, &ngx_posted_events); + ngx_log_debug0(NGX_LOG_DEBUG_HTTP, req_.connection->log, 0, + "completion handler of waf start task: normal finish"); } - - ngx_log_debug0(NGX_LOG_DEBUG_HTTP, req_.connection->log, 0, - "completion handler of waf start task: finish"); } friend PolTaskCtx; 
@@ -735,13 +750,25 @@ class PolReqBodyWafCtx : public PolTaskCtx { auto *service = BlockingService::get_instance(); assert(service != nullptr); + ngx_int_t rc; try { - service->block(*block_spec_, req_); + rc = service->block(*block_spec_, req_); + ngx_log_debug(NGX_LOG_DEBUG_HTTP, req_.connection->log, 0, + "completion handler of waf req post task: sent " + "blocking response (rc: %d, c: %d)", + rc, req_.main->count); } catch (const std::exception &e) { ngx_log_error(NGX_LOG_ERR, req_.connection->log, 0, "failed to block request: %s", e.what()); - ngx_http_finalize_request(&req_, NGX_DONE); + rc = NGX_ERROR; } + + ngx_log_debug(NGX_LOG_DEBUG_HTTP, req_.connection->log, 0, + "completion handler of waf start task: finish: calling " + "ngx_http_finalize_request with %d", + rc); + ngx_http_finalize_request(&req_, rc); + // the request may have been destroyed at this point } else { ctx_.waf_req_post_done(req_, false); ngx_log_debug(NGX_LOG_DEBUG_HTTP, req_.connection->log, 0, @@ -771,36 +798,48 @@ class PolFinalWafCtx : public PolTaskCtx { "blocked: %s): start", ran ? "true" : "false", block_spec_ ? 
"true" : "false"); - req_.header_sent = 0; - if (ran && block_spec_) { span_.set_tag("appsec.blocked"sv, "true"sv); ctx_.waf_final_done(req_, true); auto *service = BlockingService::get_instance(); assert(service != nullptr); + + ngx_int_t rc; try { - // service->block calls finalize_request NGX_DONE - req_.main->count++; ngx_log_debug(NGX_LOG_DEBUG_HTTP, req_.connection->log, 0, - "incremented request refcount to %d before sending " - "blocking response", - req_.main->count); - service->block(*block_spec_, req_); + "completion handler of waf req final task: sending " + "blocking response"); + rc = service->block(*block_spec_, req_); + ngx_log_debug(NGX_LOG_DEBUG_HTTP, req_.connection->log, 0, + "completion handler of waf req final task: sent blocking " + "response (rc: %d, c: %d)", + rc, req_.main->count); } catch (const std::exception &e) { ngx_log_error(NGX_LOG_ERR, req_.connection->log, 0, "failed to block request: %s", e.what()); - ngx_http_finalize_request(&req_, NGX_DONE); + rc = NGX_ERROR; + } + + ngx_log_debug(NGX_LOG_DEBUG_HTTP, req_.connection->log, 0, + "completion handler of waf req final task: calling " + "ngx_http_finalize_request with %d", + rc); + + const auto count_before = req_.count; + ngx_http_finalize_request(&req_, rc); + // if count_before == 1, the request has likely been destroyed at this + // point, although we cannot be sure (e.g. 
there may be a post action) + if (count_before > 1) { + ngx_post_event(req_.connection->write, &ngx_posted_events); } + // req_ may be invalid at this point } else { ctx_.waf_final_done(req_, false); + ngx_post_event(req_.connection->write, &ngx_posted_events); ngx_log_debug(NGX_LOG_DEBUG_HTTP, req_.connection->log, 0, "not blocking after final waf run; triggering write event " "on connection"); - ngx_post_event(req_.connection->write, &ngx_posted_events); } - - ngx_log_debug0(NGX_LOG_DEBUG_HTTP, req_.connection->log, 0, - "completion handler of waf req final task: finish"); } ngx_event_handler_pt orig_conn_write_handler_{}; @@ -1096,6 +1135,9 @@ ngx_int_t Context::send_buffered_header(ngx_http_request_t &request) noexcept { if (!request.stream) { ngx_int_t rc = ngx_http_write_filter(&request, header_filter_ctx_.out); header_filter_ctx_.clear(*request.pool); + ngx_log_debug(NGX_LOG_DEBUG_HTTP, request.connection->log, 0, + "send_buffered_header: ngx_http_write_filter returned %d", + rc); return rc; } diff --git a/test/cases/sec_blocking/conf/waf.json b/test/cases/sec_blocking/conf/waf.json index b72b0f1d..987c3b40 100644 --- a/test/cases/sec_blocking/conf/waf.json +++ b/test/cases/sec_blocking/conf/waf.json @@ -260,6 +260,33 @@ "on_match": [ "block_501" ] + }, + { + "id": "411", + "name": "Match 411 responses", + "tags": { + "type": "security_scanner", + "category": "attack_attempt" + }, + "conditions": [ + { + "parameters": { + "inputs": [ + { + "address": "server.response.status" + } + ], + "regex": "411" + }, + "operator": "match_regex" + } + ], + "transformers": [ + "values_only" + ], + "on_match": [ + "redirect" + ] } ] } From 7ba5b002c1f64cfbc14f43e4bb7be412af774875 Mon Sep 17 00:00:00 2001 From: Gustavo Lopes Date: Fri, 21 Feb 2025 10:28:18 +0000 Subject: [PATCH 05/18] Fix Context::buffer_chain() with mixed memory and file buffers --- src/security/context.cpp | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/src/security/context.cpp 
b/src/security/context.cpp index 5e91c378..5432beb2 100644 --- a/src/security/context.cpp +++ b/src/security/context.cpp @@ -1082,6 +1082,21 @@ ngx_int_t Context::buffer_chain(FilterCtx &filter_ctx, ngx_pool_t &pool, buf->file_pos = buf->file_last; // consume + // mixed + if (buf->temporary) { + size = buf->last - buf->pos; + new_buf->temporary = 1; + if (size > 0) { + new_buf->pos = static_cast(ngx_palloc(&pool, size)); + if (!new_buf->pos) { + return NGX_ERROR; + } + new_buf->last = ngx_copy(new_buf->pos, buf->pos, size); + buf->pos = buf->last; // consume + filter_ctx.copied_total += size; + } + } + size = new_buf->file_last - new_buf->file_pos; } From e5a8d0527b5be9963922708f18d1e7b3caaf72ca Mon Sep 17 00:00:00 2001 From: Gustavo Lopes Date: Fri, 21 Feb 2025 10:28:39 +0000 Subject: [PATCH 06/18] More tests of blocking with http2/3 --- test/cases/sec_blocking/test_sec_blocking.py | 82 ++++++++++++++++++-- 1 file changed, 76 insertions(+), 6 deletions(-) diff --git a/test/cases/sec_blocking/test_sec_blocking.py b/test/cases/sec_blocking/test_sec_blocking.py index 5ab75b08..44e554da 100644 --- a/test/cases/sec_blocking/test_sec_blocking.py +++ b/test/cases/sec_blocking/test_sec_blocking.py @@ -42,22 +42,32 @@ def setUp(self): def convert_headers(headers): return {k.lower(): v for k, v in dict(headers).items()} - def run_with_ua(self, user_agent, accept): + def run_with_ua(self, user_agent, accept, http_version=1): headers = {'User-Agent': user_agent, 'Accept': accept} - status, headers, body = self.orch.send_nginx_http_request( - '/http', 80, headers) + if http_version == 3: + status, headers, body = self.orch.send_nginx_http_request( + '/http', tls=True, port=443, headers=headers, http_version=3) + else: + status, headers, body = self.orch.send_nginx_http_request( + '/http', 80, headers, http_version=http_version) self.orch.reload_nginx() log_lines = self.orch.sync_service('agent') return status, TestSecBlocking.convert_headers( headers), body, log_lines - def 
run_with_body(self, content_type, req_body): + def run_with_body(self, content_type, req_body, http_version=1): + if http_version == 3: + port, tls = 443, True + else: + port, tls = 80, False status, headers, body = self.orch.send_nginx_http_request( '/http', - 80, + port=port, + tls=tls, headers={'content-type': content_type}, - req_body=req_body) + req_body=req_body, + http_version=http_version) self.orch.reload_nginx() log_lines = self.orch.sync_service('agent') @@ -129,6 +139,18 @@ def test_html_action(self): self.assertEqual(status, 403) self.assertEqual(headers['content-type'], 'text/html;charset=utf-8') + def test_html_action_http2(self): + status, headers, body, _ = self.run_with_ua('block_html', + 'application/json', http_version=2) + self.assertEqual(status, 403) + self.assertEqual(headers['content-type'], 'text/html;charset=utf-8') + + def test_html_action_http3(self): + status, headers, body, _ = self.run_with_ua('block_html', + 'application/json', http_version=3) + self.assertEqual(status, 403) + self.assertEqual(headers['content-type'], 'text/html;charset=utf-8') + def test_json_action(self): status, headers, body, _ = self.run_with_ua('block_json', 'text/html') self.assertEqual(status, 403) @@ -144,6 +166,16 @@ def test_redirect_action(self): self.assertEqual(status, 301) self.assertEqual(headers['location'], 'https://www.cloudflare.com') + def test_redirect_action_http2(self): + status, headers, _, _ = self.run_with_ua('redirect', '*/*', http_version=2) + self.assertEqual(status, 301) + self.assertEqual(headers['location'], 'https://www.cloudflare.com') + + def test_redirect_action_http3(self): + status, headers, _, _ = self.run_with_ua('redirect', '*/*', http_version=3) + self.assertEqual(status, 301) + self.assertEqual(headers['location'], 'https://www.cloudflare.com') + def test_redirect_bad_status(self): status, headers, _, _ = self.run_with_ua('redirect_bad_status', '*/*') self.assertEqual(status, 303) @@ -162,6 +194,20 @@ def 
test_block_body_json_long(self): self.assertEqual(status, 403) self.assert_has_report(log_lines) + def test_block_body_json_long_http2(self): + status, _, _, log_lines = self.run_with_body( + 'application/json', + '{"a": "block_default", "b": "' + ('a' * 1024 * 1024), http_version=2) + self.assertEqual(status, 403) + self.assert_has_report(log_lines) + + def test_block_body_json_long_http3(self): + status, _, _, log_lines = self.run_with_body( + 'application/json', + '{"a": "block_default", "b": "' + ('a' * 1024 * 1024), http_version=3) + self.assertEqual(status, 403) + self.assert_has_report(log_lines) + def block_on_status(self, http_version): if http_version != 3: status, headers, body = self.orch.send_nginx_http_request( @@ -186,6 +232,30 @@ def test_block_on_status_http2(self): def test_block_on_status_http3(self): self.block_on_status(3) + + def block_on_status_redirect(self, http_version): + if http_version != 3: + status, headers, body = self.orch.send_nginx_http_request( + '/http/status/411', http_version=http_version) + else: + status, headers, body = self.orch.send_nginx_http_request( + '/http/status/411', tls=True, port=443, http_version=3) + self.orch.reload_nginx() + log_lines = self.orch.sync_service('agent') + self.assertEqual(301, status) + headers = TestSecBlocking.convert_headers(headers) + self.assertEqual(headers['location'], 'https://www.cloudflare.com') + self.assert_has_report(log_lines, 'redirect') + + def test_block_on_status_redirect_http11(self): + self.block_on_status(1) + + def test_block_on_status_redirect_http2(self): + self.block_on_status(2) + + def test_block_on_status_redirect_http3(self): + self.block_on_status(3) + def test_block_on_response_header(self): nginx_version = self.orch.nginx_version() status, headers, body = self.orch.send_nginx_http_request( From c260438a5716e3bd597d898565cf5381305b5423 Mon Sep 17 00:00:00 2001 From: Gustavo Lopes Date: Fri, 21 Feb 2025 10:28:53 +0000 Subject: [PATCH 07/18] Add valgrind 
suppressions file --- valgrind.supp | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) create mode 100644 valgrind.supp diff --git a/valgrind.supp b/valgrind.supp new file mode 100644 index 00000000..04c0f5e8 --- /dev/null +++ b/valgrind.supp @@ -0,0 +1,24 @@ +{ + re_SparseSetT_contains_cond + Memcheck:Cond + fun:_ZNK3re210SparseSetTIvE8containsEi + ... +} +{ + re_SparseSetT_contains_value8 + Memcheck:Value8 + fun:_ZNK3re210SparseSetTIvE8containsEi + ... +} +{ + re2_spartArray_has_index_cond + Memcheck:Cond + fun:_ZNK3re211SparseArrayIPNS_3NFA6ThreadEE9has_indexEi + ... +} +{ + re2_spartArray_has_index_value8 + Memcheck:Value8 + fun:_ZNK3re211SparseArrayIPNS_3NFA6ThreadEE9has_indexEi + ... +} From b2c7184a5e288bcd25e568b0020b4a6760fd3125 Mon Sep 17 00:00:00 2001 From: Gustavo Lopes Date: Fri, 21 Feb 2025 10:55:07 +0000 Subject: [PATCH 08/18] Makefile test target fix --- Makefile | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/Makefile b/Makefile index 5e131186..c02a12db 100644 --- a/Makefile +++ b/Makefile @@ -142,7 +142,7 @@ endif --env RESTY_VERSION=$(RESTY_VERSION) \ --env NGINX_VERSION=$(NGINX_VERSION) \ --env WAF=$(WAF) \ - --mount type=bind,source="$(PWD)",target=/mnt/repo \ + --mount type=bind,source="$(PWD)",target=/mnt/repo \ $(DOCKER_REPOS):latest \ bash -c "cd /mnt/repo && ./bin/openresty/build_openresty.sh && make build-openresty-aux" @@ -158,11 +158,15 @@ build-openresty-aux: .PHONY: test test: - python3 test/bin/run.py --platform ${ARCH} --image ${BASE_IMAGE} --module-path .musl-build/ngx_http_datadog_module.so -- --verbose --failfast $(TEST_ARGS) + python3 test/bin/run.py --platform $(DOCKER_PLATFORM) --image ${BASE_IMAGE} \ + --module-path .musl-build/ngx_http_datadog_module.so -- \ + --verbose --failfast $(TEST_ARGS) .PHONY: test-openresty test-openresty: - RESTY_TEST=ON python3 test/bin/run.py --platform ${ARCH} --image ${BASE_IMAGE} --module-path .openresty-build/ngx_http_datadog_module.so -- --verbose 
--failfast $(TEST_ARGS) + RESTY_TEST=ON python3 test/bin/run.py --platform $(DOCKER_PLATFORM) \ + --image ${BASE_IMAGE} --module-path .openresty-build/ngx_http_datadog_module.so -- \ + --verbose --failfast $(TEST_ARGS) .PHONY: build-and-test build-and-test: build-musl test From 91f84a7b86402e80126497c54ee0ec8699a428e3 Mon Sep 17 00:00:00 2001 From: Gustavo Lopes Date: Fri, 21 Feb 2025 14:42:24 +0000 Subject: [PATCH 09/18] Don't run tests with --failfast --- Makefile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Makefile b/Makefile index c02a12db..9065f5e6 100644 --- a/Makefile +++ b/Makefile @@ -160,13 +160,13 @@ build-openresty-aux: test: python3 test/bin/run.py --platform $(DOCKER_PLATFORM) --image ${BASE_IMAGE} \ --module-path .musl-build/ngx_http_datadog_module.so -- \ - --verbose --failfast $(TEST_ARGS) + --verbose $(TEST_ARGS) .PHONY: test-openresty test-openresty: RESTY_TEST=ON python3 test/bin/run.py --platform $(DOCKER_PLATFORM) \ --image ${BASE_IMAGE} --module-path .openresty-build/ngx_http_datadog_module.so -- \ - --verbose --failfast $(TEST_ARGS) + --verbose $(TEST_ARGS) .PHONY: build-and-test build-and-test: build-musl test From 358dd45430bf1a6411fe9af97e27a85ee3511d57 Mon Sep 17 00:00:00 2001 From: Gustavo Lopes Date: Fri, 21 Feb 2025 14:42:34 +0000 Subject: [PATCH 10/18] Fix tests locking up --- test/cases/lazy_singleton.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/test/cases/lazy_singleton.py b/test/cases/lazy_singleton.py index 5eca4cc3..2b2aae6e 100644 --- a/test/cases/lazy_singleton.py +++ b/test/cases/lazy_singleton.py @@ -31,8 +31,10 @@ def __init__(self, make, start, stop): @contextlib.contextmanager def context(self): self.increment_reference_count() - yield self.instance - self.decrement_reference_count() + try: + yield self.instance + finally: + self.decrement_reference_count() def increment_reference_count(self): self.reference_count += 1 From 308d1fd10f910a530d7dc8ea3e2edfa35b1126f5 
Mon Sep 17 00:00:00 2001 From: Gustavo Lopes Date: Fri, 21 Feb 2025 15:06:14 +0000 Subject: [PATCH 11/18] lint --- Makefile | 2 +- src/security/blocking.cpp | 7 ++++--- src/security/blocking.h | 7 +++---- src/security/context.cpp | 2 +- test/cases/sec_blocking/test_sec_blocking.py | 21 +++++++++++++------- 5 files changed, 23 insertions(+), 16 deletions(-) diff --git a/Makefile b/Makefile index 9065f5e6..6b6a3e0d 100644 --- a/Makefile +++ b/Makefile @@ -122,7 +122,7 @@ build-musl-aux build-musl-cov-aux: -DCMAKE_TOOLCHAIN_FILE=/sysroot/$(ARCH)-none-linux-musl/Toolchain.cmake \ -DNGINX_PATCH_AWAY_LIBC=ON \ -DCMAKE_BUILD_TYPE=$(BUILD_TYPE) \ - -DNGINX_VERSION="$(NGINX_VERSION)" \ + -DNGINX_VERSION="$(NGINX_VERSION)" \ -DNGINX_DATADOG_ASM_ENABLED="$(WAF)" . \ -DNGINX_DATADOG_RUM_ENABLED="$(RUM)" . \ -DNGINX_COVERAGE=$(COVERAGE) \ diff --git a/src/security/blocking.cpp b/src/security/blocking.cpp index 3e1696ea..c0bea634 100644 --- a/src/security/blocking.cpp +++ b/src/security/blocking.cpp @@ -304,7 +304,8 @@ void BlockingService::initialize(std::optional templ_html, new BlockingService(templ_html, templ_json)); } -ngx_int_t BlockingService::block(BlockSpecification spec, ngx_http_request_t &req) { +ngx_int_t BlockingService::block(BlockSpecification spec, + ngx_http_request_t &req) { BlockResponse const resp = BlockResponse::resolve_content_type(spec, req); ngx_str_t *templ{}; if (resp.ct == BlockResponse::ContentType::HTML) { @@ -346,8 +347,8 @@ ngx_int_t BlockingService::block(BlockSpecification spec, ngx_http_request_t &re // req.headers_out.content_length_n = 0; // } if (req.header_only) { - req.headers_out.content_length_n = 0; - req.chunked = 0; + req.headers_out.content_length_n = 0; + req.chunked = 0; } req.keepalive = 0; req.lingering_close = 0; diff --git a/src/security/blocking.h b/src/security/blocking.h index a7c4f9de..8f0bb0c8 100644 --- a/src/security/blocking.h +++ b/src/security/blocking.h @@ -1,10 +1,8 @@ #pragma once -#include #include #include 
-#include -#include +#include #include extern "C" { @@ -37,7 +35,8 @@ class BlockingService { static BlockingService *get_instance() { return instance.get(); } - [[nodiscard]] ngx_int_t block(BlockSpecification spec, ngx_http_request_t &req); + [[nodiscard]] ngx_int_t block(BlockSpecification spec, + ngx_http_request_t &req); private: BlockingService(std::optional templ_html_path, diff --git a/src/security/context.cpp b/src/security/context.cpp index 5432beb2..bda0beec 100644 --- a/src/security/context.cpp +++ b/src/security/context.cpp @@ -1087,7 +1087,7 @@ ngx_int_t Context::buffer_chain(FilterCtx &filter_ctx, ngx_pool_t &pool, size = buf->last - buf->pos; new_buf->temporary = 1; if (size > 0) { - new_buf->pos = static_cast(ngx_palloc(&pool, size)); + new_buf->pos = static_cast(ngx_palloc(&pool, size)); if (!new_buf->pos) { return NGX_ERROR; } diff --git a/test/cases/sec_blocking/test_sec_blocking.py b/test/cases/sec_blocking/test_sec_blocking.py index 44e554da..e78e2b0a 100644 --- a/test/cases/sec_blocking/test_sec_blocking.py +++ b/test/cases/sec_blocking/test_sec_blocking.py @@ -141,13 +141,15 @@ def test_html_action(self): def test_html_action_http2(self): status, headers, body, _ = self.run_with_ua('block_html', - 'application/json', http_version=2) + 'application/json', + http_version=2) self.assertEqual(status, 403) self.assertEqual(headers['content-type'], 'text/html;charset=utf-8') def test_html_action_http3(self): status, headers, body, _ = self.run_with_ua('block_html', - 'application/json', http_version=3) + 'application/json', + http_version=3) self.assertEqual(status, 403) self.assertEqual(headers['content-type'], 'text/html;charset=utf-8') @@ -167,12 +169,16 @@ def test_redirect_action(self): self.assertEqual(headers['location'], 'https://www.cloudflare.com') def test_redirect_action_http2(self): - status, headers, _, _ = self.run_with_ua('redirect', '*/*', http_version=2) + status, headers, _, _ = self.run_with_ua('redirect', + '*/*', + 
http_version=2) self.assertEqual(status, 301) self.assertEqual(headers['location'], 'https://www.cloudflare.com') def test_redirect_action_http3(self): - status, headers, _, _ = self.run_with_ua('redirect', '*/*', http_version=3) + status, headers, _, _ = self.run_with_ua('redirect', + '*/*', + http_version=3) self.assertEqual(status, 301) self.assertEqual(headers['location'], 'https://www.cloudflare.com') @@ -197,14 +203,16 @@ def test_block_body_json_long(self): def test_block_body_json_long_http2(self): status, _, _, log_lines = self.run_with_body( 'application/json', - '{"a": "block_default", "b": "' + ('a' * 1024 * 1024), http_version=2) + '{"a": "block_default", "b": "' + ('a' * 1024 * 1024), + http_version=2) self.assertEqual(status, 403) self.assert_has_report(log_lines) def test_block_body_json_long_http3(self): status, _, _, log_lines = self.run_with_body( 'application/json', - '{"a": "block_default", "b": "' + ('a' * 1024 * 1024), http_version=3) + '{"a": "block_default", "b": "' + ('a' * 1024 * 1024), + http_version=3) self.assertEqual(status, 403) self.assert_has_report(log_lines) @@ -232,7 +240,6 @@ def test_block_on_status_http2(self): def test_block_on_status_http3(self): self.block_on_status(3) - def block_on_status_redirect(self, http_version): if http_version != 3: status, headers, body = self.orch.send_nginx_http_request( From d5c11ae7458f258f4c4da622778ecbe533e3d841 Mon Sep 17 00:00:00 2001 From: Gustavo Lopes Date: Fri, 21 Feb 2025 15:58:42 +0000 Subject: [PATCH 12/18] Fix nginx skip in openresty --- test/cases/orchestration.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/cases/orchestration.py b/test/cases/orchestration.py index e946ec3e..5c7a3577 100644 --- a/test/cases/orchestration.py +++ b/test/cases/orchestration.py @@ -544,7 +544,7 @@ def nginx_version(): capture_output=True, text=True, check=True) - match = re.search(r'nginx/([\d.]+)', result.stderr) + match = re.search(r'/([\d.]+)', result.stderr) return 
match.group(1) if match else None def send_nginx_http_request(self, From 154b0c0f1b716521726684ec74da12330da3a101 Mon Sep 17 00:00:00 2001 From: Gustavo Lopes Date: Mon, 24 Feb 2025 12:09:09 +0000 Subject: [PATCH 13/18] Fix configuration appsec_max_saved_output_data --- src/ngx_http_datadog_module.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/ngx_http_datadog_module.cpp b/src/ngx_http_datadog_module.cpp index acd846f8..beb17006 100644 --- a/src/ngx_http_datadog_module.cpp +++ b/src/ngx_http_datadog_module.cpp @@ -381,7 +381,7 @@ static ngx_command_t datadog_commands[] = { NGX_HTTP_MAIN_CONF|NGX_CONF_TAKE1, ngx_conf_set_size_slot, NGX_HTTP_MAIN_CONF_OFFSET, - offsetof(datadog_main_conf_t, appsec_obfuscation_value_regex), + offsetof(datadog_main_conf_t, appsec_max_saved_output_data), nullptr, }, #endif From 59716886c301091666e44c5e15d1f7c448518294 Mon Sep 17 00:00:00 2001 From: Gustavo Lopes Date: Mon, 24 Feb 2025 12:09:27 +0000 Subject: [PATCH 14/18] Remove debugging functions --- src/security/context.cpp | 27 +-------------------------- 1 file changed, 1 insertion(+), 26 deletions(-) diff --git a/src/security/context.cpp b/src/security/context.cpp index bda0beec..8e1ccef1 100644 --- a/src/security/context.cpp +++ b/src/security/context.cpp @@ -1475,8 +1475,7 @@ ngx_int_t Context::do_output_body_filter(ngx_http_request_t &request, // otherwise we send down the buffered data + whatever we got if (header_filter_ctx_.out) { - // if we have header data, send it first. 
Bypass the body filter chain by - // invoking ngx_http_write_filter() directly + // if we have header data, send it first ngx_log_debug(NGX_LOG_DEBUG_HTTP, request.connection->log, 0, "waf output body filter: sending down buffered header data"); ngx_int_t rc = send_buffered_header(request); @@ -1649,27 +1648,3 @@ void Context::report_matches(ngx_http_request_t &request, dd::Span &span) { } } // namespace datadog::nginx::security - -extern "C" bool contains_str(ngx_chain_t *in, const char *str) { - for (ngx_chain_t *cl = in; cl; cl = cl->next) { - if (cl->buf->in_file) { - return true; - } - std::string_view sv{reinterpret_cast(cl->buf->pos), - static_cast(cl->buf->last - cl->buf->pos)}; - if (sv.find(str) != std::string_view::npos) { - return true; - } - } - return false; -} - -extern "C" bool is_long(ngx_chain_t *in) { - uint64_t i = 0; - for (ngx_chain_t *cl = in; cl; cl = cl->next) { - if (i++ > 20) { - return true; - } - } - return false; -} From e9066a76021fb885dba9e62759a0791199365cab Mon Sep 17 00:00:00 2001 From: Gustavo Lopes Date: Mon, 24 Feb 2025 12:09:47 +0000 Subject: [PATCH 15/18] Increase test coverage --- test/cases/sec_blocking/conf/http.conf | 1 + test/cases/sec_blocking/test_sec_blocking.py | 43 +++++++++++++++++--- test/services/http/http.js | 36 ++++++++++++++++ 3 files changed, 74 insertions(+), 6 deletions(-) diff --git a/test/cases/sec_blocking/conf/http.conf b/test/cases/sec_blocking/conf/http.conf index 9aa89f9f..1435f4e7 100644 --- a/test/cases/sec_blocking/conf/http.conf +++ b/test/cases/sec_blocking/conf/http.conf @@ -16,6 +16,7 @@ http { datadog_appsec_ruleset_file /tmp/waf.json; datadog_appsec_waf_timeout 2s; datadog_waf_thread_pool_name waf_thread_pool; + datadog_appsec_max_saved_output_data 64k; client_max_body_size 10m; diff --git a/test/cases/sec_blocking/test_sec_blocking.py b/test/cases/sec_blocking/test_sec_blocking.py index e78e2b0a..bce43b30 100644 --- a/test/cases/sec_blocking/test_sec_blocking.py +++ 
b/test/cases/sec_blocking/test_sec_blocking.py @@ -42,14 +42,16 @@ def setUp(self): def convert_headers(headers): return {k.lower(): v for k, v in dict(headers).items()} - def run_with_ua(self, user_agent, accept, http_version=1): + def run_with_ua(self, user_agent, accept, http_version=1, tls=False): headers = {'User-Agent': user_agent, 'Accept': accept} - if http_version == 3: - status, headers, body = self.orch.send_nginx_http_request( - '/http', tls=True, port=443, headers=headers, http_version=3) + if http_version == 3 or tls: + port = 443 + tls = True else: - status, headers, body = self.orch.send_nginx_http_request( - '/http', 80, headers, http_version=http_version) + port = 80 + + status, headers, body = self.orch.send_nginx_http_request( + '/http', port, headers, http_version=http_version, tls=tls) self.orch.reload_nginx() log_lines = self.orch.sync_service('agent') @@ -96,6 +98,28 @@ def predicate(x): self.assertEqual(appsec_rep['triggers'][0]['rule']['on_match'][0], exp_match) + def no_block_long_response(self, http_version, port, tls): + status, headers, body = self.orch.send_nginx_http_request( + '/http/repeat?card=10000&num_bouts=3&delay=200', + tls=tls, + port=port, + http_version=http_version) + self.orch.reload_nginx() + log_lines = self.orch.sync_service('agent') + self.assertEqual(200, status) + headers = TestSecBlocking.convert_headers(headers) + self.assertEqual(headers['content-type'], 'text/plain') + self.assertEqual(body, "Hello world!\n" * 30000) + + def test_no_block_long_response_http11(self): + self.no_block_long_response(1, 80, False) + + def test_no_block_long_response_http2(self): + self.no_block_long_response(2, 80, False) + + def test_no_block_long_response_http3(self): + self.no_block_long_response(3, 443, True) + def test_default_action(self): status, headers, body, log_lines = self.run_with_ua( 'block_default', '*/*') @@ -139,6 +163,13 @@ def test_html_action(self): self.assertEqual(status, 403) 
self.assertEqual(headers['content-type'], 'text/html;charset=utf-8') + def test_html_action_tls(self): + status, headers, body, _ = self.run_with_ua('block_html', + 'application/json', + tls=True) + self.assertEqual(status, 403) + self.assertEqual(headers['content-type'], 'text/html;charset=utf-8') + def test_html_action_http2(self): status, headers, body, _ = self.run_with_ua('block_html', 'application/json', diff --git a/test/services/http/http.js b/test/services/http/http.js index aeac0b27..0e7bab87 100644 --- a/test/services/http/http.js +++ b/test/services/http/http.js @@ -12,6 +12,37 @@ const ignoreRequestBody = request => { request.on('end', () => {}); } +const sendRepeatResponse = (request, response) => { + try { + const urlObj = new URL(request.url, `http://${request.headers.host}`); + const card = parseInt(urlObj.searchParams.get("card")); + const numBouts = parseInt(urlObj.searchParams.get("num_bouts")); + const delay = parseInt(urlObj.searchParams.get("delay")); + if (isNaN(card) || isNaN(numBouts) || isNaN(delay)) { + response.writeHead(400, { "Content-Type": "text/plain" }); + response.end("Invalid query parameters"); + return; + } + // Pre-calculate the output string once. 
+ const output = "Hello world!\n".repeat(card); + response.writeHead(200, { "Content-Type": "text/plain" }); + let boutCount = 0; + const sendBout = () => { + response.write(output); + boutCount++; + if (boutCount >= numBouts) { + response.end(); + } else { + setTimeout(sendBout, delay); + } + } + setTimeout(sendBout, delay); + } catch (err) { + response.writeHead(500, { "Content-Type": "text/plain" }); + response.end("Server error"); + } +} + const requestListener = function (request, response) { ignoreRequestBody(request); if (request.url === '/auth') { @@ -26,6 +57,11 @@ const requestListener = function (request, response) { return; } + if (request.url.match(/.*\/repeat\/?(?:\?.*)?$/)) { + sendRepeatResponse(request, response); + return; + } + const responseBody = JSON.stringify({ "service": "http", "headers": request.headers From 11a952dafa7b75ef1b70d2ebc5f892ca2ab1a3d1 Mon Sep 17 00:00:00 2001 From: Gustavo Lopes Date: Tue, 25 Feb 2025 12:57:42 +0000 Subject: [PATCH 16/18] Bump version to 1.6.0 --- CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 20e29901..c8d9cb01 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -7,7 +7,7 @@ set(CMAKE_EXPORT_COMPILE_COMMANDS ON) cmake_policy(SET CMP0068 NEW) cmake_policy(SET CMP0135 NEW) -set(NGINX_DATADOG_VERSION 1.5.0) +set(NGINX_DATADOG_VERSION 1.6.0) project(ngx_http_datadog_module VERSION ${NGINX_DATADOG_VERSION}) option(NGINX_DATADOG_ASM_ENABLED "Build with libddwaf" ON) From d1235a22a95d5d0b31683b3da4711154267436f9 Mon Sep 17 00:00:00 2001 From: Gustavo Lopes Date: Tue, 25 Feb 2025 13:23:02 +0000 Subject: [PATCH 17/18] Block: reset err_status and headers_out.status_line too --- src/security/blocking.cpp | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/security/blocking.cpp b/src/security/blocking.cpp index c0bea634..00120d23 100644 --- a/src/security/blocking.cpp +++ b/src/security/blocking.cpp @@ -334,6 +334,8 @@ ngx_int_t 
BlockingService::block(BlockSpecification spec, // be set to 1. req.headers_out.status = resp.status; + req.headers_out.status_line.len = 0; + req.err_status = 0; req.headers_out.content_type = BlockResponse::content_type_header(resp.ct); req.headers_out.content_type_len = req.headers_out.content_type.len; From 7cfce280d9b85b03970220029fde0475675e8446 Mon Sep 17 00:00:00 2001 From: Gustavo Lopes Date: Wed, 26 Feb 2025 11:08:29 +0000 Subject: [PATCH 18/18] Address PR comments --- src/security/context.cpp | 24 +++++++++++------------- 1 file changed, 11 insertions(+), 13 deletions(-) diff --git a/src/security/context.cpp b/src/security/context.cpp index 8e1ccef1..6335445a 100644 --- a/src/security/context.cpp +++ b/src/security/context.cpp @@ -66,13 +66,13 @@ std::size_t chain_size(ngx_chain_t const *ch) { } std::size_t has_special(ngx_chain_t const *ch) { for (ngx_chain_t const *cl = ch; cl; cl = cl->next) { - return ngx_buf_special(ch->buf); + return ngx_buf_special(cl->buf); } return false; } std::size_t has_last(ngx_chain_t const *ch) { for (ngx_chain_t const *cl = ch; cl; cl = cl->next) { - if (ch->buf->last) { + if (cl->buf->last) { return true; } } @@ -1211,6 +1211,7 @@ class Http1TemporarySendChain { static Http1TemporarySendChain instance; void activate(Context &ctx, ngx_http_request_t &request) noexcept { current_ctx_ = &ctx; + current_pool_ = request.pool; prev_send_chain_ = request.connection->send_chain; request.connection->send_chain = send_chain_save; } @@ -1241,16 +1242,13 @@ class Http1TemporarySendChain { } private: - static ngx_chain_t *send_chain_save(ngx_connection_t *c, ngx_chain_t *in, - off_t limit) { - (void)limit; - - ngx_http_request_t *req = static_cast(c->data); - assert(req != nullptr); - + static ngx_chain_t *send_chain_save([[maybe_unused]] ngx_connection_t *c, + ngx_chain_t *in, + [[maybe_unused]] off_t limit) { auto *ctx = instance.current_ctx_; assert(ctx != nullptr); - if (ctx->buffer_header_output(*req->pool, in) != NGX_OK) { + 
auto *pool = instance.current_pool_; + if (ctx->buffer_header_output(*pool, in) != NGX_OK) { return NGX_CHAIN_ERROR; } @@ -1258,6 +1256,7 @@ class Http1TemporarySendChain { } Context *current_ctx_; + ngx_pool_t *current_pool_; ngx_send_chain_pt prev_send_chain_; }; Http1TemporarySendChain Http1TemporarySendChain::instance; @@ -1328,9 +1327,8 @@ class Http2TemporarySendChain { private: static ngx_chain_t *stream_send_chain_save(ngx_connection_t *c, - ngx_chain_t *in, off_t limit) { - (void)limit; - + ngx_chain_t *in, + [[maybe_unused]] off_t limit) { ngx_http_request_t *req = static_cast(c->data); assert(req != nullptr);