diff --git a/.codecov.yml b/.codecov.yml index a53c8dc84..ba43a9289 100644 --- a/.codecov.yml +++ b/.codecov.yml @@ -1,6 +1,6 @@ comment: layout: "condensed_header, diff, flags, components" - + component_management: individual_components: - component_id: crashtracker # this is an identifier that should not be changed @@ -87,3 +87,7 @@ component_management: name: datadog-trace-utils # this is a display name, and can be changed freely paths: - datadog-trace-utils + - component_id: datadog-log + name: datadog-log + paths: + - datadog-log diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index ce673af2c..8537ed8a1 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -138,7 +138,7 @@ jobs: env: RUSTFLAGS: "${{ matrix.flags }}" run: | - cargo run --bin release --features profiling,telemetry,data-pipeline,symbolizer,crashtracker,library-config --release -- --out $LIBDD_OUTPUT_FOLDER + cargo run --bin release --features profiling,telemetry,data-pipeline,symbolizer,crashtracker,library-config,log --release -- --out $LIBDD_OUTPUT_FOLDER - name: 'Publish libdatadog' uses: actions/upload-artifact@4cec3d8aa04e39d1a68397de0c4cd6fb9dce8ec1 # 4.6.1 diff --git a/Cargo.lock b/Cargo.lock index 985c3549e..f0001b979 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1357,7 +1357,6 @@ dependencies = [ "httpmock", "hyper 1.6.0", "hyper-util", - "log", "rand 0.8.5", "regex", "rmp-serde", @@ -1367,6 +1366,7 @@ dependencies = [ "tinybytes", "tokio", "tokio-util", + "tracing", "uuid", ] @@ -1436,7 +1436,6 @@ dependencies = [ "ddcommon-ffi", "function_name", "libc", - "log", "serde", "serde_json", "symbolic-common", @@ -1558,6 +1557,26 @@ dependencies = [ "uuid", ] +[[package]] +name = "datadog-log" +version = "18.1.0" +dependencies = [ + "ddcommon-ffi", + "tempfile", + "tracing", + "tracing-appender", + "tracing-subscriber", +] + +[[package]] +name = "datadog-log-ffi" +version = "18.1.0" +dependencies = [ + "build_common", + "datadog-log", + 
"ddcommon-ffi", +] + [[package]] name = "datadog-profiling" version = "18.1.0" @@ -1598,6 +1617,7 @@ dependencies = [ "data-pipeline-ffi", "datadog-crashtracker-ffi", "datadog-library-config-ffi", + "datadog-log-ffi", "datadog-profiling", "ddcommon", "ddcommon-ffi", @@ -1806,7 +1826,6 @@ dependencies = [ "httpmock", "hyper 1.6.0", "hyper-http-proxy", - "log", "prost", "rand 0.8.5", "rmp", @@ -1818,6 +1837,7 @@ dependencies = [ "testcontainers", "tinybytes", "tokio", + "tracing", "urlencoding", "zstd", ] @@ -1852,7 +1872,6 @@ dependencies = [ "hyper-util", "indexmap 2.6.0", "libc", - "log", "maplit", "nix", "pin-project", @@ -5714,9 +5733,9 @@ checksum = "8df9b6e13f2d32c91b9bd719c00d1958837bc7dec474d94952798cc8e69eeec3" [[package]] name = "tracing" -version = "0.1.40" +version = "0.1.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3523ab5a71916ccf420eebdf5521fcef02141234bbc0b8a49f2fdc4544364ef" +checksum = "784e0ac535deb450455cbfa28a6f0df145ea1bb7ae51b821cf5e7927fdcfbdd0" dependencies = [ "log", "pin-project-lite", @@ -5724,11 +5743,23 @@ dependencies = [ "tracing-core", ] +[[package]] +name = "tracing-appender" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3566e8ce28cc0a3fe42519fc80e6b4c943cc4c8cef275620eb8dac2d3d4e06cf" +dependencies = [ + "crossbeam-channel", + "thiserror", + "time", + "tracing-subscriber", +] + [[package]] name = "tracing-attributes" -version = "0.1.27" +version = "0.1.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" +checksum = "395ae124c09f9e6918a2310af6038fba074bcf474ac352496d5910dd59a2226d" dependencies = [ "proc-macro2", "quote", @@ -5737,9 +5768,9 @@ dependencies = [ [[package]] name = "tracing-core" -version = "0.1.32" +version = "0.1.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"c06d3da6113f116aaee68e4d601191614c9053067f9ab7f6edbcb161237daa54" +checksum = "e672c95779cf947c5311f83787af4fa8fffd12fb27e4993211a84bdfd9610f9c" dependencies = [ "once_cell", "valuable", @@ -5769,22 +5800,35 @@ dependencies = [ "tracing-subscriber", ] +[[package]] +name = "tracing-serde" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "704b1aeb7be0d0a84fc9828cae51dab5970fee5088f83d1dd7ee6f6246fc6ff1" +dependencies = [ + "serde", + "tracing-core", +] + [[package]] name = "tracing-subscriber" -version = "0.3.18" +version = "0.3.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad0f048c97dbd9faa9b7df56362b8ebcaa52adb06b498c050d2f4e32f90a7a8b" +checksum = "e8189decb5ac0fa7bc8b96b7cb9b2701d60d48805aca84a238004d665fcc4008" dependencies = [ "matchers", "nu-ansi-term", "once_cell", "regex", + "serde", + "serde_json", "sharded-slab", "smallvec", "thread_local", "tracing", "tracing-core", "tracing-log", + "tracing-serde", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index 28ce8938f..cf841863a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -41,6 +41,8 @@ members = [ "ddsketch", "tinybytes", "dogstatsd-client", + "datadog-log", + "datadog-log-ffi" ] # https://doc.rust-lang.org/cargo/reference/resolver.html#feature-resolver-version-2 diff --git a/LICENSE-3rdparty.yml b/LICENSE-3rdparty.yml index 1bf996831..ec04c231a 100644 --- a/LICENSE-3rdparty.yml +++ b/LICENSE-3rdparty.yml @@ -1,4 +1,4 @@ -root_name: builder, build_common, tools, datadog-alloc, datadog-crashtracker, ddcommon, ddtelemetry, datadog-ddsketch, datadog-crashtracker-ffi, ddcommon-ffi, datadog-ipc, datadog-ipc-macros, tarpc, tarpc-plugins, tinybytes, spawn_worker, cc_utils, datadog-library-config, datadog-library-config-ffi, datadog-live-debugger, datadog-live-debugger-ffi, datadog-profiling, datadog-profiling-protobuf, datadog-profiling-ffi, data-pipeline-ffi, data-pipeline, datadog-trace-protobuf, datadog-trace-utils, 
datadog-trace-normalization, dogstatsd-client, ddtelemetry-ffi, symbolizer-ffi, datadog-profiling-replayer, datadog-remote-config, datadog-sidecar, datadog-sidecar-macros, datadog-sidecar-ffi, datadog-trace-obfuscation, datadog-tracer-flare, sidecar_mockgen, test_spawn_from_lib +root_name: builder, build_common, tools, datadog-alloc, datadog-crashtracker, ddcommon, ddtelemetry, datadog-ddsketch, datadog-crashtracker-ffi, ddcommon-ffi, datadog-ipc, datadog-ipc-macros, tarpc, tarpc-plugins, tinybytes, spawn_worker, cc_utils, datadog-library-config, datadog-library-config-ffi, datadog-live-debugger, datadog-live-debugger-ffi, datadog-profiling, datadog-profiling-protobuf, datadog-profiling-ffi, data-pipeline-ffi, data-pipeline, datadog-trace-protobuf, datadog-trace-utils, datadog-trace-normalization, dogstatsd-client, datadog-log-ffi, datadog-log, ddtelemetry-ffi, symbolizer-ffi, datadog-profiling-replayer, datadog-remote-config, datadog-sidecar, datadog-sidecar-macros, datadog-sidecar-ffi, datadog-trace-obfuscation, datadog-tracer-flare, sidecar_mockgen, test_spawn_from_lib third_party_libraries: - package_name: addr2line package_version: 0.24.2 @@ -32012,7 +32012,39 @@ third_party_libraries: IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
- package_name: tracing - package_version: 0.1.40 + package_version: 0.1.41 + repository: https://github.com/tokio-rs/tracing + license: MIT + licenses: + - license: MIT + text: | + Copyright (c) 2019 Tokio Contributors + + Permission is hereby granted, free of charge, to any + person obtaining a copy of this software and associated + documentation files (the "Software"), to deal in the + Software without restriction, including without + limitation the rights to use, copy, modify, merge, + publish, distribute, sublicense, and/or sell copies of + the Software, and to permit persons to whom the Software + is furnished to do so, subject to the following + conditions: + + The above copyright notice and this permission notice + shall be included in all copies or substantial portions + of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF + ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED + TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A + PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT + SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY + CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR + IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. +- package_name: tracing-appender + package_version: 0.2.3 repository: https://github.com/tokio-rs/tracing license: MIT licenses: @@ -32044,7 +32076,7 @@ third_party_libraries: IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - package_name: tracing-attributes - package_version: 0.1.27 + package_version: 0.1.28 repository: https://github.com/tokio-rs/tracing license: MIT licenses: @@ -32076,7 +32108,7 @@ third_party_libraries: IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
- package_name: tracing-core - package_version: 0.1.32 + package_version: 0.1.33 repository: https://github.com/tokio-rs/tracing license: MIT licenses: @@ -32139,8 +32171,40 @@ third_party_libraries: OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +- package_name: tracing-serde + package_version: 0.2.0 + repository: https://github.com/tokio-rs/tracing + license: MIT + licenses: + - license: MIT + text: | + Copyright (c) 2019 Tokio Contributors + + Permission is hereby granted, free of charge, to any + person obtaining a copy of this software and associated + documentation files (the "Software"), to deal in the + Software without restriction, including without + limitation the rights to use, copy, modify, merge, + publish, distribute, sublicense, and/or sell copies of + the Software, and to permit persons to whom the Software + is furnished to do so, subject to the following + conditions: + + The above copyright notice and this permission notice + shall be included in all copies or substantial portions + of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF + ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED + TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A + PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT + SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY + CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR + IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. 
- package_name: tracing-subscriber - package_version: 0.3.18 + package_version: 0.3.19 repository: https://github.com/tokio-rs/tracing license: MIT licenses: diff --git a/build-profiling-ffi.sh b/build-profiling-ffi.sh index 3111c5515..d9f30f16d 100755 --- a/build-profiling-ffi.sh +++ b/build-profiling-ffi.sh @@ -152,6 +152,7 @@ FEATURES=( "datadog-profiling-ffi/ddtelemetry-ffi" "datadog-profiling-ffi/demangler" "datadog-library-config-ffi" + "datadog-log-ffi" ) if [[ "$symbolizer" -eq 1 ]]; then FEATURES+=("symbolizer") @@ -236,7 +237,7 @@ echo "Generating $destdir/include/libdatadog headers..." rm -r $destdir/include/datadog/ mkdir $destdir/include/datadog/ -CBINDGEN_HEADERS="common.h profiling.h telemetry.h crashtracker.h data-pipeline.h library-config.h" +CBINDGEN_HEADERS="common.h profiling.h telemetry.h crashtracker.h data-pipeline.h library-config.h log.h" # When optional features are added, don't forget to also include the headers here case $ARG_FEATURES in esac diff --git a/builder/Cargo.toml b/builder/Cargo.toml index 4ed47823a..d0dc88d44 100644 --- a/builder/Cargo.toml +++ b/builder/Cargo.toml @@ -14,6 +14,7 @@ telemetry = [] data-pipeline = [] symbolizer = [] library-config = [] +log = [] [lib] bench = false diff --git a/builder/src/bin/release.rs b/builder/src/bin/release.rs index 715ce5e32..6f96a3f1b 100644 --- a/builder/src/bin/release.rs +++ b/builder/src/bin/release.rs @@ -68,6 +68,8 @@ pub fn main() { f.push("symbolizer".to_string()); #[cfg(feature = "library-config")] f.push("datadog-library-config-ffi".to_string()); + #[cfg(feature = "log")] + f.push("datadog-log-ffi".to_string()); f }; diff --git a/builder/src/profiling.rs b/builder/src/profiling.rs index 478b7f526..4517b5685 100644 --- a/builder/src/profiling.rs +++ b/builder/src/profiling.rs @@ -53,6 +53,8 @@ impl Profiling { headers.push("blazesym.h"); #[cfg(feature = "library-config")] headers.push("library-config.h"); + #[cfg(feature = "log")] + headers.push("log.h"); let mut origin_path: 
PathBuf = [&self.source_include, "dummy.h"].iter().collect(); let mut target_path: PathBuf = [&self.target_include, "dummy.h"].iter().collect(); diff --git a/data-pipeline/Cargo.toml b/data-pipeline/Cargo.toml index 0520dd105..fe5366883 100644 --- a/data-pipeline/Cargo.toml +++ b/data-pipeline/Cargo.toml @@ -16,7 +16,7 @@ hyper = { version = "1.6", features = ["http1", "client"] } hyper-util = { version = "0.1", features = ["client", "client-legacy"] } http = "1.0" http-body-util = "0.1" -log = "0.4" +tracing = { version = "0.1", default-features = false } rmp-serde = "1.1.1" serde = "1.0.209" serde_json = "1.0.127" diff --git a/data-pipeline/src/agent_info/fetcher.rs b/data-pipeline/src/agent_info/fetcher.rs index 3b27371b5..135daba09 100644 --- a/data-pipeline/src/agent_info/fetcher.rs +++ b/data-pipeline/src/agent_info/fetcher.rs @@ -10,10 +10,10 @@ use ddcommon::hyper_migration; use ddcommon::Endpoint; use http_body_util::BodyExt; use hyper::{self, body::Buf, header::HeaderName}; -use log::{error, info}; use std::sync::Arc; use std::time::Duration; use tokio::time::sleep; +use tracing::{error, info}; #[allow(clippy::declare_interior_mutable_const)] const DATADOG_AGENT_STATE: HeaderName = HeaderName::from_static("datadog-agent-state"); @@ -157,7 +157,7 @@ impl AgentInfoFetcher { info!("Agent info is up-to-date") } Err(err) => { - error!("Error while fetching /info: {}", err); + error!(?err, "Error while fetching /info"); } } sleep(self.refresh_interval).await; diff --git a/data-pipeline/src/stats_exporter.rs b/data-pipeline/src/stats_exporter.rs index 67057f1b0..d434ace74 100644 --- a/data-pipeline/src/stats_exporter.rs +++ b/data-pipeline/src/stats_exporter.rs @@ -11,15 +11,14 @@ use std::{ time, }; +use crate::{span_concentrator::SpanConcentrator, trace_exporter::TracerMetadata}; use datadog_trace_protobuf::pb; use datadog_trace_utils::send_with_retry::{send_with_retry, RetryStrategy}; use ddcommon::Endpoint; use hyper; -use log::error; use tokio::select; use 
tokio_util::sync::CancellationToken; - -use crate::{span_concentrator::SpanConcentrator, trace_exporter::TracerMetadata}; +use tracing::error; const STATS_ENDPOINT_PATH: &str = "/v0.6/stats"; @@ -101,7 +100,7 @@ impl StatsExporter { match result { Ok(_) => Ok(()), Err(err) => { - error!("Error with the StateExporter when sending: {err}"); + error!(?err, "Error with the StateExporter when sending stats"); anyhow::bail!("Failed to send stats: {err}"); } } diff --git a/data-pipeline/src/trace_exporter/mod.rs b/data-pipeline/src/trace_exporter/mod.rs index f6d201550..823e59899 100644 --- a/data-pipeline/src/trace_exporter/mod.rs +++ b/data-pipeline/src/trace_exporter/mod.rs @@ -2,6 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 pub mod agent_response; pub mod error; +use self::agent_response::AgentResponse; use crate::agent_info::{AgentInfoArc, AgentInfoFetcher}; use crate::telemetry::{SendPayloadTelemetry, TelemetryClient, TelemetryClientBuilder}; use crate::trace_exporter::error::{RequestError, TraceExporterError}; @@ -27,15 +28,13 @@ use error::BuilderErrorKind; use http_body_util::BodyExt; use hyper::http::uri::PathAndQuery; use hyper::{header::CONTENT_TYPE, Method, Uri}; -use log::{error, info}; use std::io; use std::sync::{Arc, Mutex}; use std::time::Duration; use std::{borrow::Borrow, collections::HashMap, str::FromStr, time}; use tokio::{runtime::Runtime, task::JoinHandle}; use tokio_util::sync::CancellationToken; - -use self::agent_response::AgentResponse; +use tracing::{error, info}; const DEFAULT_STATS_ELIGIBLE_SPAN_KINDS: [&str; 4] = ["client", "server", "producer", "consumer"]; const STATS_ENDPOINT: &str = "/v0.6/stats"; @@ -512,7 +511,7 @@ impl TraceExporter { // This should really never happen as response_status is a // `NonZeroU16`, but if the response status or tag requirements // ever change in the future we still don't want to panic. 
- error!("Failed to serialize response_code to tag {}", tag_err) + error!(?tag_err, "Failed to serialize response_code to tag") } } return Err(TraceExporterError::Request(RequestError::new( @@ -700,7 +699,7 @@ impl TraceExporter { payload_len as u64, chunks as u64, )) { - error!("Error sending telemetry: {}", e.to_string()); + error!(?e, "Error sending telemetry"); } } @@ -711,7 +710,7 @@ impl TraceExporter { let body = match response.into_body().collect().await { Ok(body) => String::from_utf8_lossy(&body.to_bytes()).to_string(), Err(err) => { - error!("Error reading agent response body: {err}"); + error!(?err, "Error reading agent response body"); self.emit_metric( HealthMetric::Count(health_metrics::STAT_SEND_TRACES_ERRORS, 1), None, @@ -737,7 +736,7 @@ impl TraceExporter { } } Err(err) => { - error!("Error sending traces: {err}"); + error!(?err, "Error sending traces"); self.emit_metric( HealthMetric::Count(health_metrics::STAT_SEND_TRACES_ERRORS, 1), None, @@ -748,7 +747,7 @@ impl TraceExporter { let body = match response.into_body().collect().await { Ok(body) => body.to_bytes(), Err(err) => { - error!("Error reading agent response body: {err}"); + error!(?err, "Error reading agent response body"); return Err(TraceExporterError::from(err)); } }; diff --git a/datadog-crashtracker-ffi/Cargo.toml b/datadog-crashtracker-ffi/Cargo.toml index 9c74f0611..96a4e3caf 100644 --- a/datadog-crashtracker-ffi/Cargo.toml +++ b/datadog-crashtracker-ffi/Cargo.toml @@ -45,7 +45,6 @@ function_name = "0.3.0" libc = "0.2.167" serde_json = "1.0.132" serde = { version = "1.0.214", features = ["derive"] } -log = "0.4.22" [target.'cfg(windows)'.dependencies] windows = { version = "0.59.0", features = ["Win32_System_Diagnostics_Debug", "Win32_System_ErrorReporting"] } diff --git a/datadog-log-ffi/Cargo.toml b/datadog-log-ffi/Cargo.toml new file mode 100644 index 000000000..0ce2351fd --- /dev/null +++ b/datadog-log-ffi/Cargo.toml @@ -0,0 +1,22 @@ +[package] +name = "datadog-log-ffi" 
+rust-version.workspace = true +edition.workspace = true +version.workspace = true +license.workspace = true + +[lib] +crate-type = ["lib", "staticlib", "cdylib"] +bench = false + +[features] +default = ["cbindgen", "expanded_builder_macros"] +cbindgen = ["build_common/cbindgen", "ddcommon-ffi/cbindgen", "expanded_builder_macros"] +expanded_builder_macros = [] + +[build-dependencies] +build_common = { path = "../build-common" } + +[dependencies] +datadog-log = { path = "../datadog-log" } +ddcommon-ffi = { path = "../ddcommon-ffi" } \ No newline at end of file diff --git a/datadog-log-ffi/build.rs b/datadog-log-ffi/build.rs new file mode 100644 index 000000000..5494d77b3 --- /dev/null +++ b/datadog-log-ffi/build.rs @@ -0,0 +1,11 @@ +// Copyright 2025-Present Datadog, Inc. https://www.datadoghq.com/ +// SPDX-License-Identifier: Apache-2.0 + +extern crate build_common; + +use build_common::generate_and_configure_header; + +fn main() { + let header_name = "log.h"; + generate_and_configure_header(header_name); +} diff --git a/datadog-log-ffi/cbindgen.toml b/datadog-log-ffi/cbindgen.toml new file mode 100644 index 000000000..452baa9ba --- /dev/null +++ b/datadog-log-ffi/cbindgen.toml @@ -0,0 +1,37 @@ +# Copyright 2025-Present Datadog, Inc. https://www.datadoghq.com/ +# SPDX-License-Identifier: Apache-2.0 + +language = "C" +tab_width = 2 +header = """// Copyright 2025-Present Datadog, Inc. 
https://www.datadoghq.com/ +// SPDX-License-Identifier: Apache-2.0 +""" +include_guard = "DDOG_LOG_H" +style = "both" + +no_includes = true +sys_includes = ["stdbool.h", "stddef.h", "stdint.h", "stdio.h"] +includes = ["common.h"] + +[export] +prefix = "ddog_" +renaming_overrides_prefixing = true + +[export.mangle] +rename_types = "PascalCase" + +[export.rename] +"ParseTagsResult" = "ddog_Vec_Tag_ParseResult" +"PushTagResult" = "ddog_Vec_Tag_PushResult" +"FILE" = "FILE" + +[enum] +prefix_with_name = true +rename_variants = "ScreamingSnakeCase" + +[fn] +must_use = "DDOG_CHECK_RETURN" + +[parse] +parse_deps = true +include = ["ddcommon-ffi", "datadog-log"] diff --git a/datadog-log-ffi/src/lib.rs b/datadog-log-ffi/src/lib.rs new file mode 100644 index 000000000..ad3f57641 --- /dev/null +++ b/datadog-log-ffi/src/lib.rs @@ -0,0 +1,102 @@ +// Copyright 2025-Present Datadog, Inc. https://www.datadoghq.com/ +// SPDX-License-Identifier: Apache-2.0 + +use datadog_log::logger; +use datadog_log::logger::{ + logger_configure_file, logger_configure_std, logger_disable_file, logger_disable_std, + logger_set_log_level, +}; +use ddcommon_ffi::{CharSlice, Error}; + +/// Configuration for standard stream output. +#[repr(C)] +pub struct StdConfig { + /// Target stream (stdout or stderr) + pub target: logger::StdTarget, +} + +impl From for logger::StdConfig { + fn from(config: StdConfig) -> Self { + logger::StdConfig { + target: config.target, + } + } +} + +/// Configures the logger to write to stdout or stderr with the specified configuration. +/// +/// # Arguments +/// * `config` - Configuration for standard stream logging including target +/// +/// # Errors +/// Returns an error if the logger cannot be configured. +#[no_mangle] +pub extern "C" fn ddog_logger_configure_std(config: StdConfig) -> Option> { + let config = logger::StdConfig::from(config); + logger_configure_std(config).err().map(Box::new) +} + +/// Disables logging by configuring a no-op logger. 
+/// +/// # Errors +/// Returns an error if the logger cannot be configured. +#[no_mangle] +pub extern "C" fn ddog_logger_disable_std() -> Option> { + logger_disable_std().err().map(Box::new) +} + +/// Configuration for file output. +#[repr(C)] +pub struct FileConfig<'a> { + /// Path to the log file + pub path: CharSlice<'a>, +} + +impl<'a> From> for logger::FileConfig { + fn from(config: FileConfig<'a>) -> Self { + logger::FileConfig { + path: config.path.to_string(), + } + } +} + +/// Configures the logger to write to a file with the specified configuration. +/// +/// # Arguments +/// * `config` - Configuration for file logging including path +/// +/// # Errors +/// Returns an error if the logger cannot be configured. +#[no_mangle] +pub extern "C" fn ddog_logger_configure_file(config: FileConfig) -> Option> { + let config = logger::FileConfig::from(config); + logger_configure_file(config).err().map(Box::new) +} + +/// Disables file logging by configuring a no-op file writer. +/// +/// # Errors +/// Returns an error if the logger cannot be configured. +#[no_mangle] +pub extern "C" fn ddog_logger_disable_file() -> Option> { + logger_disable_file().err().map(Box::new) +} + +/// Sets the global log level. +/// +/// # Arguments +/// * `log_level` - The minimum level for events to be logged +/// +/// # Errors +/// Returns an error if the log level cannot be set. 
+#[no_mangle] +pub extern "C" fn ddog_logger_set_log_level( + log_level: logger::LogEventLevel, +) -> Option> { + let level_value = log_level as i8; + if level_value >= 0 && level_value <= logger::LogEventLevel::Error as i8 { + logger_set_log_level(log_level).err().map(Box::new) + } else { + Some(Box::new(Error::from("Invalid log level"))) + } +} diff --git a/datadog-log/Cargo.toml b/datadog-log/Cargo.toml new file mode 100644 index 000000000..047c3480e --- /dev/null +++ b/datadog-log/Cargo.toml @@ -0,0 +1,19 @@ +[package] +name = "datadog-log" +description = "A logging bridge between libdatadog and tracer runtimes" +rust-version.workspace = true +edition.workspace = true +version.workspace = true +license.workspace = true + +[lib] +bench = false + +[dependencies] +ddcommon-ffi = { path = "../ddcommon-ffi", default-features = false } +tracing = { version = "0.1", default-features = false } +tracing-subscriber = { version = "0.3.18", default-features = false, features = ["json", "env-filter"] } +tracing-appender = "0.2.3" + +[dev-dependencies] +tempfile = "3.10" diff --git a/datadog-log/src/lib.rs b/datadog-log/src/lib.rs new file mode 100644 index 000000000..6fac49c57 --- /dev/null +++ b/datadog-log/src/lib.rs @@ -0,0 +1,10 @@ +// Copyright 2025-Present Datadog, Inc. https://www.datadoghq.com/ +// SPDX-License-Identifier: Apache-2.0 +#![cfg_attr(not(test), deny(clippy::panic))] +#![cfg_attr(not(test), deny(clippy::unwrap_used))] +#![cfg_attr(not(test), deny(clippy::expect_used))] +#![cfg_attr(not(test), deny(clippy::todo))] +#![cfg_attr(not(test), deny(clippy::unimplemented))] + +pub mod logger; +pub mod writers; diff --git a/datadog-log/src/logger.rs b/datadog-log/src/logger.rs new file mode 100644 index 000000000..715edec50 --- /dev/null +++ b/datadog-log/src/logger.rs @@ -0,0 +1,704 @@ +// Copyright 2025-Present Datadog, Inc. 
https://www.datadoghq.com/ +// SPDX-License-Identifier: Apache-2.0 + +use crate::writers::{FileWriter, StdWriter}; +use ddcommon_ffi::Error; +use std::path::Path; +use std::sync::Mutex; +use std::sync::OnceLock; +use tracing::subscriber::DefaultGuard; +use tracing_subscriber::filter::LevelFilter; +use tracing_subscriber::layer::{Layered, SubscriberExt}; +use tracing_subscriber::reload::Handle; +use tracing_subscriber::{fmt, reload, EnvFilter, Layer, Registry}; + +/// Log level for filtering log events. +#[repr(C)] +#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord)] +pub enum LogEventLevel { + /// The "trace" level. + /// + /// Designates very low priority, often extremely verbose, information. + Trace = 0, + /// The "debug" level. + /// + /// Designates lower priority information. + Debug = 1, + /// The "info" level. + /// + /// Designates useful information. + Info = 2, + /// The "warn" level. + /// + /// Designates hazardous situations. + Warn = 3, + /// The "error" level. + /// + /// Designates very serious errors. + Error = 4, +} + +/// Configuration for file-based logging. +pub struct FileConfig { + /// Path where log files will be written. + pub path: String, +} + +/// Target for standard stream output. +#[repr(C)] +#[derive(Debug, Clone, Copy)] +pub enum StdTarget { + /// Write to standard output (stdout). + Out, + /// Write to standard error (stderr). + Err, +} + +/// Configuration for standard stream logging. +pub struct StdConfig { + /// Target stream (stdout or stderr). + pub target: StdTarget, +} + +/// Logger with layer-based architecture. +struct Logger { + /// Handle for modifying the log layers at runtime. + /// Complex type definition causes issues with cbindgen, so we suppress clippy's type + /// complexity warning. + #[allow(clippy::type_complexity)] + layer_handle: Handle< + Vec, Registry>> + Send + Sync>>, + Layered, Registry>, + >, + /// Handle for modifying the log filter at runtime. 
+ filter_handle: Handle, + /// Guard is for local subscriber which is not used in the global logger. + #[allow(dead_code)] + _guard: Option, + /// File configuration. + file_config: Option, + /// Standard stream configuration. + std_config: Option, +} + +impl Logger { + #[cfg(test)] + fn setup() -> Result { + Self::setup_with_global(false) + } + + fn setup_global() -> Result { + Self::setup_with_global(true) + } + + fn setup_with_global(global: bool) -> Result { + let layers = vec![]; + let env_filter = env_filter(); + let (filter_layer, filter_handle) = reload::Layer::new(env_filter); + let (layers_layer, layer_handle) = reload::Layer::new(layers); + + let subscriber = tracing_subscriber::registry() + .with(filter_layer) + .with(layers_layer); + + if global { + match tracing::subscriber::set_global_default(subscriber) { + Ok(_) => Ok(Self { + layer_handle, + filter_handle, + _guard: None, + file_config: None, + std_config: None, + }), + Err(_e) => Err(Error::from("Failed to set global default subscriber")), + } + } else { + Ok(Self { + layer_handle, + filter_handle, + _guard: Some(tracing::subscriber::set_default(subscriber)), + file_config: None, + std_config: None, + }) + } + } + + fn configure(&self) -> Result<(), Error> { + self.layer_handle + .modify(|layers| { + // Clear existing layers first + // since we can't selectively replace them because of the dynamic nature of the + // layers. This is necessary to avoid accumulating layers on each + // configuration call. 
+ layers.clear(); + + // Add file layer if configured + if let Some(file_config) = &self.file_config { + if let Ok(file_layer) = file_layer(&file_config.path) { + layers.push(file_layer); + } + } + + if let Some(std_config) = &self.std_config { + if let Ok(std_layer) = std_layer(std_config) { + layers.push(std_layer); + } + } + }) + .map_err(|e| Error::from(format!("Failed to update logger configuration: {}", e)))?; + + Ok(()) + } + + fn disable_file(&mut self) -> Result<(), Error> { + self.file_config = None; + self.configure() + } + + fn configure_file(&mut self, file_config: FileConfig) -> Result<(), Error> { + self.file_config = Some(file_config); + self.configure() + } + + fn disable_std(&mut self) -> Result<(), Error> { + self.std_config = None; + self.configure() + } + + fn configure_std(&mut self, std_config: StdConfig) -> Result<(), Error> { + self.std_config = Some(std_config); + self.configure() + } + + /// Set the log level for the logger. + fn set_log_level(&self, log_level: LogEventLevel) -> Result<(), Error> { + let level_filter = LevelFilter::from(log_level); + let new_filter = EnvFilter::try_from_default_env() + .unwrap_or_else(|_| EnvFilter::new(level_filter.to_string().to_lowercase())); + + self.filter_handle + .modify(|filter| { + *filter = new_filter; + }) + .map_err(|e| Error::from(format!("Failed to update log level: {}", e)))?; + + Ok(()) + } +} + +/// Create environment filter with default to INFO level. +fn env_filter() -> EnvFilter { + EnvFilter::try_from_default_env() + .unwrap_or_else(|_| EnvFilter::new(LevelFilter::INFO.to_string().to_lowercase())) +} + +/// Create standard output layer. 
+#[allow(clippy::type_complexity)] +fn std_layer( + config: &StdConfig, +) -> Result< + Box, Registry>> + Send + Sync + 'static>, + Error, +> { + let writer = StdWriter::new(config.target); + + Ok(fmt::layer() + .with_writer(writer) + .with_thread_ids(true) + .with_thread_names(true) + .with_target(true) + .with_file(true) + .with_line_number(true) + .with_ansi(false) + .boxed()) +} + +#[allow(clippy::type_complexity)] +fn file_layer( + path: &str, +) -> Result< + Box, Registry>> + Send + Sync + 'static>, + Error, +> { + let file_path = Path::new(path); + let writer = FileWriter::new(file_path) + .map_err(|e| Error::from(format!("Failed to create file writer: {}", e)))?; + + Ok(fmt::layer() + .with_writer(writer) + .with_thread_ids(true) + .with_thread_names(true) + .with_target(true) + .with_file(true) + .with_line_number(true) + .with_ansi(false) + .json() + .boxed()) +} + +impl From for LevelFilter { + fn from(level: LogEventLevel) -> Self { + match level { + LogEventLevel::Trace => LevelFilter::TRACE, + LogEventLevel::Debug => LevelFilter::DEBUG, + LogEventLevel::Info => LevelFilter::INFO, + LogEventLevel::Warn => LevelFilter::WARN, + LogEventLevel::Error => LevelFilter::ERROR, + } + } +} + +static LOGGER: OnceLock>> = OnceLock::new(); + +/// Configures the global logger to write to a file in JSON format. +/// +/// # Arguments +/// * `file_config` - Configuration specifying the file path +pub fn logger_configure_file(file_config: FileConfig) -> Result<(), Error> { + let logger_mutex = LOGGER.get_or_init(|| Mutex::new(None)); + let mut logger_guard = logger_mutex + .lock() + .map_err(|e| Error::from(format!("Failed to acquire logger lock: {}", e)))?; + + if let Some(logger) = logger_guard.as_mut() { + logger.configure_file(file_config) + } else { + let mut logger = Logger::setup_global()?; + logger.configure_file(file_config)?; + *logger_guard = Some(logger); + Ok(()) + } +} + +/// Disables file logging for the global logger. 
+/// +/// Removes file logging configuration while keeping other outputs (like std streams) active. +pub fn logger_disable_file() -> Result<(), Error> { + let logger_mutex = LOGGER.get_or_init(|| Mutex::new(None)); + let mut logger_guard = logger_mutex + .lock() + .map_err(|e| Error::from(format!("Failed to acquire logger lock: {}", e)))?; + + if let Some(logger) = logger_guard.as_mut() { + logger.disable_file() + } else { + Err(Error::from("Logger not initialized")) + } +} + +/// Configures the global logger to write to stdout or stderr in compact format. +/// +/// # Arguments +/// * `std_config` - Configuration specifying stdout or stderr +pub fn logger_configure_std(std_config: StdConfig) -> Result<(), Error> { + let logger_mutex = LOGGER.get_or_init(|| Mutex::new(None)); + let mut logger_guard = logger_mutex + .lock() + .map_err(|e| Error::from(format!("Failed to acquire logger lock: {}", e)))?; + + if let Some(logger) = logger_guard.as_mut() { + logger.configure_std(std_config) + } else { + let mut logger = Logger::setup_global()?; + logger.configure_std(std_config)?; + *logger_guard = Some(logger); + Ok(()) + } +} + +/// Disables standard stream logging for the global logger. +/// +/// Removes std stream logging configuration while keeping other outputs (like file) active. +pub fn logger_disable_std() -> Result<(), Error> { + let logger_mutex = LOGGER.get_or_init(|| Mutex::new(None)); + let mut logger_guard = logger_mutex + .lock() + .map_err(|e| Error::from(format!("Failed to acquire logger lock: {}", e)))?; + + if let Some(logger) = logger_guard.as_mut() { + logger.disable_std() + } else { + Err(Error::from("Logger not initialized")) + } +} + +/// Sets the minimum log level for the global logger. 
+/// +/// # Arguments +/// * `log_level` - Minimum level (Trace, Debug, Info, Warn, Error) +pub fn logger_set_log_level(log_level: LogEventLevel) -> Result<(), Error> { + let logger_mutex = LOGGER.get_or_init(|| Mutex::new(None)); + let logger_guard = logger_mutex + .lock() + .map_err(|e| Error::from(format!("Failed to acquire logger lock: {}", e)))?; + + if let Some(logger) = logger_guard.as_ref() { + logger.set_log_level(log_level) + } else { + Err(Error::from("Logger not initialized")) + } +} + +#[cfg(test)] +mod tests { + use std::sync::{Arc, Mutex}; + use tempfile::TempDir; + use tracing::field::{Field, Visit}; + use tracing::subscriber::Interest; + use tracing::{debug, error, info, trace, warn, Event, Metadata, Subscriber}; + use tracing_subscriber::layer::{Context, Layer}; + + use super::*; + + #[derive(Default)] + struct MessageVisitor { + message: Option, + all_fields: std::collections::HashMap, + } + + impl Visit for MessageVisitor { + fn record_i64(&mut self, field: &Field, value: i64) { + let field_name = field.name(); + let field_value = value.to_string(); + self.all_fields + .insert(field_name.to_string(), field_value.clone()); + + if field_name == "message" { + self.message = Some(field_value); + } + } + + fn record_u64(&mut self, field: &Field, value: u64) { + let field_name = field.name(); + let field_value = value.to_string(); + self.all_fields + .insert(field_name.to_string(), field_value.clone()); + + if field_name == "message" { + self.message = Some(field_value); + } + } + + fn record_bool(&mut self, field: &Field, value: bool) { + let field_name = field.name(); + let field_value = value.to_string(); + self.all_fields + .insert(field_name.to_string(), field_value.clone()); + + if field_name == "message" { + self.message = Some(field_value); + } + } + + fn record_str(&mut self, field: &Field, value: &str) { + let field_name = field.name(); + self.all_fields + .insert(field_name.to_string(), value.to_string()); + + if field_name == "message" { + 
self.message = Some(value.to_string()); + } + } + + fn record_debug(&mut self, field: &Field, value: &dyn std::fmt::Debug) { + let field_name = field.name(); + let field_value = format!("{:?}", value); + self.all_fields + .insert(field_name.to_string(), field_value.clone()); + + if field_name == "message" { + self.message = Some(field_value); + } + } + } + + #[derive(Default)] + struct RecordingLayer { + events: Arc>>, + _subscriber: std::marker::PhantomData, + } + + impl RecordingLayer { + fn new(events: Arc>>) -> Self { + RecordingLayer { + events, + _subscriber: std::marker::PhantomData, + } + } + } + + impl Layer for RecordingLayer + where + S: Subscriber + for<'a> tracing_subscriber::registry::LookupSpan<'a>, + { + fn register_callsite(&self, _metadata: &'static Metadata<'static>) -> Interest { + Interest::always() + } + + fn enabled(&self, _metadata: &Metadata<'_>, _ctx: Context<'_, S>) -> bool { + true + } + + fn on_event(&self, event: &Event<'_>, _ctx: Context<'_, S>) { + let mut visitor = MessageVisitor::default(); + event.record(&mut visitor); + + let mut events = self.events.lock().unwrap(); + let message = visitor.message.unwrap_or_else(|| { + // If no explicit message field, try to reconstruct from all fields + if !visitor.all_fields.is_empty() { + format!("Fields: {:?}", visitor.all_fields) + } else { + format!( + "Event: {} - {}", + event.metadata().target(), + event.metadata().name() + ) + } + }); + events.push(message); + } + } + #[test] + #[cfg_attr(miri, ignore)] + fn test_logger_setup() { + let logger = Logger::setup(); + assert!(logger.is_ok(), "Logger setup should succeed"); + } + + #[test] + #[cfg_attr(miri, ignore)] + fn test_logger_with_std() { + let events: Arc>> = Default::default(); + let mut logger = Logger::setup().expect("Should setup logger successfully"); + + let std_config = StdConfig { + target: StdTarget::Out, + }; + + logger + .configure_std(std_config) + .expect("Should configure std output"); + + // Add recording layer after 
configuration + logger + .layer_handle + .modify(|layers| { + layers.push(Box::new(RecordingLayer::new(Arc::clone(&events)))); + }) + .expect("Should be able to add recording layer"); + + logger + .set_log_level(LogEventLevel::Info) + .expect("Should set log level to Info"); + + info!(message = "Std output test message"); + + let captured_events = events.lock().unwrap(); + assert_eq!( + captured_events.len(), + 1, + "Should capture message with std output" + ); + assert_eq!(captured_events[0], "Std output test message"); + + drop(logger); + } + + #[test] + #[cfg_attr(miri, ignore)] + fn test_logger_with_file() { + let events: Arc>> = Default::default(); + let mut logger = Logger::setup().expect("Should setup logger successfully"); + + let temp_dir = TempDir::new().expect("Should create temp directory"); + let log_path = temp_dir.path().join("test.log"); + + let file_config = FileConfig { + path: log_path.to_string_lossy().to_string(), + }; + + logger + .configure_file(file_config) + .expect("Should configure file output"); + + // Add recording layer after configuration + logger + .layer_handle + .modify(|layers| { + layers.push(Box::new(RecordingLayer::new(Arc::clone(&events)))); + }) + .expect("Should be able to add recording layer"); + + logger + .set_log_level(LogEventLevel::Info) + .expect("Should set log level to Info"); + + info!(message = "File output test message"); + + let captured_events = events.lock().unwrap(); + assert_eq!( + captured_events.len(), + 1, + "Should capture message with file output" + ); + assert_eq!(captured_events[0], "File output test message"); + drop(captured_events); + + assert!( + log_path.exists(), + "Log file should be created at {:?}", + log_path + ); + + // add delay to ensure file is written + std::thread::sleep(std::time::Duration::from_millis(100)); + + if let Ok(content) = std::fs::read_to_string(&log_path) { + assert!( + !content.is_empty(), + "Log file should contain some log output" + ); + } + + drop(logger); + } + + 
#[test] + #[cfg_attr(miri, ignore)] + fn test_logger_with_std_and_file() { + let events: Arc>> = Default::default(); + let mut logger = Logger::setup().expect("Should setup logger successfully"); + + // Configure std output + let std_config = StdConfig { + target: StdTarget::Err, + }; + logger + .configure_std(std_config) + .expect("Should configure std output"); + + let temp_dir = TempDir::new().expect("Should create temp directory"); + let log_path = temp_dir.path().join("test.log"); + let file_config = FileConfig { + path: log_path.to_string_lossy().to_string(), + }; + logger + .configure_file(file_config) + .expect("Should configure file output"); + + // Add recording layer after configuration + logger + .layer_handle + .modify(|layers| { + layers.push(Box::new(RecordingLayer::new(Arc::clone(&events)))); + }) + .expect("Should be able to add recording layer"); + + logger + .set_log_level(LogEventLevel::Info) + .expect("Should set log level to Info"); + + warn!(message = "Std and file output test message"); + + let captured_events = events.lock().unwrap(); + assert_eq!( + captured_events.len(), + 1, + "Should capture message with std and file output" + ); + assert_eq!(captured_events[0], "Std and file output test message"); + drop(captured_events); + + // Verify that the log file was created + assert!( + log_path.exists(), + "Log file should be created at {:?}", + log_path + ); + + drop(logger); + } + + #[test] + #[cfg_attr(miri, ignore)] + fn test_logger_level_change() { + let events: Arc>> = Default::default(); + let logger = Logger::setup().expect("Should setup logger successfully"); + + // Add recording layer + logger + .layer_handle + .modify(|layers| { + layers.push(Box::new(RecordingLayer::new(Arc::clone(&events)))); + }) + .expect("Should be able to add recording layer"); + + // Test TRACE level (captures everything) + logger + .set_log_level(LogEventLevel::Trace) + .expect("Should set log level to Trace"); + + trace!(message = "Trace message"); + 
debug!(message = "Debug message"); + info!(message = "Info message"); + warn!(message = "Warn message"); + error!(message = "Error message"); + + { + let captured_events = events.lock().unwrap(); + assert_eq!( + captured_events.len(), + 5, + "Should capture all 5 messages at TRACE level" + ); + } + + // Clear and test WARN level (only WARN and ERROR) + events.lock().unwrap().clear(); + logger + .set_log_level(LogEventLevel::Warn) + .expect("Should set log level to Warn"); + + trace!(message = "Trace filtered"); + debug!(message = "Debug filtered"); + info!(message = "Info filtered"); + warn!(message = "Warn message"); + error!(message = "Error message"); + + { + let captured_events = events.lock().unwrap(); + assert_eq!( + captured_events.len(), + 2, + "Should capture only WARN and ERROR messages" + ); + assert_eq!(captured_events[0], "Warn message"); + assert_eq!(captured_events[1], "Error message"); + } + + // Clear and test ERROR level (only ERROR) + events.lock().unwrap().clear(); + logger + .set_log_level(LogEventLevel::Error) + .expect("Should set log level to Error"); + + trace!(message = "Trace filtered"); + debug!(message = "Debug filtered"); + info!(message = "Info filtered"); + warn!(message = "Warn filtered"); + error!(message = "Error message"); + + { + let captured_events = events.lock().unwrap(); + assert_eq!( + captured_events.len(), + 1, + "Should capture only ERROR message" + ); + assert_eq!(captured_events[0], "Error message"); + } + + drop(logger); + } +} diff --git a/datadog-log/src/writers.rs b/datadog-log/src/writers.rs new file mode 100644 index 000000000..07f1bf1c4 --- /dev/null +++ b/datadog-log/src/writers.rs @@ -0,0 +1,111 @@ +// Copyright 2025-Present Datadog, Inc. 
https://www.datadoghq.com/ +// SPDX-License-Identifier: Apache-2.0 + +use crate::logger::StdTarget; +use std::io::Write; +use std::path::Path; +use std::{fs, io}; +use tracing_appender::non_blocking::{NonBlocking, WorkerGuard}; +use tracing_subscriber::fmt::MakeWriter; + +/// A non-blocking writer that writes log output to a file. +/// +/// Uses a background thread to handle writes asynchronously, which improves +/// performance by not blocking the logging thread. The background thread is +/// managed by the internal `WorkerGuard`. +pub struct FileWriter { + non_blocking: NonBlocking, + /// The WorkerGuard is crucial for the non-blocking writer's functionality. + /// + /// The guard represents ownership of the background worker thread that processes + /// writes asynchronously. When the guard is dropped, it ensures: + /// 1. All pending writes are flushed + /// 2. The worker thread is properly shut down + /// 3. No writes are lost + /// + /// If we don't keep the guard alive for the entire lifetime of the writer: + /// - The worker thread might be shut down prematurely + /// - Pending writes could be lost + /// - The non-blocking writer would stop functioning + /// + /// That's why we store it in the struct and name it with a leading underscore + /// to indicate it's intentionally unused but must be kept alive. + _guard: WorkerGuard, +} + +impl FileWriter { + /// Creates a new file writer that writes to the specified path. + /// + /// If the parent directory doesn't exist, it will be created. + /// The file will be opened in append mode. 
+ pub fn new(path: &Path) -> io::Result { + if let Some(parent) = path.parent() { + fs::create_dir_all(parent)?; + } + + let file_appender = tracing_appender::rolling::never( + path.parent().unwrap_or_else(|| Path::new(".")), + path.file_name().unwrap_or_default(), + ); + let (non_blocking, guard) = tracing_appender::non_blocking(file_appender); + + Ok(Self { + non_blocking, + _guard: guard, + }) + } +} + +impl Write for FileWriter { + fn write(&mut self, buf: &[u8]) -> io::Result { + self.non_blocking.write(buf) + } + + fn flush(&mut self) -> io::Result<()> { + self.non_blocking.flush() + } +} + +impl<'a> MakeWriter<'a> for FileWriter { + type Writer = NonBlocking; + + fn make_writer(&'a self) -> Self::Writer { + self.non_blocking.clone() + } +} + +/// A writer that writes log output to standard output or standard error. +pub struct StdWriter { + target: StdTarget, +} + +impl StdWriter { + /// Creates a new writer that writes to the specified standard stream. + pub fn new(target: StdTarget) -> Self { + Self { target } + } +} + +impl Write for StdWriter { + fn write(&mut self, buf: &[u8]) -> io::Result { + match self.target { + StdTarget::Out => io::stdout().write(buf), + StdTarget::Err => io::stderr().write(buf), + } + } + + fn flush(&mut self) -> io::Result<()> { + match self.target { + StdTarget::Out => io::stdout().flush(), + StdTarget::Err => io::stderr().flush(), + } + } +} + +impl<'a> MakeWriter<'a> for StdWriter { + type Writer = StdWriter; + + fn make_writer(&'a self) -> Self::Writer { + StdWriter::new(self.target) + } +} diff --git a/datadog-profiling-ffi/Cargo.toml b/datadog-profiling-ffi/Cargo.toml index dc665df1a..275959d42 100644 --- a/datadog-profiling-ffi/Cargo.toml +++ b/datadog-profiling-ffi/Cargo.toml @@ -18,6 +18,7 @@ bench = false default = ["ddcommon-ffi"] cbindgen = ["build_common/cbindgen", "ddcommon-ffi/cbindgen"] ddtelemetry-ffi = ["dep:ddtelemetry-ffi"] +datadog-log-ffi = ["dep:datadog-log-ffi"] symbolizer = ["symbolizer-ffi"] 
data-pipeline-ffi = ["dep:data-pipeline-ffi"] crashtracker-ffi = ["dep:datadog-crashtracker-ffi"] @@ -41,6 +42,7 @@ datadog-profiling = { path = "../datadog-profiling" } ddcommon = { path = "../ddcommon" } ddcommon-ffi = { path = "../ddcommon-ffi", default-features = false, optional = true } ddtelemetry-ffi = { path = "../ddtelemetry-ffi", default-features = false, optional = true, features = ["expanded_builder_macros"] } +datadog-log-ffi = { path = "../datadog-log-ffi", default-features = false, optional = true } function_name = "0.3.0" futures = { version = "0.3", default-features = false } http-body-util = "0.1" diff --git a/datadog-profiling-ffi/src/lib.rs b/datadog-profiling-ffi/src/lib.rs index 17b9a70c5..b070cce1a 100644 --- a/datadog-profiling-ffi/src/lib.rs +++ b/datadog-profiling-ffi/src/lib.rs @@ -30,6 +30,10 @@ pub use data_pipeline_ffi::*; #[cfg(feature = "datadog-library-config-ffi")] pub use datadog_library_config_ffi::*; +// re-export log ffi +#[cfg(feature = "datadog-log-ffi")] +pub use datadog_log_ffi::*; + // re-export tracer metadata functions #[cfg(feature = "ddcommon-ffi")] pub use ddcommon_ffi::*; diff --git a/datadog-trace-utils/Cargo.toml b/datadog-trace-utils/Cargo.toml index 64fa6acb1..bbb97bd70 100644 --- a/datadog-trace-utils/Cargo.toml +++ b/datadog-trace-utils/Cargo.toml @@ -22,7 +22,7 @@ http-body-util = "0.1" serde = { version = "1.0.145", features = ["derive"] } prost = "0.13.5" rmp-serde = "1.1.1" -log = "0.4" +tracing = { version = "0.1", default-features = false } serde_json = "1.0" futures = { version = "0.3", default-features = false } ddcommon = { path = "../ddcommon" } diff --git a/datadog-trace-utils/src/stats_utils.rs b/datadog-trace-utils/src/stats_utils.rs index 3f1756827..0e2d22311 100644 --- a/datadog-trace-utils/src/stats_utils.rs +++ b/datadog-trace-utils/src/stats_utils.rs @@ -6,14 +6,13 @@ pub use mini_agent::*; #[cfg(feature = "mini_agent")] mod mini_agent { + use datadog_trace_protobuf::pb; use 
ddcommon::hyper_migration; + use ddcommon::Endpoint; use http_body_util::BodyExt; use hyper::{body::Buf, Method, Request, StatusCode}; - use log::debug; use std::io::Write; - - use datadog_trace_protobuf::pb; - use ddcommon::Endpoint; + use tracing::debug; pub async fn get_stats_from_request_body( body: hyper_migration::Body, diff --git a/datadog-trace-utils/src/trace_utils.rs b/datadog-trace-utils/src/trace_utils.rs index c77014cab..ba75f63ef 100644 --- a/datadog-trace-utils/src/trace_utils.rs +++ b/datadog-trace-utils/src/trace_utils.rs @@ -15,13 +15,13 @@ use datadog_trace_protobuf::pb; use ddcommon::{azure_app_services, hyper_migration}; use http_body_util::BodyExt; use hyper::body::Buf; -use log::error; use rmp::decode::read_array_len; use rmpv::decode::read_value; use rmpv::{Integer, Value}; use std::cmp::Ordering; use std::collections::{HashMap, HashSet}; use std::env; +use tracing::error; /// The maximum payload size for a single request that can be sent to the trace agent. Payloads /// larger than this size will be dropped and the agent will return a 413 error if @@ -365,8 +365,8 @@ pub fn get_root_span_index(trace: &[pb::Span]) -> anyhow::Result { if !span_ids.contains(&span.parent_id) { if root_span_id.is_some() { error!( - "trace has multiple root spans trace_id: {}", - &trace[0].trace_id + trace_id = &trace[0].trace_id, + "trace has multiple root spans" ); } root_span_id = Some(i); @@ -376,8 +376,8 @@ pub fn get_root_span_index(trace: &[pb::Span]) -> anyhow::Result { Some(i) => i, None => { error!( - "Could not find the root span for trace with trace_id: {}", - &trace[0].trace_id, + trace_id = &trace[0].trace_id, + "Could not find the root span for trace" ); trace.len() - 1 } diff --git a/ddcommon/Cargo.toml b/ddcommon/Cargo.toml index 3ed7983f0..33c388962 100644 --- a/ddcommon/Cargo.toml +++ b/ddcommon/Cargo.toml @@ -29,7 +29,6 @@ http-body = "1.0" http-body-util = "0.1" tower-service = "0.3" cc = "1.1.31" -log = { version = "0.4" } pin-project = "1" 
regex = "1.5" rustls = { version = "0.23", default-features = false, optional = true } diff --git a/docs/RFCs/0010-logging-in-libdatadog.md b/docs/RFCs/0010-logging-in-libdatadog.md new file mode 100644 index 000000000..9093a2812 --- /dev/null +++ b/docs/RFCs/0010-logging-in-libdatadog.md @@ -0,0 +1,169 @@ +# RFC: logging in libdatadog + +## Overview + +**Document Purpose:** + +This document outlines the design for adding logging sink capabilities to libdatadog. Currently, logs generated within libdatadog are not visible to the calling systems, making it difficult for Datadog to diagnose issues in customer environments. This document proposes implementing logging directly within libdatadog, with support for multiple output destinations including stdout and file-based output. These logs are intended solely for Datadog's internal troubleshooting purposes and are not meant to be used by customers for their application logging needs. + +**Background** + +The libdatadog `TraceExporter` is responsible for exporting traces to the Datadog agent. While processing and exporting traces, it generates important diagnostic logs which are currently not visible in production environments. This makes the `TraceExporter` integration backward incompatible with existing Trace SDKs. + +Some Trace SDKs, like .NET, only allow file-based logging, while others like Python follow a bring-your-own-logging (BYOL) approach where the SDK does not provide any logging capabilities and relies on the application to handle logging. These logs are typically separated from the application logs to avoid confusion for the end users and to ensure that the logs are only used for Datadog's internal troubleshooting purposes. + +This RFC proposes two common logging sinks for libdatadog while keeping the APIs flexible enough to allow future extensions. This design builds upon libdatadog's existing architecture, leveraging established error handling patterns and type definitions. 
It uses existing primitives like `CharSlice` for string handling and `Error` for error handling. + +## Goals + +* **Primary Goals:** + * Support multiple output destinations: + * No output (Noop) for when logging is not needed + * Standard output (Stdout) for console logging + * File-based output for persistent logging + * Provide configurable log levels at runtime +* **Non-Goals:** + * Automatic log collection (i.e., telemetry) + +## Technical Design Summary + +The logging system provides a simple and flexible public interface for configuring logging behavior in libdatadog. The interface consists of: + +* Five primary configuration functions: + * `ddog_logger_configure_std` - For setting up console logging to stdout or stderr + * `ddog_logger_disable_std` - For disabling standard stream logging + * `ddog_logger_configure_file` - For setting up file-based logging + * `ddog_logger_disable_file` - For disabling file logging + * `ddog_logger_set_log_level` - For updating the minimum log level at runtime + * These methods must be implemented in a thread-safe manner +* Supported output destinations: + * Std - For console output (stdout or stderr) + * File - For writing to a specified file +* Configuration structures that provide: + * Standard stream target selection (stdout or stderr) + * Output destination selection + * File path configuration when using file output + * Separate log level configuration through `ddog_logger_set_log_level` + +The public API is designed to be simple to use while providing the necessary flexibility for different logging needs. + +## Detailed Design + +The integration exposes five primary functions through FFI for configuring logging behavior. Separate configure and disable methods for each logging destination allow independent control over each logging destination. 
This allows us to support features like tracer flare in cases where a customer has std logging configured, the tracer can enable file logging without affecting the customer setup, we can collect the logs and disable the file logging after without affecting the original configuration. + +`ddog_logger_set_log_level` allows updating the log level at runtime without affecting the existing logging configuration, which is useful for runtime reconfiguration. The log level applies globally to all configured logging destinations. + +### Public APIs + +```rust +/// Sets the global log level. +/// +/// # Arguments +/// * `log_level` - The minimum level for events to be logged +/// +/// # Errors +/// Returns an error if the log level cannot be set. +#[no_mangle] +pub extern "C" fn ddog_logger_set_log_level( + log_level: LogEventLevel, +) -> Option>; + +/// Target for standard stream output. +#[repr(C)] +#[derive(Debug, Clone, Copy)] +pub enum StdTarget { + /// Write to standard output (stdout). + Out, + /// Write to standard error (stderr). + Err, +} + +/// Configuration for standard stream output. +#[repr(C)] +pub struct StdConfig { + /// Target stream (stdout or stderr) + pub target: StdTarget, +} + +/// Configures the logger to write to stdout or stderr with the specified configuration. +/// +/// # Arguments +/// * `config` - Configuration for standard stream logging including target +/// +/// # Errors +/// Returns an error if the logger cannot be configured. +#[no_mangle] +pub extern "C" fn ddog_logger_configure_std( + config: StdConfig, +) -> Option>; + +/// Configuration for file output. +#[repr(C)] +pub struct FileConfig<'a> { + /// Path to the log file + pub path: CharSlice<'a>, +} + +/// Disables standard stream logging. +/// +/// # Errors +/// Returns an error if the logger cannot be configured. +#[no_mangle] +pub extern "C" fn ddog_logger_disable_std() -> Option>; + +/// Configures the logger to write to a file with the specified configuration. 
+/// +/// # Arguments +/// * `config` - Configuration for file logging including path +/// +/// # Errors +/// Returns an error if the logger cannot be configured. +#[no_mangle] +pub extern "C" fn ddog_logger_configure_file( + config: FileConfig, +) -> Option>; + +/// Disables file logging. +/// +/// # Errors +/// Returns an error if the logger cannot be configured. +#[no_mangle] +pub extern "C" fn ddog_logger_disable_file() -> Option>; +``` + +### Example Usage + +Check the [example usage](../../examples/ffi/trace_exporter.c) in the `trace_exporter.c` file. + +### Performance and Scalability + +The logging implementation follows established patterns for logging in other APM libraries as outlined in the [tracer logging RFC](https://github.com/DataDog/architecture/blob/891eda680d70b9825fec58dc90553c5d4557058a/rfcs/apm/integrations/tracer-logging/rfc.md). + +It also uses structured logging to make it easier to parse and analyze logs. + +## Alternatives Considered + +### Callback-based API + +An alternative design considered exposing a callback-based API where users would provide their own logging function. This was rejected because: + +1. It would make the API more complex, requiring native code to call into the managed code for final logging +2. This is particularly complex for languages like Python where the Global Interpreter Lock (GIL) must be held to call into the managed code +3. Error handling across FFI boundaries would be more complicated +4. Performance overhead of crossing FFI boundaries for each log message + +### Environment Variable Configuration + +Another alternative considered was using environment variables for logger configuration. This was rejected because: + +1. It would make runtime reconfiguration more difficult +2. Environment variables are global state and could lead to conflicts +3. Some deployment environments restrict environment variable access +4. 
More difficult to validate configuration at runtime + +## Appendix 1 + +References: + +* [Rust log crate documentation](https://docs.rs/log/0.4.26/log/fn.set_logger.html) +* POC Implementation: [libdatadog PR #XXX](https://github.com/DataDog/libdatadog/compare/main...ganeshnj/poc/logging) \ No newline at end of file diff --git a/dogstatsd-client/src/lib.rs b/dogstatsd-client/src/lib.rs index 8a549394a..a4f3fadf3 100644 --- a/dogstatsd-client/src/lib.rs +++ b/dogstatsd-client/src/lib.rs @@ -98,7 +98,7 @@ impl Client { let client_opt = match self.get_or_init_client() { Ok(client) => client, Err(e) => { - error!("Failed to get client: {}", e); + error!(?e, "Failed to get client"); return; } }; @@ -122,7 +122,7 @@ impl Client { do_send(client.set_with_tags(metric.as_ref(), value), &tags) } } { - error!("Error while sending metric: {}", err); + error!(?err, "Error while sending metric"); } } } @@ -137,7 +137,7 @@ impl Client { let client_opt = match self.get_or_init_client() { Ok(client) => client, Err(e) => { - error!("Failed to get client: {}", e); + error!(?e, "Failed to get client"); return; } }; @@ -161,7 +161,7 @@ impl Client { do_send(client.set_with_tags(metric.as_ref(), value), tags) } } { - error!("Error while sending metric: {}", err); + error!(?err, "Error while sending metric"); } } } diff --git a/examples/ffi/README.md b/examples/ffi/README.md index 01e10e159..0acc71ff9 100644 --- a/examples/ffi/README.md +++ b/examples/ffi/README.md @@ -3,7 +3,7 @@ In order to be able to run FFI examples, you need to build the shared library and headers with the command: ```bash -cargo run --bin release --features profiling,telemetry,data-pipeline,symbolizer,crashtracker --release -- --out +cargo run --bin release --features profiling,telemetry,data-pipeline,symbolizer,crashtracker,library-config,log --release -- --out ``` You can then build the examples with: diff --git a/examples/ffi/trace_exporter.c b/examples/ffi/trace_exporter.c index 7c344fec8..12b696b94 100644 --- 
a/examples/ffi/trace_exporter.c +++ b/examples/ffi/trace_exporter.c @@ -4,8 +4,10 @@ #include #include #include +#include #include #include +#include enum { SUCCESS, @@ -17,8 +19,56 @@ void handle_error(ddog_TraceExporterError *err) { ddog_trace_exporter_error_free(err); } +void handle_log_error(ddog_Error *err) { + fprintf(stderr, "Operation failed with error: %s\n", (char *)err->message.ptr); + ddog_Error_drop(err); +} + +int log_init(const char* log_path) { + // Always configure console logging to stdout + struct ddog_StdConfig std_config = { + .target = DDOG_STD_TARGET_OUT + }; + struct ddog_Error *err = ddog_logger_configure_std(std_config); + if (err) { + handle_log_error(err); + return 1; + } + + // Additionally configure file logging if path is provided + if (log_path != NULL) { + struct ddog_FileConfig file_config = { + .path = (ddog_CharSlice){ + .ptr = log_path, + .len = strlen(log_path) + } + }; + err = ddog_logger_configure_file(file_config); + if (err) { + handle_log_error(err); + return 1; + } + } + + // Set the log level to TRACE for maximum verbosity + err = ddog_logger_set_log_level(DDOG_LOG_EVENT_LEVEL_TRACE); + if (err) { + handle_log_error(err); + return 1; + } + + return 0; +} + int main(int argc, char** argv) { + // Initialize logger with optional path from command line + const char* log_path = (argc > 1) ? 
argv[1] : NULL; + if (log_init(log_path) != 0) { + fprintf(stderr, "Failed to initialize logger\n"); + return 1; + } + int error; ddog_TraceExporter* trace_exporter; @@ -32,7 +82,6 @@ int main(int argc, char** argv) ddog_CharSlice version = DDOG_CHARSLICE_C("1.0"); ddog_CharSlice service = DDOG_CHARSLICE_C("test_app"); - ddog_TraceExporterError *ret; ddog_TraceExporterConfig *config; @@ -41,7 +90,6 @@ int main(int argc, char** argv) ddog_trace_exporter_config_set_tracer_version(config, tracer_version); ddog_trace_exporter_config_set_language(config, language); - ret = ddog_trace_exporter_new(&trace_exporter, config); assert(ret == NULL); @@ -63,6 +111,20 @@ int main(int argc, char** argv) ddog_trace_exporter_free(trace_exporter); ddog_trace_exporter_config_free(config); + // Disable file logging if it was enabled + if (log_path != NULL) { + struct ddog_Error *err = ddog_logger_disable_file(); + if (err) { + handle_log_error(err); + } + } + + // disable std logging as well + struct ddog_Error *err = ddog_logger_disable_std(); + if (err) { + handle_log_error(err); + } + return SUCCESS; error: diff --git a/tools/docker/Dockerfile.build b/tools/docker/Dockerfile.build index d3ece0b96..45c65c8cc 100644 --- a/tools/docker/Dockerfile.build +++ b/tools/docker/Dockerfile.build @@ -76,6 +76,8 @@ COPY "ddcommon/Cargo.toml" "ddcommon/" COPY "ddcommon-ffi/Cargo.toml" "ddcommon-ffi/" COPY "ddtelemetry/Cargo.toml" "ddtelemetry/" COPY "ddtelemetry-ffi/Cargo.toml" "ddtelemetry-ffi/" +COPY "datadog-log/Cargo.toml" "datadog-log/" +COPY "datadog-log-ffi/Cargo.toml" "datadog-log-ffi/" COPY "ddsketch/Cargo.toml" "ddsketch/" COPY "dogstatsd-client/Cargo.toml" "dogstatsd-client/" COPY "datadog-library-config-ffi/Cargo.toml" "datadog-library-config-ffi/"