diff --git a/Cargo.lock b/Cargo.lock index 9e081822c..5f3a62b30 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -604,8 +604,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "145052bdd345b87320e369255277e3fb5152762ad123a901ef5c262dd38fe8d2" dependencies = [ "iana-time-zone", + "js-sys", "num-traits", "serde", + "wasm-bindgen", "windows-link 0.2.1", ] @@ -1880,6 +1882,7 @@ dependencies = [ "arbitrary", "byte-unit", "bytes", + "chrono", "criterion", "opentelemetry-proto", "proptest", @@ -1890,6 +1893,7 @@ dependencies = [ "serde", "serde_json", "serde_tuple", + "tempfile", "thiserror 2.0.17", "time", "tokio", diff --git a/Dockerfile b/Dockerfile index 2856e36a1..aab6280aa 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,5 +1,15 @@ +FROM docker.io/rust:1.90.0-bookworm AS chef +RUN cargo install cargo-chef +WORKDIR /app + +FROM chef AS planner +COPY . . +RUN cargo chef prepare --recipe-path recipe.json + # Update the rust version in-sync with the version in rust-toolchain.toml -FROM docker.io/rust:1.90.0-bookworm AS builder +FROM chef AS builder +COPY --from=planner /app/recipe.json recipe.json +RUN cargo chef cook --release --recipe-path recipe.json RUN apt-get update && apt-get install -y \ protobuf-compiler fuse3 libfuse3-dev \ @@ -10,9 +20,37 @@ COPY . 
/app RUN cargo build --release --locked --bin lading --features logrotate_fs FROM docker.io/debian:bookworm-20241202-slim -RUN apt-get update && apt-get install -y libfuse3-dev=3.14.0-4 fuse3=3.14.0-4 && rm -rf /var/lib/apt/lists/* +RUN apt-get update && apt-get install -y \ + libfuse3-dev=3.14.0-4 \ + fuse3=3.14.0-4 \ + tcpdump \ + procps \ + lsof \ + net-tools \ + iproute2 \ + curl \ + strace \ + && rm -rf /var/lib/apt/lists/* COPY --from=builder /app/target/release/lading /usr/bin/lading +# Create wrapper script that starts tcpdump before lading +RUN echo '#!/bin/bash\n\ + set -e\n\ + \n\ + echo "Starting tcpdump on port 11538..."\n\ + # Full-packet capture with large buffer to avoid drops/truncation\n\ + tcpdump -i any -nn -s0 -B 512000 -w /captures/grpc-traffic.pcap tcp port 11538 &\n\ + TCPDUMP_PID=$!\n\ + echo $TCPDUMP_PID > /tmp/tcpdump.pid\n\ + echo "tcpdump started with PID $TCPDUMP_PID, capturing to /captures/grpc-traffic.pcap"\n\ + \n\ + # Give tcpdump time to initialize\n\ + sleep 2\n\ + \n\ + # Start lading with all arguments\n\ + exec /usr/bin/lading "$@"\n\ + ' > /usr/bin/lading-wrapper && chmod +x /usr/bin/lading-wrapper + # smoke test RUN ["/usr/bin/lading", "--help"] -ENTRYPOINT ["/usr/bin/lading"] +ENTRYPOINT ["/usr/bin/lading-wrapper"] diff --git a/lading/src/generator/file_gen/traditional.rs b/lading/src/generator/file_gen/traditional.rs index 72ea23fd3..1d6157484 100644 --- a/lading/src/generator/file_gen/traditional.rs +++ b/lading/src/generator/file_gen/traditional.rs @@ -30,6 +30,7 @@ use tokio::{ fs, io::{AsyncWriteExt, BufWriter}, task::{JoinError, JoinSet}, + time::{Duration, Instant}, }; use tracing::{error, info}; @@ -120,6 +121,18 @@ pub struct Config { rotate: bool, /// The load throttle configuration pub throttle: Option<lading_throttle::Config>, + /// Optional fixed interval between blocks. When set, the generator waits + /// this duration before emitting the next block, regardless of byte size. 
+ pub block_interval_millis: Option<u64>, + /// Flush after each block. Useful when block intervals are large and the + /// buffered writer would otherwise delay writes to disk. + #[serde(default)] + pub flush_each_block: bool, + /// Optional starting line offset into the block cache. This advances + /// through blocks until the cumulative line count reaches this value, + /// then begins emitting from that block. If data point counts are not + /// available for a payload, this setting is effectively ignored. + pub start_line_index: Option<u64>, } #[derive(Debug)] @@ -195,6 +208,9 @@ impl Server { file_index: Arc::clone(&file_index), rotate: config.rotate, shutdown: shutdown.clone(), + block_interval: config.block_interval_millis.map(Duration::from_millis), + flush_each_block: config.flush_each_block, + start_line_index: config.start_line_index.unwrap_or(0), }; handles.spawn(child.spin()); @@ -269,6 +285,9 @@ struct Child { rotate: bool, file_index: Arc<AtomicU32>, shutdown: lading_signal::Watcher, + block_interval: Option<Duration>, + flush_each_block: bool, + start_line_index: u64, } impl Child { @@ -300,9 +319,29 @@ impl Child { ); let mut handle = self.block_cache.handle(); - + if self.start_line_index > 0 { + let mut remaining = self.start_line_index; + // Walk blocks until we reach or surpass the requested line offset. + // If metadata is missing, assume at least one data point to ensure progress. 
+ loop { + let md = self.block_cache.peek_next_metadata(&handle); + let mut lines = md.data_points.unwrap_or(1); + if lines == 0 { + lines = 1; + } + if lines >= remaining { + break; + } + remaining = remaining.saturating_sub(lines); + let _ = self.block_cache.advance(&mut handle); + } + } let shutdown_wait = self.shutdown.recv(); tokio::pin!(shutdown_wait); + let mut next_tick = self + .block_interval + .as_ref() + .map(|dur| Instant::now() + *dur); loop { let total_bytes = self.block_cache.peek_next_size(&handle); @@ -310,6 +349,22 @@ impl Child { result = self.throttle.wait_for(total_bytes) => { match result { Ok(()) => { + if let Some(dur) = self.block_interval { + if let Some(deadline) = next_tick { + tokio::select! { + _ = tokio::time::sleep_until(deadline) => {}, + () = &mut shutdown_wait => { + fp.flush().await?; + info!("shutdown signal received"); + return Ok(()); + }, + } + next_tick = Some(deadline + dur); + } else { + next_tick = Some(Instant::now() + dur); + } + } + let block = self.block_cache.advance(&mut handle); let total_bytes = u64::from(total_bytes.get()); @@ -318,6 +373,9 @@ impl Child { counter!("bytes_written").increment(total_bytes); total_bytes_written += total_bytes; } + if self.flush_each_block { + fp.flush().await?; + } if total_bytes_written > maximum_bytes_per_file { fp.flush().await?; diff --git a/lading_payload/Cargo.toml b/lading_payload/Cargo.toml index f1cf08679..1d40d186d 100644 --- a/lading_payload/Cargo.toml +++ b/lading_payload/Cargo.toml @@ -28,6 +28,7 @@ rand = { workspace = true, features = [ "std", "std_rng", ] } +chrono = { version = "0.4", default-features = true, features = ["std"] } rmp-serde = { version = "1.1", default-features = false } serde = { workspace = true } serde_json = { workspace = true } @@ -42,6 +43,7 @@ arbitrary = { version = "1", optional = true, features = ["derive"] } proptest = { workspace = true } proptest-derive = { workspace = true } criterion = { version = "0.7", features = ["html_reports"] } 
+tempfile = { workspace = true } [features] default = [] diff --git a/lading_payload/src/block.rs b/lading_payload/src/block.rs index c52c5a833..255d30859 100644 --- a/lading_payload/src/block.rs +++ b/lading_payload/src/block.rs @@ -23,6 +23,12 @@ pub enum SpinError { /// Static payload creation error #[error(transparent)] Static(#[from] crate::statik::Error), + /// Static line-rate payload creation error + #[error(transparent)] + StaticLinesPerSecond(#[from] crate::statik_line_rate::Error), + /// Static second-grouped payload creation error + #[error(transparent)] + StaticSecond(#[from] crate::statik_second::Error), /// rng slice is Empty #[error("RNG slice is empty")] EmptyRng, @@ -55,6 +61,12 @@ pub enum Error { /// Static payload creation error #[error(transparent)] Static(#[from] crate::statik::Error), + /// Static line-rate payload creation error + #[error(transparent)] + StaticLinesPerSecond(#[from] crate::statik_line_rate::Error), + /// Static second-grouped payload creation error + #[error(transparent)] + StaticSecond(#[from] crate::statik_second::Error), /// Error for crate deserialization #[error("Deserialization error: {0}")] Deserialize(#[from] crate::Error), @@ -337,6 +349,42 @@ impl Cache { total_bytes.get(), )? } + crate::Config::StaticLinesPerSecond { + static_path, + lines_per_second, + } => { + let span = span!(Level::INFO, "fixed", payload = "static-lines-per-second"); + let _guard = span.enter(); + let mut serializer = + crate::StaticLinesPerSecond::new(static_path, *lines_per_second)?; + construct_block_cache_inner( + &mut rng, + &mut serializer, + maximum_block_bytes, + total_bytes.get(), + )? 
+ } + crate::Config::StaticSecond { + static_path, + timestamp_format, + emit_placeholder, + start_line_index, + } => { + let span = span!(Level::INFO, "fixed", payload = "static-second"); + let _guard = span.enter(); + let mut serializer = crate::StaticSecond::new( + static_path, + &timestamp_format, + *emit_placeholder, + start_line_index.unwrap_or(0), + )?; + construct_block_cache_inner( + &mut rng, + &mut serializer, + maximum_block_bytes, + total_bytes.get(), + )? + } crate::Config::OpentelemetryTraces => { let mut pyld = crate::OpentelemetryTraces::new(&mut rng); let span = span!(Level::INFO, "fixed", payload = "otel-traces"); @@ -409,6 +457,14 @@ impl Cache { } } + /// Get the number of blocks in the cache. + #[must_use] + pub fn len(&self) -> usize { + match self { + Self::Fixed { blocks, .. } => blocks.len(), + } + } + /// Get metadata of the next block without advancing. #[must_use] pub fn peek_next_metadata(&self, handle: &Handle) -> BlockMetadata { diff --git a/lading_payload/src/lib.rs b/lading_payload/src/lib.rs index cd58ee1f3..1bf921ed4 100644 --- a/lading_payload/src/lib.rs +++ b/lading_payload/src/lib.rs @@ -27,6 +27,8 @@ pub use opentelemetry::metric::OpentelemetryMetrics; pub use opentelemetry::trace::OpentelemetryTraces; pub use splunk_hec::SplunkHec; pub use statik::Static; +pub use statik_line_rate::StaticLinesPerSecond; +pub use statik_second::StaticSecond; pub use syslog::Syslog5424; pub mod apache_common; @@ -40,6 +42,8 @@ pub mod opentelemetry; pub mod procfs; pub mod splunk_hec; pub mod statik; +pub mod statik_line_rate; +pub mod statik_second; pub mod syslog; pub mod trace_agent; @@ -129,6 +133,31 @@ pub enum Config { /// assumed to be line-oriented but no other claim is made on the file. static_path: PathBuf, }, + /// Generates static data but limits the number of lines emitted per block + StaticLinesPerSecond { + /// Defines the file path to read static variant data from. 
Content is + /// assumed to be line-oriented but no other claim is made on the file. + static_path: PathBuf, + /// Number of lines to emit in each generated block + lines_per_second: u32, + }, + /// Generates static data grouped by second; each block contains one + /// second's worth of logs as determined by a parsed timestamp prefix. + StaticSecond { + /// Defines the file path to read static variant data from. Content is + /// assumed to be line-oriented. + static_path: PathBuf, + /// Chrono-compatible timestamp format string used to parse the leading + /// timestamp in each line. + timestamp_format: String, + /// Emit a minimal placeholder block (single newline) for seconds with + /// no lines. When false, empty seconds are skipped. + #[serde(default)] + emit_placeholder: bool, + /// Optional starting line offset; lines before this index are skipped. + #[serde(default)] + start_line_index: Option<u64>, + }, /// Generates a line of printable ascii characters Ascii, /// Generates a json encoded line @@ -167,6 +196,10 @@ pub enum Payload { SplunkHec(splunk_hec::SplunkHec), /// Static file content Static(Static), + /// Static file content with a fixed number of lines emitted per block + StaticLinesPerSecond(StaticLinesPerSecond), + /// Static file content grouped into one-second blocks based on timestamps + StaticSecond(StaticSecond), /// Syslog RFC 5424 format Syslog(Syslog5424), /// OpenTelemetry traces @@ -195,6 +228,8 @@ impl Serialize for Payload { Payload::Json(ser) => ser.to_bytes(rng, max_bytes, writer), Payload::SplunkHec(ser) => ser.to_bytes(rng, max_bytes, writer), Payload::Static(ser) => ser.to_bytes(rng, max_bytes, writer), + Payload::StaticLinesPerSecond(ser) => ser.to_bytes(rng, max_bytes, writer), + Payload::StaticSecond(ser) => ser.to_bytes(rng, max_bytes, writer), Payload::Syslog(ser) => ser.to_bytes(rng, max_bytes, writer), Payload::OtelTraces(ser) => ser.to_bytes(rng, max_bytes, writer), Payload::OtelLogs(ser) => ser.to_bytes(rng, max_bytes, writer), 
@@ -207,6 +242,8 @@ impl Serialize for Payload { fn data_points_generated(&self) -> Option<u64> { match self { Payload::OtelMetrics(ser) => ser.data_points_generated(), + Payload::StaticLinesPerSecond(ser) => ser.data_points_generated(), + Payload::StaticSecond(ser) => ser.data_points_generated(), // Other implementations use the default None _ => None, } diff --git a/lading_payload/src/statik_line_rate.rs b/lading_payload/src/statik_line_rate.rs new file mode 100644 index 000000000..92ad4ed4e --- /dev/null +++ b/lading_payload/src/statik_line_rate.rs @@ -0,0 +1,178 @@ +//! Static file payload that replays a limited number of lines per block. + +use std::{ + fs::{self, OpenOptions}, + io::{BufRead, BufReader, Write}, + num::NonZeroU32, + path::Path, +}; + +use rand::{Rng, seq::IndexedMutRandom}; +use tracing::debug; + +#[derive(Debug)] +struct Source { + lines: Vec<Vec<u8>>, + next_idx: usize, +} + +#[derive(Debug)] +/// Static payload that emits a fixed number of lines each time it is asked to +/// serialize. +pub struct StaticLinesPerSecond { + sources: Vec<Source>, + lines_per_block: NonZeroU32, + last_lines_generated: u64, +} + +#[derive(thiserror::Error, Debug)] +/// Errors produced by [`StaticLinesPerSecond`]. 
+pub enum Error { + /// IO error + #[error(transparent)] + Io(#[from] std::io::Error), + /// No lines were discovered in the provided path + #[error("No lines found in static path")] + NoLines, + /// The provided lines_per_second value was zero + #[error("lines_per_second must be greater than zero")] + ZeroLinesPerSecond, +} + +impl StaticLinesPerSecond { + /// Create a new instance of `StaticLinesPerSecond` + /// + /// # Errors + /// + /// See documentation for [`Error`] + pub fn new(path: &Path, lines_per_second: u32) -> Result<Self, Error> { + let lines_per_block = NonZeroU32::new(lines_per_second).ok_or(Error::ZeroLinesPerSecond)?; + + let mut sources = Vec::with_capacity(16); + + let metadata = fs::metadata(path)?; + if metadata.is_file() { + debug!("Static path {} is a file.", path.display()); + let lines = read_lines(path)?; + sources.push(Source { next_idx: 0, lines }); + } else if metadata.is_dir() { + debug!("Static path {} is a directory.", path.display()); + for entry in fs::read_dir(path)? 
{ + let entry = entry?; + let entry_pth = entry.path(); + debug!("Attempting to open {} as file.", entry_pth.display()); + if let Ok(file) = OpenOptions::new().read(true).open(&entry_pth) { + let lines = read_lines_from_reader(file)?; + sources.push(Source { next_idx: 0, lines }); + } + } + } + + if sources.iter().all(|s| s.lines.is_empty()) { + return Err(Error::NoLines); + } + + Ok(Self { + sources, + lines_per_block, + last_lines_generated: 0, + }) + } +} + +impl crate::Serialize for StaticLinesPerSecond { + fn to_bytes<R, W>( + &mut self, + mut rng: R, + max_bytes: usize, + writer: &mut W, + ) -> Result<(), crate::Error> + where + R: Rng + Sized, + W: Write, + { + self.last_lines_generated = 0; + + let Some(source) = self.sources.choose_mut(&mut rng) else { + return Ok(()); + }; + if source.lines.is_empty() { + return Ok(()); + } + + let mut bytes_written = 0usize; + for _ in 0..self.lines_per_block.get() { + let line = &source.lines[source.next_idx % source.lines.len()]; + let needed = line.len() + 1; // newline + if bytes_written + needed > max_bytes { + break; + } + + writer.write_all(line)?; + writer.write_all(b"\n")?; + bytes_written += needed; + self.last_lines_generated += 1; + source.next_idx = (source.next_idx + 1) % source.lines.len(); + } + + Ok(()) + } + + fn data_points_generated(&self) -> Option<u64> { + Some(self.last_lines_generated) + } +} + +fn read_lines(path: &Path) -> Result<Vec<Vec<u8>>, std::io::Error> { + let file = OpenOptions::new().read(true).open(path)?; + read_lines_from_reader(file) +} + +fn read_lines_from_reader<R: std::io::Read>(reader: R) -> Result<Vec<Vec<u8>>, std::io::Error> { + let mut out = Vec::new(); + let mut reader = BufReader::new(reader); + let mut buf = String::new(); + while { + buf.clear(); + reader.read_line(&mut buf)? 
+ } != 0 + { + if buf.ends_with('\n') { + buf.pop(); + if buf.ends_with('\r') { + buf.pop(); + } + } + out.push(buf.as_bytes().to_vec()); + } + Ok(out) +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::Serialize; + use rand::{SeedableRng, rngs::StdRng}; + use std::{env, fs::File, io::Write as IoWrite}; + + #[test] + fn writes_requested_number_of_lines() { + let mut path = env::temp_dir(); + path.push("static_line_rate_test.txt"); + { + let mut f = File::create(&path).unwrap(); + writeln!(f, "alpha").unwrap(); + writeln!(f, "beta").unwrap(); + writeln!(f, "gamma").unwrap(); + } + + let mut serializer = StaticLinesPerSecond::new(&path, 2).unwrap(); + let mut buf = Vec::new(); + let mut rng = StdRng::seed_from_u64(42); + + serializer.to_bytes(&mut rng, 1024, &mut buf).unwrap(); + assert_eq!(buf, b"alpha\nbeta\n"); + // Clean up + let _ = std::fs::remove_file(&path); + } +} diff --git a/lading_payload/src/statik_second.rs b/lading_payload/src/statik_second.rs new file mode 100644 index 000000000..c23ffb0f4 --- /dev/null +++ b/lading_payload/src/statik_second.rs @@ -0,0 +1,254 @@ +//! Static file payload that emits one second of log lines per block, based on +//! parsing a timestamp at the start of each line. The parsed timestamp is +//! stripped from emitted lines; only the message body is replayed. + +use std::{ + fs::File, + io::{BufRead, BufReader, Write}, + path::Path, +}; + +use chrono::{NaiveDateTime, TimeZone, Utc}; +use rand::Rng; +use tracing::debug; + +#[derive(Debug)] +struct BlockLines { + lines: Vec<Vec<u8>>, +} + +#[derive(thiserror::Error, Debug)] +/// Errors produced by [`StaticSecond`]. 
+pub enum Error { + /// IO error + #[error(transparent)] + Io(#[from] std::io::Error), + /// No lines were discovered in the provided path + #[error("No lines found in static path")] + NoLines, + /// Timestamp parsing failed for a line + #[error("Failed to parse timestamp from line: {0}")] + Timestamp(String), +} + +#[derive(Debug)] +/// Static payload grouped by second boundaries. +pub struct StaticSecond { + blocks: Vec<BlockLines>, + idx: usize, + last_lines_generated: u64, + emit_placeholder: bool, +} + +impl StaticSecond { + /// Create a new instance of `StaticSecond` + /// + /// Lines are grouped into blocks by the second of their timestamp. The + /// timestamp is parsed from the start of the line up to the first + /// whitespace, using `timestamp_format` (chrono strftime syntax). The + /// parsed timestamp is removed from the emitted line, leaving only the + /// remainder of the message. + pub fn new( + path: &Path, + timestamp_format: &str, + emit_placeholder: bool, + start_line_index: u64, + ) -> Result<Self, Error> { + let file = File::open(path)?; + let reader = BufReader::new(file); + + let mut blocks: Vec<BlockLines> = Vec::new(); + let mut current_sec: Option<i64> = None; + let mut current_lines: Vec<Vec<u8>> = Vec::new(); + + for line_res in reader.lines() { + let line = line_res?; + if line.trim().is_empty() { + continue; + } + + // Take prefix until first whitespace as the timestamp segment and + // drop it from the payload we store. + let mut parts = line.splitn(2, char::is_whitespace); + let ts_token = parts.next().unwrap_or(""); + let payload = parts.next().unwrap_or("").trim_start().as_bytes().to_vec(); + let ts = NaiveDateTime::parse_from_str(ts_token, timestamp_format) + .map_err(|_| Error::Timestamp(line.clone()))?; + let sec = Utc.from_utc_datetime(&ts).timestamp(); + + match current_sec { + Some(s) if s == sec => { + current_lines.push(payload); + } + Some(s) if s < sec => { + // Close out the previous second. 
+ blocks.push(BlockLines { + lines: current_lines, + }); + // Fill missing seconds with empty buckets when placeholders + // are requested. + if emit_placeholder { + let mut missing = s + 1; + while missing < sec { + blocks.push(BlockLines { lines: Vec::new() }); + missing += 1; + } + } + current_lines = vec![payload]; + current_sec = Some(sec); + } + Some(s) => { + // Unexpected time travel backwards; treat as new bucket to + // preserve ordering. + blocks.push(BlockLines { + lines: current_lines, + }); + current_lines = vec![payload]; + current_sec = Some(sec); + debug!("Encountered out-of-order timestamp: current {s}, new {sec}"); + } + None => { + current_sec = Some(sec); + current_lines.push(payload); + } + } + } + + if !current_lines.is_empty() { + blocks.push(BlockLines { + lines: current_lines, + }); + } else if emit_placeholder && current_sec.is_some() { + // If the file ended right after emitting placeholders, ensure the + // last bucket is represented. + blocks.push(BlockLines { lines: Vec::new() }); + } + + if blocks.is_empty() { + return Err(Error::NoLines); + } + + // Apply starting line offset by trimming leading lines across buckets. 
+ let total_lines: u64 = blocks.iter().map(|b| b.lines.len() as u64).sum(); + let mut start_idx = 0usize; + if total_lines > 0 && start_line_index > 0 { + let mut remaining = start_line_index % total_lines; + if remaining > 0 { + for (idx, block) in blocks.iter_mut().enumerate() { + let len = block.lines.len() as u64; + if len == 0 { + continue; + } + if remaining >= len { + remaining -= len; + continue; + } else { + let cut = remaining as usize; + block.lines.drain(0..cut); + start_idx = idx; + break; + } + } + } + } + + debug!( + "StaticSecond loaded {} second-buckets from {}", + blocks.len(), + path.display() + ); + + Ok(Self { + blocks, + idx: start_idx, + last_lines_generated: 0, + emit_placeholder, + }) + } +} + +impl crate::Serialize for StaticSecond { + fn to_bytes<R, W>( + &mut self, + _rng: R, + max_bytes: usize, + writer: &mut W, + ) -> Result<(), crate::Error> + where + R: Rng + Sized, + W: Write, + { + self.last_lines_generated = 0; + if self.blocks.is_empty() { + return Ok(()); + } + + // Choose blocks strictly sequentially to preserve chronological replay (no rng based on seed) + let block = &self.blocks[self.idx]; + + let mut bytes_written = 0usize; + if block.lines.is_empty() { + // When requested, emit a minimal placeholder (one newline) for + // empty seconds to preserve timing gaps without breaking the + // non-zero block invariant. 
+ if self.emit_placeholder && max_bytes > 0 { + writer.write_all(b"\n")?; + } + } else { + for line in &block.lines { + let needed = line.len() + 1; // newline + if bytes_written + needed > max_bytes { + break; + } + writer.write_all(line)?; + writer.write_all(b"\n")?; + bytes_written += needed; + self.last_lines_generated += 1; + } + } + + self.idx = (self.idx + 1) % self.blocks.len(); + Ok(()) + } + + fn data_points_generated(&self) -> Option<u64> { + Some(self.last_lines_generated) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::Serialize; + use rand::{SeedableRng, rngs::StdRng}; + use std::{fs::File, io::Write as IoWrite}; + use tempfile::tempdir; + + #[test] + fn removes_timestamp_from_output() { + let temp_dir = tempdir().unwrap(); + let path = temp_dir.path().join("static_second_test.log"); + { + let mut f = File::create(&path).unwrap(); + writeln!(f, "2024-01-01T00:00:00 first").unwrap(); + writeln!(f, "2024-01-01T00:00:00 second").unwrap(); + writeln!(f, "2024-01-01T00:00:01 third").unwrap(); + } + + let mut serializer = StaticSecond::new( + &path, + "%Y-%m-%dT%H:%M:%S", + /* emit_placeholder */ false, + /* start_line_index */ 0, + ) + .unwrap(); + let mut rng = StdRng::seed_from_u64(7); + let mut buf = Vec::new(); + + serializer.to_bytes(&mut rng, 1024, &mut buf).unwrap(); + assert_eq!(buf, b"first\nsecond\n"); + + buf.clear(); + serializer.to_bytes(&mut rng, 1024, &mut buf).unwrap(); + assert_eq!(buf, b"third\n"); + } +}