build: Migrar a Zig 0.16

- Propagar io: std.Io en el sistema de auditoría (Writer, Log, Verify).
- Migrar de std.fs a std.Io.Dir y std.Io.File.
- Reemplazar ArrayList.writer por std.Io.Writer.Allocating.
- Corregir uso de clock_gettime y añadir helper microTimestamp.
- Actualizar tests para proporcionar std.testing.io.

Co-Authored-By: Gemini <noreply@google.com>
This commit is contained in:
R.Eugenio 2026-01-18 01:30:36 +01:00
parent 8c8646e765
commit 0f3edd6c30
7 changed files with 107 additions and 90 deletions

View file

@ -1,8 +1,8 @@
# Notas de Versión Zig # Notas de Versión Zig
## Versión actual: Zig 0.15.2 ## Versión actual: Zig 0.16.0-dev
Este proyecto está compilado con **Zig 0.15.2**. Este proyecto está compilado con **Zig 0.16.0-dev**.
## Sistema de notas compartido ## Sistema de notas compartido

View file

@ -116,10 +116,9 @@ pub const Entry = struct {
/// Serialize entry to JSON (without trailing newline) /// Serialize entry to JSON (without trailing newline)
pub fn toJson(self: *const Self, allocator: Allocator) ![]u8 { pub fn toJson(self: *const Self, allocator: Allocator) ![]u8 {
var list: std.ArrayListUnmanaged(u8) = .empty; var aw = std.Io.Writer.Allocating.init(allocator);
errdefer list.deinit(allocator); errdefer aw.deinit();
const writer = &aw.writer;
const writer = list.writer(allocator);
// Format timestamp // Format timestamp
var ts_buf: [30]u8 = undefined; var ts_buf: [30]u8 = undefined;
@ -198,15 +197,14 @@ pub const Entry = struct {
try writer.writeAll("}"); try writer.writeAll("}");
return list.toOwnedSlice(allocator); return aw.toOwnedSlice();
} }
/// Serialize entry to JSON for hashing (without hash field) /// Serialize entry to JSON for hashing (without hash field)
pub fn toJsonForHash(self: *const Self, allocator: Allocator) ![]u8 { pub fn toJsonForHash(self: *const Self, allocator: Allocator) ![]u8 {
var list: std.ArrayListUnmanaged(u8) = .empty; var aw = std.Io.Writer.Allocating.init(allocator);
errdefer list.deinit(allocator); errdefer aw.deinit();
const writer = &aw.writer;
const writer = list.writer(allocator);
// Format timestamp // Format timestamp
var ts_buf: [30]u8 = undefined; var ts_buf: [30]u8 = undefined;
@ -282,10 +280,15 @@ pub const Entry = struct {
try writer.writeAll("}"); try writer.writeAll("}");
return list.toOwnedSlice(allocator); return aw.toOwnedSlice();
} }
}; };
pub fn microTimestamp() i64 {
const ts = std.posix.clock_gettime(std.posix.CLOCK.REALTIME) catch return 0;
return ts.sec * 1_000_000 + @divTrunc(ts.nsec, 1_000);
}
/// Write a JSON-escaped string /// Write a JSON-escaped string
fn writeJsonString(writer: anytype, s: []const u8) !void { fn writeJsonString(writer: anytype, s: []const u8) !void {
try writer.writeByte('"'); try writer.writeByte('"');
@ -337,7 +340,7 @@ pub const EntryBuilder = struct {
} }
pub fn setTimestampNow(self: *Self) *Self { pub fn setTimestampNow(self: *Self) *Self {
self.timestamp_us = std.time.microTimestamp(); self.timestamp_us = microTimestamp();
return self; return self;
} }

View file

@ -4,6 +4,7 @@
const std = @import("std"); const std = @import("std");
const Allocator = std.mem.Allocator; const Allocator = std.mem.Allocator;
const entry_mod = @import("entry.zig");
/// Information about a single log file /// Information about a single log file
pub const FileInfo = struct { pub const FileInfo = struct {
@ -81,7 +82,7 @@ pub const Index = struct {
return Self{ return Self{
.allocator = allocator, .allocator = allocator,
.db_name = try allocator.dupe(u8, db_name), .db_name = try allocator.dupe(u8, db_name),
.created = std.time.microTimestamp(), .created = entry_mod.microTimestamp(),
.files = .empty, .files = .empty,
.rotation = .{}, .rotation = .{},
}; };
@ -240,10 +241,9 @@ pub const Index = struct {
/// Serialize to JSON /// Serialize to JSON
pub fn toJson(self: *const Self) ![]u8 { pub fn toJson(self: *const Self) ![]u8 {
var list: std.ArrayListUnmanaged(u8) = .empty; var aw = std.Io.Writer.Allocating.init(self.allocator);
errdefer list.deinit(self.allocator); errdefer aw.deinit();
const writer = &aw.writer;
const writer = list.writer(self.allocator);
try writer.writeAll("{\n"); try writer.writeAll("{\n");
try writer.print(" \"version\": {d},\n", .{self.version}); try writer.print(" \"version\": {d},\n", .{self.version});
@ -289,7 +289,7 @@ pub const Index = struct {
try writer.writeAll("}\n"); try writer.writeAll("}\n");
return list.toOwnedSlice(self.allocator); return aw.toOwnedSlice();
} }
}; };

View file

@ -8,9 +8,10 @@ const c = @import("../c.zig").c;
const Database = @import("../database.zig").Database; const Database = @import("../database.zig").Database;
const UpdateOperation = @import("../types.zig").UpdateOperation; const UpdateOperation = @import("../types.zig").UpdateOperation;
const Entry = @import("entry.zig").Entry; const entry_mod = @import("entry.zig");
const EntryBuilder = @import("entry.zig").EntryBuilder; const Entry = entry_mod.Entry;
const Operation = @import("entry.zig").Operation; const EntryBuilder = entry_mod.EntryBuilder;
const Operation = entry_mod.Operation;
const AuditContext = @import("context.zig").AuditContext; const AuditContext = @import("context.zig").AuditContext;
const ContextEntryContext = @import("context.zig").EntryContext; const ContextEntryContext = @import("context.zig").EntryContext;
const Writer = @import("writer.zig").Writer; const Writer = @import("writer.zig").Writer;
@ -100,6 +101,7 @@ const TxBuffer = struct {
/// Audit Log system /// Audit Log system
pub const AuditLog = struct { pub const AuditLog = struct {
io: std.Io,
allocator: Allocator, allocator: Allocator,
/// Database being audited /// Database being audited
db: *Database, db: *Database,
@ -137,12 +139,12 @@ pub const AuditLog = struct {
/// IMPORTANT: After init, you must call `start()` to activate the hooks. /// IMPORTANT: After init, you must call `start()` to activate the hooks.
/// This is required because SQLite hooks store a pointer to this struct, /// This is required because SQLite hooks store a pointer to this struct,
/// and the struct must be at its final memory location before hooks are installed. /// and the struct must be at its final memory location before hooks are installed.
pub fn init(allocator: Allocator, db: *Database, config: Config) !Self { pub fn init(io: std.Io, allocator: Allocator, db: *Database, config: Config) !Self {
// Extract db name from path (for index) // Extract db name from path (for index)
const db_path = db.filename("main") orelse "unknown.db"; const db_path = db.filename("main") orelse "unknown.db";
const db_name = std.fs.path.basename(db_path); const db_name = std.Io.Dir.path.basename(db_path);
var writer = try Writer.init(allocator, config.log_dir, db_name); var writer = try Writer.init(io, allocator, config.log_dir, db_name);
errdefer writer.deinit(); errdefer writer.deinit();
var context = AuditContext.init(allocator); var context = AuditContext.init(allocator);
@ -156,6 +158,7 @@ pub const AuditLog = struct {
} }
return Self{ return Self{
.io = io,
.allocator = allocator, .allocator = allocator,
.db = db, .db = db,
.writer = writer, .writer = writer,
@ -299,10 +302,10 @@ pub const AuditLog = struct {
if (col_count <= 0) return; if (col_count <= 0) return;
// Build JSON for old values // Build JSON for old values
var json_list: std.ArrayListUnmanaged(u8) = .empty; var aw = std.Io.Writer.Allocating.init(self.allocator);
defer json_list.deinit(self.allocator); defer aw.deinit();
const writer = &aw.writer;
const writer = json_list.writer(self.allocator);
writer.writeByte('{') catch return; writer.writeByte('{') catch return;
var first = true; var first = true;
@ -330,7 +333,7 @@ pub const AuditLog = struct {
self.pre_update_cache = PreUpdateCache{ self.pre_update_cache = PreUpdateCache{
.table = self.allocator.dupe(u8, table) catch return, .table = self.allocator.dupe(u8, table) catch return,
.rowid = old_rowid, .rowid = old_rowid,
.values_json = json_list.toOwnedSlice(self.allocator) catch return, .values_json = aw.toOwnedSlice() catch return,
}; };
} }
@ -376,7 +379,7 @@ pub const AuditLog = struct {
.sql = null, // SQL capture requires statement tracking .sql = null, // SQL capture requires statement tracking
.before = before_json, .before = before_json,
.after = null, // Would need post-update query .after = null, // Would need post-update query
.timestamp_us = std.time.microTimestamp(), .timestamp_us = entry_mod.microTimestamp(),
}; };
// Ensure we have a transaction buffer // Ensure we have a transaction buffer
@ -549,17 +552,18 @@ pub const Stats = struct {
test "AuditLog basic" { test "AuditLog basic" {
// This test needs a real database // This test needs a real database
const allocator = std.testing.allocator; const allocator = std.testing.allocator;
const io = std.testing.io;
// Create test directory // Create test directory
const tmp_dir = "/tmp/zcatsql_audit_log_test"; const tmp_dir = "/tmp/zcatsql_audit_log_test";
defer std.fs.cwd().deleteTree(tmp_dir) catch {}; defer std.Io.Dir.cwd().deleteTree(io, tmp_dir) catch {};
// Open database // Open database
var db = Database.open(":memory:") catch return; var db = Database.open(":memory:") catch return;
defer db.close(); defer db.close();
// Initialize audit log // Initialize audit log
var audit = try AuditLog.init(allocator, &db, .{ var audit = try AuditLog.init(io, allocator, &db, .{
.log_dir = tmp_dir, .log_dir = tmp_dir,
.app_name = "test_app", .app_name = "test_app",
}); });

View file

@ -29,12 +29,12 @@ pub const VerifyResult = struct {
}; };
/// Verify the integrity of audit logs in a directory /// Verify the integrity of audit logs in a directory
pub fn verifyChain(allocator: Allocator, log_dir: []const u8) !VerifyResult { pub fn verifyChain(io: std.Io, allocator: Allocator, log_dir: []const u8) !VerifyResult {
// Load index // Load index
const index_path = try std.fs.path.join(allocator, &.{ log_dir, "index.json" }); const index_path = try std.Io.Dir.path.join(allocator, &.{ log_dir, "index.json" });
defer allocator.free(index_path); defer allocator.free(index_path);
const index_data = std.fs.cwd().readFileAlloc(allocator, index_path, 10 * 1024 * 1024) catch |err| { const index_data = std.Io.Dir.cwd().readFileAlloc(io, index_path, allocator, @enumFromInt(10 * 1024 * 1024)) catch |err| {
return VerifyResult{ return VerifyResult{
.valid = false, .valid = false,
.entries_verified = 0, .entries_verified = 0,
@ -79,10 +79,10 @@ pub fn verifyChain(allocator: Allocator, log_dir: []const u8) !VerifyResult {
} }
// Verify entries in file // Verify entries in file
const file_path = try std.fs.path.join(allocator, &.{ log_dir, file_info.filename }); const file_path = try std.Io.Dir.path.join(allocator, &.{ log_dir, file_info.filename });
defer allocator.free(file_path); defer allocator.free(file_path);
const result = try verifyFile(allocator, file_path, expected_prev_hash); const result = try verifyFile(io, allocator, file_path, expected_prev_hash);
if (!result.valid) { if (!result.valid) {
return VerifyResult{ return VerifyResult{
.valid = false, .valid = false,
@ -131,8 +131,8 @@ const FileVerifyResult = struct {
}; };
/// Verify a single log file /// Verify a single log file
fn verifyFile(allocator: Allocator, file_path: []const u8, expected_prev_hash: [64]u8) !FileVerifyResult { fn verifyFile(io: std.Io, allocator: Allocator, file_path: []const u8, expected_prev_hash: [64]u8) !FileVerifyResult {
const file_contents = std.fs.cwd().readFileAlloc(allocator, file_path, 100 * 1024 * 1024) catch |err| { const file_contents = std.Io.Dir.cwd().readFileAlloc(io, file_path, allocator, @enumFromInt(100 * 1024 * 1024)) catch |err| {
return FileVerifyResult{ return FileVerifyResult{
.valid = false, .valid = false,
.entries_verified = 0, .entries_verified = 0,
@ -283,10 +283,10 @@ fn computeEntryHash(allocator: Allocator, json_line: []const u8) ![64]u8 {
defer parsed.deinit(); defer parsed.deinit();
// Build JSON without hash field // Build JSON without hash field
var list: std.ArrayListUnmanaged(u8) = .empty; var aw = std.Io.Writer.Allocating.init(allocator);
defer list.deinit(allocator); defer aw.deinit();
const writer = &aw.writer;
const writer = list.writer(allocator);
try writer.writeByte('{'); try writer.writeByte('{');
const root = parsed.value.object; const root = parsed.value.object;
@ -307,9 +307,11 @@ fn computeEntryHash(allocator: Allocator, json_line: []const u8) ![64]u8 {
try writer.writeByte('}'); try writer.writeByte('}');
const json_data = aw.written();
// Compute SHA-256 // Compute SHA-256
var hash: [32]u8 = undefined; var hash: [32]u8 = undefined;
std.crypto.hash.sha2.Sha256.hash(list.items, &hash, .{}); std.crypto.hash.sha2.Sha256.hash(json_data, &hash, .{});
return std.fmt.bytesToHex(hash, .lower); return std.fmt.bytesToHex(hash, .lower);
} }
@ -361,11 +363,11 @@ fn writeJsonValue(writer: anytype, value: std.json.Value) !void {
} }
/// Quick integrity check (just checks chain continuity in index) /// Quick integrity check (just checks chain continuity in index)
pub fn quickCheck(allocator: Allocator, log_dir: []const u8) !bool { pub fn quickCheck(io: std.Io, allocator: Allocator, log_dir: []const u8) !bool {
const index_path = try std.fs.path.join(allocator, &.{ log_dir, "index.json" }); const index_path = try std.Io.Dir.path.join(allocator, &.{ log_dir, "index.json" });
defer allocator.free(index_path); defer allocator.free(index_path);
const index_data = std.fs.cwd().readFileAlloc(allocator, index_path, 10 * 1024 * 1024) catch return false; const index_data = std.Io.Dir.cwd().readFileAlloc(io, index_path, allocator, @enumFromInt(10 * 1024 * 1024)) catch return false;
defer allocator.free(index_data); defer allocator.free(index_data);
var index = Index.load(allocator, index_data) catch return false; var index = Index.load(allocator, index_data) catch return false;
@ -385,12 +387,13 @@ pub fn quickCheck(allocator: Allocator, log_dir: []const u8) !bool {
test "verify empty log" { test "verify empty log" {
const allocator = std.testing.allocator; const allocator = std.testing.allocator;
const io = std.testing.io;
const tmp_dir = "/tmp/zcatsql_verify_test"; const tmp_dir = "/tmp/zcatsql_verify_test";
defer std.fs.cwd().deleteTree(tmp_dir) catch {}; defer std.Io.Dir.cwd().deleteTree(io, tmp_dir) catch {};
// Create directory with just index // Create directory with just index
std.fs.makeDirAbsolute(tmp_dir) catch {}; std.Io.Dir.createDirAbsolute(io, tmp_dir, .default_dir) catch {};
var index = try Index.init(allocator, "test.db"); var index = try Index.init(allocator, "test.db");
defer index.deinit(); defer index.deinit();
@ -398,15 +401,15 @@ test "verify empty log" {
const json = try index.toJson(); const json = try index.toJson();
defer allocator.free(json); defer allocator.free(json);
const index_path = try std.fs.path.join(allocator, &.{ tmp_dir, "index.json" }); const index_path = try std.Io.Dir.path.join(allocator, &.{ tmp_dir, "index.json" });
defer allocator.free(index_path); defer allocator.free(index_path);
const file = try std.fs.cwd().createFile(index_path, .{}); const file = try std.Io.Dir.cwd().createFile(io, index_path, .{});
defer file.close(); defer file.close(io);
try file.writeAll(json); try file.writeStreamingAll(io, json);
// Verify // Verify
var result = try verifyChain(allocator, tmp_dir); var result = try verifyChain(io, allocator, tmp_dir);
defer result.deinit(allocator); defer result.deinit(allocator);
try std.testing.expect(result.valid); try std.testing.expect(result.valid);
@ -415,11 +418,12 @@ test "verify empty log" {
test "quickCheck" { test "quickCheck" {
const allocator = std.testing.allocator; const allocator = std.testing.allocator;
const io = std.testing.io;
const tmp_dir = "/tmp/zcatsql_quickcheck_test"; const tmp_dir = "/tmp/zcatsql_quickcheck_test";
defer std.fs.cwd().deleteTree(tmp_dir) catch {}; defer std.Io.Dir.cwd().deleteTree(io, tmp_dir) catch {};
std.fs.makeDirAbsolute(tmp_dir) catch {}; std.Io.Dir.createDirAbsolute(io, tmp_dir, .default_dir) catch {};
var index = try Index.init(allocator, "test.db"); var index = try Index.init(allocator, "test.db");
defer index.deinit(); defer index.deinit();
@ -427,13 +431,13 @@ test "quickCheck" {
const json = try index.toJson(); const json = try index.toJson();
defer allocator.free(json); defer allocator.free(json);
const index_path = try std.fs.path.join(allocator, &.{ tmp_dir, "index.json" }); const index_path = try std.Io.Dir.path.join(allocator, &.{ tmp_dir, "index.json" });
defer allocator.free(index_path); defer allocator.free(index_path);
const file = try std.fs.cwd().createFile(index_path, .{}); const file = try std.Io.Dir.cwd().createFile(io, index_path, .{});
defer file.close(); defer file.close(io);
try file.writeAll(json); try file.writeStreamingAll(io, json);
const ok = try quickCheck(allocator, tmp_dir); const ok = try quickCheck(io, allocator, tmp_dir);
try std.testing.expect(ok); try std.testing.expect(ok);
} }

View file

@ -4,7 +4,8 @@
const std = @import("std"); const std = @import("std");
const Allocator = std.mem.Allocator; const Allocator = std.mem.Allocator;
const Entry = @import("entry.zig").Entry; const entry_mod = @import("entry.zig");
const Entry = entry_mod.Entry;
const Index = @import("index.zig").Index; const Index = @import("index.zig").Index;
const FileInfo = @import("index.zig").FileInfo; const FileInfo = @import("index.zig").FileInfo;
const RotationConfig = @import("index.zig").RotationConfig; const RotationConfig = @import("index.zig").RotationConfig;
@ -20,15 +21,16 @@ pub const WriterError = error{
FileCorrupted, FileCorrupted,
OutOfMemory, OutOfMemory,
InvalidPath, InvalidPath,
} || std.fs.File.OpenError || std.fs.File.WriteError || std.posix.RealPathError; } || std.Io.File.OpenError || std.Io.File.Writer.Error || std.Io.Dir.RealPathError;
/// Log file writer /// Log file writer
pub const Writer = struct { pub const Writer = struct {
io: std.Io,
allocator: Allocator, allocator: Allocator,
/// Log directory path /// Log directory path
log_dir: []const u8, log_dir: []const u8,
/// Current log file handle /// Current log file handle
current_file: ?std.fs.File, current_file: ?std.Io.File,
/// Current file path /// Current file path
current_path: ?[]const u8, current_path: ?[]const u8,
/// Index tracking all files /// Index tracking all files
@ -45,22 +47,22 @@ pub const Writer = struct {
const Self = @This(); const Self = @This();
/// Initialize writer with a log directory /// Initialize writer with a log directory
pub fn init(allocator: Allocator, log_dir: []const u8, db_name: []const u8) !Self { pub fn init(io: std.Io, allocator: Allocator, log_dir: []const u8, db_name: []const u8) !Self {
// Create log directory if it doesn't exist // Create log directory if it doesn't exist
std.fs.makeDirAbsolute(log_dir) catch |err| switch (err) { std.Io.Dir.createDirAbsolute(io, log_dir, .default_dir) catch |err| switch (err) {
error.PathAlreadyExists => {}, error.PathAlreadyExists => {},
else => return WriterError.CannotCreateLogDir, else => return WriterError.CannotCreateLogDir,
}; };
// Try to load existing index or create new one // Try to load existing index or create new one
const index_path = try std.fs.path.join(allocator, &.{ log_dir, "index.json" }); const index_path = try std.Io.Dir.path.join(allocator, &.{ log_dir, "index.json" });
defer allocator.free(index_path); defer allocator.free(index_path);
var index: Index = undefined; var index: Index = undefined;
var last_hash: [64]u8 = [_]u8{'0'} ** 64; var last_hash: [64]u8 = [_]u8{'0'} ** 64;
var current_seq: u64 = 0; var current_seq: u64 = 0;
if (std.fs.cwd().readFileAlloc(allocator, index_path, 10 * 1024 * 1024)) |data| { if (std.Io.Dir.cwd().readFileAlloc(io, index_path, allocator, @enumFromInt(10 * 1024 * 1024))) |data| {
defer allocator.free(data); defer allocator.free(data);
index = try Index.load(allocator, data); index = try Index.load(allocator, data);
last_hash = index.getLastHash(); last_hash = index.getLastHash();
@ -70,6 +72,7 @@ pub const Writer = struct {
} }
return Self{ return Self{
.io = io,
.allocator = allocator, .allocator = allocator,
.log_dir = try allocator.dupe(u8, log_dir), .log_dir = try allocator.dupe(u8, log_dir),
.current_file = null, .current_file = null,
@ -84,7 +87,7 @@ pub const Writer = struct {
pub fn deinit(self: *Self) void { pub fn deinit(self: *Self) void {
if (self.current_file) |f| { if (self.current_file) |f| {
f.close(); f.close(self.io);
} }
if (self.current_path) |p| { if (self.current_path) |p| {
self.allocator.free(p); self.allocator.free(p);
@ -105,8 +108,8 @@ pub const Writer = struct {
// Write JSON line // Write JSON line
const file = self.current_file.?; const file = self.current_file.?;
try file.writeAll(json); try file.writeStreamingAll(self.io, json);
try file.writeAll("\n"); try file.writeStreamingAll(self.io, "\n");
// Update counters // Update counters
self.current_bytes += json.len + 1; self.current_bytes += json.len + 1;
@ -143,7 +146,7 @@ pub const Writer = struct {
pub fn rotate(self: *Self) !void { pub fn rotate(self: *Self) !void {
// Close current file // Close current file
if (self.current_file) |f| { if (self.current_file) |f| {
f.close(); f.close(self.io);
self.current_file = null; self.current_file = null;
} }
@ -165,7 +168,7 @@ pub const Writer = struct {
/// Flush any buffered data /// Flush any buffered data
pub fn flush(self: *Self) !void { pub fn flush(self: *Self) !void {
if (self.current_file) |f| { if (self.current_file) |f| {
try f.sync(); try f.sync(self.io);
} }
try self.saveIndex(); try self.saveIndex();
} }
@ -195,15 +198,15 @@ pub const Writer = struct {
if (self.current_file != null) return; if (self.current_file != null) return;
// Create new file // Create new file
const now = std.time.microTimestamp(); const now = entry_mod.microTimestamp();
const file_id = self.index.getNextFileId(); const file_id = self.index.getNextFileId();
const filename = try generateFilename(self.allocator, file_id, now); const filename = try generateFilename(self.allocator, file_id, now);
defer self.allocator.free(filename); defer self.allocator.free(filename);
const file_path = try std.fs.path.join(self.allocator, &.{ self.log_dir, filename }); const file_path = try std.Io.Dir.path.join(self.allocator, &.{ self.log_dir, filename });
// Create and open file // Create and open file
const file = std.fs.cwd().createFile(file_path, .{ .exclusive = true }) catch { const file = std.Io.Dir.cwd().createFile(self.io, file_path, .{ .exclusive = true }) catch {
self.allocator.free(file_path); self.allocator.free(file_path);
return WriterError.CannotCreateFile; return WriterError.CannotCreateFile;
}; };
@ -241,7 +244,7 @@ pub const Writer = struct {
// Check age (if we have entries) // Check age (if we have entries)
if (self.current_entries > 0) { if (self.current_entries > 0) {
if (self.index.getCurrentFile()) |file| { if (self.index.getCurrentFile()) |file| {
const now = std.time.microTimestamp(); const now = entry_mod.microTimestamp();
const age_us = now - file.ts_start; const age_us = now - file.ts_start;
const max_age_us: i64 = @as(i64, @intCast(self.index.rotation.max_age_days)) * 24 * 60 * 60 * 1_000_000; const max_age_us: i64 = @as(i64, @intCast(self.index.rotation.max_age_days)) * 24 * 60 * 60 * 1_000_000;
if (age_us >= max_age_us) { if (age_us >= max_age_us) {
@ -257,15 +260,15 @@ pub const Writer = struct {
const json = try self.index.toJson(); const json = try self.index.toJson();
defer self.allocator.free(json); defer self.allocator.free(json);
const index_path = try std.fs.path.join(self.allocator, &.{ self.log_dir, "index.json" }); const index_path = try std.Io.Dir.path.join(self.allocator, &.{ self.log_dir, "index.json" });
defer self.allocator.free(index_path); defer self.allocator.free(index_path);
const file = std.fs.cwd().createFile(index_path, .{}) catch { const file = std.Io.Dir.cwd().createFile(self.io, index_path, .{}) catch {
return WriterError.CannotWriteIndex; return WriterError.CannotWriteIndex;
}; };
defer file.close(); defer file.close(self.io);
file.writeAll(json) catch { file.writeStreamingAll(self.io, json) catch {
return WriterError.CannotWriteIndex; return WriterError.CannotWriteIndex;
}; };
} }
@ -282,12 +285,13 @@ pub const Stats = struct {
test "Writer basic operations" { test "Writer basic operations" {
const allocator = std.testing.allocator; const allocator = std.testing.allocator;
const io = std.testing.io;
// Use temp directory // Use temp directory
const tmp_dir = "/tmp/zcatsql_audit_test"; const tmp_dir = "/tmp/zcatsql_audit_test";
defer std.fs.cwd().deleteTree(tmp_dir) catch {}; defer std.Io.Dir.cwd().deleteTree(io, tmp_dir) catch {};
var writer = try Writer.init(allocator, tmp_dir, "test.db"); var writer = try Writer.init(io, allocator, tmp_dir, "test.db");
defer writer.deinit(); defer writer.deinit();
// Create and write an entry // Create and write an entry
@ -317,17 +321,17 @@ test "Writer basic operations" {
// Flush and verify file exists // Flush and verify file exists
try writer.flush(); try writer.flush();
const index_exists = std.fs.cwd().access(tmp_dir ++ "/index.json", .{}); try std.Io.Dir.cwd().access(io, tmp_dir ++ "/index.json", .{});
try std.testing.expect(index_exists != error.FileNotFound);
} }
test "Writer rotation" { test "Writer rotation" {
const allocator = std.testing.allocator; const allocator = std.testing.allocator;
const io = std.testing.io;
const tmp_dir = "/tmp/zcatsql_audit_rotation_test"; const tmp_dir = "/tmp/zcatsql_audit_rotation_test";
defer std.fs.cwd().deleteTree(tmp_dir) catch {}; defer std.Io.Dir.cwd().deleteTree(io, tmp_dir) catch {};
var writer = try Writer.init(allocator, tmp_dir, "test.db"); var writer = try Writer.init(io, allocator, tmp_dir, "test.db");
defer writer.deinit(); defer writer.deinit();
// Set very small rotation threshold // Set very small rotation threshold

View file

@ -1098,12 +1098,13 @@ test "optimize" {
test "connection pool basic" { test "connection pool basic" {
const allocator = std.testing.allocator; const allocator = std.testing.allocator;
const io = std.testing.io;
// Use a temp file instead of shared memory cache // Use a temp file instead of shared memory cache
// (shared cache is disabled with -DSQLITE_OMIT_SHARED_CACHE) // (shared cache is disabled with -DSQLITE_OMIT_SHARED_CACHE)
const tmp_path = "/tmp/zcatsql_pool_test.db"; const tmp_path = "/tmp/zcatsql_pool_test.db";
std.fs.cwd().deleteFile(tmp_path) catch {}; std.Io.Dir.cwd().deleteFile(io, tmp_path) catch {};
defer std.fs.cwd().deleteFile(tmp_path) catch {}; defer std.Io.Dir.cwd().deleteFile(io, tmp_path) catch {};
var pool = try ConnectionPool.init(allocator, tmp_path, 3); var pool = try ConnectionPool.init(allocator, tmp_path, 3);
defer pool.deinit(); defer pool.deinit();
@ -1600,6 +1601,7 @@ test "serialize clone to memory" {
// ============================================================================ // ============================================================================
test "vacuum into file" { test "vacuum into file" {
const io = std.testing.io;
var db = try openMemory(); var db = try openMemory();
defer db.close(); defer db.close();
@ -1610,7 +1612,7 @@ test "vacuum into file" {
const tmp_path = "/tmp/zcatsql_vacuum_test.db"; const tmp_path = "/tmp/zcatsql_vacuum_test.db";
// Clean up any existing file // Clean up any existing file
std.fs.cwd().deleteFile(tmp_path) catch {}; std.Io.Dir.cwd().deleteFile(io, tmp_path) catch {};
// VACUUM INTO the file // VACUUM INTO the file
try db.vacuumInto(tmp_path); try db.vacuumInto(tmp_path);
@ -1630,7 +1632,7 @@ test "vacuum into file" {
} }
// Clean up // Clean up
std.fs.cwd().deleteFile(tmp_path) catch {}; std.Io.Dir.cwd().deleteFile(io, tmp_path) catch {};
} }
test "vacuum in place" { test "vacuum in place" {