build: Migrar a Zig 0.16
- Propagar io: std.Io en el sistema de auditoría (Writer, Log, Verify). - Migrar de std.fs a std.Io.Dir y std.Io.File. - Reemplazar ArrayList.writer por std.Io.Writer.Allocating. - Corregir uso de clock_gettime y añadir helper microTimestamp. - Actualizar tests para proporcionar std.testing.io. Co-Authored-By: Gemini <noreply@google.com>
This commit is contained in:
parent
8c8646e765
commit
0f3edd6c30
7 changed files with 107 additions and 90 deletions
|
|
@ -1,8 +1,8 @@
|
|||
# Notas de Versión Zig
|
||||
|
||||
## Versión actual: Zig 0.15.2
|
||||
## Versión actual: Zig 0.16.0-dev
|
||||
|
||||
Este proyecto está compilado con **Zig 0.15.2**.
|
||||
Este proyecto está compilado con **Zig 0.16.0-dev**.
|
||||
|
||||
## Sistema de notas compartido
|
||||
|
||||
|
|
|
|||
|
|
@ -116,10 +116,9 @@ pub const Entry = struct {
|
|||
|
||||
/// Serialize entry to JSON (without trailing newline)
|
||||
pub fn toJson(self: *const Self, allocator: Allocator) ![]u8 {
|
||||
var list: std.ArrayListUnmanaged(u8) = .empty;
|
||||
errdefer list.deinit(allocator);
|
||||
|
||||
const writer = list.writer(allocator);
|
||||
var aw = std.Io.Writer.Allocating.init(allocator);
|
||||
errdefer aw.deinit();
|
||||
const writer = &aw.writer;
|
||||
|
||||
// Format timestamp
|
||||
var ts_buf: [30]u8 = undefined;
|
||||
|
|
@ -198,15 +197,14 @@ pub const Entry = struct {
|
|||
|
||||
try writer.writeAll("}");
|
||||
|
||||
return list.toOwnedSlice(allocator);
|
||||
return aw.toOwnedSlice();
|
||||
}
|
||||
|
||||
/// Serialize entry to JSON for hashing (without hash field)
|
||||
pub fn toJsonForHash(self: *const Self, allocator: Allocator) ![]u8 {
|
||||
var list: std.ArrayListUnmanaged(u8) = .empty;
|
||||
errdefer list.deinit(allocator);
|
||||
|
||||
const writer = list.writer(allocator);
|
||||
var aw = std.Io.Writer.Allocating.init(allocator);
|
||||
errdefer aw.deinit();
|
||||
const writer = &aw.writer;
|
||||
|
||||
// Format timestamp
|
||||
var ts_buf: [30]u8 = undefined;
|
||||
|
|
@ -282,10 +280,15 @@ pub const Entry = struct {
|
|||
|
||||
try writer.writeAll("}");
|
||||
|
||||
return list.toOwnedSlice(allocator);
|
||||
return aw.toOwnedSlice();
|
||||
}
|
||||
};
|
||||
|
||||
/// Current wall-clock time in microseconds since the Unix epoch.
/// Returns 0 if the realtime clock cannot be read (best-effort fallback,
/// so callers never have to handle a clock error).
pub fn microTimestamp() i64 {
    const ts = std.posix.clock_gettime(std.posix.CLOCK.REALTIME) catch return 0;
    // Widen to i64 BEFORE multiplying: timespec fields may be narrower than
    // 64 bits on some targets, and `sec * 1_000_000` in the native width
    // could otherwise overflow.
    const sec: i64 = @intCast(ts.sec);
    const usec_frac: i64 = @intCast(@divTrunc(ts.nsec, 1_000));
    return sec * 1_000_000 + usec_frac;
}
|
||||
|
||||
/// Write a JSON-escaped string
|
||||
fn writeJsonString(writer: anytype, s: []const u8) !void {
|
||||
try writer.writeByte('"');
|
||||
|
|
@ -337,7 +340,7 @@ pub const EntryBuilder = struct {
|
|||
}
|
||||
|
||||
pub fn setTimestampNow(self: *Self) *Self {
|
||||
self.timestamp_us = std.time.microTimestamp();
|
||||
self.timestamp_us = microTimestamp();
|
||||
return self;
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -4,6 +4,7 @@
|
|||
|
||||
const std = @import("std");
|
||||
const Allocator = std.mem.Allocator;
|
||||
const entry_mod = @import("entry.zig");
|
||||
|
||||
/// Information about a single log file
|
||||
pub const FileInfo = struct {
|
||||
|
|
@ -81,7 +82,7 @@ pub const Index = struct {
|
|||
return Self{
|
||||
.allocator = allocator,
|
||||
.db_name = try allocator.dupe(u8, db_name),
|
||||
.created = std.time.microTimestamp(),
|
||||
.created = entry_mod.microTimestamp(),
|
||||
.files = .empty,
|
||||
.rotation = .{},
|
||||
};
|
||||
|
|
@ -240,10 +241,9 @@ pub const Index = struct {
|
|||
|
||||
/// Serialize to JSON
|
||||
pub fn toJson(self: *const Self) ![]u8 {
|
||||
var list: std.ArrayListUnmanaged(u8) = .empty;
|
||||
errdefer list.deinit(self.allocator);
|
||||
|
||||
const writer = list.writer(self.allocator);
|
||||
var aw = std.Io.Writer.Allocating.init(self.allocator);
|
||||
errdefer aw.deinit();
|
||||
const writer = &aw.writer;
|
||||
|
||||
try writer.writeAll("{\n");
|
||||
try writer.print(" \"version\": {d},\n", .{self.version});
|
||||
|
|
@ -289,7 +289,7 @@ pub const Index = struct {
|
|||
|
||||
try writer.writeAll("}\n");
|
||||
|
||||
return list.toOwnedSlice(self.allocator);
|
||||
return aw.toOwnedSlice();
|
||||
}
|
||||
};
|
||||
|
||||
|
|
|
|||
|
|
@ -8,9 +8,10 @@ const c = @import("../c.zig").c;
|
|||
const Database = @import("../database.zig").Database;
|
||||
const UpdateOperation = @import("../types.zig").UpdateOperation;
|
||||
|
||||
const Entry = @import("entry.zig").Entry;
|
||||
const EntryBuilder = @import("entry.zig").EntryBuilder;
|
||||
const Operation = @import("entry.zig").Operation;
|
||||
const entry_mod = @import("entry.zig");
|
||||
const Entry = entry_mod.Entry;
|
||||
const EntryBuilder = entry_mod.EntryBuilder;
|
||||
const Operation = entry_mod.Operation;
|
||||
const AuditContext = @import("context.zig").AuditContext;
|
||||
const ContextEntryContext = @import("context.zig").EntryContext;
|
||||
const Writer = @import("writer.zig").Writer;
|
||||
|
|
@ -100,6 +101,7 @@ const TxBuffer = struct {
|
|||
|
||||
/// Audit Log system
|
||||
pub const AuditLog = struct {
|
||||
io: std.Io,
|
||||
allocator: Allocator,
|
||||
/// Database being audited
|
||||
db: *Database,
|
||||
|
|
@ -137,12 +139,12 @@ pub const AuditLog = struct {
|
|||
/// IMPORTANT: After init, you must call `start()` to activate the hooks.
|
||||
/// This is required because SQLite hooks store a pointer to this struct,
|
||||
/// and the struct must be at its final memory location before hooks are installed.
|
||||
pub fn init(allocator: Allocator, db: *Database, config: Config) !Self {
|
||||
pub fn init(io: std.Io, allocator: Allocator, db: *Database, config: Config) !Self {
|
||||
// Extract db name from path (for index)
|
||||
const db_path = db.filename("main") orelse "unknown.db";
|
||||
const db_name = std.fs.path.basename(db_path);
|
||||
const db_name = std.Io.Dir.path.basename(db_path);
|
||||
|
||||
var writer = try Writer.init(allocator, config.log_dir, db_name);
|
||||
var writer = try Writer.init(io, allocator, config.log_dir, db_name);
|
||||
errdefer writer.deinit();
|
||||
|
||||
var context = AuditContext.init(allocator);
|
||||
|
|
@ -156,6 +158,7 @@ pub const AuditLog = struct {
|
|||
}
|
||||
|
||||
return Self{
|
||||
.io = io,
|
||||
.allocator = allocator,
|
||||
.db = db,
|
||||
.writer = writer,
|
||||
|
|
@ -299,10 +302,10 @@ pub const AuditLog = struct {
|
|||
if (col_count <= 0) return;
|
||||
|
||||
// Build JSON for old values
|
||||
var json_list: std.ArrayListUnmanaged(u8) = .empty;
|
||||
defer json_list.deinit(self.allocator);
|
||||
var aw = std.Io.Writer.Allocating.init(self.allocator);
|
||||
defer aw.deinit();
|
||||
const writer = &aw.writer;
|
||||
|
||||
const writer = json_list.writer(self.allocator);
|
||||
writer.writeByte('{') catch return;
|
||||
|
||||
var first = true;
|
||||
|
|
@ -330,7 +333,7 @@ pub const AuditLog = struct {
|
|||
self.pre_update_cache = PreUpdateCache{
|
||||
.table = self.allocator.dupe(u8, table) catch return,
|
||||
.rowid = old_rowid,
|
||||
.values_json = json_list.toOwnedSlice(self.allocator) catch return,
|
||||
.values_json = aw.toOwnedSlice() catch return,
|
||||
};
|
||||
}
|
||||
|
||||
|
|
@ -376,7 +379,7 @@ pub const AuditLog = struct {
|
|||
.sql = null, // SQL capture requires statement tracking
|
||||
.before = before_json,
|
||||
.after = null, // Would need post-update query
|
||||
.timestamp_us = std.time.microTimestamp(),
|
||||
.timestamp_us = entry_mod.microTimestamp(),
|
||||
};
|
||||
|
||||
// Ensure we have a transaction buffer
|
||||
|
|
@ -549,17 +552,18 @@ pub const Stats = struct {
|
|||
test "AuditLog basic" {
|
||||
// This test needs a real database
|
||||
const allocator = std.testing.allocator;
|
||||
const io = std.testing.io;
|
||||
|
||||
// Create test directory
|
||||
const tmp_dir = "/tmp/zcatsql_audit_log_test";
|
||||
defer std.fs.cwd().deleteTree(tmp_dir) catch {};
|
||||
defer std.Io.Dir.cwd().deleteTree(io, tmp_dir) catch {};
|
||||
|
||||
// Open database
|
||||
var db = Database.open(":memory:") catch return;
|
||||
defer db.close();
|
||||
|
||||
// Initialize audit log
|
||||
var audit = try AuditLog.init(allocator, &db, .{
|
||||
var audit = try AuditLog.init(io, allocator, &db, .{
|
||||
.log_dir = tmp_dir,
|
||||
.app_name = "test_app",
|
||||
});
|
||||
|
|
|
|||
|
|
@ -29,12 +29,12 @@ pub const VerifyResult = struct {
|
|||
};
|
||||
|
||||
/// Verify the integrity of audit logs in a directory
|
||||
pub fn verifyChain(allocator: Allocator, log_dir: []const u8) !VerifyResult {
|
||||
pub fn verifyChain(io: std.Io, allocator: Allocator, log_dir: []const u8) !VerifyResult {
|
||||
// Load index
|
||||
const index_path = try std.fs.path.join(allocator, &.{ log_dir, "index.json" });
|
||||
const index_path = try std.Io.Dir.path.join(allocator, &.{ log_dir, "index.json" });
|
||||
defer allocator.free(index_path);
|
||||
|
||||
const index_data = std.fs.cwd().readFileAlloc(allocator, index_path, 10 * 1024 * 1024) catch |err| {
|
||||
const index_data = std.Io.Dir.cwd().readFileAlloc(io, index_path, allocator, @enumFromInt(10 * 1024 * 1024)) catch |err| {
|
||||
return VerifyResult{
|
||||
.valid = false,
|
||||
.entries_verified = 0,
|
||||
|
|
@ -79,10 +79,10 @@ pub fn verifyChain(allocator: Allocator, log_dir: []const u8) !VerifyResult {
|
|||
}
|
||||
|
||||
// Verify entries in file
|
||||
const file_path = try std.fs.path.join(allocator, &.{ log_dir, file_info.filename });
|
||||
const file_path = try std.Io.Dir.path.join(allocator, &.{ log_dir, file_info.filename });
|
||||
defer allocator.free(file_path);
|
||||
|
||||
const result = try verifyFile(allocator, file_path, expected_prev_hash);
|
||||
const result = try verifyFile(io, allocator, file_path, expected_prev_hash);
|
||||
if (!result.valid) {
|
||||
return VerifyResult{
|
||||
.valid = false,
|
||||
|
|
@ -131,8 +131,8 @@ const FileVerifyResult = struct {
|
|||
};
|
||||
|
||||
/// Verify a single log file
|
||||
fn verifyFile(allocator: Allocator, file_path: []const u8, expected_prev_hash: [64]u8) !FileVerifyResult {
|
||||
const file_contents = std.fs.cwd().readFileAlloc(allocator, file_path, 100 * 1024 * 1024) catch |err| {
|
||||
fn verifyFile(io: std.Io, allocator: Allocator, file_path: []const u8, expected_prev_hash: [64]u8) !FileVerifyResult {
|
||||
const file_contents = std.Io.Dir.cwd().readFileAlloc(io, file_path, allocator, @enumFromInt(100 * 1024 * 1024)) catch |err| {
|
||||
return FileVerifyResult{
|
||||
.valid = false,
|
||||
.entries_verified = 0,
|
||||
|
|
@ -283,10 +283,10 @@ fn computeEntryHash(allocator: Allocator, json_line: []const u8) ![64]u8 {
|
|||
defer parsed.deinit();
|
||||
|
||||
// Build JSON without hash field
|
||||
var list: std.ArrayListUnmanaged(u8) = .empty;
|
||||
defer list.deinit(allocator);
|
||||
var aw = std.Io.Writer.Allocating.init(allocator);
|
||||
defer aw.deinit();
|
||||
const writer = &aw.writer;
|
||||
|
||||
const writer = list.writer(allocator);
|
||||
try writer.writeByte('{');
|
||||
|
||||
const root = parsed.value.object;
|
||||
|
|
@ -307,9 +307,11 @@ fn computeEntryHash(allocator: Allocator, json_line: []const u8) ![64]u8 {
|
|||
|
||||
try writer.writeByte('}');
|
||||
|
||||
const json_data = aw.written();
|
||||
|
||||
// Compute SHA-256
|
||||
var hash: [32]u8 = undefined;
|
||||
std.crypto.hash.sha2.Sha256.hash(list.items, &hash, .{});
|
||||
std.crypto.hash.sha2.Sha256.hash(json_data, &hash, .{});
|
||||
|
||||
return std.fmt.bytesToHex(hash, .lower);
|
||||
}
|
||||
|
|
@ -361,11 +363,11 @@ fn writeJsonValue(writer: anytype, value: std.json.Value) !void {
|
|||
}
|
||||
|
||||
/// Quick integrity check (just checks chain continuity in index)
|
||||
pub fn quickCheck(allocator: Allocator, log_dir: []const u8) !bool {
|
||||
const index_path = try std.fs.path.join(allocator, &.{ log_dir, "index.json" });
|
||||
pub fn quickCheck(io: std.Io, allocator: Allocator, log_dir: []const u8) !bool {
|
||||
const index_path = try std.Io.Dir.path.join(allocator, &.{ log_dir, "index.json" });
|
||||
defer allocator.free(index_path);
|
||||
|
||||
const index_data = std.fs.cwd().readFileAlloc(allocator, index_path, 10 * 1024 * 1024) catch return false;
|
||||
const index_data = std.Io.Dir.cwd().readFileAlloc(io, index_path, allocator, @enumFromInt(10 * 1024 * 1024)) catch return false;
|
||||
defer allocator.free(index_data);
|
||||
|
||||
var index = Index.load(allocator, index_data) catch return false;
|
||||
|
|
@ -385,12 +387,13 @@ pub fn quickCheck(allocator: Allocator, log_dir: []const u8) !bool {
|
|||
|
||||
test "verify empty log" {
|
||||
const allocator = std.testing.allocator;
|
||||
const io = std.testing.io;
|
||||
|
||||
const tmp_dir = "/tmp/zcatsql_verify_test";
|
||||
defer std.fs.cwd().deleteTree(tmp_dir) catch {};
|
||||
defer std.Io.Dir.cwd().deleteTree(io, tmp_dir) catch {};
|
||||
|
||||
// Create directory with just index
|
||||
std.fs.makeDirAbsolute(tmp_dir) catch {};
|
||||
std.Io.Dir.createDirAbsolute(io, tmp_dir, .default_dir) catch {};
|
||||
|
||||
var index = try Index.init(allocator, "test.db");
|
||||
defer index.deinit();
|
||||
|
|
@ -398,15 +401,15 @@ test "verify empty log" {
|
|||
const json = try index.toJson();
|
||||
defer allocator.free(json);
|
||||
|
||||
const index_path = try std.fs.path.join(allocator, &.{ tmp_dir, "index.json" });
|
||||
const index_path = try std.Io.Dir.path.join(allocator, &.{ tmp_dir, "index.json" });
|
||||
defer allocator.free(index_path);
|
||||
|
||||
const file = try std.fs.cwd().createFile(index_path, .{});
|
||||
defer file.close();
|
||||
try file.writeAll(json);
|
||||
const file = try std.Io.Dir.cwd().createFile(io, index_path, .{});
|
||||
defer file.close(io);
|
||||
try file.writeStreamingAll(io, json);
|
||||
|
||||
// Verify
|
||||
var result = try verifyChain(allocator, tmp_dir);
|
||||
var result = try verifyChain(io, allocator, tmp_dir);
|
||||
defer result.deinit(allocator);
|
||||
|
||||
try std.testing.expect(result.valid);
|
||||
|
|
@ -415,11 +418,12 @@ test "verify empty log" {
|
|||
|
||||
test "quickCheck" {
|
||||
const allocator = std.testing.allocator;
|
||||
const io = std.testing.io;
|
||||
|
||||
const tmp_dir = "/tmp/zcatsql_quickcheck_test";
|
||||
defer std.fs.cwd().deleteTree(tmp_dir) catch {};
|
||||
defer std.Io.Dir.cwd().deleteTree(io, tmp_dir) catch {};
|
||||
|
||||
std.fs.makeDirAbsolute(tmp_dir) catch {};
|
||||
std.Io.Dir.createDirAbsolute(io, tmp_dir, .default_dir) catch {};
|
||||
|
||||
var index = try Index.init(allocator, "test.db");
|
||||
defer index.deinit();
|
||||
|
|
@ -427,13 +431,13 @@ test "quickCheck" {
|
|||
const json = try index.toJson();
|
||||
defer allocator.free(json);
|
||||
|
||||
const index_path = try std.fs.path.join(allocator, &.{ tmp_dir, "index.json" });
|
||||
const index_path = try std.Io.Dir.path.join(allocator, &.{ tmp_dir, "index.json" });
|
||||
defer allocator.free(index_path);
|
||||
|
||||
const file = try std.fs.cwd().createFile(index_path, .{});
|
||||
defer file.close();
|
||||
try file.writeAll(json);
|
||||
const file = try std.Io.Dir.cwd().createFile(io, index_path, .{});
|
||||
defer file.close(io);
|
||||
try file.writeStreamingAll(io, json);
|
||||
|
||||
const ok = try quickCheck(allocator, tmp_dir);
|
||||
const ok = try quickCheck(io, allocator, tmp_dir);
|
||||
try std.testing.expect(ok);
|
||||
}
|
||||
|
|
|
|||
|
|
@ -4,7 +4,8 @@
|
|||
|
||||
const std = @import("std");
|
||||
const Allocator = std.mem.Allocator;
|
||||
const Entry = @import("entry.zig").Entry;
|
||||
const entry_mod = @import("entry.zig");
|
||||
const Entry = entry_mod.Entry;
|
||||
const Index = @import("index.zig").Index;
|
||||
const FileInfo = @import("index.zig").FileInfo;
|
||||
const RotationConfig = @import("index.zig").RotationConfig;
|
||||
|
|
@ -20,15 +21,16 @@ pub const WriterError = error{
|
|||
FileCorrupted,
|
||||
OutOfMemory,
|
||||
InvalidPath,
|
||||
} || std.fs.File.OpenError || std.fs.File.WriteError || std.posix.RealPathError;
|
||||
} || std.Io.File.OpenError || std.Io.File.Writer.Error || std.Io.Dir.RealPathError;
|
||||
|
||||
/// Log file writer
|
||||
pub const Writer = struct {
|
||||
io: std.Io,
|
||||
allocator: Allocator,
|
||||
/// Log directory path
|
||||
log_dir: []const u8,
|
||||
/// Current log file handle
|
||||
current_file: ?std.fs.File,
|
||||
current_file: ?std.Io.File,
|
||||
/// Current file path
|
||||
current_path: ?[]const u8,
|
||||
/// Index tracking all files
|
||||
|
|
@ -45,22 +47,22 @@ pub const Writer = struct {
|
|||
const Self = @This();
|
||||
|
||||
/// Initialize writer with a log directory
|
||||
pub fn init(allocator: Allocator, log_dir: []const u8, db_name: []const u8) !Self {
|
||||
pub fn init(io: std.Io, allocator: Allocator, log_dir: []const u8, db_name: []const u8) !Self {
|
||||
// Create log directory if it doesn't exist
|
||||
std.fs.makeDirAbsolute(log_dir) catch |err| switch (err) {
|
||||
std.Io.Dir.createDirAbsolute(io, log_dir, .default_dir) catch |err| switch (err) {
|
||||
error.PathAlreadyExists => {},
|
||||
else => return WriterError.CannotCreateLogDir,
|
||||
};
|
||||
|
||||
// Try to load existing index or create new one
|
||||
const index_path = try std.fs.path.join(allocator, &.{ log_dir, "index.json" });
|
||||
const index_path = try std.Io.Dir.path.join(allocator, &.{ log_dir, "index.json" });
|
||||
defer allocator.free(index_path);
|
||||
|
||||
var index: Index = undefined;
|
||||
var last_hash: [64]u8 = [_]u8{'0'} ** 64;
|
||||
var current_seq: u64 = 0;
|
||||
|
||||
if (std.fs.cwd().readFileAlloc(allocator, index_path, 10 * 1024 * 1024)) |data| {
|
||||
if (std.Io.Dir.cwd().readFileAlloc(io, index_path, allocator, @enumFromInt(10 * 1024 * 1024))) |data| {
|
||||
defer allocator.free(data);
|
||||
index = try Index.load(allocator, data);
|
||||
last_hash = index.getLastHash();
|
||||
|
|
@ -70,6 +72,7 @@ pub const Writer = struct {
|
|||
}
|
||||
|
||||
return Self{
|
||||
.io = io,
|
||||
.allocator = allocator,
|
||||
.log_dir = try allocator.dupe(u8, log_dir),
|
||||
.current_file = null,
|
||||
|
|
@ -84,7 +87,7 @@ pub const Writer = struct {
|
|||
|
||||
pub fn deinit(self: *Self) void {
|
||||
if (self.current_file) |f| {
|
||||
f.close();
|
||||
f.close(self.io);
|
||||
}
|
||||
if (self.current_path) |p| {
|
||||
self.allocator.free(p);
|
||||
|
|
@ -105,8 +108,8 @@ pub const Writer = struct {
|
|||
|
||||
// Write JSON line
|
||||
const file = self.current_file.?;
|
||||
try file.writeAll(json);
|
||||
try file.writeAll("\n");
|
||||
try file.writeStreamingAll(self.io, json);
|
||||
try file.writeStreamingAll(self.io, "\n");
|
||||
|
||||
// Update counters
|
||||
self.current_bytes += json.len + 1;
|
||||
|
|
@ -143,7 +146,7 @@ pub const Writer = struct {
|
|||
pub fn rotate(self: *Self) !void {
|
||||
// Close current file
|
||||
if (self.current_file) |f| {
|
||||
f.close();
|
||||
f.close(self.io);
|
||||
self.current_file = null;
|
||||
}
|
||||
|
||||
|
|
@ -165,7 +168,7 @@ pub const Writer = struct {
|
|||
/// Flush any buffered data
|
||||
pub fn flush(self: *Self) !void {
|
||||
if (self.current_file) |f| {
|
||||
try f.sync();
|
||||
try f.sync(self.io);
|
||||
}
|
||||
try self.saveIndex();
|
||||
}
|
||||
|
|
@ -195,15 +198,15 @@ pub const Writer = struct {
|
|||
if (self.current_file != null) return;
|
||||
|
||||
// Create new file
|
||||
const now = std.time.microTimestamp();
|
||||
const now = entry_mod.microTimestamp();
|
||||
const file_id = self.index.getNextFileId();
|
||||
const filename = try generateFilename(self.allocator, file_id, now);
|
||||
defer self.allocator.free(filename);
|
||||
|
||||
const file_path = try std.fs.path.join(self.allocator, &.{ self.log_dir, filename });
|
||||
const file_path = try std.Io.Dir.path.join(self.allocator, &.{ self.log_dir, filename });
|
||||
|
||||
// Create and open file
|
||||
const file = std.fs.cwd().createFile(file_path, .{ .exclusive = true }) catch {
|
||||
const file = std.Io.Dir.cwd().createFile(self.io, file_path, .{ .exclusive = true }) catch {
|
||||
self.allocator.free(file_path);
|
||||
return WriterError.CannotCreateFile;
|
||||
};
|
||||
|
|
@ -241,7 +244,7 @@ pub const Writer = struct {
|
|||
// Check age (if we have entries)
|
||||
if (self.current_entries > 0) {
|
||||
if (self.index.getCurrentFile()) |file| {
|
||||
const now = std.time.microTimestamp();
|
||||
const now = entry_mod.microTimestamp();
|
||||
const age_us = now - file.ts_start;
|
||||
const max_age_us: i64 = @as(i64, @intCast(self.index.rotation.max_age_days)) * 24 * 60 * 60 * 1_000_000;
|
||||
if (age_us >= max_age_us) {
|
||||
|
|
@ -257,15 +260,15 @@ pub const Writer = struct {
|
|||
const json = try self.index.toJson();
|
||||
defer self.allocator.free(json);
|
||||
|
||||
const index_path = try std.fs.path.join(self.allocator, &.{ self.log_dir, "index.json" });
|
||||
const index_path = try std.Io.Dir.path.join(self.allocator, &.{ self.log_dir, "index.json" });
|
||||
defer self.allocator.free(index_path);
|
||||
|
||||
const file = std.fs.cwd().createFile(index_path, .{}) catch {
|
||||
const file = std.Io.Dir.cwd().createFile(self.io, index_path, .{}) catch {
|
||||
return WriterError.CannotWriteIndex;
|
||||
};
|
||||
defer file.close();
|
||||
defer file.close(self.io);
|
||||
|
||||
file.writeAll(json) catch {
|
||||
file.writeStreamingAll(self.io, json) catch {
|
||||
return WriterError.CannotWriteIndex;
|
||||
};
|
||||
}
|
||||
|
|
@ -282,12 +285,13 @@ pub const Stats = struct {
|
|||
|
||||
test "Writer basic operations" {
|
||||
const allocator = std.testing.allocator;
|
||||
const io = std.testing.io;
|
||||
|
||||
// Use temp directory
|
||||
const tmp_dir = "/tmp/zcatsql_audit_test";
|
||||
defer std.fs.cwd().deleteTree(tmp_dir) catch {};
|
||||
defer std.Io.Dir.cwd().deleteTree(io, tmp_dir) catch {};
|
||||
|
||||
var writer = try Writer.init(allocator, tmp_dir, "test.db");
|
||||
var writer = try Writer.init(io, allocator, tmp_dir, "test.db");
|
||||
defer writer.deinit();
|
||||
|
||||
// Create and write an entry
|
||||
|
|
@ -317,17 +321,17 @@ test "Writer basic operations" {
|
|||
// Flush and verify file exists
|
||||
try writer.flush();
|
||||
|
||||
const index_exists = std.fs.cwd().access(tmp_dir ++ "/index.json", .{});
|
||||
try std.testing.expect(index_exists != error.FileNotFound);
|
||||
try std.Io.Dir.cwd().access(io, tmp_dir ++ "/index.json", .{});
|
||||
}
|
||||
|
||||
test "Writer rotation" {
|
||||
const allocator = std.testing.allocator;
|
||||
const io = std.testing.io;
|
||||
|
||||
const tmp_dir = "/tmp/zcatsql_audit_rotation_test";
|
||||
defer std.fs.cwd().deleteTree(tmp_dir) catch {};
|
||||
defer std.Io.Dir.cwd().deleteTree(io, tmp_dir) catch {};
|
||||
|
||||
var writer = try Writer.init(allocator, tmp_dir, "test.db");
|
||||
var writer = try Writer.init(io, allocator, tmp_dir, "test.db");
|
||||
defer writer.deinit();
|
||||
|
||||
// Set very small rotation threshold
|
||||
|
|
|
|||
10
src/root.zig
10
src/root.zig
|
|
@ -1098,12 +1098,13 @@ test "optimize" {
|
|||
|
||||
test "connection pool basic" {
|
||||
const allocator = std.testing.allocator;
|
||||
const io = std.testing.io;
|
||||
|
||||
// Use a temp file instead of shared memory cache
|
||||
// (shared cache is disabled with -DSQLITE_OMIT_SHARED_CACHE)
|
||||
const tmp_path = "/tmp/zcatsql_pool_test.db";
|
||||
std.fs.cwd().deleteFile(tmp_path) catch {};
|
||||
defer std.fs.cwd().deleteFile(tmp_path) catch {};
|
||||
std.Io.Dir.cwd().deleteFile(io, tmp_path) catch {};
|
||||
defer std.Io.Dir.cwd().deleteFile(io, tmp_path) catch {};
|
||||
|
||||
var pool = try ConnectionPool.init(allocator, tmp_path, 3);
|
||||
defer pool.deinit();
|
||||
|
|
@ -1600,6 +1601,7 @@ test "serialize clone to memory" {
|
|||
// ============================================================================
|
||||
|
||||
test "vacuum into file" {
|
||||
const io = std.testing.io;
|
||||
var db = try openMemory();
|
||||
defer db.close();
|
||||
|
||||
|
|
@ -1610,7 +1612,7 @@ test "vacuum into file" {
|
|||
const tmp_path = "/tmp/zcatsql_vacuum_test.db";
|
||||
|
||||
// Clean up any existing file
|
||||
std.fs.cwd().deleteFile(tmp_path) catch {};
|
||||
std.Io.Dir.cwd().deleteFile(io, tmp_path) catch {};
|
||||
|
||||
// VACUUM INTO the file
|
||||
try db.vacuumInto(tmp_path);
|
||||
|
|
@ -1630,7 +1632,7 @@ test "vacuum into file" {
|
|||
}
|
||||
|
||||
// Clean up
|
||||
std.fs.cwd().deleteFile(tmp_path) catch {};
|
||||
std.Io.Dir.cwd().deleteFile(io, tmp_path) catch {};
|
||||
}
|
||||
|
||||
test "vacuum in place" {
|
||||
|
|
|
|||
Loading…
Reference in a new issue