std: experiment with using restricted function pointers in vtables

This commit is contained in:
Jacob Young
2026-04-14 06:31:13 -04:00
parent 221fb30b3c
commit 79cc29e238
9 changed files with 193 additions and 186 deletions
+134 -124
View File
@@ -58,7 +58,7 @@ pub const VTable = struct {
/// a unit of concurrency has been assigned to the returned task.
///
/// Thread-safe.
async: *const fn (
async: @Restricted(*const fn (
/// Corresponds to `Io.userdata`.
userdata: ?*anyopaque,
/// The pointer of this slice is an "eager" result value.
@@ -69,10 +69,10 @@ pub const VTable = struct {
/// Copied and then passed to `start`.
context: []const u8,
context_alignment: std.mem.Alignment,
start: *const fn (context: *const anyopaque, result: *anyopaque) void,
) ?*AnyFuture,
start: AnyFuture.Start,
) ?*AnyFuture),
/// Thread-safe.
concurrent: *const fn (
concurrent: @Restricted(*const fn (
/// Corresponds to `Io.userdata`.
userdata: ?*anyopaque,
result_len: usize,
@@ -80,12 +80,12 @@ pub const VTable = struct {
/// Copied and then passed to `start`.
context: []const u8,
context_alignment: std.mem.Alignment,
start: *const fn (context: *const anyopaque, result: *anyopaque) void,
) ConcurrentError!*AnyFuture,
start: AnyFuture.Start,
) ConcurrentError!*AnyFuture),
/// This function is only called when `async` returns a non-null value.
///
/// Thread-safe.
await: *const fn (
await: @Restricted(*const fn (
/// Corresponds to `Io.userdata`.
userdata: ?*anyopaque,
/// The same value that was returned from `async`.
@@ -94,13 +94,13 @@ pub const VTable = struct {
/// The length is equal to size in bytes of result type.
result: []u8,
result_alignment: std.mem.Alignment,
) void,
) void),
/// Equivalent to `await` but initiates cancel request.
///
/// This function is only called when `async` returns a non-null value.
///
/// Thread-safe.
cancel: *const fn (
cancel: @Restricted(*const fn (
/// Corresponds to `Io.userdata`.
userdata: ?*anyopaque,
/// The same value that was returned from `async`.
@@ -109,14 +109,14 @@ pub const VTable = struct {
/// The length is equal to size in bytes of result type.
result: []u8,
result_alignment: std.mem.Alignment,
) void,
) void),
/// When this function returns, implementation guarantees that `start` has
/// either already been called, or a unit of concurrency has been assigned
/// to the task of calling the function.
///
/// Thread-safe.
groupAsync: *const fn (
groupAsync: @Restricted(*const fn (
/// Corresponds to `Io.userdata`.
userdata: ?*anyopaque,
/// Owner of the spawned async task.
@@ -124,10 +124,10 @@ pub const VTable = struct {
/// Copied and then passed to `start`.
context: []const u8,
context_alignment: std.mem.Alignment,
start: *const fn (context: *const anyopaque) void,
) void,
start: Group.Start,
) void),
/// Thread-safe.
groupConcurrent: *const fn (
groupConcurrent: @Restricted(*const fn (
/// Corresponds to `Io.userdata`.
userdata: ?*anyopaque,
/// Owner of the spawned async task.
@@ -135,124 +135,124 @@ pub const VTable = struct {
/// Copied and then passed to `start`.
context: []const u8,
context_alignment: std.mem.Alignment,
start: *const fn (context: *const anyopaque) void,
) ConcurrentError!void,
groupAwait: *const fn (?*anyopaque, *Group, token: *anyopaque) Cancelable!void,
groupCancel: *const fn (?*anyopaque, *Group, token: *anyopaque) void,
start: Group.Start,
) ConcurrentError!void),
groupAwait: @Restricted(*const fn (?*anyopaque, *Group, token: *anyopaque) Cancelable!void),
groupCancel: @Restricted(*const fn (?*anyopaque, *Group, token: *anyopaque) void),
recancel: *const fn (?*anyopaque) void,
swapCancelProtection: *const fn (?*anyopaque, new: CancelProtection) CancelProtection,
checkCancel: *const fn (?*anyopaque) Cancelable!void,
recancel: @Restricted(*const fn (?*anyopaque) void),
swapCancelProtection: @Restricted(*const fn (?*anyopaque, new: CancelProtection) CancelProtection),
checkCancel: @Restricted(*const fn (?*anyopaque) Cancelable!void),
futexWait: *const fn (?*anyopaque, ptr: *const u32, expected: u32, Timeout) Cancelable!void,
futexWaitUncancelable: *const fn (?*anyopaque, ptr: *const u32, expected: u32) void,
futexWake: *const fn (?*anyopaque, ptr: *const u32, max_waiters: u32) void,
futexWait: @Restricted(*const fn (?*anyopaque, ptr: *const u32, expected: u32, Timeout) Cancelable!void),
futexWaitUncancelable: @Restricted(*const fn (?*anyopaque, ptr: *const u32, expected: u32) void),
futexWake: @Restricted(*const fn (?*anyopaque, ptr: *const u32, max_waiters: u32) void),
operate: *const fn (?*anyopaque, Operation) Cancelable!Operation.Result,
batchAwaitAsync: *const fn (?*anyopaque, *Batch) Cancelable!void,
batchAwaitConcurrent: *const fn (?*anyopaque, *Batch, Timeout) Batch.AwaitConcurrentError!void,
batchCancel: *const fn (?*anyopaque, *Batch) void,
operate: @Restricted(*const fn (?*anyopaque, Operation) Cancelable!Operation.Result),
batchAwaitAsync: @Restricted(*const fn (?*anyopaque, *Batch) Cancelable!void),
batchAwaitConcurrent: @Restricted(*const fn (?*anyopaque, *Batch, Timeout) Batch.AwaitConcurrentError!void),
batchCancel: @Restricted(*const fn (?*anyopaque, *Batch) void),
dirCreateDir: *const fn (?*anyopaque, Dir, []const u8, Dir.Permissions) Dir.CreateDirError!void,
dirCreateDirPath: *const fn (?*anyopaque, Dir, []const u8, Dir.Permissions) Dir.CreateDirPathError!Dir.CreatePathStatus,
dirCreateDirPathOpen: *const fn (?*anyopaque, Dir, []const u8, Dir.Permissions, Dir.OpenOptions) Dir.CreateDirPathOpenError!Dir,
dirOpenDir: *const fn (?*anyopaque, Dir, []const u8, Dir.OpenOptions) Dir.OpenError!Dir,
dirStat: *const fn (?*anyopaque, Dir) Dir.StatError!Dir.Stat,
dirStatFile: *const fn (?*anyopaque, Dir, []const u8, Dir.StatFileOptions) Dir.StatFileError!File.Stat,
dirAccess: *const fn (?*anyopaque, Dir, []const u8, Dir.AccessOptions) Dir.AccessError!void,
dirCreateFile: *const fn (?*anyopaque, Dir, []const u8, Dir.CreateFileOptions) File.OpenError!File,
dirCreateFileAtomic: *const fn (?*anyopaque, Dir, []const u8, Dir.CreateFileAtomicOptions) Dir.CreateFileAtomicError!File.Atomic,
dirOpenFile: *const fn (?*anyopaque, Dir, []const u8, Dir.OpenFileOptions) File.OpenError!File,
dirClose: *const fn (?*anyopaque, []const Dir) void,
dirRead: *const fn (?*anyopaque, *Dir.Reader, []Dir.Entry) Dir.Reader.Error!usize,
dirRealPath: *const fn (?*anyopaque, Dir, out_buffer: []u8) Dir.RealPathError!usize,
dirRealPathFile: *const fn (?*anyopaque, Dir, path_name: []const u8, out_buffer: []u8) Dir.RealPathFileError!usize,
dirDeleteFile: *const fn (?*anyopaque, Dir, []const u8) Dir.DeleteFileError!void,
dirDeleteDir: *const fn (?*anyopaque, Dir, []const u8) Dir.DeleteDirError!void,
dirRename: *const fn (?*anyopaque, old_dir: Dir, old_sub_path: []const u8, new_dir: Dir, new_sub_path: []const u8) Dir.RenameError!void,
dirRenamePreserve: *const fn (?*anyopaque, old_dir: Dir, old_sub_path: []const u8, new_dir: Dir, new_sub_path: []const u8) Dir.RenamePreserveError!void,
dirSymLink: *const fn (?*anyopaque, Dir, target_path: []const u8, sym_link_path: []const u8, Dir.SymLinkFlags) Dir.SymLinkError!void,
dirReadLink: *const fn (?*anyopaque, Dir, sub_path: []const u8, buffer: []u8) Dir.ReadLinkError!usize,
dirSetOwner: *const fn (?*anyopaque, Dir, ?File.Uid, ?File.Gid) Dir.SetOwnerError!void,
dirSetFileOwner: *const fn (?*anyopaque, Dir, []const u8, ?File.Uid, ?File.Gid, Dir.SetFileOwnerOptions) Dir.SetFileOwnerError!void,
dirSetPermissions: *const fn (?*anyopaque, Dir, Dir.Permissions) Dir.SetPermissionsError!void,
dirSetFilePermissions: *const fn (?*anyopaque, Dir, []const u8, File.Permissions, Dir.SetFilePermissionsOptions) Dir.SetFilePermissionsError!void,
dirSetTimestamps: *const fn (?*anyopaque, Dir, []const u8, Dir.SetTimestampsOptions) Dir.SetTimestampsError!void,
dirHardLink: *const fn (?*anyopaque, old_dir: Dir, old_sub_path: []const u8, new_dir: Dir, new_sub_path: []const u8, Dir.HardLinkOptions) Dir.HardLinkError!void,
dirCreateDir: @Restricted(*const fn (?*anyopaque, Dir, []const u8, Dir.Permissions) Dir.CreateDirError!void),
dirCreateDirPath: @Restricted(*const fn (?*anyopaque, Dir, []const u8, Dir.Permissions) Dir.CreateDirPathError!Dir.CreatePathStatus),
dirCreateDirPathOpen: @Restricted(*const fn (?*anyopaque, Dir, []const u8, Dir.Permissions, Dir.OpenOptions) Dir.CreateDirPathOpenError!Dir),
dirOpenDir: @Restricted(*const fn (?*anyopaque, Dir, []const u8, Dir.OpenOptions) Dir.OpenError!Dir),
dirStat: @Restricted(*const fn (?*anyopaque, Dir) Dir.StatError!Dir.Stat),
dirStatFile: @Restricted(*const fn (?*anyopaque, Dir, []const u8, Dir.StatFileOptions) Dir.StatFileError!File.Stat),
dirAccess: @Restricted(*const fn (?*anyopaque, Dir, []const u8, Dir.AccessOptions) Dir.AccessError!void),
dirCreateFile: @Restricted(*const fn (?*anyopaque, Dir, []const u8, Dir.CreateFileOptions) File.OpenError!File),
dirCreateFileAtomic: @Restricted(*const fn (?*anyopaque, Dir, []const u8, Dir.CreateFileAtomicOptions) Dir.CreateFileAtomicError!File.Atomic),
dirOpenFile: @Restricted(*const fn (?*anyopaque, Dir, []const u8, Dir.OpenFileOptions) File.OpenError!File),
dirClose: @Restricted(*const fn (?*anyopaque, []const Dir) void),
dirRead: @Restricted(*const fn (?*anyopaque, *Dir.Reader, []Dir.Entry) Dir.Reader.Error!usize),
dirRealPath: @Restricted(*const fn (?*anyopaque, Dir, out_buffer: []u8) Dir.RealPathError!usize),
dirRealPathFile: @Restricted(*const fn (?*anyopaque, Dir, path_name: []const u8, out_buffer: []u8) Dir.RealPathFileError!usize),
dirDeleteFile: @Restricted(*const fn (?*anyopaque, Dir, []const u8) Dir.DeleteFileError!void),
dirDeleteDir: @Restricted(*const fn (?*anyopaque, Dir, []const u8) Dir.DeleteDirError!void),
dirRename: @Restricted(*const fn (?*anyopaque, old_dir: Dir, old_sub_path: []const u8, new_dir: Dir, new_sub_path: []const u8) Dir.RenameError!void),
dirRenamePreserve: @Restricted(*const fn (?*anyopaque, old_dir: Dir, old_sub_path: []const u8, new_dir: Dir, new_sub_path: []const u8) Dir.RenamePreserveError!void),
dirSymLink: @Restricted(*const fn (?*anyopaque, Dir, target_path: []const u8, sym_link_path: []const u8, Dir.SymLinkFlags) Dir.SymLinkError!void),
dirReadLink: @Restricted(*const fn (?*anyopaque, Dir, sub_path: []const u8, buffer: []u8) Dir.ReadLinkError!usize),
dirSetOwner: @Restricted(*const fn (?*anyopaque, Dir, ?File.Uid, ?File.Gid) Dir.SetOwnerError!void),
dirSetFileOwner: @Restricted(*const fn (?*anyopaque, Dir, []const u8, ?File.Uid, ?File.Gid, Dir.SetFileOwnerOptions) Dir.SetFileOwnerError!void),
dirSetPermissions: @Restricted(*const fn (?*anyopaque, Dir, Dir.Permissions) Dir.SetPermissionsError!void),
dirSetFilePermissions: @Restricted(*const fn (?*anyopaque, Dir, []const u8, File.Permissions, Dir.SetFilePermissionsOptions) Dir.SetFilePermissionsError!void),
dirSetTimestamps: @Restricted(*const fn (?*anyopaque, Dir, []const u8, Dir.SetTimestampsOptions) Dir.SetTimestampsError!void),
dirHardLink: @Restricted(*const fn (?*anyopaque, old_dir: Dir, old_sub_path: []const u8, new_dir: Dir, new_sub_path: []const u8, Dir.HardLinkOptions) Dir.HardLinkError!void),
fileStat: *const fn (?*anyopaque, File) File.StatError!File.Stat,
fileLength: *const fn (?*anyopaque, File) File.LengthError!u64,
fileClose: *const fn (?*anyopaque, []const File) void,
fileWritePositional: *const fn (?*anyopaque, File, header: []const u8, data: []const []const u8, splat: usize, offset: u64) File.WritePositionalError!usize,
fileWriteFileStreaming: *const fn (?*anyopaque, File, header: []const u8, *Io.File.Reader, Io.Limit) File.Writer.WriteFileError!usize,
fileWriteFilePositional: *const fn (?*anyopaque, File, header: []const u8, *Io.File.Reader, Io.Limit, offset: u64) File.WriteFilePositionalError!usize,
fileStat: @Restricted(*const fn (?*anyopaque, File) File.StatError!File.Stat),
fileLength: @Restricted(*const fn (?*anyopaque, File) File.LengthError!u64),
fileClose: @Restricted(*const fn (?*anyopaque, []const File) void),
fileWritePositional: @Restricted(*const fn (?*anyopaque, File, header: []const u8, data: []const []const u8, splat: usize, offset: u64) File.WritePositionalError!usize),
fileWriteFileStreaming: @Restricted(*const fn (?*anyopaque, File, header: []const u8, *Io.File.Reader, Io.Limit) File.Writer.WriteFileError!usize),
fileWriteFilePositional: @Restricted(*const fn (?*anyopaque, File, header: []const u8, *Io.File.Reader, Io.Limit, offset: u64) File.WriteFilePositionalError!usize),
/// Returns 0 if reading at or past the end.
fileReadPositional: *const fn (?*anyopaque, File, data: []const []u8, offset: u64) File.ReadPositionalError!usize,
fileSeekBy: *const fn (?*anyopaque, File, relative_offset: i64) File.SeekError!void,
fileSeekTo: *const fn (?*anyopaque, File, absolute_offset: u64) File.SeekError!void,
fileSync: *const fn (?*anyopaque, File) File.SyncError!void,
fileIsTty: *const fn (?*anyopaque, File) Cancelable!bool,
fileEnableAnsiEscapeCodes: *const fn (?*anyopaque, File) File.EnableAnsiEscapeCodesError!void,
fileSupportsAnsiEscapeCodes: *const fn (?*anyopaque, File) Cancelable!bool,
fileSetLength: *const fn (?*anyopaque, File, u64) File.SetLengthError!void,
fileSetOwner: *const fn (?*anyopaque, File, ?File.Uid, ?File.Gid) File.SetOwnerError!void,
fileSetPermissions: *const fn (?*anyopaque, File, File.Permissions) File.SetPermissionsError!void,
fileSetTimestamps: *const fn (?*anyopaque, File, File.SetTimestampsOptions) File.SetTimestampsError!void,
fileLock: *const fn (?*anyopaque, File, File.Lock) File.LockError!void,
fileTryLock: *const fn (?*anyopaque, File, File.Lock) File.LockError!bool,
fileUnlock: *const fn (?*anyopaque, File) void,
fileDowngradeLock: *const fn (?*anyopaque, File) File.DowngradeLockError!void,
fileRealPath: *const fn (?*anyopaque, File, out_buffer: []u8) File.RealPathError!usize,
fileHardLink: *const fn (?*anyopaque, File, Dir, []const u8, File.HardLinkOptions) File.HardLinkError!void,
fileReadPositional: @Restricted(*const fn (?*anyopaque, File, data: []const []u8, offset: u64) File.ReadPositionalError!usize),
fileSeekBy: @Restricted(*const fn (?*anyopaque, File, relative_offset: i64) File.SeekError!void),
fileSeekTo: @Restricted(*const fn (?*anyopaque, File, absolute_offset: u64) File.SeekError!void),
fileSync: @Restricted(*const fn (?*anyopaque, File) File.SyncError!void),
fileIsTty: @Restricted(*const fn (?*anyopaque, File) Cancelable!bool),
fileEnableAnsiEscapeCodes: @Restricted(*const fn (?*anyopaque, File) File.EnableAnsiEscapeCodesError!void),
fileSupportsAnsiEscapeCodes: @Restricted(*const fn (?*anyopaque, File) Cancelable!bool),
fileSetLength: @Restricted(*const fn (?*anyopaque, File, u64) File.SetLengthError!void),
fileSetOwner: @Restricted(*const fn (?*anyopaque, File, ?File.Uid, ?File.Gid) File.SetOwnerError!void),
fileSetPermissions: @Restricted(*const fn (?*anyopaque, File, File.Permissions) File.SetPermissionsError!void),
fileSetTimestamps: @Restricted(*const fn (?*anyopaque, File, File.SetTimestampsOptions) File.SetTimestampsError!void),
fileLock: @Restricted(*const fn (?*anyopaque, File, File.Lock) File.LockError!void),
fileTryLock: @Restricted(*const fn (?*anyopaque, File, File.Lock) File.LockError!bool),
fileUnlock: @Restricted(*const fn (?*anyopaque, File) void),
fileDowngradeLock: @Restricted(*const fn (?*anyopaque, File) File.DowngradeLockError!void),
fileRealPath: @Restricted(*const fn (?*anyopaque, File, out_buffer: []u8) File.RealPathError!usize),
fileHardLink: @Restricted(*const fn (?*anyopaque, File, Dir, []const u8, File.HardLinkOptions) File.HardLinkError!void),
fileMemoryMapCreate: *const fn (?*anyopaque, File, File.MemoryMap.CreateOptions) File.MemoryMap.CreateError!File.MemoryMap,
fileMemoryMapDestroy: *const fn (?*anyopaque, *File.MemoryMap) void,
fileMemoryMapSetLength: *const fn (?*anyopaque, *File.MemoryMap, usize) File.MemoryMap.SetLengthError!void,
fileMemoryMapRead: *const fn (?*anyopaque, *File.MemoryMap) File.ReadPositionalError!void,
fileMemoryMapWrite: *const fn (?*anyopaque, *File.MemoryMap) File.WritePositionalError!void,
fileMemoryMapCreate: @Restricted(*const fn (?*anyopaque, File, File.MemoryMap.CreateOptions) File.MemoryMap.CreateError!File.MemoryMap),
fileMemoryMapDestroy: @Restricted(*const fn (?*anyopaque, *File.MemoryMap) void),
fileMemoryMapSetLength: @Restricted(*const fn (?*anyopaque, *File.MemoryMap, usize) File.MemoryMap.SetLengthError!void),
fileMemoryMapRead: @Restricted(*const fn (?*anyopaque, *File.MemoryMap) File.ReadPositionalError!void),
fileMemoryMapWrite: @Restricted(*const fn (?*anyopaque, *File.MemoryMap) File.WritePositionalError!void),
processExecutableOpen: *const fn (?*anyopaque, Dir.OpenFileOptions) std.process.OpenExecutableError!File,
processExecutablePath: *const fn (?*anyopaque, buffer: []u8) std.process.ExecutablePathError!usize,
lockStderr: *const fn (?*anyopaque, ?Terminal.Mode) Cancelable!LockedStderr,
tryLockStderr: *const fn (?*anyopaque, ?Terminal.Mode) Cancelable!?LockedStderr,
unlockStderr: *const fn (?*anyopaque) void,
processCurrentPath: *const fn (?*anyopaque, buffer: []u8) std.process.CurrentPathError!usize,
processSetCurrentDir: *const fn (?*anyopaque, Dir) std.process.SetCurrentDirError!void,
processSetCurrentPath: *const fn (?*anyopaque, []const u8) std.process.SetCurrentPathError!void,
processReplace: *const fn (?*anyopaque, std.process.ReplaceOptions) std.process.ReplaceError,
processReplacePath: *const fn (?*anyopaque, Dir, std.process.ReplaceOptions) std.process.ReplaceError,
processSpawn: *const fn (?*anyopaque, std.process.SpawnOptions) std.process.SpawnError!std.process.Child,
processSpawnPath: *const fn (?*anyopaque, Dir, std.process.SpawnOptions) std.process.SpawnError!std.process.Child,
childWait: *const fn (?*anyopaque, *std.process.Child) std.process.Child.WaitError!std.process.Child.Term,
childKill: *const fn (?*anyopaque, *std.process.Child) void,
processExecutableOpen: @Restricted(*const fn (?*anyopaque, Dir.OpenFileOptions) std.process.OpenExecutableError!File),
processExecutablePath: @Restricted(*const fn (?*anyopaque, buffer: []u8) std.process.ExecutablePathError!usize),
lockStderr: @Restricted(*const fn (?*anyopaque, ?Terminal.Mode) Cancelable!LockedStderr),
tryLockStderr: @Restricted(*const fn (?*anyopaque, ?Terminal.Mode) Cancelable!?LockedStderr),
unlockStderr: @Restricted(*const fn (?*anyopaque) void),
processCurrentPath: @Restricted(*const fn (?*anyopaque, buffer: []u8) std.process.CurrentPathError!usize),
processSetCurrentDir: @Restricted(*const fn (?*anyopaque, Dir) std.process.SetCurrentDirError!void),
processSetCurrentPath: @Restricted(*const fn (?*anyopaque, []const u8) std.process.SetCurrentPathError!void),
processReplace: @Restricted(*const fn (?*anyopaque, std.process.ReplaceOptions) std.process.ReplaceError),
processReplacePath: @Restricted(*const fn (?*anyopaque, Dir, std.process.ReplaceOptions) std.process.ReplaceError),
processSpawn: @Restricted(*const fn (?*anyopaque, std.process.SpawnOptions) std.process.SpawnError!std.process.Child),
processSpawnPath: @Restricted(*const fn (?*anyopaque, Dir, std.process.SpawnOptions) std.process.SpawnError!std.process.Child),
childWait: @Restricted(*const fn (?*anyopaque, *std.process.Child) std.process.Child.WaitError!std.process.Child.Term),
childKill: @Restricted(*const fn (?*anyopaque, *std.process.Child) void),
progressParentFile: *const fn (?*anyopaque) std.Progress.ParentFileError!File,
progressParentFile: @Restricted(*const fn (?*anyopaque) std.Progress.ParentFileError!File),
now: *const fn (?*anyopaque, Clock) Timestamp,
clockResolution: *const fn (?*anyopaque, Clock) Clock.ResolutionError!Duration,
sleep: *const fn (?*anyopaque, Timeout) Cancelable!void,
now: @Restricted(*const fn (?*anyopaque, Clock) Timestamp),
clockResolution: @Restricted(*const fn (?*anyopaque, Clock) Clock.ResolutionError!Duration),
sleep: @Restricted(*const fn (?*anyopaque, Timeout) Cancelable!void),
random: *const fn (?*anyopaque, buffer: []u8) void,
randomSecure: *const fn (?*anyopaque, buffer: []u8) RandomSecureError!void,
random: @Restricted(*const fn (?*anyopaque, buffer: []u8) void),
randomSecure: @Restricted(*const fn (?*anyopaque, buffer: []u8) RandomSecureError!void),
netListenIp: *const fn (?*anyopaque, address: *const net.IpAddress, net.IpAddress.ListenOptions) net.IpAddress.ListenError!net.Socket,
netAccept: *const fn (?*anyopaque, server: net.Socket.Handle, options: net.Server.AcceptOptions) net.Server.AcceptError!net.Socket,
netBindIp: *const fn (?*anyopaque, address: *const net.IpAddress, options: net.IpAddress.BindOptions) net.IpAddress.BindError!net.Socket,
netConnectIp: *const fn (?*anyopaque, address: *const net.IpAddress, options: net.IpAddress.ConnectOptions) net.IpAddress.ConnectError!net.Socket,
netListenUnix: *const fn (?*anyopaque, *const net.UnixAddress, net.UnixAddress.ListenOptions) net.UnixAddress.ListenError!net.Socket.Handle,
netConnectUnix: *const fn (?*anyopaque, *const net.UnixAddress) net.UnixAddress.ConnectError!net.Socket.Handle,
netSocketCreatePair: *const fn (?*anyopaque, net.Socket.CreatePairOptions) net.Socket.CreatePairError![2]net.Socket,
netSend: *const fn (?*anyopaque, net.Socket.Handle, []net.OutgoingMessage, net.SendFlags) struct { ?net.Socket.SendError, usize },
netWrite: *const fn (?*anyopaque, dest: net.Socket.Handle, header: []const u8, data: []const []const u8, splat: usize) net.Stream.Writer.Error!usize,
netWriteFile: *const fn (?*anyopaque, net.Socket.Handle, header: []const u8, *Io.File.Reader, Io.Limit) net.Stream.Writer.WriteFileError!usize,
netClose: *const fn (?*anyopaque, handle: []const net.Socket.Handle) void,
netShutdown: *const fn (?*anyopaque, handle: net.Socket.Handle, how: net.ShutdownHow) net.ShutdownError!void,
netInterfaceNameResolve: *const fn (?*anyopaque, *const net.Interface.Name) net.Interface.Name.ResolveError!net.Interface,
netInterfaceName: *const fn (?*anyopaque, net.Interface) net.Interface.NameError!net.Interface.Name,
netLookup: *const fn (?*anyopaque, net.HostName, *Queue(net.HostName.LookupResult), net.HostName.LookupOptions) net.HostName.LookupError!void,
netListenIp: @Restricted(*const fn (?*anyopaque, address: *const net.IpAddress, net.IpAddress.ListenOptions) net.IpAddress.ListenError!net.Socket),
netAccept: @Restricted(*const fn (?*anyopaque, server: net.Socket.Handle, options: net.Server.AcceptOptions) net.Server.AcceptError!net.Socket),
netBindIp: @Restricted(*const fn (?*anyopaque, address: *const net.IpAddress, options: net.IpAddress.BindOptions) net.IpAddress.BindError!net.Socket),
netConnectIp: @Restricted(*const fn (?*anyopaque, address: *const net.IpAddress, options: net.IpAddress.ConnectOptions) net.IpAddress.ConnectError!net.Socket),
netListenUnix: @Restricted(*const fn (?*anyopaque, *const net.UnixAddress, net.UnixAddress.ListenOptions) net.UnixAddress.ListenError!net.Socket.Handle),
netConnectUnix: @Restricted(*const fn (?*anyopaque, *const net.UnixAddress) net.UnixAddress.ConnectError!net.Socket.Handle),
netSocketCreatePair: @Restricted(*const fn (?*anyopaque, net.Socket.CreatePairOptions) net.Socket.CreatePairError![2]net.Socket),
netSend: @Restricted(*const fn (?*anyopaque, net.Socket.Handle, []net.OutgoingMessage, net.SendFlags) struct { ?net.Socket.SendError, usize }),
netWrite: @Restricted(*const fn (?*anyopaque, dest: net.Socket.Handle, header: []const u8, data: []const []const u8, splat: usize) net.Stream.Writer.Error!usize),
netWriteFile: @Restricted(*const fn (?*anyopaque, net.Socket.Handle, header: []const u8, *Io.File.Reader, Io.Limit) net.Stream.Writer.WriteFileError!usize),
netClose: @Restricted(*const fn (?*anyopaque, handle: []const net.Socket.Handle) void),
netShutdown: @Restricted(*const fn (?*anyopaque, handle: net.Socket.Handle, how: net.ShutdownHow) net.ShutdownError!void),
netInterfaceNameResolve: @Restricted(*const fn (?*anyopaque, *const net.Interface.Name) net.Interface.Name.ResolveError!net.Interface),
netInterfaceName: @Restricted(*const fn (?*anyopaque, net.Interface) net.Interface.NameError!net.Interface.Name),
netLookup: @Restricted(*const fn (?*anyopaque, net.HostName, *Queue(net.HostName.LookupResult), net.HostName.LookupOptions) net.HostName.LookupError!void),
};
pub const Operation = union(enum) {
pub const Operation = union(@Restricted(Tag)) {
file_read_streaming: FileReadStreaming,
file_write_streaming: FileWriteStreaming,
/// On Windows this is NtDeviceIoControlFile. On POSIX this is ioctl. On
@@ -261,7 +261,13 @@ pub const Operation = union(enum) {
net_receive: NetReceive,
net_read: NetRead,
pub const Tag = @typeInfo(Operation).@"union".tag_type.?;
pub const Tag = enum {
file_read_streaming,
file_write_streaming,
device_io_control,
net_receive,
net_read,
};
/// May return 0 reads which is different than `error.EndOfStream`.
pub const FileReadStreaming = struct {
@@ -1187,7 +1193,9 @@ pub const Timeout = union(enum) {
}
};
pub const AnyFuture = opaque {};
pub const AnyFuture = opaque {
pub const Start = @Restricted(*const fn (context: *const anyopaque, result: *anyopaque) void);
};
pub fn Future(Result: type) type {
return struct {
@@ -1243,6 +1251,8 @@ pub const Group = struct {
pub const init: Group = .{ .token = .init(null), .state = 0 };
pub const Start = @Restricted(*const fn (context: *const anyopaque) void);
/// Equivalent to `Io.async`, except the task is spawned in this `Group`
/// instead of becoming associated with a `Future`.
///
@@ -2711,7 +2721,7 @@ pub fn noCrashHandler(userdata: ?*anyopaque) void {
_ = userdata;
}
pub fn noAsync(userdata: ?*anyopaque, result: []u8, result_alignment: std.mem.Alignment, context: []const u8, context_alignment: std.mem.Alignment, start: *const fn (context: *const anyopaque, result: *anyopaque) void) ?*AnyFuture {
pub fn noAsync(userdata: ?*anyopaque, result: []u8, result_alignment: std.mem.Alignment, context: []const u8, context_alignment: std.mem.Alignment, start: AnyFuture.Start) ?*AnyFuture {
_ = userdata;
_ = result_alignment;
_ = context_alignment;
@@ -2725,7 +2735,7 @@ pub fn failingConcurrent(
result_alignment: std.mem.Alignment,
context: []const u8,
context_alignment: std.mem.Alignment,
start: *const fn (context: *const anyopaque, result: *anyopaque) void,
start: AnyFuture.Start,
) ConcurrentError!*AnyFuture {
_ = userdata;
_ = result_len;
@@ -2767,7 +2777,7 @@ pub fn noGroupAsync(
group: *Group,
context: []const u8,
context_alignment: std.mem.Alignment,
start: *const fn (context: *const anyopaque) void,
start: Group.Start,
) void {
_ = userdata;
_ = group;
@@ -2780,7 +2790,7 @@ pub fn failingGroupConcurrent(
group: *Group,
context: []const u8,
context_alignment: std.mem.Alignment,
start: *const fn (context: *const anyopaque) void,
start: Group.Start,
) ConcurrentError!void {
_ = userdata;
_ = group;
+10 -10
View File
@@ -986,7 +986,7 @@ fn crashHandler(userdata: ?*anyopaque) void {
const AsyncClosure = struct {
evented: *Evented,
fiber: *Fiber,
start: *const fn (context: *const anyopaque, result: *anyopaque) void,
start: Io.AnyFuture.Start,
result_align: Alignment,
fn fromFiber(fiber: *Fiber) *AsyncClosure {
@@ -1038,8 +1038,8 @@ fn async(
result_alignment: Alignment,
context: []const u8,
context_alignment: Alignment,
start: *const fn (context: *const anyopaque, result: *anyopaque) void,
) ?*std.Io.AnyFuture {
start: Io.AnyFuture.Start,
) ?*Io.AnyFuture {
const ev: *Evented = @ptrCast(@alignCast(userdata));
return concurrent(ev, result.len, result_alignment, context, context_alignment, start) catch {
start(context.ptr, result.ptr);
@@ -1053,8 +1053,8 @@ fn concurrent(
result_alignment: Alignment,
context: []const u8,
context_alignment: Alignment,
start: *const fn (context: *const anyopaque, result: *anyopaque) void,
) Io.ConcurrentError!*std.Io.AnyFuture {
start: Io.AnyFuture.Start,
) Io.ConcurrentError!*Io.AnyFuture {
assert(result_alignment.compare(.lte, Fiber.max_result_align)); // TODO
assert(context_alignment.compare(.lte, Fiber.max_context_align)); // TODO
assert(result_len <= Fiber.max_result_size); // TODO
@@ -1101,7 +1101,7 @@ fn concurrent(
fn await(
userdata: ?*anyopaque,
future: *std.Io.AnyFuture,
future: *Io.AnyFuture,
result: []u8,
result_alignment: Alignment,
) void {
@@ -1115,7 +1115,7 @@ fn await(
fn cancel(
userdata: ?*anyopaque,
future: *std.Io.AnyFuture,
future: *Io.AnyFuture,
result: []u8,
result_alignment: Alignment,
) void {
@@ -1329,7 +1329,7 @@ const Group = struct {
evented: *Evented,
group: Group,
fiber: *Fiber,
start: *const fn (context: *const anyopaque) void,
start: Io.Group.Start,
fn fromFiber(fiber: *Fiber) *Group.AsyncClosure {
return @ptrFromInt(Fiber.max_context_align.max(.of(Group.AsyncClosure)).backward(
@@ -1381,7 +1381,7 @@ fn groupAsync(
type_erased: *Io.Group,
context: []const u8,
context_alignment: Alignment,
start: *const fn (context: *const anyopaque) void,
start: Io.Group.Start,
) void {
const ev: *Evented = @ptrCast(@alignCast(userdata));
return groupConcurrent(ev, type_erased, context, context_alignment, start) catch {
@@ -1394,7 +1394,7 @@ fn groupConcurrent(
type_erased: *Io.Group,
context: []const u8,
context_alignment: Alignment,
start: *const fn (context: *const anyopaque) void,
start: Io.Group.Start,
) Io.ConcurrentError!void {
assert(context_alignment.compare(.lte, Fiber.max_context_align)); // TODO
assert(context.len <= Fiber.max_context_size); // TODO
+11 -11
View File
@@ -3,15 +3,15 @@ const builtin = @import("builtin");
const std = @import("../std.zig");
const Io = std.Io;
const Dir = std.Io.Dir;
const File = std.Io.File;
const net = std.Io.net;
const Dir = Io.Dir;
const File = Io.File;
const net = Io.net;
const assert = std.debug.assert;
const Allocator = std.mem.Allocator;
const Alignment = std.mem.Alignment;
const IpAddress = std.Io.net.IpAddress;
const errnoBug = std.Io.Threaded.errnoBug;
const closeFd = std.Io.Threaded.closeFd;
const IpAddress = Io.net.IpAddress;
const errnoBug = Io.Threaded.errnoBug;
const closeFd = Io.Threaded.closeFd;
const posix = std.posix;
const posixSocketModeProtocol = Io.Threaded.posixSocketModeProtocol;
@@ -577,7 +577,7 @@ fn fiberEntry() callconv(.naked) void {
const AsyncClosure = struct {
kqueue: *Kqueue,
fiber: *Fiber,
start: *const fn (context: *const anyopaque, result: *anyopaque) void,
start: Io.AnyFuture.Start,
result_align: Alignment,
already_awaited: bool,
@@ -669,7 +669,7 @@ fn async(
result_alignment: std.mem.Alignment,
context: []const u8,
context_alignment: std.mem.Alignment,
start: *const fn (context: *const anyopaque, result: *anyopaque) void,
start: Io.AnyFuture.Start,
) ?*Io.AnyFuture {
return concurrent(userdata, result.len, result_alignment, context, context_alignment, start) catch {
start(context.ptr, result.ptr);
@@ -683,7 +683,7 @@ fn concurrent(
result_alignment: Alignment,
context: []const u8,
context_alignment: Alignment,
start: *const fn (context: *const anyopaque, result: *anyopaque) void,
start: Io.AnyFuture.Start,
) Io.ConcurrentError!*Io.AnyFuture {
const k: *Kqueue = @ptrCast(@alignCast(userdata));
assert(result_alignment.compare(.lte, Fiber.max_result_align)); // TODO
@@ -767,7 +767,7 @@ fn groupAsync(
type_erased: *Io.Group,
context: []const u8,
context_alignment: Alignment,
start: *const fn (context: *const anyopaque) void,
start: Io.Group.Start,
) void {
const k: *Kqueue = @ptrCast(@alignCast(userdata));
_ = k;
@@ -783,7 +783,7 @@ fn groupConcurrent(
type_erased: *Io.Group,
context: []const u8,
context_alignment: Alignment,
start: *const fn (context: *const anyopaque) void,
start: Io.Group.Start,
) Io.ConcurrentError!void {
const k: *Kqueue = @ptrCast(@alignCast(userdata));
_ = k;
+4 -4
View File
@@ -43,7 +43,7 @@ pub const VTable = struct {
/// choose to store data in `buffer`, modifying `seek` and `end`
/// accordingly. Implementations are encouraged to take advantage of
/// this if it simplifies the logic.
stream: *const fn (r: *Reader, w: *Writer, limit: Limit) StreamError!usize,
stream: @Restricted(*const fn (r: *Reader, w: *Writer, limit: Limit) StreamError!usize),
/// Consumes bytes from the internally tracked stream position without
/// providing access to them.
@@ -64,7 +64,7 @@ pub const VTable = struct {
/// data.
///
/// This function is only called when `buffer` is empty.
discard: *const fn (r: *Reader, limit: Limit) Error!usize = defaultDiscard,
discard: @Restricted(*const fn (r: *Reader, limit: Limit) Error!usize) = defaultDiscard,
/// Returns number of bytes written to `data`.
///
@@ -84,7 +84,7 @@ pub const VTable = struct {
///
/// The default implementation calls `stream` with either `data[0]` or
/// `Reader.buffer`, whichever is bigger.
readVec: *const fn (r: *Reader, data: [][]u8) Error!usize = defaultReadVec,
readVec: @Restricted(*const fn (r: *Reader, data: [][]u8) Error!usize) = defaultReadVec,
/// Ensures `capacity` data can be buffered without rebasing.
///
@@ -96,7 +96,7 @@ pub const VTable = struct {
///
/// The default implementation moves buffered data to the start of
/// `buffer`, setting `seek` to zero, and cannot fail.
rebase: *const fn (r: *Reader, capacity: usize) RebaseError!void = defaultRebase,
rebase: @Restricted(*const fn (r: *Reader, capacity: usize) RebaseError!void) = defaultRebase,
};
pub const StreamError = error{
+14 -17
View File
@@ -449,7 +449,7 @@ const default_fn_align = switch (builtin.mode) {
const Runnable = struct {
node: std.SinglyLinkedList.Node,
startFn: *const fn (*Runnable, *Thread, *Threaded) void,
startFn: @Restricted(*const fn (*Runnable, *Thread, *Threaded) void),
};
const Group = struct {
@@ -484,7 +484,7 @@ const Group = struct {
const Task = struct {
runnable: Runnable,
group: *Io.Group,
func: *const fn (context: *const anyopaque) void,
startFn: Io.Group.Start,
context_alignment: Alignment,
alloc_len: usize,
@@ -494,7 +494,7 @@ const Group = struct {
group: Group,
context: []const u8,
context_alignment: Alignment,
func: *const fn (context: *const anyopaque) void,
startFn: Io.Group.Start,
) Allocator.Error!*Task {
const max_context_misalignment = context_alignment.toByteUnits() -| @alignOf(Task);
const worst_case_context_offset = context_alignment.forward(@sizeOf(Task) + max_context_misalignment);
@@ -509,7 +509,7 @@ const Group = struct {
.startFn = &start,
},
.group = group.ptr,
.func = func,
.startFn = startFn,
.context_alignment = context_alignment,
.alloc_len = alloc_len,
};
@@ -549,7 +549,7 @@ const Group = struct {
}, .monotonic);
}
task.func(task.contextPointer());
task.startFn(task.contextPointer());
thread.status.store(.{ .cancelation = .none, .awaitable = .null }, .monotonic);
const old_status = group.status().fetchSub(.{
@@ -631,7 +631,7 @@ const Group = struct {
/// 2. result
const Future = struct {
runnable: Runnable,
func: *const fn (context: *const anyopaque, result: *anyopaque) void,
startFn: Io.AnyFuture.Start,
status: std.atomic.Value(Status),
/// On completion, increment this `u32` and do a futex wake on it.
awaiter: *std.atomic.Value(u32),
@@ -666,7 +666,7 @@ const Future = struct {
result_alignment: Alignment,
context: []const u8,
context_alignment: Alignment,
func: *const fn (context: *const anyopaque, result: *anyopaque) void,
startFn: Io.AnyFuture.Start,
) Allocator.Error!*Future {
const max_context_misalignment = context_alignment.toByteUnits() -| @alignOf(Future);
const worst_case_context_offset = context_alignment.forward(@sizeOf(Future) + max_context_misalignment);
@@ -684,7 +684,7 @@ const Future = struct {
.node = undefined,
.startFn = &start,
},
.func = func,
.startFn = startFn,
.status = .init(.{
.tag = .pending,
.thread = .null,
@@ -738,7 +738,7 @@ const Future = struct {
}
}
future.func(future.contextPointer(), future.resultPointer());
future.startFn(future.contextPointer(), future.resultPointer());
const had_acknowledged_cancel = switch (thread.status.load(.monotonic).cancelation) {
.none, .canceling => false,
@@ -2073,7 +2073,7 @@ fn async(
result_alignment: Alignment,
context: []const u8,
context_alignment: Alignment,
start: *const fn (context: *const anyopaque, result: *anyopaque) void,
start: Io.AnyFuture.Start,
) ?*Io.AnyFuture {
const t: *Threaded = @ptrCast(@alignCast(userdata));
if (builtin.single_threaded) {
@@ -2129,7 +2129,7 @@ fn concurrent(
result_alignment: Alignment,
context: []const u8,
context_alignment: Alignment,
start: *const fn (context: *const anyopaque, result: *anyopaque) void,
start: Io.AnyFuture.Start,
) Io.ConcurrentError!*Io.AnyFuture {
if (builtin.single_threaded) return error.ConcurrencyUnavailable;
@@ -2174,7 +2174,7 @@ fn groupAsync(
type_erased: *Io.Group,
context: []const u8,
context_alignment: Alignment,
start: *const fn (context: *const anyopaque) void,
start: Io.Group.Start,
) void {
const t: *Threaded = @ptrCast(@alignCast(userdata));
const g: Group = .{ .ptr = type_erased };
@@ -2224,10 +2224,7 @@ fn groupAsync(
mutexUnlock(&t.mutex);
condSignal(&t.cond);
}
fn groupAsyncEager(
start: *const fn (context: *const anyopaque) void,
context: *const anyopaque,
) void {
/// Eager (synchronous) execution path for a group task: invokes `start`
/// with `context` directly on the calling thread and returns when the task
/// body completes. No allocation, queuing, or thread handoff occurs here.
/// NOTE(review): presumably the fallback taken by `groupAsync` when a
/// concurrent unit cannot be obtained (e.g. `error.ConcurrencyUnavailable`
/// or single-threaded builds) — confirm against the `groupAsync` caller.
fn groupAsyncEager(start: Io.Group.Start, context: *const anyopaque) void {
    start(context);
}
@@ -2236,7 +2233,7 @@ fn groupConcurrent(
type_erased: *Io.Group,
context: []const u8,
context_alignment: Alignment,
start: *const fn (context: *const anyopaque) void,
start: Io.Group.Start,
) Io.ConcurrentError!void {
if (builtin.single_threaded) return error.ConcurrencyUnavailable;
+10 -10
View File
@@ -1375,7 +1375,7 @@ fn crashHandler(userdata: ?*anyopaque) void {
const AsyncClosure = struct {
evented: *Evented,
fiber: *Fiber,
start: *const fn (context: *const anyopaque, result: *anyopaque) void,
start: Io.AnyFuture.Start,
result_align: Alignment,
fn fromFiber(fiber: *Fiber) *AsyncClosure {
@@ -1431,8 +1431,8 @@ fn async(
result_alignment: Alignment,
context: []const u8,
context_alignment: Alignment,
start: *const fn (context: *const anyopaque, result: *anyopaque) void,
) ?*std.Io.AnyFuture {
start: Io.AnyFuture.Start,
) ?*Io.AnyFuture {
const ev: *Evented = @ptrCast(@alignCast(userdata));
return concurrent(ev, result.len, result_alignment, context, context_alignment, start) catch {
start(context.ptr, result.ptr);
@@ -1446,8 +1446,8 @@ fn concurrent(
result_alignment: Alignment,
context: []const u8,
context_alignment: Alignment,
start: *const fn (context: *const anyopaque, result: *anyopaque) void,
) Io.ConcurrentError!*std.Io.AnyFuture {
start: Io.AnyFuture.Start,
) Io.ConcurrentError!*Io.AnyFuture {
assert(result_alignment.compare(.lte, Fiber.max_result_align)); // TODO
assert(context_alignment.compare(.lte, Fiber.max_context_align)); // TODO
assert(result_len <= Fiber.max_result_size); // TODO
@@ -1510,7 +1510,7 @@ fn concurrent(
fn await(
userdata: ?*anyopaque,
future: *std.Io.AnyFuture,
future: *Io.AnyFuture,
result: []u8,
result_alignment: Alignment,
) void {
@@ -1524,7 +1524,7 @@ fn await(
fn cancel(
userdata: ?*anyopaque,
future: *std.Io.AnyFuture,
future: *Io.AnyFuture,
result: []u8,
result_alignment: Alignment,
) void {
@@ -1739,7 +1739,7 @@ const Group = struct {
evented: *Evented,
group: Group,
fiber: *Fiber,
start: *const fn (context: *const anyopaque) void,
start: Group.Start,
fn fromFiber(fiber: *Fiber) *Group.AsyncClosure {
return @ptrFromInt(Fiber.max_context_align.max(.of(Group.AsyncClosure)).backward(
@@ -1797,7 +1797,7 @@ fn groupAsync(
type_erased: *Io.Group,
context: []const u8,
context_alignment: Alignment,
start: *const fn (context: *const anyopaque) void,
start: Group.Start,
) void {
const ev: *Evented = @ptrCast(@alignCast(userdata));
return groupConcurrent(ev, type_erased, context, context_alignment, start) catch {
@@ -1810,7 +1810,7 @@ fn groupConcurrent(
type_erased: *Io.Group,
context: []const u8,
context_alignment: Alignment,
start: *const fn (context: *const anyopaque) void,
start: Group.Start,
) Io.ConcurrentError!void {
assert(context_alignment.compare(.lte, Fiber.max_context_align)); // TODO
assert(context.len <= Fiber.max_context_size); // TODO
+5 -5
View File
@@ -43,7 +43,7 @@ pub const VTable = struct {
/// Number of bytes returned may be zero, which does not indicate stream
/// end. A subsequent call may return nonzero, or signal end of stream via
/// `error.WriteFailed`.
drain: *const fn (w: *Writer, data: []const []const u8, splat: usize) Error!usize,
drain: @Restricted(*const fn (w: *Writer, data: []const []const u8, splat: usize) Error!usize),
/// Copies contents from an open file to the logical sink. `buffer[0..end]`
/// is consumed first, followed by `limit` bytes from `file_reader`.
@@ -60,14 +60,14 @@ pub const VTable = struct {
///
/// `error.Unimplemented` indicates the callee cannot offer a more
/// efficient implementation than the caller performing its own reads.
sendFile: *const fn (
sendFile: @Restricted(*const fn (
w: *Writer,
file_reader: *File.Reader,
/// Maximum amount of bytes to read from the file. Implementations may
/// assume that the file size does not exceed this amount. Data from
/// `buffer` does not count towards this limit.
limit: Limit,
) FileError!usize = unimplementedSendFile,
) FileError!usize) = unimplementedSendFile,
/// Consumes all remaining buffer.
///
@@ -77,7 +77,7 @@ pub const VTable = struct {
///
/// There may be subsequent calls to `drain` and `sendFile` after a `flush`
/// operation.
flush: *const fn (w: *Writer) Error!void = defaultFlush,
flush: @Restricted(*const fn (w: *Writer) Error!void) = defaultFlush,
/// Ensures `capacity` more bytes can be buffered without rebasing.
///
@@ -85,7 +85,7 @@ pub const VTable = struct {
///
/// Only called when `capacity` bytes cannot fit into the unused capacity
/// of `buffer`.
rebase: *const fn (w: *Writer, preserve: usize, capacity: usize) Error!void = defaultRebase,
rebase: @Restricted(*const fn (w: *Writer, preserve: usize, capacity: usize) Error!void) = defaultRebase,
};
pub const Error = error{
+1 -1
View File
@@ -772,7 +772,7 @@ pub const BodyWriter = struct {
};
pub fn isEliding(w: *const BodyWriter) bool {
return w.writer.vtable.drain == elidingDrain;
return @as(*const fn (*Writer, []const []const u8, usize) Writer.Error!usize, w.writer.vtable.drain) == elidingDrain;
}
/// Sends all buffered data across `BodyWriter.http_protocol_output`.
+4 -4
View File
@@ -26,7 +26,7 @@ pub const VTable = struct {
/// `ret_addr` is optionally provided as the first return address of the
/// allocation call stack. If the value is `0` it means no return address
/// has been provided.
alloc: *const fn (*anyopaque, len: usize, alignment: Alignment, ret_addr: usize) ?[*]u8,
alloc: @Restricted(*const fn (*anyopaque, len: usize, alignment: Alignment, ret_addr: usize) ?[*]u8),
/// Attempt to expand or shrink memory in place.
///
@@ -45,7 +45,7 @@ pub const VTable = struct {
/// `ret_addr` is optionally provided as the first return address of the
/// allocation call stack. If the value is `0` it means no return address
/// has been provided.
resize: *const fn (*anyopaque, memory: []u8, alignment: Alignment, new_len: usize, ret_addr: usize) bool,
resize: @Restricted(*const fn (*anyopaque, memory: []u8, alignment: Alignment, new_len: usize, ret_addr: usize) bool),
/// Attempt to expand or shrink memory, allowing relocation.
///
@@ -66,7 +66,7 @@ pub const VTable = struct {
/// `ret_addr` is optionally provided as the first return address of the
/// allocation call stack. If the value is `0` it means no return address
/// has been provided.
remap: *const fn (*anyopaque, memory: []u8, alignment: Alignment, new_len: usize, ret_addr: usize) ?[*]u8,
remap: @Restricted(*const fn (*anyopaque, memory: []u8, alignment: Alignment, new_len: usize, ret_addr: usize) ?[*]u8),
/// Free and invalidate a region of memory.
///
@@ -78,7 +78,7 @@ pub const VTable = struct {
/// `ret_addr` is optionally provided as the first return address of the
/// allocation call stack. If the value is `0` it means no return address
/// has been provided.
free: *const fn (*anyopaque, memory: []u8, alignment: Alignment, ret_addr: usize) void,
free: @Restricted(*const fn (*anyopaque, memory: []u8, alignment: Alignment, ret_addr: usize) void),
};
pub fn noAlloc(