Merge pull request 'Linux 7.0 headers + syscalls' (#32019) from alexrp/zig:linux-7.0 into master

Reviewed-on: https://codeberg.org/ziglang/zig/pulls/32019
This commit is contained in:
Alex Rønne Petersen
2026-04-22 16:32:24 +02:00
113 changed files with 1823 additions and 573 deletions
+1
View File
@@ -146,5 +146,6 @@
#define HWCAP3_MTE_FAR (1UL << 0)
#define HWCAP3_MTE_STORE_ONLY (1UL << 1)
#define HWCAP3_LSFE (1UL << 2)
#define HWCAP3_LS64 (1UL << 3)
#endif /* __ASM_HWCAP_H */
+1
View File
@@ -327,6 +327,7 @@
#define __NR_file_getattr 468
#define __NR_file_setattr 469
#define __NR_listns 470
#define __NR_rseq_slice_yield 471
#endif /* _ASM_UNISTD_64_H */
+2
View File
@@ -55,6 +55,7 @@
#define EMULTIHOP 72 /* Multihop attempted */
#define EDOTDOT 73 /* RFS specific error */
#define EBADMSG 74 /* Not a data message */
#define EFSBADCRC EBADMSG /* Bad CRC detected */
#define EOVERFLOW 75 /* Value too large for defined data type */
#define ENOTUNIQ 76 /* Name not unique on network */
#define EBADFD 77 /* File descriptor in bad state */
@@ -98,6 +99,7 @@
#define EINPROGRESS 115 /* Operation now in progress */
#define ESTALE 116 /* Stale file handle */
#define EUCLEAN 117 /* Structure needs cleaning */
#define EFSCORRUPTED EUCLEAN /* Filesystem is corrupted */
#define ENOTNAM 118 /* Not a XENIX named type file */
#define ENAVAIL 119 /* No XENIX semaphores available */
#define EISNAM 120 /* Is a named type file */
+4 -1
View File
@@ -860,8 +860,11 @@ __SYSCALL(__NR_file_setattr, sys_file_setattr)
#define __NR_listns 470
__SYSCALL(__NR_listns, sys_listns)
#define __NR_rseq_slice_yield 471
__SYSCALL(__NR_rseq_slice_yield, sys_rseq_slice_yield)
#undef __NR_syscalls
#define __NR_syscalls 471
#define __NR_syscalls 472
/*
* 32 bit systems traditionally used different
+20 -6
View File
@@ -105,8 +105,6 @@ extern "C" {
*
* %AMDGPU_GEM_DOMAIN_DOORBELL Doorbell. It is an MMIO region for
* signalling user mode queues.
*
* %AMDGPU_GEM_DOMAIN_MMIO_REMAP MMIO remap page (special mapping for HDP flushing).
*/
#define AMDGPU_GEM_DOMAIN_CPU 0x1
#define AMDGPU_GEM_DOMAIN_GTT 0x2
@@ -115,15 +113,13 @@ extern "C" {
#define AMDGPU_GEM_DOMAIN_GWS 0x10
#define AMDGPU_GEM_DOMAIN_OA 0x20
#define AMDGPU_GEM_DOMAIN_DOORBELL 0x40
#define AMDGPU_GEM_DOMAIN_MMIO_REMAP 0x80
#define AMDGPU_GEM_DOMAIN_MASK (AMDGPU_GEM_DOMAIN_CPU | \
AMDGPU_GEM_DOMAIN_GTT | \
AMDGPU_GEM_DOMAIN_VRAM | \
AMDGPU_GEM_DOMAIN_GDS | \
AMDGPU_GEM_DOMAIN_GWS | \
AMDGPU_GEM_DOMAIN_OA | \
AMDGPU_GEM_DOMAIN_DOORBELL | \
AMDGPU_GEM_DOMAIN_MMIO_REMAP)
AMDGPU_GEM_DOMAIN_DOORBELL)
/* Flag that CPU access will be required for the case of VRAM domain */
#define AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED (1 << 0)
@@ -883,7 +879,7 @@ struct drm_amdgpu_gem_list_handles_entry {
#define AMDGPU_VM_PAGE_WRITEABLE (1 << 2)
/* executable mapping, new for VI */
#define AMDGPU_VM_PAGE_EXECUTABLE (1 << 3)
/* partially resident texture */
/* unmapped page of partially resident textures */
#define AMDGPU_VM_PAGE_PRT (1 << 4)
/* MTYPE flags use bit 5 to 8 */
#define AMDGPU_VM_MTYPE_MASK (0xf << 5)
@@ -1427,6 +1423,7 @@ struct drm_amdgpu_info_vbios {
#define AMDGPU_VRAM_TYPE_LPDDR4 11
#define AMDGPU_VRAM_TYPE_LPDDR5 12
#define AMDGPU_VRAM_TYPE_HBM3E 13
#define AMDGPU_VRAM_TYPE_HBM4 14
struct drm_amdgpu_info_device {
/** PCI Device ID */
@@ -1629,9 +1626,25 @@ struct drm_amdgpu_info_uq_metadata_gfx {
__u32 csa_alignment;
};
struct drm_amdgpu_info_uq_metadata_compute {
/* EOP size for gfx11 */
__u32 eop_size;
/* EOP base virtual alignment for gfx11 */
__u32 eop_alignment;
};
struct drm_amdgpu_info_uq_metadata_sdma {
/* context save area size for sdma6 */
__u32 csa_size;
/* context save area base virtual alignment for sdma6 */
__u32 csa_alignment;
};
struct drm_amdgpu_info_uq_metadata {
union {
struct drm_amdgpu_info_uq_metadata_gfx gfx;
struct drm_amdgpu_info_uq_metadata_compute compute;
struct drm_amdgpu_info_uq_metadata_sdma sdma;
};
};
@@ -1654,6 +1667,7 @@ struct drm_amdgpu_info_uq_metadata {
#define AMDGPU_FAMILY_GC_10_3_6 149 /* GC 10.3.6 */
#define AMDGPU_FAMILY_GC_10_3_7 151 /* GC 10.3.7 */
#define AMDGPU_FAMILY_GC_11_5_0 150 /* GC 11.5.0 */
#define AMDGPU_FAMILY_GC_11_5_4 154 /* GC 11.5.4 */
#define AMDGPU_FAMILY_GC_12_0_0 152 /* GC 12.0.0 */
#if defined(__cplusplus)
+8
View File
@@ -19,6 +19,14 @@ extern "C" {
#define AMDXDNA_INVALID_BO_HANDLE 0
#define AMDXDNA_INVALID_FENCE_HANDLE 0
/*
* Define hardware context priority
*/
#define AMDXDNA_QOS_REALTIME_PRIORITY 0x100
#define AMDXDNA_QOS_HIGH_PRIORITY 0x180
#define AMDXDNA_QOS_NORMAL_PRIORITY 0x200
#define AMDXDNA_QOS_LOW_PRIORITY 0x280
enum amdxdna_device_type {
AMDXDNA_DEV_TYPE_UNKNOWN = -1,
AMDXDNA_DEV_TYPE_KMQ,
+6 -6
View File
@@ -401,8 +401,8 @@ extern "C" {
* implementation can multiply the values by 2^6=64. For that reason the padding
* must only contain zeros.
* index 0 = Y plane, [15:0] z:Y [6:10] little endian
* index 1 = Cr plane, [15:0] z:Cr [6:10] little endian
* index 2 = Cb plane, [15:0] z:Cb [6:10] little endian
* index 1 = Cb plane, [15:0] z:Cb [6:10] little endian
* index 2 = Cr plane, [15:0] z:Cr [6:10] little endian
*/
#define DRM_FORMAT_S010 fourcc_code('S', '0', '1', '0') /* 2x2 subsampled Cb (1) and Cr (2) planes 10 bits per channel */
#define DRM_FORMAT_S210 fourcc_code('S', '2', '1', '0') /* 2x1 subsampled Cb (1) and Cr (2) planes 10 bits per channel */
@@ -414,8 +414,8 @@ extern "C" {
* implementation can multiply the values by 2^4=16. For that reason the padding
* must only contain zeros.
* index 0 = Y plane, [15:0] z:Y [4:12] little endian
* index 1 = Cr plane, [15:0] z:Cr [4:12] little endian
* index 2 = Cb plane, [15:0] z:Cb [4:12] little endian
* index 1 = Cb plane, [15:0] z:Cb [4:12] little endian
* index 2 = Cr plane, [15:0] z:Cr [4:12] little endian
*/
#define DRM_FORMAT_S012 fourcc_code('S', '0', '1', '2') /* 2x2 subsampled Cb (1) and Cr (2) planes 12 bits per channel */
#define DRM_FORMAT_S212 fourcc_code('S', '2', '1', '2') /* 2x1 subsampled Cb (1) and Cr (2) planes 12 bits per channel */
@@ -424,8 +424,8 @@ extern "C" {
/*
* 3 plane YCbCr
* index 0 = Y plane, [15:0] Y little endian
* index 1 = Cr plane, [15:0] Cr little endian
* index 2 = Cb plane, [15:0] Cb little endian
* index 1 = Cb plane, [15:0] Cb little endian
* index 2 = Cr plane, [15:0] Cr little endian
*/
#define DRM_FORMAT_S016 fourcc_code('S', '0', '1', '6') /* 2x2 subsampled Cb (1) and Cr (2) planes 16 bits per channel */
#define DRM_FORMAT_S216 fourcc_code('S', '2', '1', '6') /* 2x1 subsampled Cb (1) and Cr (2) planes 16 bits per channel */
+75 -1
View File
@@ -24,6 +24,8 @@ extern "C" {
#define DRM_PANFROST_SET_LABEL_BO 0x09
#define DRM_PANFROST_JM_CTX_CREATE 0x0a
#define DRM_PANFROST_JM_CTX_DESTROY 0x0b
#define DRM_PANFROST_SYNC_BO 0x0c
#define DRM_PANFROST_QUERY_BO_INFO 0x0d
#define DRM_IOCTL_PANFROST_SUBMIT DRM_IOW(DRM_COMMAND_BASE + DRM_PANFROST_SUBMIT, struct drm_panfrost_submit)
#define DRM_IOCTL_PANFROST_WAIT_BO DRM_IOW(DRM_COMMAND_BASE + DRM_PANFROST_WAIT_BO, struct drm_panfrost_wait_bo)
@@ -35,6 +37,8 @@ extern "C" {
#define DRM_IOCTL_PANFROST_SET_LABEL_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_PANFROST_SET_LABEL_BO, struct drm_panfrost_set_label_bo)
#define DRM_IOCTL_PANFROST_JM_CTX_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_PANFROST_JM_CTX_CREATE, struct drm_panfrost_jm_ctx_create)
#define DRM_IOCTL_PANFROST_JM_CTX_DESTROY DRM_IOWR(DRM_COMMAND_BASE + DRM_PANFROST_JM_CTX_DESTROY, struct drm_panfrost_jm_ctx_destroy)
#define DRM_IOCTL_PANFROST_SYNC_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_PANFROST_SYNC_BO, struct drm_panfrost_sync_bo)
#define DRM_IOCTL_PANFROST_QUERY_BO_INFO DRM_IOWR(DRM_COMMAND_BASE + DRM_PANFROST_QUERY_BO_INFO, struct drm_panfrost_query_bo_info)
/*
* Unstable ioctl(s): only exposed when the unsafe unstable_ioctls module
@@ -120,9 +124,12 @@ struct drm_panfrost_wait_bo {
__s64 timeout_ns;
};
/* Valid flags to pass to drm_panfrost_create_bo */
/* Valid flags to pass to drm_panfrost_create_bo.
* PANFROST_BO_WB_MMAP can't be set if PANFROST_BO_HEAP is.
*/
#define PANFROST_BO_NOEXEC 1
#define PANFROST_BO_HEAP 2
#define PANFROST_BO_WB_MMAP 4
/**
* struct drm_panfrost_create_bo - ioctl argument for creating Panfrost BOs.
@@ -228,6 +235,13 @@ enum drm_panfrost_param {
DRM_PANFROST_PARAM_SYSTEM_TIMESTAMP,
DRM_PANFROST_PARAM_SYSTEM_TIMESTAMP_FREQUENCY,
DRM_PANFROST_PARAM_ALLOWED_JM_CTX_PRIORITIES,
DRM_PANFROST_PARAM_SELECTED_COHERENCY,
};
enum drm_panfrost_gpu_coherency {
DRM_PANFROST_GPU_COHERENCY_ACE_LITE = 0,
DRM_PANFROST_GPU_COHERENCY_ACE = 1,
DRM_PANFROST_GPU_COHERENCY_NONE = 31,
};
struct drm_panfrost_get_param {
@@ -301,6 +315,66 @@ struct drm_panfrost_set_label_bo {
__u64 label;
};
/* Valid flags to pass to drm_panfrost_bo_sync_op */
#define PANFROST_BO_SYNC_CPU_CACHE_FLUSH 0
#define PANFROST_BO_SYNC_CPU_CACHE_FLUSH_AND_INVALIDATE 1
/**
 * struct drm_panfrost_bo_sync_op - BO map sync op
*/
struct drm_panfrost_bo_sync_op {
/** @handle: Handle of the buffer object to sync. */
__u32 handle;
/** @type: Type of sync operation. */
__u32 type;
/**
* @offset: Offset into the BO at which the sync range starts.
*
* This will be rounded down to the nearest cache line as needed.
*/
__u32 offset;
/**
* @size: Size of the range to sync
*
* @size + @offset will be rounded up to the nearest cache line as
* needed.
*/
__u32 size;
};
/**
* struct drm_panfrost_sync_bo - ioctl argument for syncing BO maps
*/
struct drm_panfrost_sync_bo {
/** Array of struct drm_panfrost_bo_sync_op */
__u64 ops;
/** Number of BO sync ops */
__u32 op_count;
__u32 pad;
};
/** BO comes from a different subsystem. */
#define DRM_PANFROST_BO_IS_IMPORTED (1 << 0)
struct drm_panfrost_query_bo_info {
/** Handle of the object being queried. */
__u32 handle;
/** Extra flags that are not coming from the BO_CREATE ioctl(). */
__u32 extra_flags;
/** Flags passed at creation time. */
__u32 create_flags;
/** Will be zero on return. */
__u32 pad;
};
/* Definitions for coredump decoding in user space */
#define PANFROSTDUMP_MAJOR 1
#define PANFROSTDUMP_MINOR 0
+154 -3
View File
@@ -144,6 +144,16 @@ enum drm_panthor_ioctl_id {
* pgoff_t size.
*/
DRM_PANTHOR_SET_USER_MMIO_OFFSET,
/** @DRM_PANTHOR_BO_SYNC: Sync BO data to/from the device */
DRM_PANTHOR_BO_SYNC,
/**
* @DRM_PANTHOR_BO_QUERY_INFO: Query information about a BO.
*
* This is useful for imported BOs.
*/
DRM_PANTHOR_BO_QUERY_INFO,
};
/**
@@ -245,6 +255,26 @@ enum drm_panthor_dev_query_type {
DRM_PANTHOR_DEV_QUERY_GROUP_PRIORITIES_INFO,
};
/**
* enum drm_panthor_gpu_coherency: Type of GPU coherency
*/
enum drm_panthor_gpu_coherency {
/**
* @DRM_PANTHOR_GPU_COHERENCY_ACE_LITE: ACE Lite coherency.
*/
DRM_PANTHOR_GPU_COHERENCY_ACE_LITE = 0,
/**
* @DRM_PANTHOR_GPU_COHERENCY_ACE: ACE coherency.
*/
DRM_PANTHOR_GPU_COHERENCY_ACE = 1,
/**
* @DRM_PANTHOR_GPU_COHERENCY_NONE: No coherency.
*/
DRM_PANTHOR_GPU_COHERENCY_NONE = 31,
};
/**
* struct drm_panthor_gpu_info - GPU information
*
@@ -301,7 +331,16 @@ struct drm_panthor_gpu_info {
*/
__u32 thread_max_barrier_size;
/** @coherency_features: Coherency features. */
/**
* @coherency_features: Coherency features.
*
* Combination of drm_panthor_gpu_coherency flags.
*
 * Note that these are just the coherency protocols supported by the
 * GPU; the actual coherency in place depends on the SoC integration
 * and is reflected by drm_panthor_gpu_info::selected_coherency.
*/
__u32 coherency_features;
/** @texture_features: Texture features. */
@@ -310,8 +349,12 @@ struct drm_panthor_gpu_info {
/** @as_present: Bitmask encoding the number of address-space exposed by the MMU. */
__u32 as_present;
/** @pad0: MBZ. */
__u32 pad0;
/**
* @selected_coherency: Coherency selected for this device.
*
* One of drm_panthor_gpu_coherency.
*/
__u32 selected_coherency;
/** @shader_present: Bitmask encoding the shader cores exposed by the GPU. */
__u64 shader_present;
@@ -638,6 +681,15 @@ struct drm_panthor_vm_get_state {
enum drm_panthor_bo_flags {
/** @DRM_PANTHOR_BO_NO_MMAP: The buffer object will never be CPU-mapped in userspace. */
DRM_PANTHOR_BO_NO_MMAP = (1 << 0),
/**
* @DRM_PANTHOR_BO_WB_MMAP: Force "Write-Back Cacheable" CPU mapping.
*
* CPU map the buffer object in userspace by forcing the "Write-Back
* Cacheable" cacheability attribute. The mapping otherwise uses the
* "Non-Cacheable" attribute if the GPU is not IO coherent.
*/
DRM_PANTHOR_BO_WB_MMAP = (1 << 1),
};
/**
@@ -1040,6 +1092,101 @@ struct drm_panthor_set_user_mmio_offset {
__u64 offset;
};
/**
* enum drm_panthor_bo_sync_op_type - BO sync type
*/
enum drm_panthor_bo_sync_op_type {
/** @DRM_PANTHOR_BO_SYNC_CPU_CACHE_FLUSH: Flush CPU caches. */
DRM_PANTHOR_BO_SYNC_CPU_CACHE_FLUSH = 0,
/** @DRM_PANTHOR_BO_SYNC_CPU_CACHE_FLUSH_AND_INVALIDATE: Flush and invalidate CPU caches. */
DRM_PANTHOR_BO_SYNC_CPU_CACHE_FLUSH_AND_INVALIDATE = 1,
};
/**
* struct drm_panthor_bo_sync_op - BO map sync op
*/
struct drm_panthor_bo_sync_op {
/** @handle: Handle of the buffer object to sync. */
__u32 handle;
/** @type: Type of operation. */
__u32 type;
/**
* @offset: Offset into the BO at which the sync range starts.
*
* This will be rounded down to the nearest cache line as needed.
*/
__u64 offset;
/**
* @size: Size of the range to sync
*
* @size + @offset will be rounded up to the nearest cache line as
* needed.
*/
__u64 size;
};
/**
* struct drm_panthor_bo_sync - BO map sync request
*/
struct drm_panthor_bo_sync {
/**
* @ops: Array of struct drm_panthor_bo_sync_op sync operations.
*/
struct drm_panthor_obj_array ops;
};
/**
* enum drm_panthor_bo_extra_flags - Set of flags returned on a BO_QUERY_INFO request
*
 * Those are flags reflecting BO properties that are not directly coming from the flags
 * passed at creation time, or information on BOs that were imported from other drivers.
*/
enum drm_panthor_bo_extra_flags {
/**
* @DRM_PANTHOR_BO_IS_IMPORTED: BO has been imported from an external driver.
*
 * Note that imported dma-buf handles are not flagged as imported if they
 * were exported by panthor; only buffers coming from other drivers
 * (dma heaps, other GPUs, display controllers, V4L, ...) are.
*
* It's also important to note that all imported BOs are mapped cached and can't
* be considered IO-coherent even if the GPU is. This means they require explicit
* syncs that must go through the DRM_PANTHOR_BO_SYNC ioctl (userland cache
* maintenance is not allowed in that case, because extra operations might be
* needed to make changes visible to the CPU/device, like buffer migration when the
* exporter is a GPU with its own VRAM).
*/
DRM_PANTHOR_BO_IS_IMPORTED = (1 << 0),
};
/**
* struct drm_panthor_bo_query_info - Query BO info
*/
struct drm_panthor_bo_query_info {
/** @handle: Handle of the buffer object to query flags on. */
__u32 handle;
/**
* @extra_flags: Combination of enum drm_panthor_bo_extra_flags flags.
*/
__u32 extra_flags;
/**
* @create_flags: Flags passed at creation time.
*
* Combination of enum drm_panthor_bo_flags flags.
* Will be zero if the buffer comes from a different driver.
*/
__u32 create_flags;
/** @pad: Will be zero on return. */
__u32 pad;
};
/**
* DRM_IOCTL_PANTHOR() - Build a Panthor IOCTL number
* @__access: Access type. Must be R, W or RW.
@@ -1086,6 +1233,10 @@ enum {
DRM_IOCTL_PANTHOR(WR, BO_SET_LABEL, bo_set_label),
DRM_IOCTL_PANTHOR_SET_USER_MMIO_OFFSET =
DRM_IOCTL_PANTHOR(WR, SET_USER_MMIO_OFFSET, set_user_mmio_offset),
DRM_IOCTL_PANTHOR_BO_SYNC =
DRM_IOCTL_PANTHOR(WR, BO_SYNC, bo_sync),
DRM_IOCTL_PANTHOR_BO_QUERY_INFO =
DRM_IOCTL_PANTHOR(WR, BO_QUERY_INFO, bo_query_info),
};
#if defined(__cplusplus)
+74 -24
View File
@@ -26,20 +26,27 @@ extern "C" {
*
*/
struct drm_rocket_create_bo {
/** Input: Size of the requested BO. */
/**
* @size: Input: Size of the requested BO.
*/
__u32 size;
/** Output: GEM handle for the BO. */
/**
* @handle: Output: GEM handle for the BO.
*/
__u32 handle;
/**
* Output: DMA address for the BO in the NPU address space. This address
* is private to the DRM fd and is valid for the lifetime of the GEM
* handle.
* @dma_address: Output: DMA address for the BO in the NPU address
* space. This address is private to the DRM fd and is valid for
* the lifetime of the GEM handle.
*/
__u64 dma_address;
/** Output: Offset into the drm node to use for subsequent mmap call. */
/**
* @offset: Output: Offset into the drm node to use for subsequent
* mmap call.
*/
__u64 offset;
};
@@ -50,13 +57,19 @@ struct drm_rocket_create_bo {
* synchronization.
*/
struct drm_rocket_prep_bo {
/** Input: GEM handle of the buffer object. */
/**
* @handle: Input: GEM handle of the buffer object.
*/
__u32 handle;
/** Reserved, must be zero. */
/**
* @reserved: Reserved, must be zero.
*/
__u32 reserved;
/** Input: Amount of time to wait for NPU jobs. */
/**
* @timeout_ns: Input: Amount of time to wait for NPU jobs.
*/
__s64 timeout_ns;
};
@@ -66,10 +79,14 @@ struct drm_rocket_prep_bo {
* Synchronize caches for NPU access.
*/
struct drm_rocket_fini_bo {
/** Input: GEM handle of the buffer object. */
/**
* @handle: Input: GEM handle of the buffer object.
*/
__u32 handle;
/** Reserved, must be zero. */
/**
* @reserved: Reserved, must be zero.
*/
__u32 reserved;
};
@@ -79,10 +96,15 @@ struct drm_rocket_fini_bo {
* A task is the smallest unit of work that can be run on the NPU.
*/
struct drm_rocket_task {
/** Input: DMA address to NPU mapping of register command buffer */
/**
* @regcmd: Input: DMA address to NPU mapping of register command buffer
*/
__u32 regcmd;
/** Input: Number of commands in the register command buffer */
/**
* @regcmd_count: Input: Number of commands in the register command
* buffer
*/
__u32 regcmd_count;
};
@@ -94,25 +116,44 @@ struct drm_rocket_task {
* sequentially on the same core, to benefit from memory residency in SRAM.
*/
struct drm_rocket_job {
/** Input: Pointer to an array of struct drm_rocket_task. */
/**
* @tasks: Input: Pointer to an array of struct drm_rocket_task.
*/
__u64 tasks;
/** Input: Pointer to a u32 array of the BOs that are read by the job. */
/**
* @in_bo_handles: Input: Pointer to a u32 array of the BOs that
* are read by the job.
*/
__u64 in_bo_handles;
/** Input: Pointer to a u32 array of the BOs that are written to by the job. */
/**
* @out_bo_handles: Input: Pointer to a u32 array of the BOs that
* are written to by the job.
*/
__u64 out_bo_handles;
/** Input: Number of tasks passed in. */
/**
* @task_count: Input: Number of tasks passed in.
*/
__u32 task_count;
/** Input: Size in bytes of the structs in the @tasks field. */
/**
* @task_struct_size: Input: Size in bytes of the structs in the
* @tasks field.
*/
__u32 task_struct_size;
/** Input: Number of input BO handles passed in (size is that times 4). */
/**
* @in_bo_handle_count: Input: Number of input BO handles passed in
* (size is that times 4).
*/
__u32 in_bo_handle_count;
/** Input: Number of output BO handles passed in (size is that times 4). */
/**
* @out_bo_handle_count: Input: Number of output BO handles passed in
* (size is that times 4).
*/
__u32 out_bo_handle_count;
};
@@ -122,16 +163,25 @@ struct drm_rocket_job {
* The kernel will schedule the execution of these jobs in dependency order.
*/
struct drm_rocket_submit {
/** Input: Pointer to an array of struct drm_rocket_job. */
/**
* @jobs: Input: Pointer to an array of struct drm_rocket_job.
*/
__u64 jobs;
/** Input: Number of jobs passed in. */
/**
* @job_count: Input: Number of jobs passed in.
*/
__u32 job_count;
/** Input: Size in bytes of the structs in the @jobs field. */
/**
* @job_struct_size: Input: Size in bytes of the structs in the
* @jobs field.
*/
__u32 job_struct_size;
/** Reserved, must be zero. */
/**
* @reserved: Reserved, must be zero.
*/
__u64 reserved;
};
+89 -6
View File
@@ -106,6 +106,7 @@ extern "C" {
#define DRM_XE_OBSERVATION 0x0b
#define DRM_XE_MADVISE 0x0c
#define DRM_XE_VM_QUERY_MEM_RANGE_ATTRS 0x0d
#define DRM_XE_EXEC_QUEUE_SET_PROPERTY 0x0e
/* Must be kept compact -- no holes */
@@ -123,6 +124,7 @@ extern "C" {
#define DRM_IOCTL_XE_OBSERVATION DRM_IOW(DRM_COMMAND_BASE + DRM_XE_OBSERVATION, struct drm_xe_observation_param)
#define DRM_IOCTL_XE_MADVISE DRM_IOW(DRM_COMMAND_BASE + DRM_XE_MADVISE, struct drm_xe_madvise)
#define DRM_IOCTL_XE_VM_QUERY_MEM_RANGE_ATTRS DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_VM_QUERY_MEM_RANGE_ATTRS, struct drm_xe_vm_query_mem_range_attr)
#define DRM_IOCTL_XE_EXEC_QUEUE_SET_PROPERTY DRM_IOW(DRM_COMMAND_BASE + DRM_XE_EXEC_QUEUE_SET_PROPERTY, struct drm_xe_exec_queue_set_property)
/**
* DOC: Xe IOCTL Extensions
@@ -210,8 +212,12 @@ struct drm_xe_ext_set_property {
/** @pad: MBZ */
__u32 pad;
/** @value: property value */
__u64 value;
union {
/** @value: property value */
__u64 value;
/** @ptr: pointer to user value */
__u64 ptr;
};
/** @reserved: Reserved */
__u64 reserved[2];
@@ -403,6 +409,9 @@ struct drm_xe_query_mem_regions {
* has low latency hint support
* - %DRM_XE_QUERY_CONFIG_FLAG_HAS_CPU_ADDR_MIRROR - Flag is set if the
* device has CPU address mirroring support
* - %DRM_XE_QUERY_CONFIG_FLAG_HAS_NO_COMPRESSION_HINT - Flag is set if the
* device supports the userspace hint %DRM_XE_GEM_CREATE_FLAG_NO_COMPRESSION.
* This is exposed only on Xe2+.
* - %DRM_XE_QUERY_CONFIG_MIN_ALIGNMENT - Minimal memory alignment
* required by this device, typically SZ_4K or SZ_64K
* - %DRM_XE_QUERY_CONFIG_VA_BITS - Maximum bits of a virtual address
@@ -421,6 +430,7 @@ struct drm_xe_query_config {
#define DRM_XE_QUERY_CONFIG_FLAG_HAS_VRAM (1 << 0)
#define DRM_XE_QUERY_CONFIG_FLAG_HAS_LOW_LATENCY (1 << 1)
#define DRM_XE_QUERY_CONFIG_FLAG_HAS_CPU_ADDR_MIRROR (1 << 2)
#define DRM_XE_QUERY_CONFIG_FLAG_HAS_NO_COMPRESSION_HINT (1 << 3)
#define DRM_XE_QUERY_CONFIG_MIN_ALIGNMENT 2
#define DRM_XE_QUERY_CONFIG_VA_BITS 3
#define DRM_XE_QUERY_CONFIG_MAX_EXEC_QUEUE_PRIORITY 4
@@ -791,6 +801,17 @@ struct drm_xe_device_query {
* need to use VRAM for display surfaces, therefore the kernel requires
* setting this flag for such objects, otherwise an error is thrown on
* small-bar systems.
* - %DRM_XE_GEM_CREATE_FLAG_NO_COMPRESSION - Allows userspace to
* hint that compression (CCS) should be disabled for the buffer being
* created. This can avoid unnecessary memory operations and CCS state
* management.
* On pre-Xe2 platforms, this flag is currently rejected as compression
* control is not supported via PAT index. On Xe2+ platforms, compression
* is controlled via PAT entries. If this flag is set, the driver will reject
* any VM bind that requests a PAT index enabling compression for this BO.
* Note: On dGPU platforms, there is currently no change in behavior with
* this flag, but future improvements may leverage it. The current benefit is
* primarily applicable to iGPU platforms.
*
* @cpu_caching supports the following values:
* - %DRM_XE_GEM_CPU_CACHING_WB - Allocate the pages with write-back
@@ -837,6 +858,7 @@ struct drm_xe_gem_create {
#define DRM_XE_GEM_CREATE_FLAG_DEFER_BACKING (1 << 0)
#define DRM_XE_GEM_CREATE_FLAG_SCANOUT (1 << 1)
#define DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM (1 << 2)
#define DRM_XE_GEM_CREATE_FLAG_NO_COMPRESSION (1 << 3)
/**
* @flags: Flags, currently a mask of memory instances of where BO can
* be placed
@@ -1252,6 +1274,17 @@ struct drm_xe_vm_bind {
* Given that going into a power-saving state kills PXP HWDRM sessions,
* runtime PM will be blocked while queues of this type are alive.
* All PXP queues will be killed if a PXP invalidation event occurs.
* - %DRM_XE_EXEC_QUEUE_SET_PROPERTY_MULTI_GROUP - Create a multi-queue group
* or add secondary queues to a multi-queue group.
* If the extension's 'value' field has %DRM_XE_MULTI_GROUP_CREATE flag set,
* then a new multi-queue group is created with this queue as the primary queue
* (Q0). Otherwise, the queue gets added to the multi-queue group whose primary
* queue's exec_queue_id is specified in the lower 32 bits of the 'value' field.
* All the other non-relevant bits of extension's 'value' field while adding the
* primary or the secondary queues of the group must be set to 0.
* - %DRM_XE_EXEC_QUEUE_SET_PROPERTY_MULTI_QUEUE_PRIORITY - Set the queue
 * priority within the multi-queue group. Current valid priority values are 0-2
* (default is 1), with higher values indicating higher priority.
*
* The example below shows how to use @drm_xe_exec_queue_create to create
* a simple exec_queue (no parallel submission) of class
@@ -1292,6 +1325,10 @@ struct drm_xe_exec_queue_create {
#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY 0
#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE 1
#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_PXP_TYPE 2
#define DRM_XE_EXEC_QUEUE_SET_HANG_REPLAY_STATE 3
#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_MULTI_GROUP 4
#define DRM_XE_MULTI_GROUP_CREATE (1ull << 63)
#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_MULTI_QUEUE_PRIORITY 5
/** @extensions: Pointer to the first extension struct, if any */
__u64 extensions;
@@ -1655,6 +1692,9 @@ enum drm_xe_oa_unit_type {
/** @DRM_XE_OA_UNIT_TYPE_OAM_SAG: OAM_SAG OA unit */
DRM_XE_OA_UNIT_TYPE_OAM_SAG,
/** @DRM_XE_OA_UNIT_TYPE_MERT: MERT OA unit */
DRM_XE_OA_UNIT_TYPE_MERT,
};
/**
@@ -1677,12 +1717,19 @@ struct drm_xe_oa_unit {
#define DRM_XE_OA_CAPS_OA_BUFFER_SIZE (1 << 2)
#define DRM_XE_OA_CAPS_WAIT_NUM_REPORTS (1 << 3)
#define DRM_XE_OA_CAPS_OAM (1 << 4)
#define DRM_XE_OA_CAPS_OA_UNIT_GT_ID (1 << 5)
/** @oa_timestamp_freq: OA timestamp freq */
__u64 oa_timestamp_freq;
/** @gt_id: gt id for this OA unit */
__u16 gt_id;
/** @reserved1: MBZ */
__u16 reserved1[3];
/** @reserved: MBZ */
__u64 reserved[4];
__u64 reserved[3];
/** @num_engines: number of engines in @eci array */
__u64 num_engines;
@@ -2072,7 +2119,13 @@ struct drm_xe_madvise {
struct {
#define DRM_XE_PREFERRED_LOC_DEFAULT_DEVICE 0
#define DRM_XE_PREFERRED_LOC_DEFAULT_SYSTEM -1
/** @preferred_mem_loc.devmem_fd: fd for preferred loc */
/**
* @preferred_mem_loc.devmem_fd:
* Device file-descriptor of the device where the
* preferred memory is located, or one of the
* above special values. Please also see
* @preferred_mem_loc.region_instance below.
*/
__u32 devmem_fd;
#define DRM_XE_MIGRATE_ALL_PAGES 0
@@ -2080,8 +2133,14 @@ struct drm_xe_madvise {
/** @preferred_mem_loc.migration_policy: Page migration policy */
__u16 migration_policy;
/** @preferred_mem_loc.pad : MBZ */
__u16 pad;
/**
* @preferred_mem_loc.region_instance : Region instance.
* MBZ if @devmem_fd <= &DRM_XE_PREFERRED_LOC_DEFAULT_DEVICE.
* Otherwise should point to the desired device
* VRAM instance of the device indicated by
* @preferred_mem_loc.devmem_fd.
*/
__u16 region_instance;
/** @preferred_mem_loc.reserved : Reserved */
__u64 reserved;
@@ -2274,6 +2333,30 @@ struct drm_xe_vm_query_mem_range_attr {
};
/**
* struct drm_xe_exec_queue_set_property - exec queue set property
*
* Sets execution queue properties dynamically.
* Currently only %DRM_XE_EXEC_QUEUE_SET_PROPERTY_MULTI_QUEUE_PRIORITY
* property can be dynamically set.
*/
struct drm_xe_exec_queue_set_property {
/** @extensions: Pointer to the first extension struct, if any */
__u64 extensions;
/** @exec_queue_id: Exec queue ID */
__u32 exec_queue_id;
/** @property: property to set */
__u32 property;
/** @value: property value */
__u64 value;
/** @reserved: Reserved */
__u64 reserved[2];
};
#if defined(__cplusplus)
}
#endif
+1 -1
View File
@@ -278,7 +278,7 @@ enum {
* NOTE: Two special error codes you should check for when calling
* in to the driver are:
*
* EINTR -- The operation has been interupted. This should be
* EINTR -- The operation has been interrupted. This should be
* handled by retrying the ioctl() until a different error code
* is returned.
*
+28
View File
@@ -119,6 +119,14 @@ enum bpf_cgroup_iter_order {
BPF_CGROUP_ITER_DESCENDANTS_PRE, /* walk descendants in pre-order. */
BPF_CGROUP_ITER_DESCENDANTS_POST, /* walk descendants in post-order. */
BPF_CGROUP_ITER_ANCESTORS_UP, /* walk ancestors upward. */
/*
* Walks the immediate children of the specified parent
* cgroup_subsys_state. Unlike BPF_CGROUP_ITER_DESCENDANTS_PRE,
* BPF_CGROUP_ITER_DESCENDANTS_POST, and BPF_CGROUP_ITER_ANCESTORS_UP
* the iterator does not include the specified parent as one of the
* returned iterator elements.
*/
BPF_CGROUP_ITER_CHILDREN,
};
union bpf_iter_link_info {
@@ -918,6 +926,16 @@ union bpf_iter_link_info {
* Number of bytes read from the stream on success, or -1 if an
* error occurred (in which case, *errno* is set appropriately).
*
* BPF_PROG_ASSOC_STRUCT_OPS
* Description
* Associate a BPF program with a struct_ops map. The struct_ops
* map is identified by *map_fd* and the BPF program is
* identified by *prog_fd*.
*
* Return
* 0 on success or -1 if an error occurred (in which case,
* *errno* is set appropriately).
*
* NOTES
* eBPF objects (maps and programs) can be shared between processes.
*
@@ -974,6 +992,7 @@ enum bpf_cmd {
BPF_PROG_BIND_MAP,
BPF_TOKEN_CREATE,
BPF_PROG_STREAM_READ_BY_FD,
BPF_PROG_ASSOC_STRUCT_OPS,
__MAX_BPF_CMD,
};
@@ -1134,6 +1153,7 @@ enum bpf_attach_type {
BPF_NETKIT_PEER,
BPF_TRACE_KPROBE_SESSION,
BPF_TRACE_UPROBE_SESSION,
BPF_TRACE_FSESSION,
__MAX_BPF_ATTACH_TYPE
};
@@ -1373,6 +1393,8 @@ enum {
BPF_NOEXIST = 1, /* create new element if it didn't exist */
BPF_EXIST = 2, /* update existing element */
BPF_F_LOCK = 4, /* spin_lock-ed map_lookup/map_update */
BPF_F_CPU = 8, /* cpu flag for percpu maps, upper 32-bit of flags is a cpu number */
BPF_F_ALL_CPUS = 16, /* update value across all CPUs for percpu maps */
};
/* flags for BPF_MAP_CREATE command */
@@ -1894,6 +1916,12 @@ union bpf_attr {
__u32 prog_fd;
} prog_stream_read;
struct {
__u32 map_fd;
__u32 prog_fd;
__u32 flags;
} prog_assoc_struct_ops;
} __attribute__((aligned(8)));
/* The description below is an attempt at providing documentation to eBPF
+1
View File
@@ -334,6 +334,7 @@ struct btrfs_ioctl_fs_info_args {
#define BTRFS_FEATURE_INCOMPAT_EXTENT_TREE_V2 (1ULL << 13)
#define BTRFS_FEATURE_INCOMPAT_RAID_STRIPE_TREE (1ULL << 14)
#define BTRFS_FEATURE_INCOMPAT_SIMPLE_QUOTA (1ULL << 16)
#define BTRFS_FEATURE_INCOMPAT_REMAP_TREE (1ULL << 17)
struct btrfs_ioctl_feature_flags {
__u64 compat_flags;
+32 -2
View File
@@ -72,6 +72,9 @@
/* Tracks RAID stripes in block groups. */
#define BTRFS_RAID_STRIPE_TREE_OBJECTID 12ULL
/* Holds details of remapped addresses after relocation. */
#define BTRFS_REMAP_TREE_OBJECTID 13ULL
/* device stats in the device tree */
#define BTRFS_DEV_STATS_OBJECTID 0ULL
@@ -278,6 +281,10 @@
#define BTRFS_RAID_STRIPE_KEY 230
#define BTRFS_IDENTITY_REMAP_KEY 234
#define BTRFS_REMAP_KEY 235
#define BTRFS_REMAP_BACKREF_KEY 236
/*
* Records the overall state of the qgroups.
* There's only one instance of this key present,
@@ -710,9 +717,12 @@ struct btrfs_super_block {
__u8 metadata_uuid[BTRFS_FSID_SIZE];
__u64 nr_global_roots;
__le64 remap_root;
__le64 remap_root_generation;
__u8 remap_root_level;
/* Future expansion */
__le64 reserved[27];
__u8 reserved[199];
__u8 sys_chunk_array[BTRFS_SYSTEM_CHUNK_ARRAY_SIZE];
struct btrfs_root_backup super_roots[BTRFS_NUM_BACKUP_ROOTS];
@@ -1157,12 +1167,15 @@ struct btrfs_dev_replace_item {
#define BTRFS_BLOCK_GROUP_RAID6 (1ULL << 8)
#define BTRFS_BLOCK_GROUP_RAID1C3 (1ULL << 9)
#define BTRFS_BLOCK_GROUP_RAID1C4 (1ULL << 10)
#define BTRFS_BLOCK_GROUP_REMAPPED (1ULL << 11)
#define BTRFS_BLOCK_GROUP_METADATA_REMAP (1ULL << 12)
#define BTRFS_BLOCK_GROUP_RESERVED (BTRFS_AVAIL_ALLOC_BIT_SINGLE | \
BTRFS_SPACE_INFO_GLOBAL_RSV)
#define BTRFS_BLOCK_GROUP_TYPE_MASK (BTRFS_BLOCK_GROUP_DATA | \
BTRFS_BLOCK_GROUP_SYSTEM | \
BTRFS_BLOCK_GROUP_METADATA)
BTRFS_BLOCK_GROUP_METADATA | \
BTRFS_BLOCK_GROUP_METADATA_REMAP)
#define BTRFS_BLOCK_GROUP_PROFILE_MASK (BTRFS_BLOCK_GROUP_RAID0 | \
BTRFS_BLOCK_GROUP_RAID1 | \
@@ -1215,6 +1228,14 @@ struct btrfs_block_group_item {
__le64 flags;
} __attribute__ ((__packed__));
struct btrfs_block_group_item_v2 {
__le64 used;
__le64 chunk_objectid;
__le64 flags;
__le64 remap_bytes;
__le32 identity_remap_count;
} __attribute__ ((__packed__));
struct btrfs_free_space_info {
__le32 extent_count;
__le32 flags;
@@ -1319,4 +1340,13 @@ struct btrfs_verity_descriptor_item {
__u8 encryption;
} __attribute__ ((__packed__));
/*
* For a range identified by a BTRFS_REMAP_KEY item in the remap tree, gives
* the address that the start of the range will get remapped to. This
* structure is also shared by BTRFS_REMAP_BACKREF_KEY.
*/
struct btrfs_remap_item {
__le64 address;
} __attribute__ ((__packed__));
#endif /* _BTRFS_CTREE_H_ */
+1
View File
@@ -20,6 +20,7 @@
#ifndef _DMA_BUF_UAPI_H_
#define _DMA_BUF_UAPI_H_
#include <linux/ioctl.h>
#include <linux/types.h>
/**
+1
View File
@@ -253,6 +253,7 @@ enum dpll_a_pin {
DPLL_A_PIN_ESYNC_PULSE,
DPLL_A_PIN_REFERENCE_SYNC,
DPLL_A_PIN_PHASE_ADJUST_GRAN,
DPLL_A_PIN_FRACTIONAL_FREQUENCY_OFFSET_PPT,
__DPLL_A_PIN_MAX,
DPLL_A_PIN_MAX = (__DPLL_A_PIN_MAX - 1)
+2
View File
@@ -545,6 +545,8 @@ typedef struct elf64_shdr {
#define NT_RISCV_VECTOR 0x901 /* RISC-V vector registers */
#define NN_RISCV_TAGGED_ADDR_CTRL "LINUX"
#define NT_RISCV_TAGGED_ADDR_CTRL 0x902 /* RISC-V tagged address control (prctl()) */
#define NN_RISCV_USER_CFI "LINUX"
#define NT_RISCV_USER_CFI 0x903 /* RISC-V shadow stack state */
#define NN_LOONGARCH_CPUCFG "LINUX"
#define NT_LOONGARCH_CPUCFG 0xa00 /* LoongArch CPU config registers */
#define NN_LOONGARCH_CSR "LINUX"
+21 -5
View File
@@ -15,11 +15,10 @@
#define _LINUX_ETHTOOL_H
#include <linux/const.h>
#include <linux/typelimits.h>
#include <linux/types.h>
#include <linux/if_ether.h>
#include <limits.h> /* for INT_MAX */
/* All structures exposed to userland should be defined such that they
* have the same layout for 32-bit and 64-bit userland.
*/
@@ -601,6 +600,8 @@ enum ethtool_link_ext_state {
ETHTOOL_LINK_EXT_STATE_POWER_BUDGET_EXCEEDED,
ETHTOOL_LINK_EXT_STATE_OVERHEAT,
ETHTOOL_LINK_EXT_STATE_MODULE,
ETHTOOL_LINK_EXT_STATE_OTP_SPEED_VIOLATION,
ETHTOOL_LINK_EXT_STATE_BMC_REQUEST_DOWN,
};
/* More information in addition to ETHTOOL_LINK_EXT_STATE_AUTONEG. */
@@ -1092,13 +1093,20 @@ enum ethtool_module_fw_flash_status {
* struct ethtool_gstrings - string set for data tagging
* @cmd: Command number = %ETHTOOL_GSTRINGS
* @string_set: String set ID; one of &enum ethtool_stringset
* @len: On return, the number of strings in the string set
* @len: Number of strings in the string set
* @data: Buffer for strings. Each string is null-padded to a size of
* %ETH_GSTRING_LEN.
*
* Users must use %ETHTOOL_GSSET_INFO to find the number of strings in
* the string set. They must allocate a buffer of the appropriate
* size immediately following this structure.
*
* Setting @len on input is optional (though preferred), but must be zeroed
* otherwise.
* When set, @len will return the requested count if it matches the actual
* count; otherwise, it will be zero.
* This prevents issues when the number of strings is different than the
* userspace allocation.
*/
struct ethtool_gstrings {
__u32 cmd;
@@ -1175,13 +1183,20 @@ struct ethtool_test {
/**
* struct ethtool_stats - device-specific statistics
* @cmd: Command number = %ETHTOOL_GSTATS
* @n_stats: On return, the number of statistics
* @n_stats: Number of statistics
* @data: Array of statistics
*
* Users must use %ETHTOOL_GSSET_INFO or %ETHTOOL_GDRVINFO to find the
* number of statistics that will be returned. They must allocate a
* buffer of the appropriate size (8 * number of statistics)
* immediately following this structure.
*
* Setting @n_stats on input is optional (though preferred), but must be zeroed
* otherwise.
* When set, @n_stats will return the requested count if it matches the actual
* count; otherwise, it will be zero.
* This prevents issues when the number of stats is different than the
* userspace allocation.
*/
struct ethtool_stats {
__u32 cmd;
@@ -2188,6 +2203,7 @@ enum ethtool_link_mode_bit_indices {
#define SPEED_40000 40000
#define SPEED_50000 50000
#define SPEED_56000 56000
#define SPEED_80000 80000
#define SPEED_100000 100000
#define SPEED_200000 200000
#define SPEED_400000 400000
@@ -2198,7 +2214,7 @@ enum ethtool_link_mode_bit_indices {
static __inline__ int ethtool_validate_speed(__u32 speed)
{
return speed <= INT_MAX || speed == (__u32)SPEED_UNKNOWN;
return speed <= __KERNEL_INT_MAX || speed == (__u32)SPEED_UNKNOWN;
}
/* Duplex, half or full. */
+1
View File
@@ -249,6 +249,7 @@ struct file_attr {
#define FS_XFLAG_FILESTREAM 0x00004000 /* use filestream allocator */
#define FS_XFLAG_DAX 0x00008000 /* use DAX for IO */
#define FS_XFLAG_COWEXTSIZE 0x00010000 /* CoW extent size allocator hint */
#define FS_XFLAG_VERITY 0x00020000 /* fs-verity enabled */
#define FS_XFLAG_HASATTR 0x80000000 /* no DIFLAG for this */
/* the read-only stuff doesn't really belong here, but any other place is
+1 -1
View File
@@ -362,7 +362,7 @@ struct hv_kvp_exchg_msg_value {
__u8 value[HV_KVP_EXCHANGE_MAX_VALUE_SIZE];
__u32 value_u32;
__u64 value_u64;
};
} __attribute__((packed));
} __attribute__((packed));
struct hv_kvp_msg_enumerate {
+134 -134
View File
@@ -3,7 +3,7 @@
#ifndef _USR_IDXD_H_
#define _USR_IDXD_H_
#include <stdint.h>
#include <linux/types.h>
/* Driver command error status */
enum idxd_scmd_stat {
@@ -172,132 +172,132 @@ enum iax_completion_status {
#define DSA_COMP_STATUS(status) ((status) & DSA_COMP_STATUS_MASK)
struct dsa_hw_desc {
uint32_t pasid:20;
uint32_t rsvd:11;
uint32_t priv:1;
uint32_t flags:24;
uint32_t opcode:8;
uint64_t completion_addr;
__u32 pasid:20;
__u32 rsvd:11;
__u32 priv:1;
__u32 flags:24;
__u32 opcode:8;
__u64 completion_addr;
union {
uint64_t src_addr;
uint64_t rdback_addr;
uint64_t pattern;
uint64_t desc_list_addr;
uint64_t pattern_lower;
uint64_t transl_fetch_addr;
__u64 src_addr;
__u64 rdback_addr;
__u64 pattern;
__u64 desc_list_addr;
__u64 pattern_lower;
__u64 transl_fetch_addr;
};
union {
uint64_t dst_addr;
uint64_t rdback_addr2;
uint64_t src2_addr;
uint64_t comp_pattern;
__u64 dst_addr;
__u64 rdback_addr2;
__u64 src2_addr;
__u64 comp_pattern;
};
union {
uint32_t xfer_size;
uint32_t desc_count;
uint32_t region_size;
__u32 xfer_size;
__u32 desc_count;
__u32 region_size;
};
uint16_t int_handle;
uint16_t rsvd1;
__u16 int_handle;
__u16 rsvd1;
union {
uint8_t expected_res;
__u8 expected_res;
/* create delta record */
struct {
uint64_t delta_addr;
uint32_t max_delta_size;
uint32_t delt_rsvd;
uint8_t expected_res_mask;
__u64 delta_addr;
__u32 max_delta_size;
__u32 delt_rsvd;
__u8 expected_res_mask;
};
uint32_t delta_rec_size;
uint64_t dest2;
__u32 delta_rec_size;
__u64 dest2;
/* CRC */
struct {
uint32_t crc_seed;
uint32_t crc_rsvd;
uint64_t seed_addr;
__u32 crc_seed;
__u32 crc_rsvd;
__u64 seed_addr;
};
/* DIF check or strip */
struct {
uint8_t src_dif_flags;
uint8_t dif_chk_res;
uint8_t dif_chk_flags;
uint8_t dif_chk_res2[5];
uint32_t chk_ref_tag_seed;
uint16_t chk_app_tag_mask;
uint16_t chk_app_tag_seed;
__u8 src_dif_flags;
__u8 dif_chk_res;
__u8 dif_chk_flags;
__u8 dif_chk_res2[5];
__u32 chk_ref_tag_seed;
__u16 chk_app_tag_mask;
__u16 chk_app_tag_seed;
};
/* DIF insert */
struct {
uint8_t dif_ins_res;
uint8_t dest_dif_flag;
uint8_t dif_ins_flags;
uint8_t dif_ins_res2[13];
uint32_t ins_ref_tag_seed;
uint16_t ins_app_tag_mask;
uint16_t ins_app_tag_seed;
__u8 dif_ins_res;
__u8 dest_dif_flag;
__u8 dif_ins_flags;
__u8 dif_ins_res2[13];
__u32 ins_ref_tag_seed;
__u16 ins_app_tag_mask;
__u16 ins_app_tag_seed;
};
/* DIF update */
struct {
uint8_t src_upd_flags;
uint8_t upd_dest_flags;
uint8_t dif_upd_flags;
uint8_t dif_upd_res[5];
uint32_t src_ref_tag_seed;
uint16_t src_app_tag_mask;
uint16_t src_app_tag_seed;
uint32_t dest_ref_tag_seed;
uint16_t dest_app_tag_mask;
uint16_t dest_app_tag_seed;
__u8 src_upd_flags;
__u8 upd_dest_flags;
__u8 dif_upd_flags;
__u8 dif_upd_res[5];
__u32 src_ref_tag_seed;
__u16 src_app_tag_mask;
__u16 src_app_tag_seed;
__u32 dest_ref_tag_seed;
__u16 dest_app_tag_mask;
__u16 dest_app_tag_seed;
};
/* Fill */
uint64_t pattern_upper;
__u64 pattern_upper;
/* Translation fetch */
struct {
uint64_t transl_fetch_res;
uint32_t region_stride;
__u64 transl_fetch_res;
__u32 region_stride;
};
/* DIX generate */
struct {
uint8_t dix_gen_res;
uint8_t dest_dif_flags;
uint8_t dif_flags;
uint8_t dix_gen_res2[13];
uint32_t ref_tag_seed;
uint16_t app_tag_mask;
uint16_t app_tag_seed;
__u8 dix_gen_res;
__u8 dest_dif_flags;
__u8 dif_flags;
__u8 dix_gen_res2[13];
__u32 ref_tag_seed;
__u16 app_tag_mask;
__u16 app_tag_seed;
};
uint8_t op_specific[24];
__u8 op_specific[24];
};
} __attribute__((packed));
struct iax_hw_desc {
uint32_t pasid:20;
uint32_t rsvd:11;
uint32_t priv:1;
uint32_t flags:24;
uint32_t opcode:8;
uint64_t completion_addr;
uint64_t src1_addr;
uint64_t dst_addr;
uint32_t src1_size;
uint16_t int_handle;
__u32 pasid:20;
__u32 rsvd:11;
__u32 priv:1;
__u32 flags:24;
__u32 opcode:8;
__u64 completion_addr;
__u64 src1_addr;
__u64 dst_addr;
__u32 src1_size;
__u16 int_handle;
union {
uint16_t compr_flags;
uint16_t decompr_flags;
__u16 compr_flags;
__u16 decompr_flags;
};
uint64_t src2_addr;
uint32_t max_dst_size;
uint32_t src2_size;
uint32_t filter_flags;
uint32_t num_inputs;
__u64 src2_addr;
__u32 max_dst_size;
__u32 src2_size;
__u32 filter_flags;
__u32 num_inputs;
} __attribute__((packed));
struct dsa_raw_desc {
uint64_t field[8];
__u64 field[8];
} __attribute__((packed));
/*
@@ -305,91 +305,91 @@ struct dsa_raw_desc {
* __volatile__ and prevent the compiler from optimize the read.
*/
struct dsa_completion_record {
__volatile__ uint8_t status;
__volatile__ __u8 status;
union {
uint8_t result;
uint8_t dif_status;
__u8 result;
__u8 dif_status;
};
uint8_t fault_info;
uint8_t rsvd;
__u8 fault_info;
__u8 rsvd;
union {
uint32_t bytes_completed;
uint32_t descs_completed;
__u32 bytes_completed;
__u32 descs_completed;
};
uint64_t fault_addr;
__u64 fault_addr;
union {
/* common record */
struct {
uint32_t invalid_flags:24;
uint32_t rsvd2:8;
__u32 invalid_flags:24;
__u32 rsvd2:8;
};
uint32_t delta_rec_size;
uint64_t crc_val;
__u32 delta_rec_size;
__u64 crc_val;
/* DIF check & strip */
struct {
uint32_t dif_chk_ref_tag;
uint16_t dif_chk_app_tag_mask;
uint16_t dif_chk_app_tag;
__u32 dif_chk_ref_tag;
__u16 dif_chk_app_tag_mask;
__u16 dif_chk_app_tag;
};
/* DIF insert */
struct {
uint64_t dif_ins_res;
uint32_t dif_ins_ref_tag;
uint16_t dif_ins_app_tag_mask;
uint16_t dif_ins_app_tag;
__u64 dif_ins_res;
__u32 dif_ins_ref_tag;
__u16 dif_ins_app_tag_mask;
__u16 dif_ins_app_tag;
};
/* DIF update */
struct {
uint32_t dif_upd_src_ref_tag;
uint16_t dif_upd_src_app_tag_mask;
uint16_t dif_upd_src_app_tag;
uint32_t dif_upd_dest_ref_tag;
uint16_t dif_upd_dest_app_tag_mask;
uint16_t dif_upd_dest_app_tag;
__u32 dif_upd_src_ref_tag;
__u16 dif_upd_src_app_tag_mask;
__u16 dif_upd_src_app_tag;
__u32 dif_upd_dest_ref_tag;
__u16 dif_upd_dest_app_tag_mask;
__u16 dif_upd_dest_app_tag;
};
/* DIX generate */
struct {
uint64_t dix_gen_res;
uint32_t dix_ref_tag;
uint16_t dix_app_tag_mask;
uint16_t dix_app_tag;
__u64 dix_gen_res;
__u32 dix_ref_tag;
__u16 dix_app_tag_mask;
__u16 dix_app_tag;
};
uint8_t op_specific[16];
__u8 op_specific[16];
};
} __attribute__((packed));
struct dsa_raw_completion_record {
uint64_t field[4];
__u64 field[4];
} __attribute__((packed));
struct iax_completion_record {
__volatile__ uint8_t status;
uint8_t error_code;
uint8_t fault_info;
uint8_t rsvd;
uint32_t bytes_completed;
uint64_t fault_addr;
uint32_t invalid_flags;
uint32_t rsvd2;
uint32_t output_size;
uint8_t output_bits;
uint8_t rsvd3;
uint16_t xor_csum;
uint32_t crc;
uint32_t min;
uint32_t max;
uint32_t sum;
uint64_t rsvd4[2];
__volatile__ __u8 status;
__u8 error_code;
__u8 fault_info;
__u8 rsvd;
__u32 bytes_completed;
__u64 fault_addr;
__u32 invalid_flags;
__u32 rsvd2;
__u32 output_size;
__u8 output_bits;
__u8 rsvd3;
__u16 xor_csum;
__u32 crc;
__u32 min;
__u32 max;
__u32 sum;
__u64 rsvd4[2];
} __attribute__((packed));
struct iax_raw_completion_record {
uint64_t field[8];
__u64 field[8];
} __attribute__((packed));
#endif
+1 -1
View File
@@ -42,7 +42,7 @@ struct sockaddr_alg_new {
struct af_alg_iv {
__u32 ivlen;
__u8 iv[];
__u8 iv[] __counted_by(ivlen);
};
/* Socket options */
+1
View File
@@ -1441,6 +1441,7 @@ enum {
IFLA_GENEVE_DF,
IFLA_GENEVE_INNER_PROTO_INHERIT,
IFLA_GENEVE_PORT_RANGE,
IFLA_GENEVE_GRO_HINT,
__IFLA_GENEVE_MAX
};
#define IFLA_GENEVE_MAX (__IFLA_GENEVE_MAX - 1)
@@ -643,6 +643,10 @@
#define KEY_EPRIVACY_SCREEN_ON 0x252
#define KEY_EPRIVACY_SCREEN_OFF 0x253
#define KEY_ACTION_ON_SELECTION 0x254 /* AL Action on Selection (HUTRR119) */
#define KEY_CONTEXTUAL_INSERT 0x255 /* AL Contextual Insertion (HUTRR119) */
#define KEY_CONTEXTUAL_QUERY 0x256 /* AL Contextual Query (HUTRR119) */
#define KEY_KBDINPUTASSIST_PREV 0x260
#define KEY_KBDINPUTASSIST_NEXT 0x261
#define KEY_KBDINPUTASSIST_PREVGROUP 0x262
+33 -2
View File
@@ -188,7 +188,8 @@ enum io_uring_sqe_flags_bit {
/*
* If COOP_TASKRUN is set, get notified if task work is available for
* running and a kernel transition would be needed to run it. This sets
* IORING_SQ_TASKRUN in the sq ring flags. Not valid with COOP_TASKRUN.
* IORING_SQ_TASKRUN in the sq ring flags. Not valid without COOP_TASKRUN
* or DEFER_TASKRUN.
*/
#define IORING_SETUP_TASKRUN_FLAG (1U << 9)
#define IORING_SETUP_SQE128 (1U << 10) /* SQEs are 128 byte */
@@ -237,6 +238,18 @@ enum io_uring_sqe_flags_bit {
*/
#define IORING_SETUP_SQE_MIXED (1U << 19)
/*
* When set, io_uring ignores SQ head and tail and fetches SQEs to submit
* starting from index 0 instead from the index stored in the head pointer.
* IOW, the user should place all SQE at the beginning of the SQ memory
* before issuing a submission syscall.
*
* It requires IORING_SETUP_NO_SQARRAY and is incompatible with
* IORING_SETUP_SQPOLL. The user must also never change the SQ head and tail
* values and keep it set to 0. Any other value is undefined behaviour.
*/
#define IORING_SETUP_SQ_REWIND (1U << 20)
enum io_uring_op {
IORING_OP_NOP,
IORING_OP_READV,
@@ -700,6 +713,9 @@ enum io_uring_register_op {
/* auxiliary zcrx configuration, see enum zcrx_ctrl_op */
IORING_REGISTER_ZCRX_CTRL = 36,
/* register bpf filtering programs */
IORING_REGISTER_BPF_FILTER = 37,
/* this goes last */
IORING_REGISTER_LAST,
@@ -805,6 +821,13 @@ struct io_uring_restriction {
__u32 resv2[3];
};
struct io_uring_task_restriction {
__u16 flags;
__u16 nr_res;
__u32 resv[3];
__DECLARE_FLEX_ARRAY(struct io_uring_restriction, restrictions);
};
struct io_uring_clock_register {
__u32 clockid;
__u32 __resv[3];
@@ -1068,6 +1091,14 @@ enum zcrx_reg_flags {
ZCRX_REG_IMPORT = 1,
};
enum zcrx_features {
/*
* The user can ask for the desired rx page size by passing the
* value in struct io_uring_zcrx_ifq_reg::rx_buf_len.
*/
ZCRX_FEATURE_RX_PAGE_SIZE = 1 << 0,
};
/*
* Argument for IORING_REGISTER_ZCRX_IFQ
*/
@@ -1082,7 +1113,7 @@ struct io_uring_zcrx_ifq_reg {
struct io_uring_zcrx_offsets offsets;
__u32 zcrx_id;
__u32 __resv2;
__u32 rx_buf_len;
__u64 __resv[3];
};
@@ -0,0 +1,68 @@
/* SPDX-License-Identifier: (GPL-2.0 WITH Linux-syscall-note) OR MIT */
/*
* Header file for the io_uring BPF filters.
*/
#ifndef LINUX_IO_URING_BPF_FILTER_H
#define LINUX_IO_URING_BPF_FILTER_H
#include <linux/types.h>
/*
* Struct passed to filters.
*/
struct io_uring_bpf_ctx {
__u64 user_data;
__u8 opcode;
__u8 sqe_flags;
__u8 pdu_size; /* size of aux data for filter */
__u8 pad[5];
union {
struct {
__u32 family;
__u32 type;
__u32 protocol;
} socket;
struct {
__u64 flags;
__u64 mode;
__u64 resolve;
} open;
};
};
enum {
/*
* If set, any currently unset opcode will have a deny filter attached
*/
IO_URING_BPF_FILTER_DENY_REST = 1,
/*
* If set, if kernel and application don't agree on pdu_size for
* the given opcode, fail the registration of the filter.
*/
IO_URING_BPF_FILTER_SZ_STRICT = 2,
};
struct io_uring_bpf_filter {
__u32 opcode; /* io_uring opcode to filter */
__u32 flags;
__u32 filter_len; /* number of BPF instructions */
__u8 pdu_size; /* expected pdu size for opcode */
__u8 resv[3];
__u64 filter_ptr; /* pointer to BPF filter */
__u64 resv2[5];
};
enum {
IO_URING_BPF_CMD_FILTER = 1,
};
struct io_uring_bpf {
__u16 cmd_type; /* IO_URING_BPF_* values */
__u16 cmd_flags; /* none so far */
__u32 resv;
union {
struct io_uring_bpf_filter filter;
};
};
#endif
+5 -1
View File
@@ -1,6 +1,9 @@
/* SPDX-License-Identifier: (GPL-2.0 WITH Linux-syscall-note) OR MIT */
/*
* Header file for the io_uring query interface.
*
* Copyright (C) 2026 Pavel Begunkov <asml.silence@gmail.com>
* Copyright (C) Meta Platforms, Inc.
*/
#ifndef LINUX_IO_URING_QUERY_H
#define LINUX_IO_URING_QUERY_H
@@ -50,7 +53,8 @@ struct io_uring_query_zcrx {
__u64 area_flags;
/* The number of supported ZCRX_CTRL_* opcodes */
__u32 nr_ctrl_opcodes;
__u32 __resv1;
/* Bitmask of ZCRX_FEATURE_* indicating which features are available */
__u32 features;
/* The refill ring header size */
__u32 rq_hdr_size;
/* The alignment for the header */
+39
View File
@@ -465,16 +465,27 @@ struct iommu_hwpt_arm_smmuv3 {
__aligned_le64 ste[2];
};
/**
* struct iommu_hwpt_amd_guest - AMD IOMMU guest I/O page table data
* (IOMMU_HWPT_DATA_AMD_GUEST)
* @dte: Guest Device Table Entry (DTE)
*/
struct iommu_hwpt_amd_guest {
__aligned_u64 dte[4];
};
/**
* enum iommu_hwpt_data_type - IOMMU HWPT Data Type
* @IOMMU_HWPT_DATA_NONE: no data
* @IOMMU_HWPT_DATA_VTD_S1: Intel VT-d stage-1 page table
* @IOMMU_HWPT_DATA_ARM_SMMUV3: ARM SMMUv3 Context Descriptor Table
* @IOMMU_HWPT_DATA_AMD_GUEST: AMD IOMMU guest page table
*/
enum iommu_hwpt_data_type {
IOMMU_HWPT_DATA_NONE = 0,
IOMMU_HWPT_DATA_VTD_S1 = 1,
IOMMU_HWPT_DATA_ARM_SMMUV3 = 2,
IOMMU_HWPT_DATA_AMD_GUEST = 3,
};
/**
@@ -623,6 +634,32 @@ struct iommu_hw_info_tegra241_cmdqv {
__u8 __reserved;
};
/**
* struct iommu_hw_info_amd - AMD IOMMU device info
*
* @efr : Value of AMD IOMMU Extended Feature Register (EFR)
* @efr2: Value of AMD IOMMU Extended Feature 2 Register (EFR2)
*
* Please See description of these registers in the following sections of
* the AMD I/O Virtualization Technology (IOMMU) Specification.
* (https://docs.amd.com/v/u/en-US/48882_3.10_PUB)
*
* - MMIO Offset 0030h IOMMU Extended Feature Register
* - MMIO Offset 01A0h IOMMU Extended Feature 2 Register
*
* Note: The EFR and EFR2 are raw values reported by hardware.
* VMM is responsible to determine the appropriate flags to be exposed to
* the VM since cetertain features are not currently supported by the kernel
* for HW-vIOMMU.
*
* Current VMM-allowed list of feature flags are:
* - EFR[GTSup, GASup, GioSup, PPRSup, EPHSup, GATS, GLX, PASmax]
*/
struct iommu_hw_info_amd {
__aligned_u64 efr;
__aligned_u64 efr2;
};
/**
* enum iommu_hw_info_type - IOMMU Hardware Info Types
* @IOMMU_HW_INFO_TYPE_NONE: Output by the drivers that do not report hardware
@@ -632,6 +669,7 @@ struct iommu_hw_info_tegra241_cmdqv {
* @IOMMU_HW_INFO_TYPE_ARM_SMMUV3: ARM SMMUv3 iommu info type
* @IOMMU_HW_INFO_TYPE_TEGRA241_CMDQV: NVIDIA Tegra241 CMDQV (extension for ARM
* SMMUv3) info type
* @IOMMU_HW_INFO_TYPE_AMD: AMD IOMMU info type
*/
enum iommu_hw_info_type {
IOMMU_HW_INFO_TYPE_NONE = 0,
@@ -639,6 +677,7 @@ enum iommu_hw_info_type {
IOMMU_HW_INFO_TYPE_INTEL_VTD = 1,
IOMMU_HW_INFO_TYPE_ARM_SMMUV3 = 2,
IOMMU_HW_INFO_TYPE_TEGRA241_CMDQV = 3,
IOMMU_HW_INFO_TYPE_AMD = 4,
};
/**
+13 -3
View File
@@ -44,9 +44,13 @@
* - 1.16 - Add contiguous VRAM allocation flag
* - 1.17 - Add SDMA queue creation with target SDMA engine ID
* - 1.18 - Rename pad in set_memory_policy_args to misc_process_flag
* - 1.19 - Add a new ioctl to craete secondary kfd processes
* - 1.20 - Trap handler support for expert scheduling mode available
* - 1.21 - Debugger support to subscribe to LDS out-of-address exceptions
* - 1.22 - Add queue creation with metadata ring base address
*/
#define KFD_IOCTL_MAJOR_VERSION 1
#define KFD_IOCTL_MINOR_VERSION 18
#define KFD_IOCTL_MINOR_VERSION 22
struct kfd_ioctl_get_version_args {
__u32 major_version; /* from KFD */
@@ -84,7 +88,7 @@ struct kfd_ioctl_create_queue_args {
__u32 ctx_save_restore_size; /* to KFD */
__u32 ctl_stack_size; /* to KFD */
__u32 sdma_engine_id; /* to KFD */
__u32 pad;
__u32 metadata_ring_size; /* to KFD */
};
struct kfd_ioctl_destroy_queue_args {
@@ -145,6 +149,8 @@ struct kfd_dbg_device_info_entry {
__u32 num_xcc;
__u32 capability;
__u32 debug_prop;
__u32 capability2;
__u32 pad;
};
/* For kfd_ioctl_set_memory_policy_args.default_policy and alternate_policy */
@@ -945,6 +951,7 @@ enum kfd_dbg_trap_address_watch_mode {
enum kfd_dbg_trap_flags {
KFD_DBG_TRAP_FLAG_SINGLE_MEM_OP = 1,
KFD_DBG_TRAP_FLAG_SINGLE_ALU_OP = 2,
KFD_DBG_TRAP_FLAG_LDS_OUT_OF_ADDR_RANGE = 4
};
/* Trap exceptions */
@@ -1671,7 +1678,10 @@ struct kfd_ioctl_dbg_trap_args {
#define AMDKFD_IOC_DBG_TRAP \
AMDKFD_IOWR(0x26, struct kfd_ioctl_dbg_trap_args)
#define AMDKFD_IOC_CREATE_PROCESS \
AMDKFD_IO(0x27)
#define AMDKFD_COMMAND_START 0x01
#define AMDKFD_COMMAND_END 0x27
#define AMDKFD_COMMAND_END 0x28
#endif
+2 -1
View File
@@ -64,7 +64,8 @@
#define HSA_CAP_RESERVED 0x000f8000
#define HSA_CAP2_PER_SDMA_QUEUE_RESET_SUPPORTED 0x00000001
#define HSA_CAP2_RESERVED 0xfffffffe
#define HSA_CAP2_TRAP_DEBUG_LDS_OUT_OF_ADDR_RANGE_SUPPORTED 0x00000002
#define HSA_CAP2_RESERVED 0xfffffffc
/* debug_prop bits in node properties */
#define HSA_DBG_WATCH_ADDR_MASK_LO_BIT_MASK 0x0000000f
+30 -6
View File
@@ -11,9 +11,11 @@
#include <linux/const.h>
#include <linux/types.h>
#include <linux/stddef.h>
#include <linux/ioctl.h>
#include <asm/kvm.h>
#define KVM_API_VERSION 12
/*
@@ -135,6 +137,12 @@ struct kvm_xen_exit {
} u;
};
struct kvm_exit_snp_req_certs {
__u64 gpa;
__u64 npages;
__u64 ret;
};
#define KVM_S390_GET_SKEYS_NONE 1
#define KVM_S390_SKEYS_MAX 1048576
@@ -180,6 +188,8 @@ struct kvm_xen_exit {
#define KVM_EXIT_MEMORY_FAULT 39
#define KVM_EXIT_TDX 40
#define KVM_EXIT_ARM_SEA 41
#define KVM_EXIT_ARM_LDST64B 42
#define KVM_EXIT_SNP_REQ_CERTS 43
/* For KVM_EXIT_INTERNAL_ERROR */
/* Emulate instruction failed. */
@@ -394,7 +404,7 @@ struct kvm_run {
} eoi;
/* KVM_EXIT_HYPERV */
struct kvm_hyperv_exit hyperv;
/* KVM_EXIT_ARM_NISV */
/* KVM_EXIT_ARM_NISV / KVM_EXIT_ARM_LDST64B */
struct {
__u64 esr_iss;
__u64 fault_ipa;
@@ -474,6 +484,8 @@ struct kvm_run {
__u64 gva;
__u64 gpa;
} arm_sea;
/* KVM_EXIT_SNP_REQ_CERTS */
struct kvm_exit_snp_req_certs snp_req_certs;
/* Fix the size of the union. */
char padding[256];
};
@@ -520,7 +532,7 @@ struct kvm_coalesced_mmio {
struct kvm_coalesced_mmio_ring {
__u32 first, last;
struct kvm_coalesced_mmio coalesced_mmio[];
__DECLARE_FLEX_ARRAY(struct kvm_coalesced_mmio, coalesced_mmio);
};
#define KVM_COALESCED_MMIO_MAX \
@@ -570,7 +582,7 @@ struct kvm_clear_dirty_log {
/* for KVM_SET_SIGNAL_MASK */
struct kvm_signal_mask {
__u32 len;
__u8 sigset[];
__DECLARE_FLEX_ARRAY(__u8, sigset);
};
/* for KVM_TPR_ACCESS_REPORTING */
@@ -966,6 +978,7 @@ struct kvm_enable_cap {
#define KVM_CAP_GUEST_MEMFD_FLAGS 244
#define KVM_CAP_ARM_SEA_TO_USER 245
#define KVM_CAP_S390_USER_OPEREXEC 246
#define KVM_CAP_S390_KEYOP 247
struct kvm_irq_routing_irqchip {
__u32 irqchip;
@@ -1028,7 +1041,7 @@ struct kvm_irq_routing_entry {
struct kvm_irq_routing {
__u32 nr;
__u32 flags;
struct kvm_irq_routing_entry entries[];
__DECLARE_FLEX_ARRAY(struct kvm_irq_routing_entry, entries);
};
#define KVM_IRQFD_FLAG_DEASSIGN (1 << 0)
@@ -1119,7 +1132,7 @@ struct kvm_dirty_tlb {
struct kvm_reg_list {
__u64 n; /* number of regs */
__u64 reg[];
__DECLARE_FLEX_ARRAY(__u64, reg);
};
struct kvm_one_reg {
@@ -1211,6 +1224,16 @@ struct kvm_vfio_spapr_tce {
__s32 tablefd;
};
#define KVM_S390_KEYOP_ISKE 0x01
#define KVM_S390_KEYOP_RRBE 0x02
#define KVM_S390_KEYOP_SSKE 0x03
struct kvm_s390_keyop {
__u64 guest_addr;
__u8 key;
__u8 operation;
__u8 pad[6];
};
/*
* KVM_CREATE_VCPU receives as a parameter the vcpu slot, and returns
* a vcpu fd.
@@ -1230,6 +1253,7 @@ struct kvm_vfio_spapr_tce {
#define KVM_S390_UCAS_MAP _IOW(KVMIO, 0x50, struct kvm_s390_ucas_mapping)
#define KVM_S390_UCAS_UNMAP _IOW(KVMIO, 0x51, struct kvm_s390_ucas_mapping)
#define KVM_S390_VCPU_FAULT _IOW(KVMIO, 0x52, unsigned long)
#define KVM_S390_KEYOP _IOWR(KVMIO, 0x53, struct kvm_s390_keyop)
/* Device model IOC */
#define KVM_CREATE_IRQCHIP _IO(KVMIO, 0x60)
@@ -1571,7 +1595,7 @@ struct kvm_stats_desc {
__u16 size;
__u32 offset;
__u32 bucket_size;
char name[];
__DECLARE_FLEX_ARRAY(char, name);
};
#define KVM_GET_STATS_FD _IO(KVMIO, 0xce)
+22 -8
View File
@@ -117,11 +117,24 @@ struct landlock_ruleset_attr {
* future nested domains, not the one being created. It can also be used
* with a @ruleset_fd value of -1 to mute subdomain logs without creating a
* domain.
*
* The following flag supports policy enforcement in multithreaded processes:
*
* %LANDLOCK_RESTRICT_SELF_TSYNC
* Applies the new Landlock configuration atomically to all threads of the
* current process, including the Landlock domain and logging
* configuration. This overrides the Landlock configuration of sibling
* threads, irrespective of previously established Landlock domains and
* logging configurations on these threads.
*
* If the calling thread is running with no_new_privs, this operation
* enables no_new_privs on the sibling threads as well.
*/
/* clang-format off */
#define LANDLOCK_RESTRICT_SELF_LOG_SAME_EXEC_OFF (1U << 0)
#define LANDLOCK_RESTRICT_SELF_LOG_NEW_EXEC_ON (1U << 1)
#define LANDLOCK_RESTRICT_SELF_LOG_SUBDOMAINS_OFF (1U << 2)
#define LANDLOCK_RESTRICT_SELF_TSYNC (1U << 3)
/* clang-format on */
/**
@@ -182,11 +195,13 @@ struct landlock_net_port_attr {
* It should be noted that port 0 passed to :manpage:`bind(2)` will bind
* to an available port from the ephemeral port range. This can be
* configured with the ``/proc/sys/net/ipv4/ip_local_port_range`` sysctl
* (also used for IPv6).
* (also used for IPv6), and within that range, on a per-socket basis
* with ``setsockopt(IP_LOCAL_PORT_RANGE)``.
*
* A Landlock rule with port 0 and the ``LANDLOCK_ACCESS_NET_BIND_TCP``
* A Landlock rule with port 0 and the %LANDLOCK_ACCESS_NET_BIND_TCP
* right means that requesting to bind on port 0 is allowed and it will
* automatically translate to binding on the related port range.
* automatically translate to binding on a kernel-assigned ephemeral
* port.
*/
__u64 port;
};
@@ -329,13 +344,12 @@ struct landlock_net_port_attr {
* These flags enable to restrict a sandboxed process to a set of network
* actions.
*
* This is supported since Landlock ABI version 4.
*
* The following access rights apply to TCP port numbers:
*
* - %LANDLOCK_ACCESS_NET_BIND_TCP: Bind a TCP socket to a local port.
* - %LANDLOCK_ACCESS_NET_CONNECT_TCP: Connect an active TCP socket to
* a remote port.
* - %LANDLOCK_ACCESS_NET_BIND_TCP: Bind TCP sockets to the given local
* port. Support added in Landlock ABI version 4.
* - %LANDLOCK_ACCESS_NET_CONNECT_TCP: Connect TCP sockets to the given
* remote port. Support added in Landlock ABI version 4.
*/
/* clang-format off */
#define LANDLOCK_ACCESS_NET_BIND_TCP (1ULL << 0)
+1
View File
@@ -104,5 +104,6 @@
#define SECRETMEM_MAGIC 0x5345434d /* "SECM" */
#define PID_FS_MAGIC 0x50494446 /* "PIDF" */
#define GUEST_MEMFD_MAGIC 0x474d454d /* "GMEM" */
#define NULL_FS_MAGIC 0x4E554C4C /* "NULL" */
#endif /* __LINUX_MAGIC_H__ */
+3
View File
@@ -39,6 +39,9 @@ enum {
#define MPOL_MODE_FLAGS \
(MPOL_F_STATIC_NODES | MPOL_F_RELATIVE_NODES | MPOL_F_NUMA_BALANCING)
/* Whether the nodemask is specified by users */
#define MPOL_USER_NODEMASK_FLAGS (MPOL_F_STATIC_NODES | MPOL_F_RELATIVE_NODES)
/* Flags for get_mempolicy */
#define MPOL_F_NODE (1<<0) /* return next IL mode instead of node mask */
#define MPOL_F_ADDR (1<<1) /* look up vma using address */
+11 -2
View File
@@ -61,7 +61,8 @@
/*
* open_tree() flags.
*/
#define OPEN_TREE_CLONE 1 /* Clone the target tree and attach the clone */
#define OPEN_TREE_CLONE (1 << 0) /* Clone the target tree and attach the clone */
#define OPEN_TREE_NAMESPACE (1 << 1) /* Clone the target tree into a new mount namespace */
#define OPEN_TREE_CLOEXEC O_CLOEXEC /* Close the file on execve() */
/*
@@ -197,7 +198,10 @@ struct statmount {
*/
struct mnt_id_req {
__u32 size;
__u32 mnt_ns_fd;
union {
__u32 mnt_ns_fd;
__u32 mnt_fd;
};
__u64 mnt_id;
__u64 param;
__u64 mnt_ns_id;
@@ -232,4 +236,9 @@ struct mnt_id_req {
#define LSMT_ROOT 0xffffffffffffffff /* root mount */
#define LISTMOUNT_REVERSE (1 << 0) /* List later mounts first */
/*
* @flag bits for statmount(2)
*/
#define STATMOUNT_BY_FD 0x00000001U /* want mountinfo for given fd */
#endif /* _LINUX_MOUNT_H */
+1 -1
View File
@@ -11,7 +11,7 @@
#define MPTCP_PM_VER 1
/**
* enum mptcp_event_type
* enum mptcp_event_type - Netlink MPTCP event types
* @MPTCP_EVENT_UNSPEC: unused event
* @MPTCP_EVENT_CREATED: A new MPTCP connection has been created. It is the
* good time to allocate memory and send ADD_ADDR if needed. Depending on the
+2
View File
@@ -27,6 +27,8 @@ enum {
MSHV_PT_BIT_X2APIC,
MSHV_PT_BIT_GPA_SUPER_PAGES,
MSHV_PT_BIT_CPU_AND_XSAVE_FEATURES,
MSHV_PT_BIT_NESTED_VIRTUALIZATION,
MSHV_PT_BIT_SMT_ENABLED_GUEST,
MSHV_PT_BIT_COUNT,
};
+5 -4
View File
@@ -5,13 +5,14 @@
/* bridge-specific defines for netfilter.
*/
#include <netinet/if_ether.h> /* for __UAPI_DEF_ETHHDR if defined */
#include <linux/in.h>
#include <linux/netfilter.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/if_pppox.h>
#include <limits.h> /* for INT_MIN, INT_MAX */
#include <linux/typelimits.h>
/* Bridge Hooks */
/* After promisc drops, checksum checks. */
@@ -29,14 +30,14 @@
#define NF_BR_NUMHOOKS 6
enum nf_br_hook_priorities {
NF_BR_PRI_FIRST = INT_MIN,
NF_BR_PRI_FIRST = __KERNEL_INT_MIN,
NF_BR_PRI_NAT_DST_BRIDGED = -300,
NF_BR_PRI_FILTER_BRIDGED = -200,
NF_BR_PRI_BRNF = 0,
NF_BR_PRI_NAT_DST_OTHER = 100,
NF_BR_PRI_FILTER_OTHER = 200,
NF_BR_PRI_NAT_SRC = 300,
NF_BR_PRI_LAST = INT_MAX,
NF_BR_PRI_LAST = __KERNEL_INT_MAX,
};
#endif /* __LINUX_BRIDGE_NETFILTER_H */
+4 -5
View File
@@ -7,11 +7,10 @@
#include <linux/netfilter.h>
#include <linux/typelimits.h>
/* only for userspace compatibility */
#include <limits.h> /* for INT_MIN, INT_MAX */
/* IP Hooks */
/* After promisc drops, checksum checks. */
#define NF_IP_PRE_ROUTING 0
@@ -26,7 +25,7 @@
#define NF_IP_NUMHOOKS 5
enum nf_ip_hook_priorities {
NF_IP_PRI_FIRST = INT_MIN,
NF_IP_PRI_FIRST = __KERNEL_INT_MIN,
NF_IP_PRI_RAW_BEFORE_DEFRAG = -450,
NF_IP_PRI_CONNTRACK_DEFRAG = -400,
NF_IP_PRI_RAW = -300,
@@ -39,8 +38,8 @@ enum nf_ip_hook_priorities {
NF_IP_PRI_NAT_SRC = 100,
NF_IP_PRI_SELINUX_LAST = 225,
NF_IP_PRI_CONNTRACK_HELPER = 300,
NF_IP_PRI_CONNTRACK_CONFIRM = INT_MAX,
NF_IP_PRI_LAST = INT_MAX,
NF_IP_PRI_CONNTRACK_CONFIRM = __KERNEL_INT_MAX,
NF_IP_PRI_LAST = __KERNEL_INT_MAX,
};
/* Arguments for setsockopt SOL_IP: */
+3 -4
View File
@@ -10,11 +10,10 @@
#include <linux/netfilter.h>
#include <linux/typelimits.h>
/* only for userspace compatibility */
#include <limits.h> /* for INT_MIN, INT_MAX */
/* IP6 Hooks */
/* After promisc drops, checksum checks. */
#define NF_IP6_PRE_ROUTING 0
@@ -30,7 +29,7 @@
enum nf_ip6_hook_priorities {
NF_IP6_PRI_FIRST = INT_MIN,
NF_IP6_PRI_FIRST = __KERNEL_INT_MIN,
NF_IP6_PRI_RAW_BEFORE_DEFRAG = -450,
NF_IP6_PRI_CONNTRACK_DEFRAG = -400,
NF_IP6_PRI_RAW = -300,
@@ -43,7 +42,7 @@ enum nf_ip6_hook_priorities {
NF_IP6_PRI_NAT_SRC = 100,
NF_IP6_PRI_SELINUX_LAST = 225,
NF_IP6_PRI_CONNTRACK_HELPER = 300,
NF_IP6_PRI_LAST = INT_MAX,
NF_IP6_PRI_LAST = __KERNEL_INT_MAX,
};
+1 -1
View File
@@ -55,7 +55,7 @@
NFSERR_NODEV = 19, /* v2 v3 v4 */
NFSERR_NOTDIR = 20, /* v2 v3 v4 */
NFSERR_ISDIR = 21, /* v2 v3 v4 */
NFSERR_INVAL = 22, /* v2 v3 v4 */
NFSERR_INVAL = 22, /* v3 v4 */
NFSERR_FBIG = 27, /* v2 v3 v4 */
NFSERR_NOSPC = 28, /* v2 v3 v4 */
NFSERR_ROFS = 30, /* v2 v3 v4 */
+1
View File
@@ -35,6 +35,7 @@ enum {
NFSD_A_SERVER_GRACETIME,
NFSD_A_SERVER_LEASETIME,
NFSD_A_SERVER_SCOPE,
NFSD_A_SERVER_MIN_THREADS,
__NFSD_A_SERVER_MAX,
NFSD_A_SERVER_MAX = (__NFSD_A_SERVER_MAX - 1)
+2 -2
View File
@@ -58,7 +58,7 @@ NILFS_CPINFO_FNS(INVALID, invalid)
NILFS_CPINFO_FNS(MINOR, minor)
/**
* nilfs_suinfo - segment usage information
* struct nilfs_suinfo - segment usage information
* @sui_lastmod: timestamp of last modification
* @sui_nblocks: number of written blocks in segment
* @sui_flags: segment usage flags
@@ -93,7 +93,7 @@ static __inline__ int nilfs_suinfo_clean(const struct nilfs_suinfo *si)
}
/**
* nilfs_suinfo_update - segment usage information update
* struct nilfs_suinfo_update - segment usage information update
* @sup_segnum: segment number
* @sup_flags: flags for which fields are active in sup_sui
* @sup_reserved: reserved necessary for alignment
+89 -58
View File
@@ -133,73 +133,104 @@ struct nilfs_super_root {
/**
* struct nilfs_super_block - structure of super block on disk
* @s_rev_level: Revision level
* @s_minor_rev_level: minor revision level
* @s_magic: Magic signature
* @s_bytes: Bytes count of CRC calculation for
* this structure. s_reserved is excluded.
* @s_flags: flags
* @s_crc_seed: Seed value of CRC calculation
* @s_sum: Check sum of super block
* @s_log_block_size: Block size represented as follows:
* blocksize = 1 << (s_log_block_size + 10)
* @s_nsegments: Number of segments in filesystem
* @s_dev_size: block device size in bytes
* @s_first_data_block: 1st seg disk block number
* @s_blocks_per_segment: number of blocks per full segment
* @s_r_segments_percentage: Reserved segments percentage
* @s_last_cno: Last checkpoint number
* @s_last_pseg: disk block addr pseg written last
* @s_last_seq: seq. number of seg written last
* @s_free_blocks_count: Free blocks count
* @s_ctime: Creation time (execution time of newfs)
* @s_mtime: Mount time
* @s_wtime: Write time
* @s_mnt_count: Mount count
* @s_max_mnt_count: Maximal mount count
* @s_state: File system state
* @s_errors: Behaviour when detecting errors
* @s_lastcheck: time of last check
* @s_checkinterval: max. time between checks
* @s_creator_os: OS
* @s_def_resuid: Default uid for reserved blocks
* @s_def_resgid: Default gid for reserved blocks
* @s_first_ino: First non-reserved inode
* @s_inode_size: Size of an inode
* @s_dat_entry_size: Size of a dat entry
* @s_checkpoint_size: Size of a checkpoint
* @s_segment_usage_size: Size of a segment usage
* @s_uuid: 128-bit uuid for volume
* @s_volume_name: volume name
* @s_c_interval: Commit interval of segment
* @s_c_block_max: Threshold of data amount for the
* segment construction
* @s_feature_compat: Compatible feature set
* @s_feature_compat_ro: Read-only compatible feature set
* @s_feature_incompat: Incompatible feature set
* @s_reserved: padding to the end of the block
*/
struct nilfs_super_block {
/*00*/ __le32 s_rev_level; /* Revision level */
__le16 s_minor_rev_level; /* minor revision level */
__le16 s_magic; /* Magic signature */
/*00*/ __le32 s_rev_level;
__le16 s_minor_rev_level;
__le16 s_magic;
__le16 s_bytes; /*
* Bytes count of CRC calculation
* for this structure. s_reserved
* is excluded.
*/
__le16 s_flags; /* flags */
__le32 s_crc_seed; /* Seed value of CRC calculation */
/*10*/ __le32 s_sum; /* Check sum of super block */
__le16 s_bytes;
__le16 s_flags;
__le32 s_crc_seed;
/*10*/ __le32 s_sum;
__le32 s_log_block_size; /*
* Block size represented as follows
* blocksize =
* 1 << (s_log_block_size + 10)
*/
__le64 s_nsegments; /* Number of segments in filesystem */
/*20*/ __le64 s_dev_size; /* block device size in bytes */
__le64 s_first_data_block; /* 1st seg disk block number */
/*30*/ __le32 s_blocks_per_segment; /* number of blocks per full segment */
__le32 s_r_segments_percentage; /* Reserved segments percentage */
__le32 s_log_block_size;
__le64 s_nsegments;
/*20*/ __le64 s_dev_size;
__le64 s_first_data_block;
/*30*/ __le32 s_blocks_per_segment;
__le32 s_r_segments_percentage;
__le64 s_last_cno; /* Last checkpoint number */
/*40*/ __le64 s_last_pseg; /* disk block addr pseg written last */
__le64 s_last_seq; /* seq. number of seg written last */
/*50*/ __le64 s_free_blocks_count; /* Free blocks count */
__le64 s_last_cno;
/*40*/ __le64 s_last_pseg;
__le64 s_last_seq;
/*50*/ __le64 s_free_blocks_count;
__le64 s_ctime; /*
* Creation time (execution time of
* newfs)
*/
/*60*/ __le64 s_mtime; /* Mount time */
__le64 s_wtime; /* Write time */
/*70*/ __le16 s_mnt_count; /* Mount count */
__le16 s_max_mnt_count; /* Maximal mount count */
__le16 s_state; /* File system state */
__le16 s_errors; /* Behaviour when detecting errors */
__le64 s_lastcheck; /* time of last check */
__le64 s_ctime;
/*60*/ __le64 s_mtime;
__le64 s_wtime;
/*70*/ __le16 s_mnt_count;
__le16 s_max_mnt_count;
__le16 s_state;
__le16 s_errors;
__le64 s_lastcheck;
/*80*/ __le32 s_checkinterval; /* max. time between checks */
__le32 s_creator_os; /* OS */
__le16 s_def_resuid; /* Default uid for reserved blocks */
__le16 s_def_resgid; /* Default gid for reserved blocks */
__le32 s_first_ino; /* First non-reserved inode */
/*80*/ __le32 s_checkinterval;
__le32 s_creator_os;
__le16 s_def_resuid;
__le16 s_def_resgid;
__le32 s_first_ino;
/*90*/ __le16 s_inode_size; /* Size of an inode */
__le16 s_dat_entry_size; /* Size of a dat entry */
__le16 s_checkpoint_size; /* Size of a checkpoint */
__le16 s_segment_usage_size; /* Size of a segment usage */
/*90*/ __le16 s_inode_size;
__le16 s_dat_entry_size;
__le16 s_checkpoint_size;
__le16 s_segment_usage_size;
/*98*/ __u8 s_uuid[16]; /* 128-bit uuid for volume */
/*A8*/ char s_volume_name[80] /* volume name */
__kernel_nonstring;
/*98*/ __u8 s_uuid[16];
/*A8*/ char s_volume_name[80] __kernel_nonstring;
/*F8*/ __le32 s_c_interval; /* Commit interval of segment */
__le32 s_c_block_max; /*
* Threshold of data amount for
* the segment construction
*/
/*100*/ __le64 s_feature_compat; /* Compatible feature set */
__le64 s_feature_compat_ro; /* Read-only compatible feature set */
__le64 s_feature_incompat; /* Incompatible feature set */
__u32 s_reserved[186]; /* padding to the end of the block */
/*F8*/ __le32 s_c_interval;
__le32 s_c_block_max;
/*100*/ __le64 s_feature_compat;
__le64 s_feature_compat_ro;
__le64 s_feature_incompat;
__u32 s_reserved[186];
};
/*
@@ -449,7 +480,7 @@ struct nilfs_btree_node {
/**
* struct nilfs_direct_node - header of built-in bmap array
* @dn_flags: flags
* @dn_pad: padding
* @pad: padding
*/
struct nilfs_direct_node {
__u8 dn_flags;
+104 -3
View File
@@ -11,7 +11,7 @@
* Copyright 2008 Jouni Malinen <jouni.malinen@atheros.com>
* Copyright 2008 Colin McCabe <colin@cozybit.com>
* Copyright 2015-2017 Intel Deutschland GmbH
* Copyright (C) 2018-2025 Intel Corporation
* Copyright (C) 2018-2026 Intel Corporation
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -2974,6 +2974,16 @@ enum nl80211_commands {
* primary channel is 2 MHz wide, and the control channel designates
* the 1 MHz primary subchannel within that 2 MHz primary.
*
* @NL80211_ATTR_EPP_PEER: A flag attribute to indicate if the peer is an EPP
* STA. Used with %NL80211_CMD_NEW_STA and %NL80211_CMD_ADD_LINK_STA
*
* @NL80211_ATTR_UHR_CAPABILITY: UHR Capability information element (from
* association request when used with NL80211_CMD_NEW_STATION). Can be set
* only if HE/EHT are also available.
* @NL80211_ATTR_DISABLE_UHR: Force UHR capable interfaces to disable
* this feature during association. This is a flag attribute.
* Currently only supported in mac80211 drivers.
*
* @NUM_NL80211_ATTR: total number of nl80211_attrs available
* @NL80211_ATTR_MAX: highest attribute number currently defined
* @__NL80211_ATTR_AFTER_LAST: internal use
@@ -3542,6 +3552,11 @@ enum nl80211_attrs {
NL80211_ATTR_S1G_PRIMARY_2MHZ,
NL80211_ATTR_EPP_PEER,
NL80211_ATTR_UHR_CAPABILITY,
NL80211_ATTR_DISABLE_UHR,
/* add attributes here, update the policy in nl80211.c */
__NL80211_ATTR_AFTER_LAST,
@@ -3894,6 +3909,12 @@ enum nl80211_eht_ru_alloc {
* @NL80211_RATE_INFO_4_MHZ_WIDTH: 4 MHz S1G rate
* @NL80211_RATE_INFO_8_MHZ_WIDTH: 8 MHz S1G rate
* @NL80211_RATE_INFO_16_MHZ_WIDTH: 16 MHz S1G rate
* @NL80211_RATE_INFO_UHR_MCS: UHR MCS index (u8, 0-15, 17, 19, 20, 23)
* Note that the other EHT attributes (such as @NL80211_RATE_INFO_EHT_NSS)
* are used in conjunction with this where applicable
* @NL80211_RATE_INFO_UHR_ELR: UHR ELR flag, which restricts NSS to 1,
* MCS to 0 or 1, and GI to %NL80211_RATE_INFO_EHT_GI_1_6.
* @NL80211_RATE_INFO_UHR_IM: UHR Interference Mitigation flag
* @__NL80211_RATE_INFO_AFTER_LAST: internal use
*/
enum nl80211_rate_info {
@@ -3927,6 +3948,9 @@ enum nl80211_rate_info {
NL80211_RATE_INFO_4_MHZ_WIDTH,
NL80211_RATE_INFO_8_MHZ_WIDTH,
NL80211_RATE_INFO_16_MHZ_WIDTH,
NL80211_RATE_INFO_UHR_MCS,
NL80211_RATE_INFO_UHR_ELR,
NL80211_RATE_INFO_UHR_IM,
/* keep last */
__NL80211_RATE_INFO_AFTER_LAST,
@@ -4249,6 +4273,10 @@ enum nl80211_mpath_info {
* capabilities element
* @NL80211_BAND_IFTYPE_ATTR_EHT_CAP_PPE: EHT PPE thresholds information as
* defined in EHT capabilities element
* @NL80211_BAND_IFTYPE_ATTR_UHR_CAP_MAC: UHR MAC capabilities as in UHR
* capabilities element
* @NL80211_BAND_IFTYPE_ATTR_UHR_CAP_PHY: UHR PHY capabilities as in UHR
* capabilities element
* @__NL80211_BAND_IFTYPE_ATTR_AFTER_LAST: internal use
* @NL80211_BAND_IFTYPE_ATTR_MAX: highest band attribute currently defined
*/
@@ -4266,6 +4294,8 @@ enum nl80211_band_iftype_attr {
NL80211_BAND_IFTYPE_ATTR_EHT_CAP_PHY,
NL80211_BAND_IFTYPE_ATTR_EHT_CAP_MCS_SET,
NL80211_BAND_IFTYPE_ATTR_EHT_CAP_PPE,
NL80211_BAND_IFTYPE_ATTR_UHR_CAP_MAC,
NL80211_BAND_IFTYPE_ATTR_UHR_CAP_PHY,
/* keep last */
__NL80211_BAND_IFTYPE_ATTR_AFTER_LAST,
@@ -4445,6 +4475,11 @@ enum nl80211_wmm_rule {
* channel in current regulatory domain.
* @NL80211_FREQUENCY_ATTR_NO_16MHZ: 16 MHz operation is not allowed on this
* channel in current regulatory domain.
* @NL80211_FREQUENCY_ATTR_S1G_NO_PRIMARY: Channel is not permitted for use
* as a primary channel. Does not prevent the channel from existing
* as a non-primary subchannel. Only applicable to S1G channels.
* @NL80211_FREQUENCY_ATTR_NO_UHR: UHR operation is not allowed on this channel
* in current regulatory domain.
* @NL80211_FREQUENCY_ATTR_MAX: highest frequency attribute number
* currently defined
* @__NL80211_FREQUENCY_ATTR_AFTER_LAST: internal use
@@ -4493,6 +4528,8 @@ enum nl80211_frequency_attr {
NL80211_FREQUENCY_ATTR_NO_4MHZ,
NL80211_FREQUENCY_ATTR_NO_8MHZ,
NL80211_FREQUENCY_ATTR_NO_16MHZ,
NL80211_FREQUENCY_ATTR_S1G_NO_PRIMARY,
NL80211_FREQUENCY_ATTR_NO_UHR,
/* keep last */
__NL80211_FREQUENCY_ATTR_AFTER_LAST,
@@ -4706,6 +4743,7 @@ enum nl80211_sched_scan_match_attr {
* despite NO_IR configuration.
* @NL80211_RRF_ALLOW_20MHZ_ACTIVITY: Allow activity in 20 MHz bandwidth,
* despite NO_IR configuration.
* @NL80211_RRF_NO_UHR: UHR operation not allowed
*/
enum nl80211_reg_rule_flags {
NL80211_RRF_NO_OFDM = 1 << 0,
@@ -4732,6 +4770,7 @@ enum nl80211_reg_rule_flags {
NL80211_RRF_NO_6GHZ_AFC_CLIENT = 1 << 23,
NL80211_RRF_ALLOW_6GHZ_VLP_AP = 1 << 24,
NL80211_RRF_ALLOW_20MHZ_ACTIVITY = 1 << 25,
NL80211_RRF_NO_UHR = 1 << 26,
};
#define NL80211_RRF_PASSIVE_SCAN NL80211_RRF_NO_IR
@@ -5426,6 +5465,7 @@ enum nl80211_bss_status {
* @NL80211_AUTHTYPE_FILS_SK: Fast Initial Link Setup shared key
* @NL80211_AUTHTYPE_FILS_SK_PFS: Fast Initial Link Setup shared key with PFS
* @NL80211_AUTHTYPE_FILS_PK: Fast Initial Link Setup public key
* @NL80211_AUTHTYPE_EPPKE: Enhanced Privacy Protection Key Exchange
* @__NL80211_AUTHTYPE_NUM: internal
* @NL80211_AUTHTYPE_MAX: maximum valid auth algorithm
* @NL80211_AUTHTYPE_AUTOMATIC: determine automatically (if necessary by
@@ -5441,6 +5481,7 @@ enum nl80211_auth_type {
NL80211_AUTHTYPE_FILS_SK,
NL80211_AUTHTYPE_FILS_SK_PFS,
NL80211_AUTHTYPE_FILS_PK,
NL80211_AUTHTYPE_EPPKE,
/* keep last */
__NL80211_AUTHTYPE_NUM,
@@ -6745,6 +6786,15 @@ enum nl80211_feature_flags {
* @NL80211_EXT_FEATURE_BEACON_RATE_EHT: Driver supports beacon rate
* configuration (AP/mesh) with EHT rates.
*
* @NL80211_EXT_FEATURE_EPPKE: Driver supports Enhanced Privacy Protection
* Key Exchange (EPPKE) with user space SME (NL80211_CMD_AUTHENTICATE)
* in non-AP STA mode.
*
* @NL80211_EXT_FEATURE_ASSOC_FRAME_ENCRYPTION: This specifies that the
* driver supports encryption of (Re)Association Request and Response
* frames in both nonAP STA and AP mode as specified in
* "IEEE P802.11bi/D3.0, 12.16.6".
*
* @NUM_NL80211_EXT_FEATURES: number of extended features.
* @MAX_NL80211_EXT_FEATURES: highest extended feature index.
*/
@@ -6821,6 +6871,8 @@ enum nl80211_ext_feature_index {
NL80211_EXT_FEATURE_DFS_CONCURRENT,
NL80211_EXT_FEATURE_SPP_AMSDU_SUPPORT,
NL80211_EXT_FEATURE_BEACON_RATE_EHT,
NL80211_EXT_FEATURE_EPPKE,
NL80211_EXT_FEATURE_ASSOC_FRAME_ENCRYPTION,
/* add new features before the definition below */
NUM_NL80211_EXT_FEATURES,
@@ -7433,6 +7485,8 @@ enum nl80211_nan_band_conf_attributes {
* address that can take values from 50-6F-9A-01-00-00 to
* 50-6F-9A-01-FF-FF. This attribute is optional. If not present,
* a random Cluster ID will be chosen.
* This attribute will be ignored in NL80211_CMD_CHANGE_NAN_CONFIG
* since after NAN was started, the cluster ID can no longer change.
* @NL80211_NAN_CONF_EXTRA_ATTRS: Additional NAN attributes to be
* published in the beacons. This is an optional byte array.
* @NL80211_NAN_CONF_VENDOR_ELEMS: Vendor-specific elements that will
@@ -7767,6 +7821,30 @@ enum nl80211_peer_measurement_attrs {
* trigger based ranging measurement is supported
* @NL80211_PMSR_FTM_CAPA_ATTR_NON_TRIGGER_BASED: flag attribute indicating
* if non-trigger-based ranging measurement is supported
* @NL80211_PMSR_FTM_CAPA_ATTR_6GHZ_SUPPORT: flag attribute indicating if
* ranging on the 6 GHz band is supported
* @NL80211_PMSR_FTM_CAPA_ATTR_MAX_TX_LTF_REP: u32 attribute indicating
* the maximum number of LTF repetitions the device can transmit in the
* preamble of the ranging NDP (zero means only one LTF, no repetitions)
* @NL80211_PMSR_FTM_CAPA_ATTR_MAX_RX_LTF_REP: u32 attribute indicating
* the maximum number of LTF repetitions the device can receive in the
* preamble of the ranging NDP (zero means only one LTF, no repetitions)
* @NL80211_PMSR_FTM_CAPA_ATTR_MAX_TX_STS: u32 attribute indicating
* the maximum number of space-time streams supported for ranging NDP TX
* (zero-based)
* @NL80211_PMSR_FTM_CAPA_ATTR_MAX_RX_STS: u32 attribute indicating
* the maximum number of space-time streams supported for ranging NDP RX
* (zero-based)
* @NL80211_PMSR_FTM_CAPA_ATTR_MAX_TOTAL_LTF_TX: u32 attribute indicating the
* maximum total number of LTFs the device can transmit. The total number
* of LTFs is (number of LTF repetitions) * (number of space-time streams).
* This limits the allowed combinations of LTF repetitions and STS.
* @NL80211_PMSR_FTM_CAPA_ATTR_MAX_TOTAL_LTF_RX: u32 attribute indicating the
* maximum total number of LTFs the device can receive. The total number
* of LTFs is (number of LTF repetitions) * (number of space-time streams).
* This limits the allowed combinations of LTF repetitions and STS.
* @NL80211_PMSR_FTM_CAPA_ATTR_RSTA_SUPPORT: flag attribute indicating the
* device supports operating as the RSTA in PMSR FTM request
*
* @NUM_NL80211_PMSR_FTM_CAPA_ATTR: internal
* @NL80211_PMSR_FTM_CAPA_ATTR_MAX: highest attribute number
@@ -7784,6 +7862,14 @@ enum nl80211_peer_measurement_ftm_capa {
NL80211_PMSR_FTM_CAPA_ATTR_MAX_FTMS_PER_BURST,
NL80211_PMSR_FTM_CAPA_ATTR_TRIGGER_BASED,
NL80211_PMSR_FTM_CAPA_ATTR_NON_TRIGGER_BASED,
NL80211_PMSR_FTM_CAPA_ATTR_6GHZ_SUPPORT,
NL80211_PMSR_FTM_CAPA_ATTR_MAX_TX_LTF_REP,
NL80211_PMSR_FTM_CAPA_ATTR_MAX_RX_LTF_REP,
NL80211_PMSR_FTM_CAPA_ATTR_MAX_TX_STS,
NL80211_PMSR_FTM_CAPA_ATTR_MAX_RX_STS,
NL80211_PMSR_FTM_CAPA_ATTR_MAX_TOTAL_LTF_TX,
NL80211_PMSR_FTM_CAPA_ATTR_MAX_TOTAL_LTF_RX,
NL80211_PMSR_FTM_CAPA_ATTR_RSTA_SUPPORT,
/* keep last */
NUM_NL80211_PMSR_FTM_CAPA_ATTR,
@@ -7799,12 +7885,15 @@ enum nl80211_peer_measurement_ftm_capa {
* &enum nl80211_preamble), optional for DMG (u32)
* @NL80211_PMSR_FTM_REQ_ATTR_NUM_BURSTS_EXP: number of bursts exponent as in
* 802.11-2016 9.4.2.168 "Fine Timing Measurement Parameters element"
* (u8, 0-15, optional with default 15 i.e. "no preference")
* (u8, 0-15, optional with default 15 i.e. "no preference". No limit for
* non-EDCA ranging)
* @NL80211_PMSR_FTM_REQ_ATTR_BURST_PERIOD: interval between bursts in units
* of 100ms (u16, optional with default 0)
* @NL80211_PMSR_FTM_REQ_ATTR_BURST_DURATION: burst duration, as in 802.11-2016
* Table 9-257 "Burst Duration field encoding" (u8, 0-15, optional with
* default 15 i.e. "no preference")
* default 15 i.e. "no preference"). For non-EDCA ranging, this is the
* burst duration in milliseconds (optional with default 0, i.e. let the
* device decide).
* @NL80211_PMSR_FTM_REQ_ATTR_FTMS_PER_BURST: number of successful FTM frames
* requested per burst
* (u8, 0-31, optional with default 0 i.e. "no preference")
@@ -7833,6 +7922,14 @@ enum nl80211_peer_measurement_ftm_capa {
* @NL80211_PMSR_FTM_REQ_ATTR_BSS_COLOR: optional. The BSS color of the
* responder. Only valid if %NL80211_PMSR_FTM_REQ_ATTR_NON_TRIGGER_BASED
* or %NL80211_PMSR_FTM_REQ_ATTR_TRIGGER_BASED is set.
* @NL80211_PMSR_FTM_REQ_ATTR_RSTA: optional. Request to perform the measurement
* as the RSTA (flag). When set, the device is expected to dwell on the
* channel specified in %NL80211_PMSR_PEER_ATTR_CHAN until it receives the
* FTM request from the peer or the timeout specified by
* %NL80211_ATTR_TIMEOUT has expired.
* Only valid if %NL80211_PMSR_FTM_REQ_ATTR_LMR_FEEDBACK is set (so the
* RSTA will have the measurement results to report back in the FTM
* response).
*
* @NUM_NL80211_PMSR_FTM_REQ_ATTR: internal
* @NL80211_PMSR_FTM_REQ_ATTR_MAX: highest attribute number
@@ -7853,6 +7950,7 @@ enum nl80211_peer_measurement_ftm_req {
NL80211_PMSR_FTM_REQ_ATTR_NON_TRIGGER_BASED,
NL80211_PMSR_FTM_REQ_ATTR_LMR_FEEDBACK,
NL80211_PMSR_FTM_REQ_ATTR_BSS_COLOR,
NL80211_PMSR_FTM_REQ_ATTR_RSTA,
/* keep last */
NUM_NL80211_PMSR_FTM_REQ_ATTR,
@@ -7937,6 +8035,8 @@ enum nl80211_peer_measurement_ftm_failure_reasons {
* 9.4.2.22.1) starting with the Measurement Token, with Measurement
* Type 11.
* @NL80211_PMSR_FTM_RESP_ATTR_PAD: ignore, for u64/s64 padding only
* @NL80211_PMSR_FTM_RESP_ATTR_BURST_PERIOD: actual burst period used by
* the responder (similar to request, u16)
*
* @NUM_NL80211_PMSR_FTM_RESP_ATTR: internal
* @NL80211_PMSR_FTM_RESP_ATTR_MAX: highest attribute number
@@ -7965,6 +8065,7 @@ enum nl80211_peer_measurement_ftm_resp {
NL80211_PMSR_FTM_RESP_ATTR_LCI,
NL80211_PMSR_FTM_RESP_ATTR_CIVICLOC,
NL80211_PMSR_FTM_RESP_ATTR_PAD,
NL80211_PMSR_FTM_RESP_ATTR_BURST_PERIOD,
/* keep last */
NUM_NL80211_PMSR_FTM_RESP_ATTR,
+7
View File
@@ -39,4 +39,11 @@
#define PCIIOC_MMAP_IS_MEM (PCIIOC_BASE | 0x02) /* Set mmap state to MEM space. */
#define PCIIOC_WRITE_COMBINE (PCIIOC_BASE | 0x03) /* Enable/disable write-combining. */
enum pci_hotplug_event {
PCI_HOTPLUG_LINK_UP,
PCI_HOTPLUG_LINK_DOWN,
PCI_HOTPLUG_CARD_PRESENT,
PCI_HOTPLUG_CARD_NOT_PRESENT,
};
#endif /* LINUX_PCI_H */
+65 -6
View File
@@ -132,6 +132,11 @@
#define PCI_SECONDARY_BUS 0x19 /* Secondary bus number */
#define PCI_SUBORDINATE_BUS 0x1a /* Highest bus number behind the bridge */
#define PCI_SEC_LATENCY_TIMER 0x1b /* Latency timer for secondary interface */
/* Masks for dword-sized processing of Bus Number and Sec Latency Timer fields */
#define PCI_PRIMARY_BUS_MASK 0x000000ff
#define PCI_SECONDARY_BUS_MASK 0x0000ff00
#define PCI_SUBORDINATE_BUS_MASK 0x00ff0000
#define PCI_SEC_LATENCY_TIMER_MASK 0xff000000
#define PCI_IO_BASE 0x1c /* I/O range behind the bridge */
#define PCI_IO_LIMIT 0x1d
#define PCI_IO_RANGE_TYPE_MASK 0x0fUL /* I/O bridging type */
@@ -707,7 +712,7 @@
#define PCI_EXP_LNKCTL2_HASD 0x0020 /* HW Autonomous Speed Disable */
#define PCI_EXP_LNKSTA2 0x32 /* Link Status 2 */
#define PCI_EXP_LNKSTA2_FLIT 0x0400 /* Flit Mode Status */
#define PCI_CAP_EXP_ENDPOINT_SIZEOF_V2 0x32 /* end of v2 EPs w/ link */
#define PCI_CAP_EXP_ENDPOINT_SIZEOF_V2 0x34 /* end of v2 EPs w/ link */
#define PCI_EXP_SLTCAP2 0x34 /* Slot Capabilities 2 */
#define PCI_EXP_SLTCAP2_IBPD 0x00000001 /* In-band PD Disable Supported */
#define PCI_EXP_SLTCTL2 0x38 /* Slot Control 2 */
@@ -1253,11 +1258,6 @@
#define PCI_DEV3_STA 0x0c /* Device 3 Status Register */
#define PCI_DEV3_STA_SEGMENT 0x8 /* Segment Captured (end-to-end flit-mode detected) */
/* Compute Express Link (CXL r3.1, sec 8.1.5) */
#define PCI_DVSEC_CXL_PORT 3
#define PCI_DVSEC_CXL_PORT_CTL 0x0c
#define PCI_DVSEC_CXL_PORT_CTL_UNMASK_SBR 0x00000001
/* Integrity and Data Encryption Extended Capability */
#define PCI_IDE_CAP 0x04
#define PCI_IDE_CAP_LINK 0x1 /* Link IDE Stream Supported */
@@ -1338,4 +1338,63 @@
#define PCI_IDE_SEL_ADDR_3(x) (28 + (x) * PCI_IDE_SEL_ADDR_BLOCK_SIZE)
#define PCI_IDE_SEL_BLOCK_SIZE(nr_assoc) (20 + PCI_IDE_SEL_ADDR_BLOCK_SIZE * (nr_assoc))
/*
* Compute Express Link (CXL r4.0, sec 8.1)
*
* Note that CXL DVSEC id 3 and 7 to be ignored when the CXL link state
* is "disconnected" (CXL r4.0, sec 9.12.3). Re-enumerate these
* registers on downstream link-up events.
*/
/* CXL r4.0, 8.1.3: PCIe DVSEC for CXL Device */
#define PCI_DVSEC_CXL_DEVICE 0
#define PCI_DVSEC_CXL_CAP 0xA
#define PCI_DVSEC_CXL_MEM_CAPABLE _BITUL(2)
#define PCI_DVSEC_CXL_HDM_COUNT __GENMASK(5, 4)
#define PCI_DVSEC_CXL_CTRL 0xC
#define PCI_DVSEC_CXL_MEM_ENABLE _BITUL(2)
#define PCI_DVSEC_CXL_RANGE_SIZE_HIGH(i) (0x18 + (i * 0x10))
#define PCI_DVSEC_CXL_RANGE_SIZE_LOW(i) (0x1C + (i * 0x10))
#define PCI_DVSEC_CXL_MEM_INFO_VALID _BITUL(0)
#define PCI_DVSEC_CXL_MEM_ACTIVE _BITUL(1)
#define PCI_DVSEC_CXL_MEM_SIZE_LOW __GENMASK(31, 28)
#define PCI_DVSEC_CXL_RANGE_BASE_HIGH(i) (0x20 + (i * 0x10))
#define PCI_DVSEC_CXL_RANGE_BASE_LOW(i) (0x24 + (i * 0x10))
#define PCI_DVSEC_CXL_MEM_BASE_LOW __GENMASK(31, 28)
#define CXL_DVSEC_RANGE_MAX 2
/* CXL r4.0, 8.1.4: Non-CXL Function Map DVSEC */
#define PCI_DVSEC_CXL_FUNCTION_MAP 2
/* CXL r4.0, 8.1.5: Extensions DVSEC for Ports */
#define PCI_DVSEC_CXL_PORT 3
#define PCI_DVSEC_CXL_PORT_CTL 0x0c
#define PCI_DVSEC_CXL_PORT_CTL_UNMASK_SBR 0x00000001
/* CXL r4.0, 8.1.6: GPF DVSEC for CXL Port */
#define PCI_DVSEC_CXL_PORT_GPF 4
#define PCI_DVSEC_CXL_PORT_GPF_PHASE_1_CONTROL 0x0C
#define PCI_DVSEC_CXL_PORT_GPF_PHASE_1_TMO_BASE __GENMASK(3, 0)
#define PCI_DVSEC_CXL_PORT_GPF_PHASE_1_TMO_SCALE __GENMASK(11, 8)
#define PCI_DVSEC_CXL_PORT_GPF_PHASE_2_CONTROL 0xE
#define PCI_DVSEC_CXL_PORT_GPF_PHASE_2_TMO_BASE __GENMASK(3, 0)
#define PCI_DVSEC_CXL_PORT_GPF_PHASE_2_TMO_SCALE __GENMASK(11, 8)
/* CXL r4.0, 8.1.7: GPF DVSEC for CXL Device */
#define PCI_DVSEC_CXL_DEVICE_GPF 5
/* CXL r4.0, 8.1.8: Flex Bus DVSEC */
#define PCI_DVSEC_CXL_FLEXBUS_PORT 7
#define PCI_DVSEC_CXL_FLEXBUS_PORT_STATUS 0xE
#define PCI_DVSEC_CXL_FLEXBUS_PORT_STATUS_CACHE _BITUL(0)
#define PCI_DVSEC_CXL_FLEXBUS_PORT_STATUS_MEM _BITUL(2)
/* CXL r4.0, 8.1.9: Register Locator DVSEC */
#define PCI_DVSEC_CXL_REG_LOCATOR 8
#define PCI_DVSEC_CXL_REG_LOCATOR_BLOCK1 0xC
#define PCI_DVSEC_CXL_REG_LOCATOR_BIR __GENMASK(2, 0)
#define PCI_DVSEC_CXL_REG_LOCATOR_BLOCK_ID __GENMASK(15, 8)
#define PCI_DVSEC_CXL_REG_LOCATOR_BLOCK_OFF_LOW __GENMASK(31, 16)
#endif /* LINUX_PCI_REGS_H */
+1
View File
@@ -22,6 +22,7 @@
#define PCITEST_GET_IRQTYPE _IO('P', 0x9)
#define PCITEST_BARS _IO('P', 0xa)
#define PCITEST_DOORBELL _IO('P', 0xb)
#define PCITEST_BAR_SUBRANGE _IO('P', 0xc)
#define PCITEST_CLEAR_IRQ _IO('P', 0x10)
#define PCITEST_IRQ_TYPE_UNDEFINED -1
+24 -3
View File
@@ -1330,14 +1330,16 @@ union perf_mem_data_src {
mem_snoopx : 2, /* Snoop mode, ext */
mem_blk : 3, /* Access blocked */
mem_hops : 3, /* Hop level */
mem_rsvd : 18;
mem_region : 5, /* cache/memory regions */
mem_rsvd : 13;
};
};
#elif defined(__BIG_ENDIAN_BITFIELD)
union perf_mem_data_src {
__u64 val;
struct {
__u64 mem_rsvd : 18,
__u64 mem_rsvd : 13,
mem_region : 5, /* cache/memory regions */
mem_hops : 3, /* Hop level */
mem_blk : 3, /* Access blocked */
mem_snoopx : 2, /* Snoop mode, ext */
@@ -1394,7 +1396,7 @@ union perf_mem_data_src {
#define PERF_MEM_LVLNUM_L4 0x0004 /* L4 */
#define PERF_MEM_LVLNUM_L2_MHB 0x0005 /* L2 Miss Handling Buffer */
#define PERF_MEM_LVLNUM_MSC 0x0006 /* Memory-side Cache */
/* 0x007 available */
#define PERF_MEM_LVLNUM_L0 0x0007 /* L0 */
#define PERF_MEM_LVLNUM_UNC 0x0008 /* Uncached */
#define PERF_MEM_LVLNUM_CXL 0x0009 /* CXL */
#define PERF_MEM_LVLNUM_IO 0x000a /* I/O */
@@ -1447,6 +1449,25 @@ union perf_mem_data_src {
/* 5-7 available */
#define PERF_MEM_HOPS_SHIFT 43
/* Cache/Memory region */
#define PERF_MEM_REGION_NA 0x0 /* Invalid */
#define PERF_MEM_REGION_RSVD 0x01 /* Reserved */
#define PERF_MEM_REGION_L_SHARE 0x02 /* Local CA shared cache */
#define PERF_MEM_REGION_L_NON_SHARE 0x03 /* Local CA non-shared cache */
#define PERF_MEM_REGION_O_IO 0x04 /* Other CA IO agent */
#define PERF_MEM_REGION_O_SHARE 0x05 /* Other CA shared cache */
#define PERF_MEM_REGION_O_NON_SHARE 0x06 /* Other CA non-shared cache */
#define PERF_MEM_REGION_MMIO 0x07 /* MMIO */
#define PERF_MEM_REGION_MEM0 0x08 /* Memory region 0 */
#define PERF_MEM_REGION_MEM1 0x09 /* Memory region 1 */
#define PERF_MEM_REGION_MEM2 0x0a /* Memory region 2 */
#define PERF_MEM_REGION_MEM3 0x0b /* Memory region 3 */
#define PERF_MEM_REGION_MEM4 0x0c /* Memory region 4 */
#define PERF_MEM_REGION_MEM5 0x0d /* Memory region 5 */
#define PERF_MEM_REGION_MEM6 0x0e /* Memory region 6 */
#define PERF_MEM_REGION_MEM7 0x0f /* Memory region 7 */
#define PERF_MEM_REGION_SHIFT 46
#define PERF_MEM_S(a, s) \
(((__u64)PERF_MEM_##a##_##s) << PERF_MEM_##a##_SHIFT)
+1
View File
@@ -1036,6 +1036,7 @@ enum {
TCA_CAKE_STATS_DROP_NEXT_US,
TCA_CAKE_STATS_P_DROP,
TCA_CAKE_STATS_BLUE_TIMER_US,
TCA_CAKE_STATS_ACTIVE_QUEUES,
__TCA_CAKE_STATS_MAX
};
#define TCA_CAKE_STATS_MAX (__TCA_CAKE_STATS_MAX - 1)
+30
View File
@@ -386,4 +386,34 @@ struct prctl_mm_map {
# define PR_FUTEX_HASH_SET_SLOTS 1
# define PR_FUTEX_HASH_GET_SLOTS 2
/* RSEQ time slice extensions */
#define PR_RSEQ_SLICE_EXTENSION 79
# define PR_RSEQ_SLICE_EXTENSION_GET 1
# define PR_RSEQ_SLICE_EXTENSION_SET 2
/*
* Bits for RSEQ_SLICE_EXTENSION_GET/SET
* PR_RSEQ_SLICE_EXT_ENABLE: Enable
*/
# define PR_RSEQ_SLICE_EXT_ENABLE 0x01
/*
* Get or set the control flow integrity (CFI) configuration for the
* current thread.
*
* Some per-thread control flow integrity settings are not yet
* controlled through this prctl(); see for example
* PR_{GET,SET,LOCK}_SHADOW_STACK_STATUS
*/
#define PR_GET_CFI 80
#define PR_SET_CFI 81
/*
* Forward-edge CFI variants (excluding ARM64 BTI, which has its own
* prctl()s).
*/
#define PR_CFI_BRANCH_LANDING_PADS 0
/* Return and control values for PR_{GET,SET}_CFI */
# define PR_CFI_ENABLE _BITUL(0)
# define PR_CFI_DISABLE _BITUL(1)
# define PR_CFI_LOCK _BITUL(2)
#endif /* _LINUX_PRCTL_H */
+63 -6
View File
@@ -19,13 +19,20 @@ enum rseq_cpu_id_state {
};
enum rseq_flags {
RSEQ_FLAG_UNREGISTER = (1 << 0),
RSEQ_FLAG_UNREGISTER = (1 << 0),
RSEQ_FLAG_SLICE_EXT_DEFAULT_ON = (1 << 1),
};
enum rseq_cs_flags_bit {
/* Historical and unsupported bits */
RSEQ_CS_FLAG_NO_RESTART_ON_PREEMPT_BIT = 0,
RSEQ_CS_FLAG_NO_RESTART_ON_SIGNAL_BIT = 1,
RSEQ_CS_FLAG_NO_RESTART_ON_MIGRATE_BIT = 2,
/* (3) Intentional gap to put new bits into a separate byte */
/* User read only feature flags */
RSEQ_CS_FLAG_SLICE_EXT_AVAILABLE_BIT = 4,
RSEQ_CS_FLAG_SLICE_EXT_ENABLED_BIT = 5,
};
enum rseq_cs_flags {
@@ -35,6 +42,11 @@ enum rseq_cs_flags {
(1U << RSEQ_CS_FLAG_NO_RESTART_ON_SIGNAL_BIT),
RSEQ_CS_FLAG_NO_RESTART_ON_MIGRATE =
(1U << RSEQ_CS_FLAG_NO_RESTART_ON_MIGRATE_BIT),
RSEQ_CS_FLAG_SLICE_EXT_AVAILABLE =
(1U << RSEQ_CS_FLAG_SLICE_EXT_AVAILABLE_BIT),
RSEQ_CS_FLAG_SLICE_EXT_ENABLED =
(1U << RSEQ_CS_FLAG_SLICE_EXT_ENABLED_BIT),
};
/*
@@ -53,11 +65,39 @@ struct rseq_cs {
__u64 abort_ip;
} __attribute__((aligned(4 * sizeof(__u64))));
/*
* struct rseq is aligned on 4 * 8 bytes to ensure it is always
* contained within a single cache-line.
/**
* rseq_slice_ctrl - Time slice extension control structure
* @all: Compound value
* @request: Request for a time slice extension
* @granted: Granted time slice extension
*
* A single struct rseq per thread is allowed.
* @request is set by user space and can be cleared by user space or kernel
* space. @granted is set and cleared by the kernel and must only be read
* by user space.
*/
struct rseq_slice_ctrl {
union {
__u32 all;
struct {
__u8 request;
__u8 granted;
__u16 __reserved;
};
};
};
/*
* The original size and alignment of the allocation for struct rseq is
* 32 bytes.
*
* The allocation size needs to be greater or equal to
* max(getauxval(AT_RSEQ_FEATURE_SIZE), 32), and the allocation needs to
* be aligned on max(getauxval(AT_RSEQ_ALIGN), 32).
*
* As an alternative, userspace is allowed to use both the original size
* and alignment of 32 bytes for backward compatibility.
*
* A single active struct rseq registration per thread is allowed.
*/
struct rseq {
/*
@@ -141,10 +181,27 @@ struct rseq {
*/
__u32 mm_cid;
/*
* Time slice extension control structure. CPU local updates from
* kernel and user space.
*/
struct rseq_slice_ctrl slice_ctrl;
/*
* Before rseq became extensible, its original size was 32 bytes even
* though the active rseq area was only 20 bytes.
* Exposing a 32 bytes feature size would make life needlessly painful
* for userspace. Therefore, add a reserved byte after byte 32
* to bump the rseq feature size from 32 to 33.
* The next field to be added to the rseq area will be larger
* than one byte, and will replace this reserved byte.
*/
__u8 __reserved;
/*
* Flexible array member at end of structure, after last feature field.
*/
char end[];
} __attribute__((aligned(4 * sizeof(__u64))));
} __attribute__((aligned(32)));
#endif /* _LINUX_RSEQ_H */
-1
View File
@@ -5,7 +5,6 @@
#include <linux/ipc.h>
#include <linux/errno.h>
#include <asm-generic/hugetlb_encode.h>
#include <unistd.h>
/*
* SHMMNI, SHMMAX and SHMALL are default upper limits which can be
+4
View File
@@ -69,6 +69,10 @@
#define __counted_by_be(m)
#endif
#ifndef __counted_by_ptr
#define __counted_by_ptr(m)
#endif
#define __kernel_nonstring
#endif /* _LINUX_STDDEF_H */
+1 -2
View File
@@ -92,7 +92,6 @@ enum
KERN_DOMAINNAME=8, /* string: domainname */
KERN_PANIC=15, /* int: panic timeout */
KERN_REALROOTDEV=16, /* real root device to mount after initrd */
KERN_SPARC_REBOOT=21, /* reboot command on Sparc */
KERN_CTLALTDEL=22, /* int: allow ctl-alt-del to reboot */
@@ -183,7 +182,7 @@ enum
VM_LOWMEM_RESERVE_RATIO=20,/* reservation ratio for lower memory zones */
VM_MIN_FREE_KBYTES=21, /* Minimum free kilobytes to maintain */
VM_MAX_MAP_COUNT=22, /* int: Maximum number of mmaps/address-space */
VM_LAPTOP_MODE=23, /* vm laptop mode */
VM_BLOCK_DUMP=24, /* block dump mode */
VM_HUGETLB_GROUP=25, /* permitted hugetlb group */
VM_VFS_CACHE_PRESSURE=26, /* dcache/icache reclaim pressure */
+12 -1
View File
@@ -18,6 +18,7 @@
#define _LINUX_TASKSTATS_H
#include <linux/types.h>
#include <linux/time_types.h>
/* Format for per-task data returned to userland when
* - a task exits
@@ -34,7 +35,7 @@
*/
#define TASKSTATS_VERSION 16
#define TASKSTATS_VERSION 17
#define TS_COMM_LEN 32 /* should be >= TASK_COMM_LEN
* in linux/sched.h */
@@ -230,6 +231,16 @@ struct taskstats {
__u64 irq_delay_max;
__u64 irq_delay_min;
/*v17: delay max timestamp record*/
struct __kernel_timespec cpu_delay_max_ts;
struct __kernel_timespec blkio_delay_max_ts;
struct __kernel_timespec swapin_delay_max_ts;
struct __kernel_timespec freepages_delay_max_ts;
struct __kernel_timespec thrashing_delay_max_ts;
struct __kernel_timespec compact_delay_max_ts;
struct __kernel_timespec wpcopy_delay_max_ts;
struct __kernel_timespec irq_delay_max_ts;
};
+23 -3
View File
@@ -226,6 +226,24 @@ enum tcp_ca_state {
#define TCPF_CA_Loss (1<<TCP_CA_Loss)
};
/* Values for tcpi_ecn_mode after negotiation */
#define TCPI_ECN_MODE_DISABLED 0x0
#define TCPI_ECN_MODE_RFC3168 0x1
#define TCPI_ECN_MODE_ACCECN 0x2
#define TCPI_ECN_MODE_PENDING 0x3
/* Values for accecn_opt_seen */
#define TCP_ACCECN_OPT_NOT_SEEN 0x0
#define TCP_ACCECN_OPT_EMPTY_SEEN 0x1
#define TCP_ACCECN_OPT_COUNTER_SEEN 0x2
#define TCP_ACCECN_OPT_FAIL_SEEN 0x3
/* Values for accecn_fail_mode */
#define TCP_ACCECN_ACE_FAIL_SEND BIT(0)
#define TCP_ACCECN_ACE_FAIL_RECV BIT(1)
#define TCP_ACCECN_OPT_FAIL_SEND BIT(2)
#define TCP_ACCECN_OPT_FAIL_RECV BIT(3)
struct tcp_info {
__u8 tcpi_state;
__u8 tcpi_ca_state;
@@ -316,15 +334,17 @@ struct tcp_info {
* in milliseconds, including any
* unfinished recovery.
*/
__u32 tcpi_received_ce; /* # of CE marks received */
__u32 tcpi_received_ce; /* # of CE marked segments received */
__u32 tcpi_delivered_e1_bytes; /* Accurate ECN byte counters */
__u32 tcpi_delivered_e0_bytes;
__u32 tcpi_delivered_ce_bytes;
__u32 tcpi_received_e1_bytes;
__u32 tcpi_received_e0_bytes;
__u32 tcpi_received_ce_bytes;
__u16 tcpi_accecn_fail_mode;
__u16 tcpi_accecn_opt_seen;
__u32 tcpi_ecn_mode:2,
tcpi_accecn_opt_seen:2,
tcpi_accecn_fail_mode:4,
tcpi_options2:24;
};
/* netlink attributes types for SCM_TIMESTAMPING_OPT_STATS */
+8
View File
@@ -0,0 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef _LINUX_TYPELIMITS_H
#define _LINUX_TYPELIMITS_H
#define __KERNEL_INT_MAX ((int)(~0U >> 1))
#define __KERNEL_INT_MIN (-__KERNEL_INT_MAX - 1)
#endif /* _LINUX_TYPELIMITS_H */
+120 -1
View File
@@ -55,7 +55,8 @@
_IOWR('u', 0x15, struct ublksrv_ctrl_cmd)
#define UBLK_U_CMD_QUIESCE_DEV \
_IOWR('u', 0x16, struct ublksrv_ctrl_cmd)
#define UBLK_U_CMD_TRY_STOP_DEV \
_IOWR('u', 0x17, struct ublksrv_ctrl_cmd)
/*
* 64bits are enough now, and it should be easy to extend in case of
* running out of feature flags
@@ -103,6 +104,30 @@
#define UBLK_U_IO_UNREGISTER_IO_BUF \
_IOWR('u', 0x24, struct ublksrv_io_cmd)
/*
* return 0 if the command is run successfully, otherwise failure code
* is returned
*/
#define UBLK_U_IO_PREP_IO_CMDS \
_IOWR('u', 0x25, struct ublk_batch_io)
/*
* If failure code is returned, nothing in the command buffer is handled.
* Otherwise, the returned value means how many bytes in command buffer
* are handled actually, then number of handled IOs can be calculated with
* `elem_bytes` for each IO. IOs in the remained bytes are not committed,
* userspace has to check return value for dealing with partial committing
* correctly.
*/
#define UBLK_U_IO_COMMIT_IO_CMDS \
_IOWR('u', 0x26, struct ublk_batch_io)
/*
* Fetch io commands to provided buffer in multishot style,
* `IORING_URING_CMD_MULTISHOT` is required for this command.
*/
#define UBLK_U_IO_FETCH_IO_CMDS \
_IOWR('u', 0x27, struct ublk_batch_io)
/* only ABORT means that no re-fetch */
#define UBLK_IO_RES_OK 0
#define UBLK_IO_RES_NEED_GET_DATA 1
@@ -134,6 +159,10 @@
#define UBLKSRV_IO_BUF_TOTAL_BITS (UBLK_QID_OFF + UBLK_QID_BITS)
#define UBLKSRV_IO_BUF_TOTAL_SIZE (1ULL << UBLKSRV_IO_BUF_TOTAL_BITS)
/* Copy to/from request integrity buffer instead of data buffer */
#define UBLK_INTEGRITY_FLAG_OFF 62
#define UBLKSRV_IO_INTEGRITY_FLAG (1ULL << UBLK_INTEGRITY_FLAG_OFF)
/*
* ublk server can register data buffers for incoming I/O requests with a sparse
* io_uring buffer table. The request buffer can then be used as the data buffer
@@ -311,6 +340,36 @@
*/
#define UBLK_F_BUF_REG_OFF_DAEMON (1ULL << 14)
/*
* Support the following commands for delivering & committing io command
* in batch.
*
* - UBLK_U_IO_PREP_IO_CMDS
* - UBLK_U_IO_COMMIT_IO_CMDS
* - UBLK_U_IO_FETCH_IO_CMDS
* - UBLK_U_IO_REGISTER_IO_BUF
* - UBLK_U_IO_UNREGISTER_IO_BUF
*
* The existing UBLK_U_IO_FETCH_REQ, UBLK_U_IO_COMMIT_AND_FETCH_REQ and
* UBLK_U_IO_NEED_GET_DATA uring_cmd are not supported for this feature.
*/
#define UBLK_F_BATCH_IO (1ULL << 15)
/*
* ublk device supports requests with integrity/metadata buffer.
* Requires UBLK_F_USER_COPY.
*/
#define UBLK_F_INTEGRITY (1ULL << 16)
/*
* The device supports the UBLK_CMD_TRY_STOP_DEV command, which
* allows stopping the device only if there are no openers.
*/
#define UBLK_F_SAFE_STOP_DEV (1ULL << 17)
/* Disable automatic partition scanning when device is started */
#define UBLK_F_NO_AUTO_PART_SCAN (1ULL << 18)
/* device state */
#define UBLK_S_DEV_DEAD 0
#define UBLK_S_DEV_LIVE 1
@@ -408,6 +467,8 @@ struct ublksrv_ctrl_dev_info {
* passed in.
*/
#define UBLK_IO_F_NEED_REG_BUF (1U << 17)
/* Request has an integrity data buffer */
#define UBLK_IO_F_INTEGRITY (1UL << 18)
/*
* io cmd is described by this structure, and stored in share memory, indexed
@@ -525,6 +586,51 @@ struct ublksrv_io_cmd {
};
};
struct ublk_elem_header {
__u16 tag; /* IO tag */
/*
* Buffer index for incoming io command, only valid iff
* UBLK_F_AUTO_BUF_REG is set
*/
__u16 buf_index;
__s32 result; /* I/O completion result (commit only) */
};
/*
* uring_cmd buffer structure for batch commands
*
* buffer includes multiple elements, which number is specified by
* `nr_elem`. Each element buffer is organized in the following order:
*
* struct ublk_elem_buffer {
* // Mandatory fields (8 bytes)
* struct ublk_elem_header header;
*
* // Optional fields (8 bytes each, included based on flags)
*
* // Buffer address (if UBLK_BATCH_F_HAS_BUF_ADDR) for copying data
* // between ublk request and ublk server buffer
* __u64 buf_addr;
*
* // returned Zone append LBA (if UBLK_BATCH_F_HAS_ZONE_LBA)
* __u64 zone_lba;
* }
*
* Used for `UBLK_U_IO_PREP_IO_CMDS` and `UBLK_U_IO_COMMIT_IO_CMDS`
*/
struct ublk_batch_io {
__u16 q_id;
#define UBLK_BATCH_F_HAS_ZONE_LBA (1 << 0)
#define UBLK_BATCH_F_HAS_BUF_ADDR (1 << 1)
#define UBLK_BATCH_F_AUTO_BUF_REG_FALLBACK (1 << 2)
__u16 flags;
__u16 nr_elem;
__u8 elem_bytes;
__u8 reserved;
__u64 reserved2;
};
struct ublk_param_basic {
#define UBLK_ATTR_READ_ONLY (1 << 0)
#define UBLK_ATTR_ROTATIONAL (1 << 1)
@@ -600,6 +706,17 @@ struct ublk_param_segment {
__u8 pad[2];
};
struct ublk_param_integrity {
__u32 flags; /* LBMD_PI_CAP_* from linux/fs.h */
__u16 max_integrity_segments; /* 0 means no limit */
__u8 interval_exp;
__u8 metadata_size; /* UBLK_PARAM_TYPE_INTEGRITY requires nonzero */
__u8 pi_offset;
__u8 csum_type; /* LBMD_PI_CSUM_* from linux/fs.h */
__u8 tag_size;
__u8 pad[5];
};
struct ublk_params {
/*
* Total length of parameters, userspace has to set 'len' for both
@@ -614,6 +731,7 @@ struct ublk_params {
#define UBLK_PARAM_TYPE_ZONED (1 << 3)
#define UBLK_PARAM_TYPE_DMA_ALIGN (1 << 4)
#define UBLK_PARAM_TYPE_SEGMENT (1 << 5)
#define UBLK_PARAM_TYPE_INTEGRITY (1 << 6) /* requires UBLK_F_INTEGRITY */
__u32 types; /* types of parameter included */
struct ublk_param_basic basic;
@@ -622,6 +740,7 @@ struct ublk_params {
struct ublk_param_zoned zoned;
struct ublk_param_dma_align dma;
struct ublk_param_segment seg;
struct ublk_param_integrity integrity;
};
#endif
+63
View File
@@ -1188,6 +1188,8 @@ enum v4l2_flash_strobe_source {
#define V4L2_CID_FLASH_CHARGE (V4L2_CID_FLASH_CLASS_BASE + 11)
#define V4L2_CID_FLASH_READY (V4L2_CID_FLASH_CLASS_BASE + 12)
#define V4L2_CID_FLASH_DURATION (V4L2_CID_FLASH_CLASS_BASE + 13)
#define V4L2_CID_FLASH_STROBE_OE (V4L2_CID_FLASH_CLASS_BASE + 14)
/* JPEG-class control IDs */
@@ -2095,6 +2097,8 @@ struct v4l2_ctrl_mpeg2_quantisation {
#define V4L2_CID_STATELESS_HEVC_DECODE_MODE (V4L2_CID_CODEC_STATELESS_BASE + 405)
#define V4L2_CID_STATELESS_HEVC_START_CODE (V4L2_CID_CODEC_STATELESS_BASE + 406)
#define V4L2_CID_STATELESS_HEVC_ENTRY_POINT_OFFSETS (V4L2_CID_CODEC_STATELESS_BASE + 407)
#define V4L2_CID_STATELESS_HEVC_EXT_SPS_ST_RPS (V4L2_CID_CODEC_STATELESS_BASE + 408)
#define V4L2_CID_STATELESS_HEVC_EXT_SPS_LT_RPS (V4L2_CID_CODEC_STATELESS_BASE + 409)
enum v4l2_stateless_hevc_decode_mode {
V4L2_STATELESS_HEVC_DECODE_MODE_SLICE_BASED,
@@ -2550,6 +2554,65 @@ struct v4l2_ctrl_hevc_scaling_matrix {
__u8 scaling_list_dc_coef_32x32[2];
};
#define V4L2_HEVC_EXT_SPS_ST_RPS_FLAG_INTER_REF_PIC_SET_PRED 0x1
/*
* struct v4l2_ctrl_hevc_ext_sps_st_rps - HEVC short term RPS parameters
*
* Dynamic size 1-dimension array for short term RPS. The number of elements
* is v4l2_ctrl_hevc_sps::num_short_term_ref_pic_sets. It can contain up to 65 elements.
*
* @delta_idx_minus1: Specifies the delta compare to the index. See details in section 7.4.8
* "Short-term reference picture set semantics" of the specification.
* @delta_rps_sign: Sign of the delta as specified in section 7.4.8 "Short-term reference picture
* set semantics" of the specification.
* @abs_delta_rps_minus1: Absolute delta RPS as specified in section 7.4.8 "Short-term reference
* picture set semantics" of the specification.
* @num_negative_pics: Number of short-term RPS entries that have picture order count values less
* than the picture order count value of the current picture.
* @num_positive_pics: Number of short-term RPS entries that have picture order count values
* greater than the picture order count value of the current picture.
* @used_by_curr_pic: Bit j specifies if short-term RPS j is used by the current picture.
* @use_delta_flag: Bit j equals to 1 specifies that the j-th entry in the source candidate
* short-term RPS is included in this candidate short-term RPS.
* @delta_poc_s0_minus1: Specifies the negative picture order count delta for the i-th entry in
* the short-term RPS. See details in section 7.4.8 "Short-term reference
* picture set semantics" of the specification.
* @delta_poc_s1_minus1: Specifies the positive picture order count delta for the i-th entry in
* the short-term RPS. See details in section 7.4.8 "Short-term reference
* picture set semantics" of the specification.
* @flags: See V4L2_HEVC_EXT_SPS_ST_RPS_FLAG_{}
*/
struct v4l2_ctrl_hevc_ext_sps_st_rps {
__u8 delta_idx_minus1;
__u8 delta_rps_sign;
__u8 num_negative_pics;
__u8 num_positive_pics;
__u32 used_by_curr_pic;
__u32 use_delta_flag;
__u16 abs_delta_rps_minus1;
__u16 delta_poc_s0_minus1[16];
__u16 delta_poc_s1_minus1[16];
__u16 flags;
};
#define V4L2_HEVC_EXT_SPS_LT_RPS_FLAG_USED_LT 0x1
/*
* struct v4l2_ctrl_hevc_ext_sps_lt_rps - HEVC long term RPS parameters
*
* Dynamic size 1-dimension array for long term RPS. The number of elements
* is v4l2_ctrl_hevc_sps::num_long_term_ref_pics_sps. It can contain up to 65 elements.
*
* @lt_ref_pic_poc_lsb_sps: picture order count modulo MaxPicOrderCntLsb of the i-th candidate
* long-term reference picture.
* @flags: See V4L2_HEVC_EXT_SPS_LT_RPS_FLAG_{}
*/
struct v4l2_ctrl_hevc_ext_sps_lt_rps {
__u16 lt_ref_pic_poc_lsb_sps;
__u16 flags;
};
/* Stateless VP9 controls */
#define V4L2_VP9_LOOP_FILTER_FLAG_DELTA_ENABLED 0x1
+2 -2
View File
@@ -236,7 +236,7 @@ struct vmmdev_hgcm_function_parameter32 {
/** Relative to the request header. */
__u32 offset;
} page_list;
} u;
} __attribute__((packed)) u;
} __attribute__((packed));
VMMDEV_ASSERT_SIZE(vmmdev_hgcm_function_parameter32, 4 + 8);
@@ -251,7 +251,7 @@ struct vmmdev_hgcm_function_parameter64 {
union {
__u64 phys_addr;
__u64 linear_addr;
} u;
} __attribute__((packed)) u;
} __attribute__((packed)) pointer;
struct {
/** Size of the buffer described by the page list. */
+80 -5
View File
@@ -10,6 +10,10 @@
#define VDUSE_API_VERSION 0
/* VQ groups and ASID support */
#define VDUSE_API_VERSION_1 1
/*
* Get the version of VDUSE API that kernel supported (VDUSE_API_VERSION).
* This is used for future extension.
@@ -27,6 +31,8 @@
* @features: virtio features
* @vq_num: the number of virtqueues
* @vq_align: the allocation alignment of virtqueue's metadata
* @ngroups: number of vq groups that VDUSE device declares
* @nas: number of address spaces that VDUSE device declares
* @reserved: for future use, needs to be initialized to zero
* @config_size: the size of the configuration space
* @config: the buffer of the configuration space
@@ -41,7 +47,9 @@ struct vduse_dev_config {
__u64 features;
__u32 vq_num;
__u32 vq_align;
__u32 reserved[13];
__u32 ngroups; /* if VDUSE_API_VERSION >= 1 */
__u32 nas; /* if VDUSE_API_VERSION >= 1 */
__u32 reserved[11];
__u32 config_size;
__u8 config[];
};
@@ -118,14 +126,18 @@ struct vduse_config_data {
* struct vduse_vq_config - basic configuration of a virtqueue
* @index: virtqueue index
* @max_size: the max size of virtqueue
* @reserved: for future use, needs to be initialized to zero
* @reserved1: for future use, needs to be initialized to zero
* @group: virtqueue group
* @reserved2: for future use, needs to be initialized to zero
*
* Structure used by VDUSE_VQ_SETUP ioctl to setup a virtqueue.
*/
struct vduse_vq_config {
__u32 index;
__u16 max_size;
__u16 reserved[13];
__u16 reserved1;
__u32 group;
__u16 reserved2[10];
};
/*
@@ -156,6 +168,16 @@ struct vduse_vq_state_packed {
__u16 last_used_idx;
};
/**
* struct vduse_vq_group_asid - virtqueue group ASID
* @group: Index of the virtqueue group
* @asid: Address space ID of the group
*/
struct vduse_vq_group_asid {
__u32 group;
__u32 asid;
};
/**
* struct vduse_vq_info - information of a virtqueue
* @index: virtqueue index
@@ -215,6 +237,7 @@ struct vduse_vq_eventfd {
* @uaddr: start address of userspace memory, it must be aligned to page size
* @iova: start of the IOVA region
* @size: size of the IOVA region
* @asid: Address space ID of the IOVA region
* @reserved: for future use, needs to be initialized to zero
*
* Structure used by VDUSE_IOTLB_REG_UMEM and VDUSE_IOTLB_DEREG_UMEM
@@ -224,7 +247,8 @@ struct vduse_iova_umem {
__u64 uaddr;
__u64 iova;
__u64 size;
__u64 reserved[3];
__u32 asid;
__u32 reserved[5];
};
/* Register userspace memory for IOVA regions */
@@ -238,6 +262,7 @@ struct vduse_iova_umem {
* @start: start of the IOVA region
* @last: last of the IOVA region
* @capability: capability of the IOVA region
* @asid: Address space ID of the IOVA region, only if device API version >= 1
* @reserved: for future use, needs to be initialized to zero
*
* Structure used by VDUSE_IOTLB_GET_INFO ioctl to get information of
@@ -248,7 +273,8 @@ struct vduse_iova_info {
__u64 last;
#define VDUSE_IOVA_CAP_UMEM (1 << 0)
__u64 capability;
__u64 reserved[3];
__u32 asid; /* Only if device API version >= 1 */
__u32 reserved[5];
};
/*
@@ -257,6 +283,32 @@ struct vduse_iova_info {
*/
#define VDUSE_IOTLB_GET_INFO _IOWR(VDUSE_BASE, 0x1a, struct vduse_iova_info)
/**
* struct vduse_iotlb_entry_v2 - entry of IOTLB to describe one IOVA region
*
* @v1: the original vduse_iotlb_entry
* @asid: address space ID of the IOVA region
* @reserved: for future use, needs to be initialized to zero
*
* Structure used by VDUSE_IOTLB_GET_FD2 ioctl to find an overlapped IOVA region.
*/
struct vduse_iotlb_entry_v2 {
__u64 offset;
__u64 start;
__u64 last;
__u8 perm;
__u8 padding[7];
__u32 asid;
__u32 reserved[11];
};
/*
* Same as VDUSE_IOTLB_GET_FD but with vduse_iotlb_entry_v2 argument that
* support extra fields.
*/
#define VDUSE_IOTLB_GET_FD2 _IOWR(VDUSE_BASE, 0x1b, struct vduse_iotlb_entry_v2)
/* The control messages definition for read(2)/write(2) on /dev/vduse/$NAME */
/**
@@ -265,11 +317,14 @@ struct vduse_iova_info {
* @VDUSE_SET_STATUS: set the device status
* @VDUSE_UPDATE_IOTLB: Notify userspace to update the memory mapping for
* specified IOVA range via VDUSE_IOTLB_GET_FD ioctl
* @VDUSE_SET_VQ_GROUP_ASID: Notify userspace to update the address space of a
* virtqueue group.
*/
enum vduse_req_type {
VDUSE_GET_VQ_STATE,
VDUSE_SET_STATUS,
VDUSE_UPDATE_IOTLB,
VDUSE_SET_VQ_GROUP_ASID,
};
/**
@@ -304,6 +359,19 @@ struct vduse_iova_range {
__u64 last;
};
/**
* struct vduse_iova_range_v2 - IOVA range [start, last] if API_VERSION >= 1
* @start: start of the IOVA range
* @last: last of the IOVA range
* @asid: address space ID of the IOVA range
*/
struct vduse_iova_range_v2 {
__u64 start;
__u64 last;
__u32 asid;
__u32 padding;
};
/**
* struct vduse_dev_request - control request
* @type: request type
@@ -312,6 +380,8 @@ struct vduse_iova_range {
* @vq_state: virtqueue state, only index field is available
* @s: device status
* @iova: IOVA range for updating
* @iova_v2: IOVA range for updating if API_VERSION >= 1
* @vq_group_asid: ASID of a virtqueue group
* @padding: padding
*
* Structure used by read(2) on /dev/vduse/$NAME.
@@ -324,6 +394,11 @@ struct vduse_dev_request {
struct vduse_vq_state vq_state;
struct vduse_dev_status s;
struct vduse_iova_range iova;
/* Following members but padding exist only if vduse api
* version >= 1
*/
struct vduse_iova_range_v2 iova_v2;
struct vduse_vq_group_asid vq_group_asid;
__u32 padding[32];
};
};
+3 -3
View File
@@ -1,5 +1,5 @@
#define LINUX_VERSION_CODE 398080
#define LINUX_VERSION_CODE 458752
#define KERNEL_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + ((c) > 255 ? 255 : (c)))
#define LINUX_VERSION_MAJOR 6
#define LINUX_VERSION_PATCHLEVEL 19
#define LINUX_VERSION_MAJOR 7
#define LINUX_VERSION_PATCHLEVEL 0
#define LINUX_VERSION_SUBLEVEL 0
+4
View File
@@ -964,6 +964,10 @@ struct vfio_device_bind_iommufd {
* hwpt corresponding to the given pt_id.
*
* Return: 0 on success, -errno on failure.
*
* When a device is resetting, -EBUSY will be returned to reject any concurrent
* attachment to the resetting device itself or any sibling device in the IOMMU
* group having the resetting device.
*/
struct vfio_device_attach_iommufd_pt {
__u32 argsz;
+3
View File
@@ -748,6 +748,7 @@ struct v4l2_pix_format {
#define V4L2_PIX_FMT_H264_SLICE v4l2_fourcc('S', '2', '6', '4') /* H264 parsed slices */
#define V4L2_PIX_FMT_HEVC_SLICE v4l2_fourcc('S', '2', '6', '5') /* HEVC parsed slices */
#define V4L2_PIX_FMT_AV1_FRAME v4l2_fourcc('A', 'V', '1', 'F') /* AV1 parsed frame */
#define V4L2_PIX_FMT_AV1 v4l2_fourcc('A', 'V', '0', '1') /* AV1 */
#define V4L2_PIX_FMT_SPK v4l2_fourcc('S', 'P', 'K', '0') /* Sorenson Spark */
#define V4L2_PIX_FMT_RV30 v4l2_fourcc('R', 'V', '3', '0') /* RealVideo 8 */
#define V4L2_PIX_FMT_RV40 v4l2_fourcc('R', 'V', '4', '0') /* RealVideo 9 & 10 */
@@ -1916,6 +1917,8 @@ enum v4l2_ctrl_type {
V4L2_CTRL_TYPE_HEVC_SLICE_PARAMS = 0x0272,
V4L2_CTRL_TYPE_HEVC_SCALING_MATRIX = 0x0273,
V4L2_CTRL_TYPE_HEVC_DECODE_PARAMS = 0x0274,
V4L2_CTRL_TYPE_HEVC_EXT_SPS_ST_RPS = 0x0275,
V4L2_CTRL_TYPE_HEVC_EXT_SPS_LT_RPS = 0x0276,
V4L2_CTRL_TYPE_AV1_SEQUENCE = 0x280,
V4L2_CTRL_TYPE_AV1_TILE_GROUP_ENTRY = 0x281,
+1 -2
View File
@@ -31,7 +31,6 @@
* SUCH DAMAGE.
*
* Copyright Rusty Russell IBM Corporation 2007. */
#include <stdint.h>
#include <linux/types.h>
#include <linux/virtio_types.h>
@@ -200,7 +199,7 @@ static __inline__ void vring_init(struct vring *vr, unsigned int num, void *p,
vr->num = num;
vr->desc = p;
vr->avail = (struct vring_avail *)((char *)p + num * sizeof(struct vring_desc));
vr->used = (void *)(((uintptr_t)&vr->avail->ring[num] + sizeof(__virtio16)
vr->used = (void *)(((unsigned long)&vr->avail->ring[num] + sizeof(__virtio16)
+ align-1) & ~(align - 1));
}
+20
View File
@@ -115,6 +115,17 @@ struct vmclock_abi {
* bit again after the update, using the about-to-be-valid fields.
*/
#define VMCLOCK_FLAG_TIME_MONOTONIC (1 << 7)
/*
* If the VM_GEN_COUNTER_PRESENT flag is set, the hypervisor will
* bump the vm_generation_counter field every time the guest is
* loaded from some save state (restored from a snapshot).
*/
#define VMCLOCK_FLAG_VM_GEN_COUNTER_PRESENT (1 << 8)
/*
* If the NOTIFICATION_PRESENT flag is set, the hypervisor will send
* a notification every time it updates seq_count to a new even number.
*/
#define VMCLOCK_FLAG_NOTIFICATION_PRESENT (1 << 9)
__u8 pad[2];
__u8 clock_status;
@@ -177,6 +188,15 @@ struct vmclock_abi {
__le64 time_frac_sec; /* Units of 1/2^64 of a second */
__le64 time_esterror_nanosec;
__le64 time_maxerror_nanosec;
/*
* This field changes to another non-repeating value when the guest
* has been loaded from a snapshot. In addition to handling a
* disruption in time (which will also be signalled through the
* disruption_marker field), a guest may wish to discard UUIDs,
* reset network connections, reseed entropy, etc.
*/
__le64 vm_generation_counter;
};
#endif /* __VMCLOCK_ABI_H__ */
+16
View File
@@ -56,6 +56,7 @@ enum {
BNXT_RE_UCNTX_CMASK_DBR_PACING_ENABLED = 0x08ULL,
BNXT_RE_UCNTX_CMASK_POW2_DISABLED = 0x10ULL,
BNXT_RE_UCNTX_CMASK_MSN_TABLE_ENABLED = 0x40,
BNXT_RE_UCNTX_CMASK_QP_RATE_LIMIT_ENABLED = 0x80ULL,
};
enum bnxt_re_wqe_mode {
@@ -215,4 +216,19 @@ enum bnxt_re_toggle_mem_methods {
BNXT_RE_METHOD_GET_TOGGLE_MEM = (1U << UVERBS_ID_NS_SHIFT),
BNXT_RE_METHOD_RELEASE_TOGGLE_MEM,
};
struct bnxt_re_packet_pacing_caps {
__u32 qp_rate_limit_min;
__u32 qp_rate_limit_max; /* In kbps */
/* Corresponding bit will be set if qp type from
* 'enum ib_qp_type' is supported, e.g.
* supported_qpts |= 1 << IB_QPT_RC
*/
__u32 supported_qpts;
__u32 reserved;
};
struct bnxt_re_query_device_ex_resp {
struct bnxt_re_packet_pacing_caps packet_pacing_caps;
};
#endif /* __BNXT_RE_UVERBS_ABI_H__*/
@@ -56,6 +56,7 @@ enum uverbs_default_objects {
UVERBS_OBJECT_COUNTERS,
UVERBS_OBJECT_ASYNC_EVENT,
UVERBS_OBJECT_DMAH,
UVERBS_OBJECT_DMABUF,
};
enum {
@@ -73,6 +74,7 @@ enum uverbs_methods_device {
UVERBS_METHOD_QUERY_CONTEXT,
UVERBS_METHOD_QUERY_GID_TABLE,
UVERBS_METHOD_QUERY_GID_ENTRY,
UVERBS_METHOD_QUERY_PORT_SPEED,
};
enum uverbs_attrs_invoke_write_cmd_attr_ids {
@@ -86,6 +88,11 @@ enum uverbs_attrs_query_port_cmd_attr_ids {
UVERBS_ATTR_QUERY_PORT_RESP,
};
enum uverbs_attrs_query_port_speed_cmd_attr_ids {
UVERBS_ATTR_QUERY_PORT_SPEED_PORT_NUM,
UVERBS_ATTR_QUERY_PORT_SPEED_RESP,
};
enum uverbs_attrs_get_context_attr_ids {
UVERBS_ATTR_GET_CONTEXT_NUM_COMP_VECTORS,
UVERBS_ATTR_GET_CONTEXT_CORE_SUPPORT,
@@ -257,6 +264,15 @@ enum uverbs_methods_dmah {
UVERBS_METHOD_DMAH_FREE,
};
enum uverbs_attrs_alloc_dmabuf_cmd_attr_ids {
UVERBS_ATTR_ALLOC_DMABUF_HANDLE,
UVERBS_ATTR_ALLOC_DMABUF_PGOFF,
};
enum uverbs_methods_dmabuf {
UVERBS_METHOD_DMABUF_ALLOC,
};
enum uverbs_attrs_reg_dm_mr_cmd_attr_ids {
UVERBS_ATTR_REG_DM_MR_HANDLE,
UVERBS_ATTR_REG_DM_MR_OFFSET,
+3
View File
@@ -17,6 +17,9 @@
#define MANA_IB_UVERBS_ABI_VERSION 1
enum mana_ib_create_cq_flags {
/* Reserved for backward compatibility. Legacy
* kernel versions use it to create CQs in RNIC
*/
MANA_IB_CREATE_RNIC_CQ = 1 << 0,
};
+8 -9
View File
@@ -94,16 +94,15 @@ struct utp_upiu_header {
};
/**
* struct utp_upiu_query - upiu request buffer structure for
* query request.
* @opcode: command to perform B-0
* @idn: a value that indicates the particular type of data B-1
* @index: Index to further identify data B-2
* @selector: Index to further identify data B-3
* struct utp_upiu_query - QUERY REQUEST UPIU structure.
* @opcode: query function to perform B-0
* @idn: descriptor or attribute identification number B-1
* @index: Index that further identifies which data to access B-2
* @selector: Index that further identifies which data to access B-3
* @reserved_osf: spec reserved field B-4,5
* @length: number of descriptor bytes to read/write B-6,7
* @value: Attribute value to be written DW-5
* @reserved: spec reserved DW-6,7
* @length: number of descriptor bytes to read or write B-6,7
* @value: if @opcode == UPIU_QUERY_OPCODE_WRITE_ATTR, the value to be written B-6,7
* @reserved: reserved for future use DW-6,7
*/
struct utp_upiu_query {
__u8 opcode;
+6
View File
@@ -56,6 +56,9 @@
#define SOF_TKN_SCHED_LP_MODE 207
#define SOF_TKN_SCHED_MEM_USAGE 208
#define SOF_TKN_SCHED_USE_CHAIN_DMA 209
#define SOF_TKN_SCHED_KCPS 210
#define SOF_TKN_SCHED_DIRECTION 211
#define SOF_TKN_SCHED_DIRECTION_VALID 212
/* volume */
#define SOF_TKN_VOLUME_RAMP_STEP_TYPE 250
@@ -107,6 +110,9 @@
#define SOF_TKN_COMP_NO_WNAME_IN_KCONTROL_NAME 417
#define SOF_TKN_COMP_SCHED_DOMAIN 418
#define SOF_TKN_COMP_DOMAIN_ID 419
#define SOF_TKN_COMP_HEAP_BYTES_REQUIREMENT 420
#define SOF_TKN_COMP_STACK_BYTES_REQUIREMENT 421
/* SSP */
#define SOF_TKN_INTEL_SSP_CLKS_CONTROL 500
-63
View File
@@ -19,9 +19,6 @@
#include <linux/types.h>
/* Native single cycle endian swap insn */
#ifdef CONFIG_ARC_HAS_SWAPE
#define __arch_swab32(x) \
({ \
unsigned int tmp = x; \
@@ -32,66 +29,6 @@
tmp; \
})
#else
/* Several ways of Endian-Swap Emulation for ARC
* 0: kernel generic
* 1: ARC optimised "C"
* 2: ARC Custom instruction
*/
#define ARC_BSWAP_TYPE 1
#if (ARC_BSWAP_TYPE == 1) /******* Software only ********/
/* The kernel default implementation of htonl is
* return x<<24 | x>>24 |
* (x & (__u32)0x0000ff00UL)<<8 | (x & (__u32)0x00ff0000UL)>>8;
*
* This generates 9 instructions on ARC (excluding the ld/st)
*
* 8051fd8c: ld r3,[r7,20] ; Mem op : Get the value to be swapped
* 8051fd98: asl r5,r3,24 ; get 3rd Byte
* 8051fd9c: lsr r2,r3,24 ; get 0th Byte
* 8051fda0: and r4,r3,0xff00
* 8051fda8: asl r4,r4,8 ; get 1st Byte
* 8051fdac: and r3,r3,0x00ff0000
* 8051fdb4: or r2,r2,r5 ; combine 0th and 3rd Bytes
* 8051fdb8: lsr r3,r3,8 ; 2nd Byte at correct place in Dst Reg
* 8051fdbc: or r2,r2,r4 ; combine 0,3 Bytes with 1st Byte
* 8051fdc0: or r2,r2,r3 ; combine 0,3,1 Bytes with 2nd Byte
* 8051fdc4: st r2,[r1,20] ; Mem op : save result back to mem
*
* Joern suggested a better "C" algorithm which is great since
* (1) It is portable to any architecture
* (2) At the same time it takes advantage of ARC ISA (rotate intrns)
*/
#define __arch_swab32(x) \
({ unsigned long __in = (x), __tmp; \
__tmp = __in << 8 | __in >> 24; /* ror tmp,in,24 */ \
__in = __in << 24 | __in >> 8; /* ror in,in,8 */ \
__tmp ^= __in; \
__tmp &= 0xff00ff; \
__tmp ^ __in; \
})
#elif (ARC_BSWAP_TYPE == 2) /* Custom single cycle bswap instruction */
#define __arch_swab32(x) \
({ \
unsigned int tmp = x; \
__asm__( \
" .extInstruction bswap, 7, 0x00, SUFFIX_NONE, SYNTAX_2OP \n"\
" bswap %0, %1 \n"\
: "=r" (tmp) \
: "r" (tmp)); \
tmp; \
})
#endif /* ARC_BSWAP_TYPE=zzz */
#endif /* CONFIG_ARC_HAS_SWAPE */
#if !defined(__STRICT_ANSI__) || defined(__KERNEL__)
#define __SWAB_64_THRU_32__
#endif
+1
View File
@@ -351,6 +351,7 @@
#define __NR_file_getattr 468
#define __NR_file_setattr 469
#define __NR_listns 470
#define __NR_rseq_slice_yield 471
#endif /* _ASM_UNISTD_32_H */
-9
View File
@@ -89,15 +89,6 @@
#define PSR_IT_MASK 0x0600fc00 /* If-Then execution state mask */
#define PSR_ENDIAN_MASK 0x00000200 /* Endianness state mask */
/*
* Default endianness state
*/
#ifdef CONFIG_CPU_ENDIAN_BE8
#define PSR_ENDSTATE PSR_E_BIT
#else
#define PSR_ENDSTATE 0
#endif
/*
* These are 'magic' values for PTRACE_PEEKUSR that return info about where a
* process is located in memory.
+1
View File
@@ -424,5 +424,6 @@
#define __NR_file_getattr (__NR_SYSCALL_BASE + 468)
#define __NR_file_setattr (__NR_SYSCALL_BASE + 469)
#define __NR_listns (__NR_SYSCALL_BASE + 470)
#define __NR_rseq_slice_yield (__NR_SYSCALL_BASE + 471)
#endif /* _ASM_UNISTD_EABI_H */
+1
View File
@@ -436,5 +436,6 @@
#define __NR_file_getattr (__NR_SYSCALL_BASE + 468)
#define __NR_file_setattr (__NR_SYSCALL_BASE + 469)
#define __NR_listns (__NR_SYSCALL_BASE + 470)
#define __NR_rseq_slice_yield (__NR_SYSCALL_BASE + 471)
#endif /* _ASM_UNISTD_OABI_H */
+1
View File
@@ -347,6 +347,7 @@
#define __NR_file_getattr 468
#define __NR_file_setattr 469
#define __NR_listns 470
#define __NR_rseq_slice_yield 471
#endif /* _ASM_UNISTD_32_H */
-29
View File
@@ -1,29 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
* Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#ifndef _ASM_SIGNAL_H
#define _ASM_SIGNAL_H
extern unsigned long __rt_sigtramp_template[2];
void do_signal(struct pt_regs *regs);
#include <asm-generic/signal.h>
#endif
+1
View File
@@ -346,6 +346,7 @@
#define __NR_file_getattr 468
#define __NR_file_setattr 469
#define __NR_listns 470
#define __NR_rseq_slice_yield 471
#endif /* _ASM_UNISTD_32_H */
+1
View File
@@ -18,5 +18,6 @@
#define HWCAP_LOONGARCH_LBT_MIPS (1 << 12)
#define HWCAP_LOONGARCH_PTW (1 << 13)
#define HWCAP_LOONGARCH_LSPW (1 << 14)
#define HWCAP_LOONGARCH_SCQ (1 << 15)
#endif /* _ASM_HWCAP_H */
+1
View File
@@ -105,6 +105,7 @@ struct kvm_fpu {
#define KVM_LOONGARCH_VM_FEAT_PV_STEALTIME 7
#define KVM_LOONGARCH_VM_FEAT_PTW 8
#define KVM_LOONGARCH_VM_FEAT_MSGINT 9
#define KVM_LOONGARCH_VM_FEAT_PV_PREEMPT 10
/* Device Control API on vcpu fd */
#define KVM_LOONGARCH_VCPU_CPUCFG 0
+1
View File
@@ -15,6 +15,7 @@
#define CPUCFG_KVM_FEATURE (CPUCFG_KVM_BASE + 4)
#define KVM_FEATURE_IPI 1
#define KVM_FEATURE_STEAL_TIME 2
#define KVM_FEATURE_PREEMPT 3
/* BIT 24 - 31 are features configurable by user space vmm */
#define KVM_FEATURE_VIRT_EXTIOI 24
#define KVM_FEATURE_USER_HCALL 25
+2
View File
@@ -292,6 +292,7 @@
#define __NR_landlock_create_ruleset 444
#define __NR_landlock_add_rule 445
#define __NR_landlock_restrict_self 446
#define __NR_memfd_secret 447
#define __NR_process_mrelease 448
#define __NR_futex_waitv 449
#define __NR_set_mempolicy_home_node 450
@@ -315,6 +316,7 @@
#define __NR_file_getattr 468
#define __NR_file_setattr 469
#define __NR_listns 470
#define __NR_rseq_slice_yield 471
#endif /* _ASM_UNISTD_32_H */
+2
View File
@@ -300,6 +300,7 @@
#define __NR_landlock_create_ruleset 444
#define __NR_landlock_add_rule 445
#define __NR_landlock_restrict_self 446
#define __NR_memfd_secret 447
#define __NR_process_mrelease 448
#define __NR_futex_waitv 449
#define __NR_set_mempolicy_home_node 450
@@ -323,6 +324,7 @@
#define __NR_file_getattr 468
#define __NR_file_setattr 469
#define __NR_listns 470
#define __NR_rseq_slice_yield 471
#endif /* _ASM_UNISTD_64_H */
+1
View File
@@ -443,6 +443,7 @@
#define __NR_file_getattr 468
#define __NR_file_setattr 469
#define __NR_listns 470
#define __NR_rseq_slice_yield 471
#endif /* _ASM_UNISTD_32_H */
+2
View File
@@ -50,6 +50,7 @@
#define EDOTDOT 73 /* RFS specific error */
#define EMULTIHOP 74 /* Multihop attempted */
#define EBADMSG 77 /* Not a data message */
#define EFSBADCRC EBADMSG /* Bad CRC detected */
#define ENAMETOOLONG 78 /* File name too long */
#define EOVERFLOW 79 /* Value too large for defined data type */
#define ENOTUNIQ 80 /* Name not unique on network */
@@ -88,6 +89,7 @@
#define EISCONN 133 /* Transport endpoint is already connected */
#define ENOTCONN 134 /* Transport endpoint is not connected */
#define EUCLEAN 135 /* Structure needs cleaning */
#define EFSCORRUPTED EUCLEAN /* Filesystem is corrupted */
#define ENOTNAM 137 /* Not a XENIX named type file */
#define ENAVAIL 138 /* No XENIX semaphores available */
#define EISNAM 139 /* Is a named type file */
+1
View File
@@ -399,5 +399,6 @@
#define __NR_file_getattr (__NR_Linux + 468)
#define __NR_file_setattr (__NR_Linux + 469)
#define __NR_listns (__NR_Linux + 470)
#define __NR_rseq_slice_yield (__NR_Linux + 471)
#endif /* _ASM_UNISTD_N32_H */
+1
View File
@@ -375,5 +375,6 @@
#define __NR_file_getattr (__NR_Linux + 468)
#define __NR_file_setattr (__NR_Linux + 469)
#define __NR_listns (__NR_Linux + 470)
#define __NR_rseq_slice_yield (__NR_Linux + 471)
#endif /* _ASM_UNISTD_N64_H */
+1
View File
@@ -445,5 +445,6 @@
#define __NR_file_getattr (__NR_Linux + 468)
#define __NR_file_setattr (__NR_Linux + 469)
#define __NR_listns (__NR_Linux + 470)
#define __NR_rseq_slice_yield (__NR_Linux + 471)
#endif /* _ASM_UNISTD_O32_H */
+1
View File
@@ -452,6 +452,7 @@
#define __NR_file_getattr 468
#define __NR_file_setattr 469
#define __NR_listns 470
#define __NR_rseq_slice_yield 471
#endif /* _ASM_UNISTD_32_H */
+1
View File
@@ -424,6 +424,7 @@
#define __NR_file_getattr 468
#define __NR_file_setattr 469
#define __NR_listns 470
#define __NR_rseq_slice_yield 471
#endif /* _ASM_UNISTD_64_H */
+4
View File
@@ -86,6 +86,7 @@ struct riscv_hwprobe {
#define RISCV_HWPROBE_EXT_ZICBOP (1ULL << 60)
#define RISCV_HWPROBE_EXT_ZILSD (1ULL << 61)
#define RISCV_HWPROBE_EXT_ZCLSD (1ULL << 62)
#define RISCV_HWPROBE_EXT_ZICFILP (1ULL << 63)
#define RISCV_HWPROBE_KEY_CPUPERF_0 5
#define RISCV_HWPROBE_MISALIGNED_UNKNOWN (0 << 0)
@@ -113,6 +114,9 @@ struct riscv_hwprobe {
#define RISCV_HWPROBE_KEY_VENDOR_EXT_SIFIVE_0 13
#define RISCV_HWPROBE_KEY_VENDOR_EXT_MIPS_0 14
#define RISCV_HWPROBE_KEY_ZICBOP_BLOCK_SIZE 15
#define RISCV_HWPROBE_KEY_IMA_EXT_1 16
#define RISCV_HWPROBE_EXT_ZICFISS (1ULL << 0)
/* Increase RISCV_HWPROBE_MAX_KEY when adding items. */
/* Flags */
+3
View File
@@ -192,6 +192,9 @@ enum KVM_RISCV_ISA_EXT_ID {
KVM_RISCV_ISA_EXT_ZFBFMIN,
KVM_RISCV_ISA_EXT_ZVFBFMIN,
KVM_RISCV_ISA_EXT_ZVFBFWMA,
KVM_RISCV_ISA_EXT_ZCLSD,
KVM_RISCV_ISA_EXT_ZILSD,
KVM_RISCV_ISA_EXT_ZALASR,
KVM_RISCV_ISA_EXT_MAX,
};
+37
View File
@@ -9,6 +9,7 @@
#ifndef __ASSEMBLER__
#include <linux/types.h>
#include <linux/const.h>
#define PTRACE_GETFDPIC 33
@@ -127,6 +128,42 @@ struct __riscv_v_regset_state {
*/
#define RISCV_MAX_VLENB (8192)
struct __sc_riscv_cfi_state {
unsigned long ss_ptr; /* shadow stack pointer */
};
#define PTRACE_CFI_BRANCH_LANDING_PAD_EN_BIT 0
#define PTRACE_CFI_BRANCH_LANDING_PAD_LOCK_BIT 1
#define PTRACE_CFI_BRANCH_EXPECTED_LANDING_PAD_BIT 2
#define PTRACE_CFI_SHADOW_STACK_EN_BIT 3
#define PTRACE_CFI_SHADOW_STACK_LOCK_BIT 4
#define PTRACE_CFI_SHADOW_STACK_PTR_BIT 5
#define PTRACE_CFI_BRANCH_LANDING_PAD_EN_STATE _BITUL(PTRACE_CFI_BRANCH_LANDING_PAD_EN_BIT)
#define PTRACE_CFI_BRANCH_LANDING_PAD_LOCK_STATE \
_BITUL(PTRACE_CFI_BRANCH_LANDING_PAD_LOCK_BIT)
#define PTRACE_CFI_BRANCH_EXPECTED_LANDING_PAD_STATE \
_BITUL(PTRACE_CFI_BRANCH_EXPECTED_LANDING_PAD_BIT)
#define PTRACE_CFI_SHADOW_STACK_EN_STATE _BITUL(PTRACE_CFI_SHADOW_STACK_EN_BIT)
#define PTRACE_CFI_SHADOW_STACK_LOCK_STATE _BITUL(PTRACE_CFI_SHADOW_STACK_LOCK_BIT)
#define PTRACE_CFI_SHADOW_STACK_PTR_STATE _BITUL(PTRACE_CFI_SHADOW_STACK_PTR_BIT)
#define PTRACE_CFI_STATE_INVALID_MASK ~(PTRACE_CFI_BRANCH_LANDING_PAD_EN_STATE | \
PTRACE_CFI_BRANCH_LANDING_PAD_LOCK_STATE | \
PTRACE_CFI_BRANCH_EXPECTED_LANDING_PAD_STATE | \
PTRACE_CFI_SHADOW_STACK_EN_STATE | \
PTRACE_CFI_SHADOW_STACK_LOCK_STATE | \
PTRACE_CFI_SHADOW_STACK_PTR_STATE)
struct __cfi_status {
__u64 cfi_state;
};
struct user_cfi_state {
struct __cfi_status cfi_status;
__u64 shstk_ptr;
};
#endif /* __ASSEMBLER__ */
#endif /* _ASM_RISCV_PTRACE_H */
+1
View File
@@ -10,6 +10,7 @@
/* The Magic number for signal context frame header. */
#define RISCV_V_MAGIC 0x53465457
#define RISCV_ZICFISS_MAGIC 0x9487
#define END_MAGIC 0x0
/* The size of END signal context header. */
+1
View File
@@ -318,6 +318,7 @@
#define __NR_file_getattr 468
#define __NR_file_setattr 469
#define __NR_listns 470
#define __NR_rseq_slice_yield 471
#endif /* _ASM_UNISTD_32_H */
+1
View File
@@ -328,6 +328,7 @@
#define __NR_file_getattr 468
#define __NR_file_setattr 469
#define __NR_listns 470
#define __NR_rseq_slice_yield 471
#endif /* _ASM_UNISTD_64_H */
-103
View File
@@ -1,103 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*************************************************************************
*
* enables user programs to display messages and control encryption
* on s390 tape devices
*
* Copyright IBM Corp. 2001, 2006
* Author(s): Michael Holzheu <holzheu@de.ibm.com>
*
*************************************************************************/
#ifndef _TAPE390_H
#define _TAPE390_H
#define TAPE390_DISPLAY _IOW('d', 1, struct display_struct)
/*
* The TAPE390_DISPLAY ioctl calls the Load Display command
* which transfers 17 bytes of data from the channel to the subsystem:
* - 1 format control byte, and
* - two 8-byte messages
*
* Format control byte:
* 0-2: New Message Overlay
* 3: Alternate Messages
* 4: Blink Message
* 5: Display Low/High Message
* 6: Reserved
* 7: Automatic Load Request
*
*/
typedef struct display_struct {
char cntrl;
char message1[8];
char message2[8];
} display_struct;
/*
* Tape encryption support
*/
struct tape390_crypt_info {
char capability;
char status;
char medium_status;
} __attribute__ ((packed));
/* Macros for "capable" field */
#define TAPE390_CRYPT_SUPPORTED_MASK 0x01
#define TAPE390_CRYPT_SUPPORTED(x) \
((x.capability & TAPE390_CRYPT_SUPPORTED_MASK))
/* Macros for "status" field */
#define TAPE390_CRYPT_ON_MASK 0x01
#define TAPE390_CRYPT_ON(x) (((x.status) & TAPE390_CRYPT_ON_MASK))
/* Macros for "medium status" field */
#define TAPE390_MEDIUM_LOADED_MASK 0x01
#define TAPE390_MEDIUM_ENCRYPTED_MASK 0x02
#define TAPE390_MEDIUM_ENCRYPTED(x) \
(((x.medium_status) & TAPE390_MEDIUM_ENCRYPTED_MASK))
#define TAPE390_MEDIUM_LOADED(x) \
(((x.medium_status) & TAPE390_MEDIUM_LOADED_MASK))
/*
* The TAPE390_CRYPT_SET ioctl is used to switch on/off encryption.
* The "encryption_capable" and "tape_status" fields are ignored for this ioctl!
*/
#define TAPE390_CRYPT_SET _IOW('d', 2, struct tape390_crypt_info)
/*
* The TAPE390_CRYPT_QUERY ioctl is used to query the encryption state.
*/
#define TAPE390_CRYPT_QUERY _IOR('d', 3, struct tape390_crypt_info)
/* Values for "kekl1/2_type" and "kekl1/2_type_on_tape" fields */
#define TAPE390_KEKL_TYPE_NONE 0
#define TAPE390_KEKL_TYPE_LABEL 1
#define TAPE390_KEKL_TYPE_HASH 2
struct tape390_kekl {
unsigned char type;
unsigned char type_on_tape;
char label[65];
} __attribute__ ((packed));
struct tape390_kekl_pair {
struct tape390_kekl kekl[2];
} __attribute__ ((packed));
/*
* The TAPE390_KEKL_SET ioctl is used to set Key Encrypting Key labels.
*/
#define TAPE390_KEKL_SET _IOW('d', 4, struct tape390_kekl_pair)
/*
* The TAPE390_KEKL_QUERY ioctl is used to query Key Encrypting Key labels.
*/
#define TAPE390_KEKL_QUERY _IOR('d', 5, struct tape390_kekl_pair)
#endif

Some files were not shown because too many files have changed in this diff Show More