diff --git a/Documentation/ABI/testing/sysfs-driver-hqm b/Documentation/ABI/testing/sysfs-driver-hqm index 197104b15df39e..08b95c341a706f 100644 --- a/Documentation/ABI/testing/sysfs-driver-hqm +++ b/Documentation/ABI/testing/sysfs-driver-hqm @@ -1,3 +1,142 @@ +What: /sys/bus/pci/devices/.../sequence_numbers/group_sns_per_queue +Date: August 3, 2018 +KernelVersion: TBD +Contact: gage.eads@intel.com +Description: Interface for configuring HQM load-balanced sequence numbers. + + The HQM has a fixed number of sequence numbers used for ordered + scheduling. They are divided among four sequence number groups. + A group can be configured to contain one queue with 1,024 + sequence numbers, or two queues with 512 sequence numbers each, + and so on, down to 32 queues with 32 sequence numbers each. + + When a load-balanced queue is configured with non-zero sequence + numbers, the driver finds a group configured for the same + number of sequence numbers and an available slot. If no such + groups are found, the queue cannot be configured. + + Once the first ordered queue is configured, the sequence number + configurations are locked. The driver returns an error on writes + to locked sequence number configurations. When all ordered + queues are unconfigured, the sequence number configurations can + be changed again. + + This file is only accessible for physical function HQM devices. + +What: /sys/bus/pci/devices/.../total_resources/num_atomic_inflights +What: /sys/bus/pci/devices/.../total_resources/num_dir_credit_pools +What: /sys/bus/pci/devices/.../total_resources/num_dir_credits +What: /sys/bus/pci/devices/.../total_resources/num_dir_ports +What: /sys/bus/pci/devices/.../total_resources/num_hist_list_entries +What: /sys/bus/pci/devices/.../total_resources/num_ldb_credit_pools +What: /sys/bus/pci/devices/.../total_resources/num_ldb_credits +What: /sys/bus/pci/devices/.../total_resources/num_ldb_ports +What: /sys/bus/pci/devices/.../total_resources/num_ldb_queues +What: /sys/bus/pci/devices/.../total_resources/num_sched_domains +Date: August 7, 2019 +KernelVersion: TBD +Contact: gage.eads@intel.com +Description: + The total_resources subdirectory contains read-only files that + indicate the total number of resources in the device. + + num_atomic_inflights: Total number of atomic inflights in the + device. Atomic inflights refers to the + on-device storage used by the atomic + scheduler. + + num_dir_credit_pools: Total number of directed credit pools in + the device. + + num_dir_credits: Total number of directed credits in the + device. + + num_dir_ports: Total number of directed ports (and + queues) in the device. + + num_hist_list_entries: Total number of history list entries in + the device. + + num_ldb_credit_pools: Total number of load-balanced credit + pools in the device. + + num_ldb_credits: Total number of load-balanced credits in + the device. + + num_ldb_ports: Total number of load-balanced ports in + the device. + + num_ldb_queues: Total number of load-balanced queues in + the device. + + num_sched_domains: Total number of scheduling domains in the + device. 
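As an illustration (not part of the patch), the total_resources attributes described above are plain read-only sysfs files, so an application can inventory the device before creating a scheduling domain. The sketch below reads a few of them; the PCI address in the path is a placeholder for the HQM device's actual bus/device/function.

/* Hypothetical userspace sketch: dump a subset of the total_resources files.
 * "0000:6d:00.0" is a placeholder BDF, not a value defined by this patch.
 */
#include <stdio.h>

int main(void)
{
	const char *attrs[] = { "num_sched_domains", "num_ldb_queues",
				"num_ldb_ports", "num_dir_ports",
				"num_ldb_credits", "num_dir_credits" };
	unsigned int i;

	for (i = 0; i < sizeof(attrs) / sizeof(attrs[0]); i++) {
		char path[128];
		unsigned int val;
		FILE *f;

		snprintf(path, sizeof(path),
			 "/sys/bus/pci/devices/0000:6d:00.0/total_resources/%s",
			 attrs[i]);

		f = fopen(path, "r");
		if (!f) {
			perror(path);
			continue;
		}

		if (fscanf(f, "%u", &val) == 1)
			printf("%s: %u\n", attrs[i], val);

		fclose(f);
	}

	return 0;
}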
+
+What:		/sys/bus/pci/devices/.../avail_resources/num_atomic_inflights
+What:		/sys/bus/pci/devices/.../avail_resources/num_dir_credit_pools
+What:		/sys/bus/pci/devices/.../avail_resources/num_dir_credits
+What:		/sys/bus/pci/devices/.../avail_resources/num_dir_ports
+What:		/sys/bus/pci/devices/.../avail_resources/num_hist_list_entries
+What:		/sys/bus/pci/devices/.../avail_resources/num_ldb_credit_pools
+What:		/sys/bus/pci/devices/.../avail_resources/num_ldb_credits
+What:		/sys/bus/pci/devices/.../avail_resources/num_ldb_ports
+What:		/sys/bus/pci/devices/.../avail_resources/num_ldb_queues
+What:		/sys/bus/pci/devices/.../avail_resources/num_sched_domains
+What:		/sys/bus/pci/devices/.../avail_resources/max_ctg_atm_inflights
+What:		/sys/bus/pci/devices/.../avail_resources/max_ctg_hl_entries
+Date:		August 7, 2019
+KernelVersion:	TBD
+Contact:	gage.eads@intel.com
+Description:
+		The avail_resources subdirectory contains read-only files that
+		indicate the available number of resources in the device.
+		"Available" here means resources that are not currently in use
+		by an application or, in the case of a physical function
+		device, assigned to a virtual function.
+
+		num_atomic_inflights: Available number of atomic inflights in
+				the device.
+
+		num_dir_credit_pools: Available number of directed credit
+				pools in the device.
+
+		num_dir_credits: Available number of directed credits in
+				the device.
+
+		num_dir_ports: Available number of directed ports (and
+				queues) in the device.
+
+		num_hist_list_entries: Available number of history list entries
+				in the device.
+
+		num_ldb_credit_pools: Available number of load-balanced credit
+				pools in the device.
+
+		num_ldb_credits: Available number of load-balanced credits
+				in the device.
+
+		num_ldb_ports: Available number of load-balanced ports
+				in the device.
+
+		num_ldb_queues: Available number of load-balanced queues
+				in the device.
+
+		num_sched_domains: Available number of scheduling domains
+				in the device.
+
+		max_ctg_atm_inflights: Maximum contiguous atomic inflights
+				available in the device.
+
+				Each scheduling domain is created with
+				an allocation of atomic inflights, and
+				each domain's allocation of inflights
+				must be contiguous.
+
+		max_ctg_hl_entries: Maximum contiguous history list entries
+				available in the device.
+
+				Each scheduling domain is created with
+				an allocation of history list entries,
+				and each domain's allocation of entries
+				must be contiguous.
+
 What:		/sys/bus/pci/drivers/hqm/module/parameters/log_level
 Date:		August 3, 2018
 KernelVersion:	TBD
@@ -15,3 +154,38 @@ Description: Interface for setting the driver's log level.
 		configuration descriptions, and function entry and exit points.
 		These messages are verbose, but they give a clear view into the
 		driver's behavior.
+
+What:		/sys/bus/pci/drivers/hqm/module/parameters/reset_timeout_s
+Date:		August 3, 2018
+KernelVersion:	TBD
+Contact:	gage.eads@intel.com
+Description:	Interface for setting the driver's reset timeout.
+		When a device reset (FLR) is issued, the driver waits for
+		user-space to stop using the device before allowing the FLR to
+		proceed, with a timeout. The device is considered in use if
+		there are any open domain device file descriptors or memory
+		mapped producer ports. (For PF device resets, this includes all
+		VF-owned domains and producer ports.)
+
+		The amount of time the driver waits for userspace to stop using
+		the device is controlled by the module parameter
+		reset_timeout_s, which is in units of seconds and defaults to
+		5.
If reset_timeout_s seconds elapse and any user is still + using the device, the driver zaps those processes' memory + mappings and marks their device file descriptors as invalid. + This is necessary because user processes that do not relinquish + their device mappings can interfere with processes that use the + device after the reset completes. To ensure that user processes + have enough time to clean up, reset_timeout_s can be increased. + +What: /sys/bus/pci/devices/.../dev_id +Date: August 6, 2019 +KernelVersion: TBD +Contact: gage.eads@intel.com +Description: Device ID used in /dev, i.e. /dev/hqm + + Each HQM PF and VF device is granted a unique ID by the kernel + driver, and this ID is used to construct the device's /dev + directory: /dev/hqm. This sysfs file can be read to + determine a device's ID, which allows the user to map a device + file to a PCI BDF. diff --git a/arch/x86/Makefile b/arch/x86/Makefile index 94df0868804bcb..57990d36b46be7 100644 --- a/arch/x86/Makefile +++ b/arch/x86/Makefile @@ -189,14 +189,15 @@ cfi-sections := $(call as-instr,.cfi_sections .debug_frame,-DCONFIG_AS_CFI_SECTI # does binutils support specific instructions? asinstr += $(call as-instr,pshufb %xmm0$(comma)%xmm0,-DCONFIG_AS_SSSE3=1) +sse2_instr := $(call as-instr,movapd %xmm0$(comma)%xmm0,-DCONFIG_AS_SSE2=1) avx_instr := $(call as-instr,vxorps %ymm0$(comma)%ymm1$(comma)%ymm2,-DCONFIG_AS_AVX=1) avx2_instr :=$(call as-instr,vpbroadcastb %xmm0$(comma)%ymm1,-DCONFIG_AS_AVX2=1) avx512_instr :=$(call as-instr,vpmovm2b %k1$(comma)%zmm5,-DCONFIG_AS_AVX512=1) sha1_ni_instr :=$(call as-instr,sha1msg1 %xmm0$(comma)%xmm1,-DCONFIG_AS_SHA1_NI=1) sha256_ni_instr :=$(call as-instr,sha256msg1 %xmm0$(comma)%xmm1,-DCONFIG_AS_SHA256_NI=1) -KBUILD_AFLAGS += $(cfi) $(cfi-sigframe) $(cfi-sections) $(asinstr) $(avx_instr) $(avx2_instr) $(avx512_instr) $(sha1_ni_instr) $(sha256_ni_instr) -KBUILD_CFLAGS += $(cfi) $(cfi-sigframe) $(cfi-sections) $(asinstr) $(avx_instr) $(avx2_instr) $(avx512_instr) $(sha1_ni_instr) $(sha256_ni_instr) +KBUILD_AFLAGS += $(cfi) $(cfi-sigframe) $(cfi-sections) $(asinstr) $(avx_instr) $(avx2_instr) $(avx512_instr) $(sha1_ni_instr) $(sha256_ni_instr) $(sse2_instr) +KBUILD_CFLAGS += $(cfi) $(cfi-sigframe) $(cfi-sections) $(asinstr) $(avx_instr) $(avx2_instr) $(avx512_instr) $(sha1_ni_instr) $(sha256_ni_instr) $(sse2_instr) KBUILD_LDFLAGS := -m elf_$(UTS_MACHINE) diff --git a/drivers/misc/hqm/Makefile b/drivers/misc/hqm/Makefile index 35141fb6662db2..28d219bde62b52 100644 --- a/drivers/misc/hqm/Makefile +++ b/drivers/misc/hqm/Makefile @@ -4,5 +4,11 @@ obj-$(CONFIG_INTEL_HQM) := hqm.o -hqm-objs := \ - hqm_main.o \ +hqm-objs := \ + hqm_main.o \ + hqm_intr.o \ + hqm_ioctl.o \ + hqm_mem.o \ + hqm_pf_ops.o \ + hqm_resource.o \ + hqm_smon.o \ diff --git a/drivers/misc/hqm/hqm_dp_ops.h b/drivers/misc/hqm/hqm_dp_ops.h new file mode 100644 index 00000000000000..ea63bd014c1e76 --- /dev/null +++ b/drivers/misc/hqm/hqm_dp_ops.h @@ -0,0 +1,83 @@ +/* SPDX-License-Identifier: GPL-2.0-only + * Copyright(c) 2017-2019 Intel Corporation + */ + +#ifndef __HQM_OPS_DP_H +#define __HQM_OPS_DP_H + +#include +#include +#include + +/* CPU feature enumeration macros */ +#define CPUID_DIRSTR_BIT 27 +#define CPUID_DIRSTR64B_BIT 28 + +static inline bool movdir64b_supported(void) +{ + int eax, ebx, ecx, edx; + + asm volatile("mov $7, %%eax\t\n" + "mov $0, %%ecx\t\n" + "cpuid\t\n" + : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx)); + + return ecx & (1 << CPUID_DIRSTR64B_BIT); +} + +/** + * movntdq_asm() - execute a movntdq 
instruction + * @addr: mapped producer port address + * @data0: least-significant 8B to move + * @data1: most-significant 8B to move + * + * This function executes movntdq, moving @data0 and @data1 into the address + * @addr. + */ +static inline void movntdq_asm(long long __iomem *addr, + long long data0, + long long data1) +{ +#ifdef CONFIG_AS_SSE2 + __asm__ __volatile__("movq %1, %%xmm0\n" + "movhps %2, %%xmm0\n" + "movntdq %%xmm0, %0" + : "=m" (*addr) : "r" (data0), "m" (data1)); +#endif +} + +static inline void hqm_movntdq(void *qe4, void __iomem *pp_addr) +{ + /* Move entire 64B cache line of QEs, 128 bits (16B) at a time. */ + long long *_qe = (long long *)qe4; + + kernel_fpu_begin(); + movntdq_asm(pp_addr + 0, _qe[0], _qe[1]); + /* (see comment below) */ + wmb(); + movntdq_asm(pp_addr + 0, _qe[2], _qe[3]); + /* (see comment below) */ + wmb(); + movntdq_asm(pp_addr + 0, _qe[4], _qe[5]); + /* (see comment below) */ + wmb(); + movntdq_asm(pp_addr + 0, _qe[6], _qe[7]); + kernel_fpu_end(); + /* movntdq requires an sfence between writes to the PP MMIO address */ + wmb(); +} + +static inline void hqm_movdir64b(void *qe4, void __iomem *pp_addr) +{ + asm volatile(".byte 0x66, 0x0f, 0x38, 0xf8, 0x02" + : + : "a" (pp_addr), "d" (qe4)); +} + +/* objtool's instruction decoder doesn't recognize the hard-coded machine + * instructions for movdir64b, which causes it to emit "undefined stack state" + * and "falls through" warnings. For now, ignore the functions. + */ +STACK_FRAME_NON_STANDARD(hqm_movdir64b); + +#endif /* __HQM_OPS_DP_H */ diff --git a/drivers/misc/hqm/hqm_hw_types.h b/drivers/misc/hqm/hqm_hw_types.h index 366b952fcfd1a4..89db6857ff3a19 100644 --- a/drivers/misc/hqm/hqm_hw_types.h +++ b/drivers/misc/hqm/hqm_hw_types.h @@ -5,6 +5,10 @@ #ifndef __HQM_HW_TYPES_H #define __HQM_HW_TYPES_H +#include +#include "hqm_osdep_types.h" +#include "hqm_osdep_list.h" + #define HQM_MAX_NUM_VFS 16 #define HQM_MAX_NUM_DOMAINS 32 #define HQM_MAX_NUM_LDB_QUEUES 128 @@ -26,7 +30,334 @@ #define HQM_MAX_QID_EMPTY_CHECK_LOOPS (32 * 64 * 1024 * (800 / 30)) #define HQM_HZ 800000000 +/* Used for HQM A-stepping workaround for hardware write buffer lock up issue */ +#define HQM_A_STEP_MAX_PORTS 128 + #define HQM_PF_DEV_ID 0x270B #define HQM_VF_DEV_ID 0x270C +/* Interrupt related macros */ +#define HQM_PF_NUM_NON_CQ_INTERRUPT_VECTORS 8 +#define HQM_PF_NUM_CQ_INTERRUPT_VECTORS 64 +#define HQM_PF_TOTAL_NUM_INTERRUPT_VECTORS \ + (HQM_PF_NUM_NON_CQ_INTERRUPT_VECTORS + \ + HQM_PF_NUM_CQ_INTERRUPT_VECTORS) +#define HQM_PF_NUM_COMPRESSED_MODE_VECTORS \ + (HQM_PF_NUM_NON_CQ_INTERRUPT_VECTORS + 1) +#define HQM_PF_NUM_PACKED_MODE_VECTORS HQM_PF_TOTAL_NUM_INTERRUPT_VECTORS +#define HQM_PF_COMPRESSED_MODE_CQ_VECTOR_ID HQM_PF_NUM_NON_CQ_INTERRUPT_VECTORS + +#define HQM_VF_NUM_NON_CQ_INTERRUPT_VECTORS 1 +#define HQM_VF_NUM_CQ_INTERRUPT_VECTORS 31 +#define HQM_VF_BASE_CQ_VECTOR_ID 0 +#define HQM_VF_LAST_CQ_VECTOR_ID 30 +#define HQM_VF_MBOX_VECTOR_ID 31 +#define HQM_VF_TOTAL_NUM_INTERRUPT_VECTORS \ + (HQM_VF_NUM_NON_CQ_INTERRUPT_VECTORS + \ + HQM_VF_NUM_CQ_INTERRUPT_VECTORS) + +#define HQM_PF_NUM_ALARM_INTERRUPT_VECTORS 4 +/* HQM ALARM interrupts */ +#define HQM_INT_ALARM 0 +/* VF to PF Mailbox Service Request */ +#define HQM_INT_VF_TO_PF_MBOX 1 +/* HCW Ingress Errors */ +#define HQM_INT_INGRESS_ERROR 3 + +#define HQM_ALARM_HW_SOURCE_SYS 0 +#define HQM_ALARM_HW_SOURCE_HQM 1 + +#define HQM_ALARM_HW_UNIT_CHP 1 +#define HQM_ALARM_HW_UNIT_LSP 3 + +#define HQM_ALARM_HW_CHP_AID_OUT_OF_CREDITS 6 +#define HQM_ALARM_HW_CHP_AID_ILLEGAL_ENQ 
7 +#define HQM_ALARM_HW_LSP_AID_EXCESS_TOKEN_POPS 15 +#define HQM_ALARM_SYS_AID_ILLEGAL_HCW 0 +#define HQM_ALARM_SYS_AID_ILLEGAL_QID 3 +#define HQM_ALARM_SYS_AID_DISABLED_QID 4 +#define HQM_ALARM_SYS_AID_ILLEGAL_CQID 6 + +/* Hardware-defined base addresses */ +#define HQM_LDB_PP_BASE 0x2100000 +#define HQM_LDB_PP_STRIDE 0x1000 +#define HQM_LDB_PP_BOUND \ + (HQM_LDB_PP_BASE + HQM_LDB_PP_STRIDE * HQM_MAX_NUM_LDB_PORTS) +#define HQM_DIR_PP_BASE 0x2000000 +#define HQM_DIR_PP_STRIDE 0x1000 +#define HQM_DIR_PP_BOUND \ + (HQM_DIR_PP_BASE + HQM_DIR_PP_STRIDE * HQM_MAX_NUM_DIR_PORTS) + +struct flow_ids { + u32 base; + u32 bound; +}; + +struct hqm_resource_id { + u32 phys_id; + u32 virt_id; + u8 vf_owned; + u8 vf_id; +}; + +struct hqm_freelist { + u32 base; + u32 bound; + u32 offset; +}; + +static inline u32 hqm_freelist_count(struct hqm_freelist *list) +{ + return (list->bound - list->base) - list->offset; +} + +struct hqm_hcw { + u64 data; + /* Word 3 */ + u16 opaque; + u8 qid; + u8 sched_type:2; + u8 priority:3; + u8 msg_type:3; + /* Word 4 */ + u16 lock_id; + u8 meas_lat:1; + u8 rsvd1:2; + u8 no_dec:1; + u8 cmp_id:4; + u8 cq_token:1; + u8 qe_comp:1; + u8 qe_frag:1; + u8 qe_valid:1; + u8 int_arm:1; + u8 error:1; + u8 rsvd:2; +}; + +struct hqm_ldb_queue { + struct hqm_list_entry domain_list; + struct hqm_list_entry func_list; + struct hqm_resource_id id; + struct hqm_resource_id domain_id; + u32 num_qid_inflights; + struct hqm_freelist aqed_freelist; + u8 sn_cfg_valid; + u32 sn_group; + u32 sn_slot; + u32 num_mappings; + u8 num_pending_additions; + u8 owned; + u8 configured; +}; + +/* Directed ports and queues are paired by nature, so the driver tracks them + * with a single data structure. + */ +struct hqm_dir_pq_pair { + struct hqm_list_entry domain_list; + struct hqm_list_entry func_list; + struct hqm_resource_id id; + struct hqm_resource_id domain_id; + u8 ldb_pool_used; + u8 dir_pool_used; + u8 queue_configured; + u8 port_configured; + u8 owned; + u8 enabled; + u32 ref_cnt; +}; + +enum hqm_qid_map_state { + /* The slot doesn't contain a valid queue mapping */ + HQM_QUEUE_UNMAPPED, + /* The slot contains a valid queue mapping */ + HQM_QUEUE_MAPPED, + /* The driver is mapping a queue into this slot */ + HQM_QUEUE_MAP_IN_PROGRESS, + /* The driver is unmapping a queue from this slot */ + HQM_QUEUE_UNMAP_IN_PROGRESS, + /* The driver is unmapping a queue from this slot, and once complete + * will replace it with another mapping. + */ + HQM_QUEUE_UNMAP_IN_PROGRESS_PENDING_MAP, +}; + +struct hqm_ldb_port_qid_map { + u16 qid; + u8 priority; + u16 pending_qid; + u8 pending_priority; + enum hqm_qid_map_state state; +}; + +struct hqm_ldb_port { + struct hqm_list_entry domain_list; + struct hqm_list_entry func_list; + struct hqm_resource_id id; + struct hqm_resource_id domain_id; + u8 ldb_pool_used; + u8 dir_pool_used; + u8 init_tkn_cnt; + u32 hist_list_entry_base; + u32 hist_list_entry_limit; + /* The qid_map represents the hardware QID mapping state. 
*/ + struct hqm_ldb_port_qid_map qid_map[HQM_MAX_NUM_QIDS_PER_LDB_CQ]; + u32 ref_cnt; + u8 num_pending_removals; + u8 num_mappings; + u8 owned; + u8 enabled; + u8 configured; +}; + +struct hqm_credit_pool { + struct hqm_list_entry domain_list; + struct hqm_list_entry func_list; + struct hqm_resource_id id; + struct hqm_resource_id domain_id; + u32 total_credits; + u32 avail_credits; + u8 owned; + u8 configured; +}; + +struct hqm_sn_group { + u32 mode; + u32 sequence_numbers_per_queue; + u32 slot_use_bitmap; + u32 id; +}; + +static inline bool hqm_sn_group_full(struct hqm_sn_group *group) +{ + u32 mask[6] = { + 0xffffffff, /* 32 SNs per queue */ + 0x0000ffff, /* 64 SNs per queue */ + 0x000000ff, /* 128 SNs per queue */ + 0x0000000f, /* 256 SNs per queue */ + 0x00000003, /* 512 SNs per queue */ + 0x00000001}; /* 1024 SNs per queue */ + + return group->slot_use_bitmap == mask[group->mode]; +} + +static inline int hqm_sn_group_alloc_slot(struct hqm_sn_group *group) +{ + int bound[6] = {32, 16, 8, 4, 2, 1}; + int i; + + for (i = 0; i < bound[group->mode]; i++) { + if (!(group->slot_use_bitmap & (1 << i))) { + group->slot_use_bitmap |= 1 << i; + return i; + } + } + + return -1; +} + +static inline void hqm_sn_group_free_slot(struct hqm_sn_group *group, int slot) +{ + group->slot_use_bitmap &= ~(1 << slot); +} + +static inline int hqm_sn_group_used_slots(struct hqm_sn_group *group) +{ + int i, cnt = 0; + + for (i = 0; i < 32; i++) + cnt += !!(group->slot_use_bitmap & (1 << i)); + + return cnt; +} + +struct hqm_domain { + struct hqm_function_resources *parent_func; + struct hqm_list_entry func_list; + struct hqm_list_head used_ldb_queues; + struct hqm_list_head used_ldb_ports; + struct hqm_list_head used_dir_pq_pairs; + struct hqm_list_head used_ldb_credit_pools; + struct hqm_list_head used_dir_credit_pools; + struct hqm_list_head avail_ldb_queues; + struct hqm_list_head avail_ldb_ports; + struct hqm_list_head avail_dir_pq_pairs; + struct hqm_list_head avail_ldb_credit_pools; + struct hqm_list_head avail_dir_credit_pools; + u32 total_hist_list_entries; + u32 avail_hist_list_entries; + u32 hist_list_entry_base; + u32 hist_list_entry_offset; + struct hqm_freelist qed_freelist; + struct hqm_freelist dqed_freelist; + struct hqm_freelist aqed_freelist; + struct hqm_resource_id id; + int num_pending_removals; + int num_pending_additions; + u8 configured; + u8 started; +}; + +struct hqm_bitmap; + +struct hqm_function_resources { + u32 num_avail_domains; + struct hqm_list_head avail_domains; + struct hqm_list_head used_domains; + u32 num_avail_ldb_queues; + struct hqm_list_head avail_ldb_queues; + u32 num_avail_ldb_ports; + struct hqm_list_head avail_ldb_ports; + u32 num_avail_dir_pq_pairs; + struct hqm_list_head avail_dir_pq_pairs; + struct hqm_bitmap *avail_hist_list_entries; + struct hqm_bitmap *avail_qed_freelist_entries; + struct hqm_bitmap *avail_dqed_freelist_entries; + struct hqm_bitmap *avail_aqed_freelist_entries; + u32 num_avail_ldb_credit_pools; + struct hqm_list_head avail_ldb_credit_pools; + u32 num_avail_dir_credit_pools; + struct hqm_list_head avail_dir_credit_pools; + u32 num_enabled_ldb_ports; /* (PF only) */ + u8 locked; /* (VF only) */ +}; + +/* After initialization, each resource in hqm_hw_resources is located in one of + * the following lists: + * -- The PF's available resources list. These are unconfigured resources owned + * by the PF and not allocated to an HQM scheduling domain. + * -- A VF's available resources list. 
These are VF-owned unconfigured + * resources not allocated to an HQM scheduling domain. + * -- A domain's available resources list. These are domain-owned unconfigured + * resources. + * -- A domain's used resources list. These are are domain-owned configured + * resources. + * + * A resource moves to a new list when a VF or domain is created or destroyed, + * or when the resource is configured. + */ +struct hqm_hw_resources { + struct hqm_ldb_queue ldb_queues[HQM_MAX_NUM_LDB_QUEUES]; + struct hqm_ldb_port ldb_ports[HQM_MAX_NUM_LDB_PORTS]; + struct hqm_dir_pq_pair dir_pq_pairs[HQM_MAX_NUM_DIR_PORTS]; + struct hqm_credit_pool ldb_credit_pools[HQM_MAX_NUM_LDB_CREDIT_POOLS]; + struct hqm_credit_pool dir_credit_pools[HQM_MAX_NUM_DIR_CREDIT_POOLS]; + struct hqm_sn_group sn_groups[HQM_MAX_NUM_SEQUENCE_NUMBER_GROUPS]; +}; + +struct hqm_hw { + /* BAR 0 address */ + void __iomem *csr_kva; + unsigned long csr_phys_addr; + /* BAR 2 address */ + void __iomem *func_kva; + unsigned long func_phys_addr; + + /* Resource tracking */ + struct hqm_hw_resources rsrcs; + struct hqm_function_resources pf; + struct hqm_function_resources vf[HQM_MAX_NUM_VFS]; + struct hqm_domain domains[HQM_MAX_NUM_DOMAINS]; +}; + #endif /* __HQM_HW_TYPES_H */ diff --git a/drivers/misc/hqm/hqm_intr.c b/drivers/misc/hqm/hqm_intr.c new file mode 100644 index 00000000000000..9c7969463882dd --- /dev/null +++ b/drivers/misc/hqm/hqm_intr.c @@ -0,0 +1,150 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright(c) 2017-2019 Intel Corporation */ + +#include +#include + +#include "hqm_resource.h" +#include "hqm_main.h" +#include "hqm_intr.h" + +void hqm_wake_thread(struct hqm_dev *dev, + struct hqm_cq_intr *intr, + enum hqm_wake_reason reason) +{ + switch (reason) { + case WAKE_CQ_INTR: + WRITE_ONCE(intr->wake, true); + break; + case WAKE_PORT_DISABLED: + WRITE_ONCE(intr->disabled, true); + break; + default: + break; + } + + wake_up_interruptible(&intr->wq_head); +} + +static inline bool wake_condition(struct hqm_cq_intr *intr, + struct hqm_dev *dev, + struct hqm_status *status) +{ + return (READ_ONCE(intr->wake) || + READ_ONCE(dev->reset_active) || + !READ_ONCE(status->valid) || + READ_ONCE(intr->disabled)); +} + +struct hqm_dequeue_qe { + u8 rsvd0[15]; + u8 cq_gen:1; + u8 rsvd1:7; +} __packed; + +/** + * hqm_cq_empty() - determine whether a CQ is empty + * @dev: struct hqm_dev pointer. + * @user_cq_va: User VA pointing to next CQ entry. + * @cq_gen: Current CQ generation bit. + * + * Return: + * Returns 1 if empty, 0 if non-empty, or < 0 if an error occurs. + */ +static int hqm_cq_empty(struct hqm_dev *dev, u64 user_cq_va, u8 cq_gen) +{ + struct hqm_dequeue_qe qe; + + if (copy_from_user(&qe, (void __user *)user_cq_va, sizeof(qe))) { + HQM_ERR(dev->hqm_device, + "[%s()] Invalid cq_va pointer\n", __func__); + return -EFAULT; + } + + return qe.cq_gen != cq_gen; +} + +int hqm_block_on_cq_interrupt(struct hqm_dev *dev, + struct hqm_status *status, + int domain_id, + int port_id, + bool is_ldb, + u64 cq_va, + u8 cq_gen, + bool arm) +{ + struct hqm_cq_intr *intr; + int ret = 0; + + if (is_ldb && port_id >= HQM_MAX_NUM_LDB_PORTS) + return -EINVAL; + if (!is_ldb && port_id >= HQM_MAX_NUM_DIR_PORTS) + return -EINVAL; + + if (is_ldb) + intr = &dev->intr.ldb_cq_intr[port_id]; + else + intr = &dev->intr.dir_cq_intr[port_id]; + + /* If the user assigns more CQs to a VF resource group than there are + * interrupt vectors (31 per VF), then some of its CQs won't be + * configured for interrupts. 
Also on A-stepping hardware, the software + * workaround for a write-buffer lockup issue requires CQs to not be + * configured for interrupts. + */ + if (unlikely(!intr->configured)) + return -EINVAL; + + /* This function requires that only one thread process the CQ at a time. + * Otherwise, the wake condition could become false in the time between + * the ISR calling wake_up_interruptible() and the thread checking its + * wake condition. + */ + mutex_lock(&intr->mutex); + + /* Return early if the port's interrupt is disabled */ + if (READ_ONCE(intr->disabled)) { + mutex_unlock(&intr->mutex); + return -EACCES; + } + + HQM_INFO(dev->hqm_device, + "Thread is blocking on %s port %d's interrupt\n", + (is_ldb) ? "LDB" : "DIR", port_id); + + /* Don't block if the CQ is non-empty */ + ret = hqm_cq_empty(dev, cq_va, cq_gen); + if (ret != 1) + goto error; + + if (arm) { + ret = dev->ops->arm_cq_interrupt(dev, + domain_id, + port_id, + is_ldb); + if (ret) + goto error; + } + + ret = wait_event_interruptible(intr->wq_head, + wake_condition(intr, dev, status)); + + if (ret == 0) { + if (READ_ONCE(dev->reset_active) || + !READ_ONCE(status->valid)) + ret = -EINTR; + else if (READ_ONCE(intr->disabled)) + ret = -EACCES; + } + + HQM_INFO(dev->hqm_device, + "Thread is unblocked from %s port %d's interrupt\n", + (is_ldb) ? "LDB" : "DIR", port_id); + + WRITE_ONCE(intr->wake, false); + +error: + mutex_unlock(&intr->mutex); + + return ret; +} diff --git a/drivers/misc/hqm/hqm_intr.h b/drivers/misc/hqm/hqm_intr.h new file mode 100644 index 00000000000000..3e0288bab0817d --- /dev/null +++ b/drivers/misc/hqm/hqm_intr.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: GPL-2.0-only + * Copyright(c) 2017-2019 Intel Corporation + */ + +#ifndef __HQM_INTR_H +#define __HQM_INTR_H + +#include +#include "hqm_main.h" + +int hqm_block_on_cq_interrupt(struct hqm_dev *dev, + struct hqm_status *status, + int domain_id, + int port_id, + bool is_ldb, + u64 cq_va, + u8 cq_gen, + bool arm); + +enum hqm_wake_reason { + WAKE_CQ_INTR, + WAKE_DEV_RESET, + WAKE_PORT_DISABLED +}; + +void hqm_wake_thread(struct hqm_dev *dev, + struct hqm_cq_intr *intr, + enum hqm_wake_reason reason); + +#endif /* __HQM_INTR_H */ diff --git a/drivers/misc/hqm/hqm_ioctl.c b/drivers/misc/hqm/hqm_ioctl.c new file mode 100644 index 00000000000000..2fd8ee7ab39a37 --- /dev/null +++ b/drivers/misc/hqm/hqm_ioctl.c @@ -0,0 +1,1061 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright(c) 2017-2019 Intel Corporation */ + +#include +#include +#include +#include +#include + +#include "hqm_ioctl.h" +#include "hqm_main.h" +#include "hqm_mem.h" +#include "hqm_intr.h" +#include +#include "hqm_resource.h" + +typedef int (*hqm_domain_ioctl_callback_fn_t)(struct hqm_dev *dev, + struct hqm_status *status, + unsigned long arg, + u32 domain_id); + +/* The HQM domain ioctl callback template minimizes replication of boilerplate + * code to copy arguments, acquire and release the resource lock, and execute + * the command. The arguments and response structure name should have the + * format hqm__args. 
+ */ +#define HQM_DOMAIN_IOCTL_CALLBACK_TEMPLATE(lower_name) \ +static int hqm_domain_ioctl_##lower_name(struct hqm_dev *dev, \ + struct hqm_status *status, \ + unsigned long user_arg, \ + u32 domain_id) \ +{ \ + struct hqm_##lower_name##_args arg; \ + struct hqm_cmd_response response = {0}; \ + int ret; \ + response.status = 0; \ + \ + HQM_INFO(dev->hqm_device, "Entering %s()\n", __func__); \ + \ + if (copy_from_user(&arg, \ + (void __user *)user_arg, \ + sizeof(struct hqm_##lower_name##_args))) {\ + HQM_ERR(dev->hqm_device, \ + "[%s()] Invalid ioctl argument pointer\n", \ + __func__); \ + return -EFAULT; \ + } \ + \ + mutex_lock(&dev->resource_mutex); \ + \ + ret = dev->ops->lower_name(&dev->hw, \ + domain_id, \ + &arg, \ + &response); \ + \ + mutex_unlock(&dev->resource_mutex); \ + \ + if (copy_to_user((void __user *)arg.response, \ + &response, \ + sizeof(struct hqm_cmd_response))) { \ + HQM_ERR(dev->hqm_device, \ + "[%s()] Invalid ioctl response pointer\n", \ + __func__); \ + return -EFAULT; \ + } \ + \ + HQM_INFO(dev->hqm_device, "Exiting %s()\n", __func__); \ + \ + return ret; \ +} + +HQM_DOMAIN_IOCTL_CALLBACK_TEMPLATE(create_ldb_pool) +HQM_DOMAIN_IOCTL_CALLBACK_TEMPLATE(create_dir_pool) +HQM_DOMAIN_IOCTL_CALLBACK_TEMPLATE(create_ldb_queue) +HQM_DOMAIN_IOCTL_CALLBACK_TEMPLATE(create_dir_queue) +HQM_DOMAIN_IOCTL_CALLBACK_TEMPLATE(start_domain) +HQM_DOMAIN_IOCTL_CALLBACK_TEMPLATE(map_qid) +HQM_DOMAIN_IOCTL_CALLBACK_TEMPLATE(unmap_qid) +HQM_DOMAIN_IOCTL_CALLBACK_TEMPLATE(get_ldb_queue_depth) +HQM_DOMAIN_IOCTL_CALLBACK_TEMPLATE(get_dir_queue_depth) +HQM_DOMAIN_IOCTL_CALLBACK_TEMPLATE(pending_port_unmaps) + +/* Port enable/disable ioctls don't use the callback template macro because + * they have additional CQ interrupt management logic. + */ +static int hqm_domain_ioctl_enable_ldb_port(struct hqm_dev *dev, + struct hqm_status *status, + unsigned long user_arg, + u32 domain_id) +{ + struct hqm_enable_ldb_port_args arg; + struct hqm_cmd_response response = {0}; + int ret; + + response.status = 0; + + HQM_INFO(dev->hqm_device, "Entering %s()\n", __func__); + + if (copy_from_user(&arg, + (void __user *)user_arg, + sizeof(struct hqm_enable_ldb_port_args))) { + HQM_ERR(dev->hqm_device, + "[%s()] Invalid ioctl argument pointer\n", + __func__); + return -EFAULT; + } + + mutex_lock(&dev->resource_mutex); + + ret = dev->ops->enable_ldb_port(&dev->hw, domain_id, &arg, &response); + + /* Allow threads to block on this port's CQ interrupt */ + if (!ret) + WRITE_ONCE(dev->intr.ldb_cq_intr[arg.port_id].disabled, false); + + mutex_unlock(&dev->resource_mutex); + + if (copy_to_user((void __user *)arg.response, + &response, + sizeof(struct hqm_cmd_response))) { + HQM_ERR(dev->hqm_device, + "[%s()] Invalid ioctl response pointer\n", + __func__); + return -EFAULT; + } + + HQM_INFO(dev->hqm_device, "Exiting %s()\n", __func__); + + return ret; +} + +static int hqm_domain_ioctl_enable_dir_port(struct hqm_dev *dev, + struct hqm_status *status, + unsigned long user_arg, + u32 domain_id) +{ + struct hqm_enable_dir_port_args arg; + struct hqm_cmd_response response = {0}; + int ret; + + response.status = 0; + + HQM_INFO(dev->hqm_device, "Entering %s()\n", __func__); + + if (copy_from_user(&arg, + (void __user *)user_arg, + sizeof(struct hqm_enable_dir_port_args))) { + HQM_ERR(dev->hqm_device, + "[%s()] Invalid ioctl argument pointer\n", + __func__); + return -EFAULT; + } + + mutex_lock(&dev->resource_mutex); + + ret = dev->ops->enable_dir_port(&dev->hw, domain_id, &arg, &response); + + /* Allow threads to block on 
this port's CQ interrupt */ + if (!ret) + WRITE_ONCE(dev->intr.dir_cq_intr[arg.port_id].disabled, false); + + mutex_unlock(&dev->resource_mutex); + + if (copy_to_user((void __user *)arg.response, + &response, + sizeof(struct hqm_cmd_response))) { + HQM_ERR(dev->hqm_device, + "[%s()] Invalid ioctl response pointer\n", + __func__); + return -EFAULT; + } + + HQM_INFO(dev->hqm_device, "Exiting %s()\n", __func__); + + return ret; +} + +static int hqm_domain_ioctl_disable_ldb_port(struct hqm_dev *dev, + struct hqm_status *status, + unsigned long user_arg, + u32 domain_id) +{ + struct hqm_disable_ldb_port_args arg; + struct hqm_cmd_response response = {0}; + int ret; + + response.status = 0; + + HQM_INFO(dev->hqm_device, "Entering %s()\n", __func__); + + if (copy_from_user(&arg, + (void __user *)user_arg, + sizeof(struct hqm_disable_ldb_port_args))) { + HQM_ERR(dev->hqm_device, + "[%s()] Invalid ioctl argument pointer\n", + __func__); + return -EFAULT; + } + + mutex_lock(&dev->resource_mutex); + + ret = dev->ops->disable_ldb_port(&dev->hw, domain_id, &arg, &response); + + /* Wake threads blocked on this port's CQ interrupt, and prevent + * subsequent attempts to block on it. + */ + if (!ret) + hqm_wake_thread(dev, + &dev->intr.ldb_cq_intr[arg.port_id], + WAKE_PORT_DISABLED); + + mutex_unlock(&dev->resource_mutex); + + if (copy_to_user((void __user *)arg.response, + &response, + sizeof(struct hqm_cmd_response))) { + HQM_ERR(dev->hqm_device, + "[%s()] Invalid ioctl response pointer\n", + __func__); + return -EFAULT; + } + + HQM_INFO(dev->hqm_device, "Exiting %s()\n", __func__); + + return ret; +} + +static int hqm_domain_ioctl_disable_dir_port(struct hqm_dev *dev, + struct hqm_status *status, + unsigned long user_arg, + u32 domain_id) +{ + struct hqm_disable_dir_port_args arg; + struct hqm_cmd_response response = {0}; + int ret; + + response.status = 0; + + HQM_INFO(dev->hqm_device, "Entering %s()\n", __func__); + + if (copy_from_user(&arg, + (void __user *)user_arg, + sizeof(struct hqm_disable_dir_port_args))) { + HQM_ERR(dev->hqm_device, + "[%s()] Invalid ioctl argument pointer\n", + __func__); + return -EFAULT; + } + + mutex_lock(&dev->resource_mutex); + + ret = dev->ops->disable_dir_port(&dev->hw, domain_id, &arg, &response); + + /* Wake threads blocked on this port's CQ interrupt, and prevent + * subsequent attempts to block on it. + */ + if (!ret) + hqm_wake_thread(dev, + &dev->intr.dir_cq_intr[arg.port_id], + WAKE_PORT_DISABLED); + + mutex_unlock(&dev->resource_mutex); + + if (copy_to_user((void __user *)arg.response, + &response, + sizeof(struct hqm_cmd_response))) { + HQM_ERR(dev->hqm_device, + "[%s()] Invalid ioctl response pointer\n", + __func__); + return -EFAULT; + } + + HQM_INFO(dev->hqm_device, "Exiting %s()\n", __func__); + + return ret; +} + +/* Port creation ioctls don't use the callback template macro because they have + * a number of OS-dependent memory operations. 
+ */ +static int hqm_domain_ioctl_create_ldb_port(struct hqm_dev *dev, + struct hqm_status *status, + unsigned long user_arg, + u32 domain_id) +{ + struct hqm_create_ldb_port_args arg; + struct hqm_cmd_response response; + struct hqm_domain_dev *domain; + dma_addr_t pc_dma_base = 0; + dma_addr_t cq_dma_base = 0; + void *pc_base = NULL; + void *cq_base = NULL; + int ret; + + HQM_INFO(dev->hqm_device, "Entering %s()\n", __func__); + + response.status = 0; + + if (copy_from_user(&arg, (void __user *)user_arg, sizeof(arg))) { + HQM_ERR(dev->hqm_device, + "[%s()] Invalid ioctl argument pointer\n", __func__); + return -EFAULT; + } + + mutex_lock(&dev->resource_mutex); + + if (domain_id >= HQM_MAX_NUM_DOMAINS) { + response.status = HQM_ST_INVALID_DOMAIN_ID; + ret = -EPERM; + goto unlock; + } + + domain = &dev->sched_domains[domain_id]; + + cq_base = dma_alloc_coherent(&dev->pdev->dev, + HQM_LDB_CQ_MAX_SIZE, + &cq_dma_base, + GFP_KERNEL); + if (!cq_base) { + response.status = HQM_ST_NO_MEMORY; + ret = -ENOMEM; + goto unlock; + } + + pc_base = dma_alloc_coherent(&dev->pdev->dev, + PAGE_SIZE, + &pc_dma_base, + GFP_KERNEL); + if (!pc_base) { + response.status = HQM_ST_NO_MEMORY; + ret = -ENOMEM; + goto unlock; + } + + ret = dev->ops->create_ldb_port(&dev->hw, + domain_id, + &arg, + (uintptr_t)pc_dma_base, + (uintptr_t)cq_dma_base, + &response); + if (ret) + goto unlock; + + /* HQM A-stepping workaround for hardware write buffer lock up issue: + * limit the maximum configured ports to < 128 and disable CQ occupancy + * interrupts. + */ + if (dev->revision >= HQM_REV_B0) { + u16 threshold = arg.cq_depth_threshold; + + ret = dev->ops->enable_ldb_cq_interrupts(dev, + response.id, + threshold); + if (ret) + /* Internal error, don't unwind port creation */ + goto unlock; + } + + /* Fill out the per-port memory tracking structure */ + dev->ldb_port_mem[response.id].domain_id = domain_id; + dev->ldb_port_mem[response.id].cq_base = cq_base; + dev->ldb_port_mem[response.id].pc_base = pc_base; + dev->ldb_port_mem[response.id].cq_dma_base = cq_dma_base; + dev->ldb_port_mem[response.id].pc_dma_base = pc_dma_base; + dev->ldb_port_mem[response.id].valid = true; + +unlock: + if (ret) { + HQM_ERR(dev->hqm_device, "[%s()]: Error %s\n", + __func__, hqm_error_strings[response.status]); + + if (cq_dma_base) + dma_free_coherent(&dev->pdev->dev, + HQM_LDB_CQ_MAX_SIZE, + cq_base, + cq_dma_base); + if (pc_dma_base) + dma_free_coherent(&dev->pdev->dev, + PAGE_SIZE, + pc_base, + pc_dma_base); + } else { + HQM_INFO(dev->hqm_device, "CQ PA: 0x%llx\n", + virt_to_phys(cq_base)); + HQM_INFO(dev->hqm_device, "CQ IOVA: 0x%llx\n", cq_dma_base); + } + + mutex_unlock(&dev->resource_mutex); + + if (copy_to_user((void __user *)arg.response, + &response, + sizeof(struct hqm_cmd_response))) { + HQM_ERR(dev->hqm_device, + "[%s()] Invalid ioctl response pointer\n", __func__); + return -EFAULT; + } + + HQM_INFO(dev->hqm_device, "Exiting %s()\n", __func__); + + return ret; +} + +static int hqm_domain_ioctl_create_dir_port(struct hqm_dev *dev, + struct hqm_status *status, + unsigned long user_arg, + u32 domain_id) +{ + struct hqm_create_dir_port_args arg; + struct hqm_cmd_response response; + struct hqm_domain_dev *domain; + dma_addr_t pc_dma_base = 0; + dma_addr_t cq_dma_base = 0; + void *pc_base = NULL; + void *cq_base = NULL; + int ret; + + HQM_INFO(dev->hqm_device, "Entering %s()\n", __func__); + + response.status = 0; + + if (copy_from_user(&arg, (void __user *)user_arg, sizeof(arg))) { + HQM_ERR(dev->hqm_device, + "[%s()] Invalid ioctl 
argument pointer\n", __func__); + return -EFAULT; + } + + mutex_lock(&dev->resource_mutex); + + if (domain_id >= HQM_MAX_NUM_DOMAINS) { + response.status = HQM_ST_INVALID_DOMAIN_ID; + ret = -EPERM; + goto unlock; + } + + domain = &dev->sched_domains[domain_id]; + + cq_base = dma_alloc_coherent(&dev->pdev->dev, + HQM_DIR_CQ_MAX_SIZE, + &cq_dma_base, + GFP_KERNEL); + if (!cq_base) { + response.status = HQM_ST_NO_MEMORY; + ret = -ENOMEM; + goto unlock; + } + + pc_base = dma_alloc_coherent(&dev->pdev->dev, + PAGE_SIZE, + &pc_dma_base, + GFP_KERNEL); + if (!pc_base) { + response.status = HQM_ST_NO_MEMORY; + ret = -ENOMEM; + goto unlock; + } + + ret = dev->ops->create_dir_port(&dev->hw, + domain_id, + &arg, + (uintptr_t)pc_dma_base, + (uintptr_t)cq_dma_base, + &response); + if (ret) + goto unlock; + + /* HQM A-stepping workaround for hardware write buffer lock up issue: + * limit the maximum configured ports to < 128 and disable CQ occupancy + * interrupts. + */ + if (dev->revision >= HQM_REV_B0) { + u16 threshold = arg.cq_depth_threshold; + + ret = dev->ops->enable_dir_cq_interrupts(dev, + response.id, + threshold); + if (ret) + /* Internal error, don't unwind port creation */ + goto unlock; + } + + /* Fill out the per-port memory tracking structure */ + dev->dir_port_mem[response.id].domain_id = domain_id; + dev->dir_port_mem[response.id].cq_base = cq_base; + dev->dir_port_mem[response.id].pc_base = pc_base; + dev->dir_port_mem[response.id].cq_dma_base = cq_dma_base; + dev->dir_port_mem[response.id].pc_dma_base = pc_dma_base; + dev->dir_port_mem[response.id].valid = true; + +unlock: + if (ret) { + HQM_ERR(dev->hqm_device, "[%s()]: Error %s\n", + __func__, hqm_error_strings[response.status]); + + if (cq_dma_base) + dma_free_coherent(&dev->pdev->dev, + HQM_DIR_CQ_MAX_SIZE, + cq_base, + cq_dma_base); + if (pc_dma_base) + dma_free_coherent(&dev->pdev->dev, + PAGE_SIZE, + pc_base, + pc_dma_base); + } else { + HQM_INFO(dev->hqm_device, "CQ PA: 0x%llx\n", + virt_to_phys(cq_base)); + HQM_INFO(dev->hqm_device, "CQ IOVA: 0x%llx\n", cq_dma_base); + } + + mutex_unlock(&dev->resource_mutex); + + if (copy_to_user((void __user *)arg.response, + &response, + sizeof(struct hqm_cmd_response))) { + HQM_ERR(dev->hqm_device, + "[%s()] Invalid ioctl response pointer\n", __func__); + return -EFAULT; + } + + HQM_INFO(dev->hqm_device, "Exiting %s()\n", __func__); + + return ret; +} + +static int hqm_domain_ioctl_block_on_cq_interrupt(struct hqm_dev *dev, + struct hqm_status *status, + unsigned long user_arg, + u32 domain_id) +{ + struct hqm_block_on_cq_interrupt_args arg; + struct hqm_cmd_response response; + int ret = 0; + + response.status = 0; + + HQM_INFO(dev->hqm_device, "Entering %s()\n", __func__); + + if (copy_from_user(&arg, + (void __user *)user_arg, + sizeof(struct hqm_block_on_cq_interrupt_args))) { + HQM_ERR(dev->hqm_device, + "[%s()] Invalid ioctl argument pointer\n", __func__); + return -EFAULT; + } + + ret = hqm_block_on_cq_interrupt(dev, + status, + domain_id, + arg.port_id, + arg.is_ldb, + arg.cq_va, + arg.cq_gen, + arg.arm); + + if (copy_to_user((void __user *)arg.response, + &response, + sizeof(struct hqm_cmd_response))) { + HQM_ERR(dev->hqm_device, + "[%s()] Invalid ioctl response pointer\n", __func__); + return -EFAULT; + } + + HQM_INFO(dev->hqm_device, "Exiting %s()\n", __func__); + + return ret; +} + +static int hqm_domain_ioctl_enqueue_domain_alert(struct hqm_dev *dev, + struct hqm_status *status, + unsigned long user_arg, + u32 domain_id) +{ + struct hqm_enqueue_domain_alert_args arg; + 
struct hqm_domain_dev *domain; + struct hqm_domain_alert alert; + + HQM_INFO(dev->hqm_device, "Entering %s()\n", __func__); + + if (copy_from_user(&arg, + (void __user *)user_arg, + sizeof(struct hqm_enqueue_domain_alert_args))) { + HQM_ERR(dev->hqm_device, + "[%s()] Invalid ioctl argument pointer\n", __func__); + return -EFAULT; + } + + domain = &dev->sched_domains[domain_id]; + + /* Grab the alert mutex to access the read and write indexes */ + if (mutex_lock_interruptible(&domain->alert_mutex)) + return -ERESTARTSYS; + + /* If there's no space for this notification, return */ + if ((domain->alert_wr_idx - domain->alert_rd_idx) == + (HQM_DOMAIN_ALERT_RING_SIZE - 1)) { + mutex_unlock(&domain->alert_mutex); + return 0; + } + + alert.alert_id = HQM_DOMAIN_ALERT_USER; + alert.aux_alert_data = arg.aux_alert_data; + + domain->alerts[domain->alert_wr_idx++] = alert; + + mutex_unlock(&domain->alert_mutex); + + wake_up_interruptible(&domain->wq_head); + + HQM_INFO(dev->hqm_device, "Exiting %s()\n", __func__); + + return 0; +} + +static hqm_domain_ioctl_callback_fn_t +hqm_domain_ioctl_callback_fns[NUM_HQM_DOMAIN_CMD] = { + hqm_domain_ioctl_create_ldb_pool, + hqm_domain_ioctl_create_dir_pool, + hqm_domain_ioctl_create_ldb_queue, + hqm_domain_ioctl_create_dir_queue, + hqm_domain_ioctl_create_ldb_port, + hqm_domain_ioctl_create_dir_port, + hqm_domain_ioctl_start_domain, + hqm_domain_ioctl_map_qid, + hqm_domain_ioctl_unmap_qid, + hqm_domain_ioctl_enable_ldb_port, + hqm_domain_ioctl_enable_dir_port, + hqm_domain_ioctl_disable_ldb_port, + hqm_domain_ioctl_disable_dir_port, + hqm_domain_ioctl_block_on_cq_interrupt, + hqm_domain_ioctl_enqueue_domain_alert, + hqm_domain_ioctl_get_ldb_queue_depth, + hqm_domain_ioctl_get_dir_queue_depth, + hqm_domain_ioctl_pending_port_unmaps, +}; + +int hqm_domain_ioctl_dispatcher(struct hqm_dev *dev, + struct hqm_status *st, + unsigned int cmd, + unsigned long arg, + u32 id) +{ + if (_IOC_NR(cmd) >= NUM_HQM_DOMAIN_CMD) { + HQM_ERR(dev->hqm_device, + "[%s()] Unexpected HQM DOMAIN command %d\n", + __func__, _IOC_NR(cmd)); + return -1; + } + + return hqm_domain_ioctl_callback_fns[_IOC_NR(cmd)](dev, st, arg, id); +} + +typedef int (*hqm_ioctl_callback_fn_t)(struct hqm_dev *dev, unsigned long arg); + +/* [7:0]: device revision, [15:8]: device version */ +#define HQM_SET_DEVICE_VERSION(ver, rev) (((ver) << 8) | (rev)) + +static int hqm_ioctl_get_device_version(struct hqm_dev *dev, + unsigned long user_arg) +{ + struct hqm_get_device_version_args arg; + struct hqm_cmd_response response; + + HQM_INFO(dev->hqm_device, "Entering %s()\n", __func__); + + response.status = 0; + response.id = HQM_SET_DEVICE_VERSION(1, dev->revision); + + if (copy_from_user(&arg, (void __user *)user_arg, sizeof(arg))) { + HQM_ERR(dev->hqm_device, + "[%s()] Invalid ioctl argument pointer\n", __func__); + return -EFAULT; + } + + if (copy_to_user((void __user *)arg.response, + &response, + sizeof(struct hqm_cmd_response))) { + HQM_ERR(dev->hqm_device, + "[%s()] Invalid ioctl response pointer\n", __func__); + return -EFAULT; + } + + HQM_INFO(dev->hqm_device, "Exiting %s()\n", __func__); + + return 0; +} + +static int hqm_ioctl_create_sched_domain(struct hqm_dev *dev, + unsigned long user_arg) +{ + struct hqm_create_sched_domain_args arg; + struct hqm_cmd_response response; + int ret; + + response.status = 0; + + HQM_INFO(dev->hqm_device, "Entering %s()\n", __func__); + + if (copy_from_user(&arg, + (void __user *)user_arg, + sizeof(struct hqm_create_sched_domain_args))) { + HQM_ERR(dev->hqm_device, + "[%s()] 
Invalid ioctl argument pointer\n", __func__); + return -EFAULT; + } + + mutex_lock(&dev->resource_mutex); + + if (dev->domain_reset_failed) { + response.status = HQM_ST_DOMAIN_RESET_FAILED; + ret = -EINVAL; + goto unlock; + } + + ret = dev->ops->create_sched_domain(&dev->hw, &arg, &response); + if (ret) + goto unlock; + + ret = hqm_add_domain_device_file(dev, response.id); + if (ret) + goto unlock; + +unlock: + mutex_unlock(&dev->resource_mutex); + + if (copy_to_user((void __user *)arg.response, + &response, + sizeof(struct hqm_cmd_response))) { + HQM_ERR(dev->hqm_device, + "[%s()] Invalid ioctl response pointer\n", __func__); + return -EFAULT; + } + + HQM_INFO(dev->hqm_device, "Exiting %s()\n", __func__); + + return ret; +} + +static int hqm_ioctl_get_num_resources(struct hqm_dev *dev, + unsigned long user_arg) +{ + struct hqm_get_num_resources_args arg; + int ret; + + HQM_INFO(dev->hqm_device, "Entering %s()\n", __func__); + + mutex_lock(&dev->resource_mutex); + + ret = dev->ops->get_num_resources(&dev->hw, &arg); + + mutex_unlock(&dev->resource_mutex); + + if (copy_to_user((void __user *)user_arg, + &arg, + sizeof(struct hqm_get_num_resources_args))) { + HQM_ERR(dev->hqm_device, "Invalid HQM resources pointer\n"); + return -EFAULT; + } + + HQM_INFO(dev->hqm_device, "Exiting %s()\n", __func__); + + return ret; +} + +static int hqm_ioctl_get_driver_version(struct hqm_dev *dev, + unsigned long user_arg) +{ + struct hqm_get_driver_version_args arg; + struct hqm_cmd_response response; + + HQM_INFO(dev->hqm_device, "Entering %s()\n", __func__); + + response.status = 0; + response.id = HQM_VERSION; + + if (copy_from_user(&arg, (void __user *)user_arg, sizeof(arg))) { + HQM_ERR(dev->hqm_device, + "[%s()] Invalid ioctl argument pointer\n", __func__); + return -EFAULT; + } + + if (copy_to_user((void __user *)arg.response, + &response, + sizeof(struct hqm_cmd_response))) { + HQM_ERR(dev->hqm_device, + "[%s()] Invalid ioctl response pointer\n", __func__); + return -EFAULT; + } + + HQM_INFO(dev->hqm_device, "Exiting %s()\n", __func__); + + return 0; +} + +static int hqm_ioctl_sample_perf_counters(struct hqm_dev *dev, + unsigned long user_arg) +{ + struct hqm_sample_perf_counters_args arg; + struct hqm_cmd_response response; + int ret = 0; + + if (copy_from_user(&arg, (void __user *)user_arg, sizeof(arg))) { + pr_err("Invalid ioctl argument pointer\n"); + return -EFAULT; + } + + response.status = 0; + + if (arg.measurement_duration_us == 0 || + arg.measurement_duration_us > 60000000) { + response.status = HQM_ST_INVALID_MEASUREMENT_DURATION; + ret = -EINVAL; + goto done; + } + + if (arg.perf_metric_group_id > 10) { + response.status = HQM_ST_INVALID_PERF_METRIC_GROUP_ID; + ret = -EINVAL; + goto done; + } + + ret = dev->ops->measure_perf(dev, &arg, &response); + +done: + if (copy_to_user((void __user *)arg.response, + &response, + sizeof(struct hqm_cmd_response))) { + pr_err("Invalid ioctl response pointer\n"); + return -EFAULT; + } + + return ret; +} + +static int hqm_ioctl_set_sn_allocation(struct hqm_dev *dev, + unsigned long user_arg) +{ + struct hqm_set_sn_allocation_args arg; + struct hqm_cmd_response response; + unsigned int group; + unsigned long num; + int ret; + + HQM_INFO(dev->hqm_device, "Entering %s()\n", __func__); + + if (copy_from_user(&arg, + (void __user *)user_arg, + sizeof(struct hqm_set_sn_allocation_args))) { + HQM_ERR(dev->hqm_device, + "[%s()] Invalid ioctl argument pointer\n", + __func__); + return -EFAULT; + } + + response.status = 0; + + mutex_lock(&dev->resource_mutex); + + 
group = arg.group; + num = arg.num; + + /* Only the PF can modify the SN allocations */ + if (dev->type == HQM_1_PF) + ret = hqm_set_group_sequence_numbers(&dev->hw, group, num); + else + ret = -EPERM; + + mutex_unlock(&dev->resource_mutex); + + if (copy_to_user((void __user *)arg.response, + &response, + sizeof(struct hqm_cmd_response))) { + HQM_ERR(dev->hqm_device, + "[%s()] Invalid ioctl response pointer\n", + __func__); + return -EFAULT; + } + + HQM_INFO(dev->hqm_device, "Exiting %s()\n", __func__); + + return ret; +} + +static int hqm_ioctl_get_sn_allocation(struct hqm_dev *dev, + unsigned long user_arg) +{ + struct hqm_get_sn_allocation_args arg; + struct hqm_cmd_response response; + int ret; + + HQM_INFO(dev->hqm_device, "Entering %s()\n", __func__); + + if (copy_from_user(&arg, + (void __user *)user_arg, + sizeof(struct hqm_get_sn_allocation_args))) { + HQM_ERR(dev->hqm_device, + "[%s()] Invalid ioctl argument pointer\n", + __func__); + return -EFAULT; + } + + response.status = 0; + + mutex_lock(&dev->resource_mutex); + + ret = dev->ops->get_sn_allocation(&dev->hw, arg.group); + + response.id = ret; + + ret = (ret > 0) ? 0 : ret; + + mutex_unlock(&dev->resource_mutex); + + if (copy_to_user((void __user *)arg.response, + &response, + sizeof(struct hqm_cmd_response))) { + HQM_ERR(dev->hqm_device, + "[%s()] Invalid ioctl response pointer\n", + __func__); + return -EFAULT; + } + + HQM_INFO(dev->hqm_device, "Exiting %s()\n", __func__); + + return ret; +} + +static int hqm_ioctl_measure_sched_count(struct hqm_dev *dev, + unsigned long user_arg) +{ + struct hqm_measure_sched_count_args arg; + struct hqm_cmd_response response; + int ret = 0; + + if (copy_from_user(&arg, (void __user *)user_arg, sizeof(arg))) { + pr_err("Invalid ioctl argument pointer\n"); + return -EFAULT; + } + + response.status = 0; + + if (arg.measurement_duration_us == 0 || + arg.measurement_duration_us > 60000000) { + response.status = HQM_ST_INVALID_MEASUREMENT_DURATION; + ret = -EINVAL; + goto done; + } + + ret = dev->ops->measure_sched_count(dev, &arg, &response); + +done: + if (copy_to_user((void __user *)arg.response, + &response, + sizeof(struct hqm_cmd_response))) { + pr_err("Invalid ioctl response pointer\n"); + return -EFAULT; + } + + return ret; +} + +static int hqm_ioctl_query_cq_poll_mode(struct hqm_dev *dev, + unsigned long user_arg) +{ + struct hqm_query_cq_poll_mode_args arg; + struct hqm_cmd_response response; + int ret; + + if (copy_from_user(&arg, + (void __user *)user_arg, + sizeof(struct hqm_query_cq_poll_mode_args))) { + HQM_ERR(dev->hqm_device, + "[%s()] Invalid ioctl argument pointer\n", + __func__); + return -EFAULT; + } + + mutex_lock(&dev->resource_mutex); + + ret = dev->ops->query_cq_poll_mode(dev, &response); + + mutex_unlock(&dev->resource_mutex); + + if (copy_to_user((void __user *)arg.response, + &response, + sizeof(struct hqm_cmd_response))) { + pr_err("Invalid ioctl response pointer\n"); + return -EFAULT; + } + + return ret; +} + +static int hqm_ioctl_get_sn_occupancy(struct hqm_dev *dev, + unsigned long user_arg) +{ + struct hqm_get_sn_occupancy_args arg; + struct hqm_cmd_response response; + int ret; + + HQM_INFO(dev->hqm_device, "Entering %s()\n", __func__); + + if (copy_from_user(&arg, + (void __user *)user_arg, + sizeof(struct hqm_get_sn_occupancy_args))) { + HQM_ERR(dev->hqm_device, + "[%s()] Invalid ioctl argument pointer\n", + __func__); + return -EFAULT; + } + + response.status = 0; + + mutex_lock(&dev->resource_mutex); + + ret = dev->ops->get_sn_occupancy(&dev->hw, 
arg.group); + + response.id = ret; + + ret = (ret > 0) ? 0 : ret; + + mutex_unlock(&dev->resource_mutex); + + if (copy_to_user((void __user *)arg.response, + &response, + sizeof(struct hqm_cmd_response))) { + HQM_ERR(dev->hqm_device, + "[%s()] Invalid ioctl response pointer\n", + __func__); + return -EFAULT; + } + + HQM_INFO(dev->hqm_device, "Exiting %s()\n", __func__); + + return ret; +} + +static hqm_ioctl_callback_fn_t hqm_ioctl_callback_fns[NUM_HQM_CMD] = { + hqm_ioctl_get_device_version, + hqm_ioctl_create_sched_domain, + hqm_ioctl_get_num_resources, + hqm_ioctl_get_driver_version, + hqm_ioctl_sample_perf_counters, + hqm_ioctl_set_sn_allocation, + hqm_ioctl_get_sn_allocation, + hqm_ioctl_measure_sched_count, + hqm_ioctl_query_cq_poll_mode, + hqm_ioctl_get_sn_occupancy, +}; + +int hqm_ioctl_dispatcher(struct hqm_dev *dev, + unsigned int cmd, + unsigned long arg) +{ + if (_IOC_NR(cmd) >= NUM_HQM_CMD) { + HQM_ERR(dev->hqm_device, "[%s()] Unexpected HQM command %d\n", + __func__, _IOC_NR(cmd)); + return -1; + } + + return hqm_ioctl_callback_fns[_IOC_NR(cmd)](dev, arg); +} diff --git a/drivers/misc/hqm/hqm_ioctl.h b/drivers/misc/hqm/hqm_ioctl.h new file mode 100644 index 00000000000000..d844978d84449d --- /dev/null +++ b/drivers/misc/hqm/hqm_ioctl.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: GPL-2.0-only + * Copyright(c) 2017-2019 Intel Corporation + */ + +#ifndef __HQM_IOCTL_H +#define __HQM_IOCTL_H + +#include "hqm_main.h" + +int hqm_ioctl_dispatcher(struct hqm_dev *dev, + unsigned int cmd, + unsigned long arg); + +int hqm_domain_ioctl_dispatcher(struct hqm_dev *dev, + struct hqm_status *status, + unsigned int cmd, + unsigned long arg, + uint32_t domain_id); + +#endif /* __HQM_IOCTL_H */ diff --git a/drivers/misc/hqm/hqm_main.c b/drivers/misc/hqm/hqm_main.c index 9c97b79526e360..88e553b66d1efd 100644 --- a/drivers/misc/hqm/hqm_main.c +++ b/drivers/misc/hqm/hqm_main.c @@ -9,13 +9,21 @@ #include #include #include +#include #include #include #include #include #include +#include "hqm_resource.h" +#include "hqm_mbox.h" +#include "hqm_ioctl.h" +#include "hqm_mem.h" +#include "hqm_intr.h" #include "hqm_main.h" +#include + #define TO_STR2(s) #s #define TO_STR(s) TO_STR2(s) @@ -36,6 +44,10 @@ hqm_driver_copyright[] = "Copyright(c) 2017-2019 Intel Corporation"; unsigned int hqm_log_level = HQM_LOG_LEVEL_ERR; module_param_named(log_level, hqm_log_level, uint, 0644); MODULE_PARM_DESC(log_level, "Driver log level (0: None, 1: Error, 2: Info)"); +static unsigned int hqm_reset_timeout_s = HQM_DEFAULT_RESET_TIMEOUT_S; +module_param_named(reset_timeout_s, hqm_reset_timeout_s, uint, 0644); +MODULE_PARM_DESC(reset_timeout_s, + "Wait time (in seconds) after reset is requested given for app shutdown until driver zaps VMAs"); /* The driver lock protects driver data structures that may be used by multiple * devices. 
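Both module parameters above are registered with mode 0644, so they can also be adjusted at runtime through the parameter paths documented in sysfs-driver-hqm above. A minimal userspace sketch (not part of the patch; the values follow the parameter descriptions above):

/* Hypothetical helper: write a value to one of the hqm module parameters. */
#include <stdio.h>

static int hqm_write_param(const char *name, const char *val)
{
	char path[128];
	FILE *f;

	snprintf(path, sizeof(path),
		 "/sys/bus/pci/drivers/hqm/module/parameters/%s", name);

	f = fopen(path, "w");
	if (!f)
		return -1;

	fputs(val, f);

	return fclose(f);
}

int main(void)
{
	/* 2 == "Info" per the log_level parameter description */
	hqm_write_param("log_level", "2");

	/* Give applications 30 seconds (instead of the default 5) to shut
	 * down before the driver zaps their mappings during an FLR.
	 */
	hqm_write_param("reset_timeout_s", "30");

	return 0;
}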
@@ -50,22 +62,435 @@ static dev_t hqm_dev_number_base; /****** Devfs callbacks ******/ /*****************************/ +static int hqm_open_domain_device_file(struct hqm_dev *dev, struct file *f) +{ + struct hqm_domain_dev *domain; + u32 domain_id; + + domain_id = HQM_FILE_ID_FROM_DEV_T(hqm_dev_number_base, + f->f_inode->i_rdev); + + if (domain_id >= HQM_MAX_NUM_DOMAINS) { + HQM_ERR(dev->hqm_device, + "[%s()] Internal error\n", __func__); + return -EINVAL; + } + + HQM_INFO(dev->hqm_device, + "Opening domain %d's device file\n", domain_id); + + domain = &dev->sched_domains[domain_id]; + + /* Race condition: thread A opens a domain file just after + * thread B does the last close and resets the domain. + */ + if (!domain->status) + return -ENOENT; + + f->private_data = domain->status; + + domain->status->refcnt++; + + return 0; +} + +static int hqm_open_device_file(struct hqm_dev *dev, struct file *f) +{ + struct hqm_status *status; + + HQM_INFO(dev->hqm_device, "Opening HQM device file\n"); + + status = dev->status; + + if (!status) { + status = devm_kzalloc(dev->hqm_device, + sizeof(*status), + GFP_KERNEL); + if (!status) + return -ENOMEM; + + status->valid = true; + status->refcnt = 0; + + dev->status = status; + } + + f->private_data = status; + + status->refcnt++; + + return 0; +} + static int hqm_open(struct inode *i, struct file *f) { + struct hqm_dev *dev; + int ret = 0; + + dev = container_of(f->f_inode->i_cdev, struct hqm_dev, cdev); + + mutex_lock(&dev->resource_mutex); + + /* See hqm_reset_notify() for more details */ + if (dev->reset_active) { + HQM_ERR(dev->hqm_device, + "[%s()] The HQM is being reset; applications cannot use it during this time.\n", + __func__); + ret = -EINVAL; + goto end; + } + + if (!IS_HQM_DEV_FILE(hqm_dev_number_base, f->f_inode->i_rdev)) + ret = hqm_open_domain_device_file(dev, f); + else + ret = hqm_open_device_file(dev, f); + if (ret) + goto end; + + dev->ops->inc_pm_refcnt(dev->pdev, true); + +end: + mutex_unlock(&dev->resource_mutex); + + return ret; +} + +int hqm_add_domain_device_file(struct hqm_dev *hqm_dev, u32 domain_id) +{ + struct hqm_status *status; + struct device *dev; + + HQM_INFO(hqm_dev->hqm_device, + "Creating domain %d's device file\n", domain_id); + + status = devm_kzalloc(hqm_dev->hqm_device, sizeof(*status), GFP_KERNEL); + if (!status) + return -ENOMEM; + + status->valid = true; + status->refcnt = 0; + + /* Create a new device in order to create a /dev/ domain node. This + * device is a child of the HQM PCI device. 
+ */ + dev = device_create(hqm_class, + hqm_dev->hqm_device->parent, + MKDEV(MAJOR(hqm_dev->dev_number), + MINOR(hqm_dev->dev_number) + domain_id), + hqm_dev, + "hqm%d/domain%d", + HQM_DEV_ID_FROM_DEV_T(hqm_dev_number_base, + hqm_dev->dev_number), + domain_id); + + if (IS_ERR_VALUE(PTR_ERR(dev))) { + HQM_ERR(hqm_dev->hqm_device, + "%s: device_create() returned %ld\n", + hqm_driver_name, PTR_ERR(dev)); + + devm_kfree(hqm_dev->hqm_device, status); + return PTR_ERR(dev); + } + + hqm_dev->sched_domains[domain_id].status = status; + + return 0; +} + +static int hqm_reset_device(struct pci_dev *pdev) +{ + int ret; + + ret = pci_save_state(pdev); + if (ret) + return ret; + + ret = __pci_reset_function_locked(pdev); + if (ret) + return ret; + + pci_restore_state(pdev); + return 0; } +static void hqm_reset_hardware_state(struct hqm_dev *dev, bool issue_flr) +{ + if (issue_flr) + hqm_reset_device(dev->pdev); + + /* Reinitialize interrupt configuration */ + dev->ops->reinit_interrupts(dev); + + /* Reset configuration done through the sysfs */ + dev->ops->sysfs_reapply(dev); + + /* Reinitialize any other hardware state */ + dev->ops->init_hardware(dev); +} + +static int hqm_total_device_file_refcnt(struct hqm_dev *dev) +{ + int cnt = 0, i; + + for (i = 0; i < HQM_MAX_NUM_DOMAINS; i++) + if (dev->sched_domains[i].status) + cnt += dev->sched_domains[i].status->refcnt; + + if (dev->status) + cnt += dev->status->refcnt; + + return cnt; +} + +static int hqm_close_domain_device_file(struct hqm_dev *dev, + struct hqm_status *status, + u32 domain_id, + bool skip_reset) +{ + bool valid = status->valid; + int ret = 0; + + devm_kfree(dev->hqm_device, status); + + + dev->sched_domains[domain_id].alert_rd_idx = 0; + dev->sched_domains[domain_id].alert_wr_idx = 0; + + /* Check if the domain was reset, its device file destroyed, and its + * memory released during FLR handling. + */ + if (!valid) + return 0; + + dev->sched_domains[domain_id].status = NULL; + + device_destroy(hqm_class, + MKDEV(MAJOR(dev->dev_number), + MINOR(dev->dev_number) + + domain_id)); + + if (!skip_reset) + ret = dev->ops->reset_domain(dev, domain_id); + + /* Unpin all memory pages associated with the domain */ + hqm_release_domain_memory(dev, domain_id); + + if (ret) { + dev->domain_reset_failed = true; + HQM_ERR(dev->hqm_device, + "Internal error: Domain reset failed. 
To recover, reset the device.\n"); + } + + return ret; +} + static int hqm_close(struct inode *i, struct file *f) { + struct hqm_status *status = f->private_data; + struct hqm_dev *dev; + int ret = 0; + + dev = container_of(f->f_inode->i_cdev, struct hqm_dev, cdev); + + mutex_lock(&dev->resource_mutex); + + if (!IS_HQM_DEV_FILE(hqm_dev_number_base, f->f_inode->i_rdev)) { + struct hqm_domain_dev *domain; + u32 domain_id; + + domain_id = HQM_FILE_ID_FROM_DEV_T(hqm_dev_number_base, + f->f_inode->i_rdev); + + if (domain_id >= HQM_MAX_NUM_DOMAINS) { + HQM_ERR(dev->hqm_device, + "[%s()] Internal error\n", __func__); + ret = -1; + goto end; + } + + domain = &dev->sched_domains[domain_id]; + + HQM_INFO(dev->hqm_device, + "Closing domain %d's device file\n", domain_id); + + status->refcnt--; + + if (status->refcnt == 0) + ret = hqm_close_domain_device_file(dev, + status, + domain_id, + false); + } else { + HQM_INFO(dev->hqm_device, "Closing HQM device file\n"); + + status->refcnt--; + + if (status->refcnt == 0) { + devm_kfree(dev->hqm_device, status); + dev->status = NULL; + } + } + + dev->ops->dec_pm_refcnt(dev->pdev); + +end: + mutex_unlock(&dev->resource_mutex); + + return ret; +} + +static bool hqm_domain_valid(struct hqm_dev *dev, struct hqm_status *status) +{ + bool ret; + + mutex_lock(&dev->resource_mutex); + + ret = status->valid; + + mutex_unlock(&dev->resource_mutex); + + return ret; +} + +static bool hqm_domain_alerts_avail(struct hqm_domain_dev *domain) +{ + bool ret; + + mutex_lock(&domain->alert_mutex); + + ret = domain->alert_rd_idx != domain->alert_wr_idx; + + mutex_unlock(&domain->alert_mutex); + + return ret; +} + +static int hqm_read_domain_alert(struct hqm_dev *dev, + struct hqm_status *status, + int domain_id, + struct hqm_domain_alert *alert, + bool nonblock) +{ + struct hqm_domain_dev *domain = &dev->sched_domains[domain_id]; + + /* Grab the alert semaphore to access the read and write indexes */ + if (mutex_lock_interruptible(&domain->alert_mutex)) + return -ERESTARTSYS; + + while (domain->alert_rd_idx == domain->alert_wr_idx) { + /* Release the alert semaphore before putting the thread on the + * wait queue. + */ + mutex_unlock(&domain->alert_mutex); + + if (nonblock) + return -EWOULDBLOCK; + + HQM_INFO(dev->hqm_device, + "Thread %d is blocking waiting for an alert in domain %d\n", + current->pid, domain_id); + + if (wait_event_interruptible(domain->wq_head, + hqm_domain_alerts_avail(domain) || + !hqm_domain_valid(dev, status))) + return -ERESTARTSYS; + + /* See hqm_reset_notify() for more details */ + if (!hqm_domain_valid(dev, status)) { + alert->alert_id = HQM_DOMAIN_ALERT_DEVICE_RESET; + return 0; + } + + if (mutex_lock_interruptible(&domain->alert_mutex)) + return -ERESTARTSYS; + } + + /* The alert indexes are not equal, so there is an alert available. 
*/ + memcpy(alert, &domain->alerts[domain->alert_rd_idx], sizeof(*alert)); + + domain->alert_rd_idx++; + + mutex_unlock(&domain->alert_mutex); + + return 0; +} + +static int hqm_check_and_inc_active_users(struct hqm_dev *dev, + struct hqm_status *status) +{ + mutex_lock(&dev->resource_mutex); + + if (!status->valid) { + mutex_unlock(&dev->resource_mutex); + return -EINVAL; + } + + dev->active_users++; + + mutex_unlock(&dev->resource_mutex); + return 0; } +static void hqm_dec_active_users(struct hqm_dev *dev) +{ + mutex_lock(&dev->resource_mutex); + + dev->active_users--; + + mutex_unlock(&dev->resource_mutex); +} + static ssize_t hqm_read(struct file *f, char __user *buf, size_t len, loff_t *offset) { - return 0; + struct hqm_status *status = f->private_data; + struct hqm_domain_alert alert; + struct hqm_dev *dev; + u32 domain_id; + int ret; + + if (IS_HQM_DEV_FILE(hqm_dev_number_base, f->f_inode->i_rdev)) + return 0; + + if (len != sizeof(alert)) + return -EINVAL; + + domain_id = HQM_FILE_ID_FROM_DEV_T(hqm_dev_number_base, + f->f_inode->i_rdev); + + dev = container_of(f->f_inode->i_cdev, struct hqm_dev, cdev); + + if (hqm_check_and_inc_active_users(dev, status)) { + alert.alert_id = HQM_DOMAIN_ALERT_DEVICE_RESET; + goto copy; + } + + /* See hqm_user.h for details on domain alert notifications */ + + ret = hqm_read_domain_alert(dev, + status, + domain_id, + &alert, + f->f_flags & O_NONBLOCK); + + hqm_dec_active_users(dev); + + if (ret) + return ret; + +copy: + if (copy_to_user(buf, &alert, sizeof(alert))) + return -EFAULT; + + HQM_INFO(dev->hqm_device, + "Thread %d received alert 0x%llx, with aux data 0x%llx\n", + current->pid, ((u64 *)&alert)[0], ((u64 *)&alert)[1]); + + return sizeof(alert); } static ssize_t hqm_write(struct file *f, @@ -73,17 +498,205 @@ static ssize_t hqm_write(struct file *f, size_t len, loff_t *offset) { + struct hqm_dev *dev; + + dev = container_of(f->f_inode->i_cdev, struct hqm_dev, cdev); + + HQM_INFO(dev->hqm_device, "[%s()]\n", __func__); + return 0; } +/* The kernel driver inserts VMAs into the device's VMA list whenever an mmap + * is performed or the VMA is cloned (e.g. during fork()). + */ +static int hqm_insert_vma(struct hqm_dev *dev, struct vm_area_struct *vma) +{ + struct hqm_vma_node *node; + + node = devm_kzalloc(dev->hqm_device, sizeof(*node), GFP_KERNEL); + if (!node) + return -ENOMEM; + + node->vma = vma; + + mutex_lock(&dev->resource_mutex); + + list_add(&node->list, &dev->vma_list); + + mutex_unlock(&dev->resource_mutex); + + return 0; +} + +/* The kernel driver deletes VMAs from the device's VMA list when the VMA is + * closed (e.g. during process exit). 
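+ * Lookup and removal are serialized by the resource mutex, matching the
+ * insertion path in hqm_insert_vma().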
+ */ +static void hqm_delete_vma(struct hqm_dev *dev, struct vm_area_struct *vma) +{ + struct hqm_vma_node *vma_node; + struct list_head *node; + + mutex_lock(&dev->resource_mutex); + + list_for_each(node, &dev->vma_list) { + vma_node = list_entry(node, struct hqm_vma_node, list); + if (vma_node->vma == vma) { + list_del(&vma_node->list); + devm_kfree(dev->hqm_device, vma_node); + break; + } + } + + mutex_unlock(&dev->resource_mutex); +} + +static void +hqm_vma_open(struct vm_area_struct *vma) +{ + struct hqm_dev *dev = vma->vm_private_data; + + hqm_insert_vma(dev, vma); +} + +static void +hqm_vma_close(struct vm_area_struct *vma) +{ + struct hqm_dev *dev = vma->vm_private_data; + + hqm_delete_vma(dev, vma); +} + +static const struct vm_operations_struct hqm_vma_ops = { + .open = hqm_vma_open, + .close = hqm_vma_close, +}; + +static int hqm_mmap(struct file *f, struct vm_area_struct *vma) +{ + struct hqm_dev *dev; + u32 domain_id; + int ret; + + dev = container_of(f->f_inode->i_cdev, struct hqm_dev, cdev); + + /* mmap operations must go through scheduling domain device files */ + if (IS_HQM_DEV_FILE(hqm_dev_number_base, f->f_inode->i_rdev)) + return -EINVAL; + + ret = hqm_check_and_inc_active_users(dev, f->private_data); + if (ret) + return ret; + + domain_id = HQM_FILE_ID_FROM_DEV_T(hqm_dev_number_base, + f->f_inode->i_rdev); + + if (domain_id >= HQM_MAX_NUM_DOMAINS) { + HQM_ERR(dev->hqm_device, + "[%s()] Internal error\n", __func__); + ret = -EINVAL; + goto end; + } + + ret = hqm_insert_vma(dev, vma); + if (ret) + goto end; + + mutex_lock(&dev->resource_mutex); + + ret = dev->ops->mmap(f, vma, domain_id); + + mutex_unlock(&dev->resource_mutex); + + if (ret) + hqm_delete_vma(dev, vma); + + vma->vm_ops = &hqm_vma_ops; + vma->vm_private_data = dev; + +end: + hqm_dec_active_users(dev); + + return ret; +} + +static long +hqm_ioctl(struct file *f, unsigned int cmd, unsigned long arg) +{ + struct hqm_dev *dev; + u32 domain_id; + int ret; + + dev = container_of(f->f_inode->i_cdev, struct hqm_dev, cdev); + + domain_id = HQM_FILE_ID_FROM_DEV_T(hqm_dev_number_base, + f->f_inode->i_rdev); + + if (domain_id > HQM_MAX_NUM_DOMAINS) { + HQM_ERR(dev->hqm_device, + "[%s()] Internal error\n", __func__); + return -EINVAL; + } + + ret = hqm_check_and_inc_active_users(dev, f->private_data); + if (ret) + return ret; + + if (IS_HQM_DEV_FILE(hqm_dev_number_base, f->f_inode->i_rdev)) + ret = hqm_ioctl_dispatcher(dev, cmd, arg); + else + ret = hqm_domain_ioctl_dispatcher(dev, f->private_data, + cmd, arg, domain_id); + + hqm_dec_active_users(dev); + + return ret; +} + static const struct file_operations hqm_fops = { .owner = THIS_MODULE, .open = hqm_open, .release = hqm_close, .read = hqm_read, .write = hqm_write, + .mmap = hqm_mmap, + .unlocked_ioctl = hqm_ioctl, }; +static void hqm_assign_ops(struct hqm_dev *hqm_dev, + const struct pci_device_id *pdev_id) +{ + hqm_dev->type = pdev_id->driver_data; + + switch (pdev_id->driver_data) { + case HQM_1_PF: + hqm_dev->ops = &hqm_pf_ops; + break; + } +} + +static inline void hqm_set_device_revision(struct hqm_dev *hqm_dev) +{ + switch (boot_cpu_data.x86_stepping) { + case 0: + hqm_dev->revision = HQM_REV_A0; + break; + case 1: + hqm_dev->revision = HQM_REV_A1; + break; + case 2: + hqm_dev->revision = HQM_REV_A2; + break; + case 3: + hqm_dev->revision = HQM_REV_A3; + break; + default: + /* Treat all revisions >= 4 as B0 */ + hqm_dev->revision = HQM_REV_B0; + break; + } +} + static bool hqm_id_in_use[HQM_MAX_NUM_DEVICES]; static int hqm_find_next_available_id(void) @@ 
-133,10 +746,14 @@ static int hqm_probe(struct pci_dev *pdev, list_add(&hqm_dev->list, &hqm_dev_list); mutex_unlock(&driver_lock); + hqm_assign_ops(hqm_dev, pdev_id); + pci_set_drvdata(pdev, hqm_dev); hqm_dev->pdev = pdev; + hqm_set_device_revision(hqm_dev); + hqm_dev->id = hqm_find_next_available_id(); if (hqm_dev->id == -1) { HQM_ERR(&pdev->dev, "probe: insufficient device IDs\n"); @@ -171,10 +788,73 @@ static int hqm_probe(struct pci_dev *pdev, pci_enable_pcie_error_reporting(pdev); + INIT_LIST_HEAD(&hqm_dev->vma_list); + + ret = hqm_dev->ops->map_pci_bar_space(hqm_dev, pdev); + if (ret) + goto map_pci_bar_fail; + + ret = hqm_dev->ops->cdev_add(hqm_dev, hqm_dev_number_base, &hqm_fops); + if (ret) + goto cdev_add_fail; + + ret = hqm_dev->ops->device_create(hqm_dev, pdev, hqm_class); + if (ret) + goto device_add_fail; + + dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + + ret = hqm_dev->ops->sysfs_create(hqm_dev); + if (ret) + goto sysfs_create_fail; + + ret = hqm_reset_device(pdev); + if (ret) + goto hqm_reset_fail; + + ret = hqm_dev->ops->init_interrupts(hqm_dev, pdev); + if (ret) + goto init_interrupts_fail; + + ret = hqm_dev->ops->init_driver_state(hqm_dev); + if (ret) + goto init_driver_state_fail; + + ret = hqm_resource_init(&hqm_dev->hw); + if (ret) + goto resource_init_fail; + + + hqm_dev->ops->init_hardware(hqm_dev); + + /* The driver puts the device to sleep (D3hot) while there are no + * scheduling domains to service. The usage counter of a PCI device at + * probe time is 2, so decrement it twice here. (The PCI layer has + * already called pm_runtime_enable().) + */ + hqm_dev->ops->dec_pm_refcnt(pdev); + hqm_dev->ops->dec_pm_refcnt(pdev); + hqm_set_id_in_use(hqm_dev->id, true); return 0; +resource_init_fail: + hqm_dev->ops->free_driver_state(hqm_dev); +init_driver_state_fail: + hqm_dev->ops->free_interrupts(hqm_dev, pdev); +init_interrupts_fail: +hqm_reset_fail: + hqm_dev->ops->sysfs_destroy(hqm_dev); +sysfs_create_fail: + hqm_dev->ops->device_destroy(hqm_dev, hqm_class); +device_add_fail: + hqm_dev->ops->cdev_del(hqm_dev); +cdev_add_fail: + hqm_dev->ops->unmap_pci_bar_space(hqm_dev, pdev); +map_pci_bar_fail: + pci_disable_pcie_error_reporting(pdev); + pci_release_regions(pdev); pci_request_regions_fail: pci_disable_device(pdev); pci_enable_device_fail: @@ -191,6 +871,7 @@ static int hqm_probe(struct pci_dev *pdev, static void hqm_remove(struct pci_dev *pdev) { struct hqm_dev *hqm_dev; + int i; /* Undo all the hqm_probe() operations */ HQM_INFO(&pdev->dev, "Cleaning up the HQM driver for removal\n"); @@ -198,6 +879,47 @@ static void hqm_remove(struct pci_dev *pdev) hqm_set_id_in_use(hqm_dev->id, false); + /* Undo the PM operations in hqm_probe(). */ + hqm_dev->ops->inc_pm_refcnt(pdev, false); + hqm_dev->ops->inc_pm_refcnt(pdev, false); + + hqm_dev->ops->free_driver_state(hqm_dev); + + hqm_dev->ops->free_interrupts(hqm_dev, pdev); + + + hqm_resource_free(&hqm_dev->hw); + + hqm_release_device_memory(hqm_dev); + + hqm_dev->ops->sysfs_destroy(hqm_dev); + + /* If a domain is created without its device file ever being opened, it + * needs to be destroyed here. 
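+	 * Domains whose files were opened are destroyed in hqm_close() when
+	 * the last reference is dropped.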
+ */ + for (i = 0; i < HQM_MAX_NUM_DOMAINS; i++) { + struct hqm_status *status; + + status = hqm_dev->sched_domains[i].status; + + if (!status) + continue; + + if (status->refcnt == 0) + device_destroy(hqm_class, + MKDEV(MAJOR(hqm_dev->dev_number), + MINOR(hqm_dev->dev_number) + + i)); + + devm_kfree(hqm_dev->hqm_device, status); + } + + hqm_dev->ops->device_destroy(hqm_dev, hqm_class); + + hqm_dev->ops->cdev_del(hqm_dev); + + hqm_dev->ops->unmap_pci_bar_space(hqm_dev, pdev); + pci_disable_pcie_error_reporting(pdev); pci_release_regions(pdev); @@ -211,24 +933,332 @@ static void hqm_remove(struct pci_dev *pdev) devm_kfree(&pdev->dev, hqm_dev); } +#ifdef CONFIG_PM +static int hqm_runtime_suspend(struct device *dev) +{ + struct pci_dev *pdev = container_of(dev, struct pci_dev, dev); + struct hqm_dev *hqm_dev = pci_get_drvdata(pdev); + + HQM_INFO(hqm_dev->hqm_device, "Suspending device operation\n"); + + /* Return and let the PCI subsystem put the device in D3hot. */ + + return 0; +} + +static int hqm_runtime_resume(struct device *dev) +{ + struct pci_dev *pdev = container_of(dev, struct pci_dev, dev); + struct hqm_dev *hqm_dev = pci_get_drvdata(pdev); + + /* The PCI subsystem put the device in D0, now reinitialize the + * device's state. + */ + + HQM_INFO(hqm_dev->hqm_device, "Resuming device operation\n"); + + hqm_reset_hardware_state(hqm_dev, true); + + return 0; +} +#endif + static struct pci_device_id hqm_id_table[] = { - { PCI_VDEVICE(INTEL, HQM_PF_DEV_ID) }, + { PCI_VDEVICE(INTEL, HQM_PF_DEV_ID), .driver_data = HQM_1_PF }, { 0 } }; MODULE_DEVICE_TABLE(pci, hqm_id_table); +bool hqm_in_use(struct hqm_dev *dev) +{ + return (hqm_total_device_file_refcnt(dev) != 0 || + (dev->active_users > 0) || + !list_empty(&dev->vma_list)); +} + +static void hqm_wait_for_idle(struct hqm_dev *hqm_dev) +{ + int i; + + for (i = 0; i < hqm_reset_timeout_s * 10; i++) { + bool idle; + + mutex_lock(&hqm_dev->resource_mutex); + + /* Check for any application threads in the driver, extant + * mmaps, or open device files. + */ + idle = !hqm_in_use(hqm_dev); + + mutex_unlock(&hqm_dev->resource_mutex); + + if (idle) + return; + + cond_resched(); + msleep(100); + } + + HQM_ERR(hqm_dev->hqm_device, + "PF driver timed out waiting for applications to idle\n"); +} + +void hqm_zap_vma_entries(struct hqm_dev *hqm_dev) +{ + struct hqm_vma_node *vma_node; + struct list_head *node; + + HQM_INFO(hqm_dev->hqm_device, "Zapping memory mappings\n"); + + list_for_each(node, &hqm_dev->vma_list) { + unsigned long size; + + vma_node = list_entry(node, struct hqm_vma_node, list); + + size = vma_node->vma->vm_end - vma_node->vma->vm_start; + + zap_vma_ptes(vma_node->vma, vma_node->vma->vm_start, size); + } +} + +static void hqm_disable_device_files(struct hqm_dev *hqm_dev) +{ + int i; + + /* Set all status->valid flags to false to prevent existing device + * files from being used to enter the device driver. + */ + for (i = 0; i < HQM_MAX_NUM_DOMAINS; i++) { + struct hqm_status *status; + + status = hqm_dev->sched_domains[i].status; + + if (!status) + continue; + + status->valid = false; + + device_destroy(hqm_class, + MKDEV(MAJOR(hqm_dev->dev_number), + MINOR(hqm_dev->dev_number) + i)); + + /* If the domain device file was created but never opened, free + * the file private memory here. Otherwise, it will be freed + * in the file close callback when refcnt reaches 0. 
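+		 * For still-open files, hqm_close() sees valid == false and
+		 * skips the (already performed) device destroy and the domain
+		 * reset.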
+ */ + if (status->refcnt == 0) + devm_kfree(hqm_dev->hqm_device, status); + } + + if (hqm_dev->status) + hqm_dev->status->valid = false; +} + +static void hqm_wake_threads(struct hqm_dev *hqm_dev) +{ + int i; + + /* Wake any blocked device file readers. These threads will return the + * HQM_DOMAIN_ALERT_DEVICE_RESET alert, and well-behaved applications + * will close their fds and unmap HQM memory as a result. + */ + for (i = 0; i < HQM_MAX_NUM_DOMAINS; i++) + wake_up_interruptible(&hqm_dev->sched_domains[i].wq_head); + + wake_up_interruptible(&hqm_dev->measurement_wq); + + /* Wake threads blocked on a CQ interrupt */ + for (i = 0; i < HQM_MAX_NUM_LDB_PORTS; i++) + hqm_wake_thread(hqm_dev, + &hqm_dev->intr.ldb_cq_intr[i], + WAKE_DEV_RESET); + + for (i = 0; i < HQM_MAX_NUM_DIR_PORTS; i++) + hqm_wake_thread(hqm_dev, + &hqm_dev->intr.dir_cq_intr[i], + WAKE_DEV_RESET); +} + +void hqm_stop_users(struct hqm_dev *hqm_dev) +{ + + /* Disable existing device files to prevent applications from enter the + * device driver through file operations. (New files can't be opened + * while the resource mutex is held.) + */ + hqm_disable_device_files(hqm_dev); + + /* Wake any threads blocked in the kernel */ + hqm_wake_threads(hqm_dev); +} + +static void hqm_reset_prepare(struct pci_dev *pdev) +{ + /* Unexpected FLR. Applications may be actively using the device at + * the same time, which poses two problems: + * - If applications continue to enqueue to the hardware they will + * cause hardware errors, because the FLR will have reset the + * scheduling domains, ports, and queues. + * - When the applications end, they must not trigger the driver's + * domain reset code. The domain reset procedure would fail because + * the device's registers will have been reset by the FLR. + * + * To avoid these problems, the driver handles unexpected resets as + * follows: + * 1. Set the reset_active flag. This flag blocks new device files + * from being opened and is used as a wakeup condition in the + * driver's wait queues. + * 2. If this is a PF FLR and there are active VFs, send them a + * pre-reset notification, so they can stop any VF applications. + * 3. Disable all device files (set the per-file valid flag to false, + * which prevents the file from being used after FLR completes) and + * wake any threads on a wait queue. + * 4. If the HQM is not in use -- i.e. no open device files or memory + * mappings, and no VFs in use (PF FLR only) -- the FLR can begin. + * 5. Else, the driver waits (up to a user-specified timeout, default + * 5s) for software to stop using the driver and the device. If the + * timeout elapses, the driver zaps any remaining MMIO mappings. + * + * After the FLR: + * 1. Clear the per-domain status pointers (the memory is freed in + * either hqm_close or hqm_stop_users). + * 2. Release any remaining allocated port or CQ memory, now that it's + * guaranteed the device is unconfigured and won't write to memory. + * 3. Reset software and hardware state + * 4. Notify VFs that the FLR is complete. + * 5. Set reset_active to false. + */ + + struct hqm_dev *hqm_dev; + + hqm_dev = pci_get_drvdata(pdev); + + HQM_INFO(hqm_dev->hqm_device, "HQM driver reset prepare\n"); + + mutex_lock(&hqm_dev->resource_mutex); + + /* Block any new device files from being opened */ + hqm_dev->reset_active = true; + + /* If the device has 1+ VFs, even if they're not in use, it will not be + * suspended. 
To avoid having to handle two cases (reset while device + * suspended and reset while device active), increment the device's PM + * refcnt here, to guarantee that the device is in D0 for the duration + * of the reset. + */ + hqm_dev->ops->inc_pm_refcnt(pdev, true); + + /* Stop existing applications from continuing to use the device by + * blocking kernel driver interfaces and waking any threads on wait + * queues, but don't zap VMA entries yet. If this is a PF FLR, notify + * any VFs of the impending FLR so they can stop their users as well. + */ + hqm_stop_users(hqm_dev); + + /* If no software is using the device, there's nothing to clean up. */ + if (!hqm_in_use(hqm_dev)) + return; + + HQM_INFO(hqm_dev->hqm_device, "Waiting for users to stop\n"); + + /* Release the resource mutex so threads can complete their work and + * exit the driver + */ + mutex_unlock(&hqm_dev->resource_mutex); + + /* Wait until the device is idle or hqm_reset_timeout_s seconds elapse. + * If the timeout occurs, zap any remaining VMA entries to guarantee + * applications can't reach the device. + */ + hqm_wait_for_idle(hqm_dev); + + mutex_lock(&hqm_dev->resource_mutex); + + if (!hqm_in_use(hqm_dev)) + return; + + hqm_zap_vma_entries(hqm_dev); + + if (hqm_dev->active_users > 0) + HQM_ERR(hqm_dev->hqm_device, + "Internal error: %lu active_users in the driver during FLR\n", + hqm_dev->active_users); + + /* Don't release resource_mutex until after the FLR occurs. This + * prevents applications from accessing the device during reset. + */ +} + +static void hqm_reset_done(struct pci_dev *pdev) +{ + struct hqm_dev *hqm_dev; + int i; + + hqm_dev = pci_get_drvdata(pdev); + + /* Clear all status pointers, to be filled in by post-FLR applications + * using the device driver. + * + * Note that status memory isn't leaked -- it is either freed during + * hqm_stop_users() or in the file close callback. + */ + for (i = 0; i < HQM_MAX_NUM_DOMAINS; i++) + hqm_dev->sched_domains[i].status = NULL; + + hqm_dev->status = NULL; + + /* Free allocated CQ and PC memory. These are no longer accessible to + * user-space: either the applications closed, or their mappings were + * zapped in hqm_reset_prepare(). + */ + hqm_release_device_memory(hqm_dev); + + /* Reset resource allocation state */ + hqm_resource_reset(&hqm_dev->hw); + + /* Reset the hardware state, but don't issue an additional FLR */ + hqm_reset_hardware_state(hqm_dev, false); + + HQM_INFO(hqm_dev->hqm_device, "HQM driver reset done\n"); + + hqm_dev->reset_active = false; + + /* Undo the PM refcnt increment in hqm_reset_prepare(). 
*/ + hqm_dev->ops->dec_pm_refcnt(hqm_dev->pdev); + + mutex_unlock(&hqm_dev->resource_mutex); +} + +static const struct pci_error_handlers hqm_err_handler = { + .reset_prepare = hqm_reset_prepare, + .reset_done = hqm_reset_done, +}; + +#ifdef CONFIG_PM +static const struct dev_pm_ops hqm_pm_ops = { + SET_RUNTIME_PM_OPS(hqm_runtime_suspend, hqm_runtime_resume, NULL) +}; +#endif + static struct pci_driver hqm_pci_driver = { .name = (char *)hqm_driver_name, .id_table = hqm_id_table, .probe = hqm_probe, .remove = hqm_remove, +#ifdef CONFIG_PM + .driver.pm = &hqm_pm_ops, +#endif + .err_handler = &hqm_err_handler, }; static int __init hqm_init_module(void) { int err; - pr_info("%s\n", hqm_driver_name); + pr_info("%s - version %d.%d.%d\n", hqm_driver_name, + HQM_VERSION_MAJOR_NUMBER, + HQM_VERSION_MINOR_NUMBER, + HQM_VERSION_REVISION_NUMBER); pr_info("%s\n", hqm_driver_copyright); hqm_class = class_create(THIS_MODULE, hqm_driver_name); diff --git a/drivers/misc/hqm/hqm_main.h b/drivers/misc/hqm/hqm_main.h index d97e68818cc5e2..58163772d0096a 100644 --- a/drivers/misc/hqm/hqm_main.h +++ b/drivers/misc/hqm/hqm_main.h @@ -5,6 +5,7 @@ #ifndef __HQM_MAIN_H #define __HQM_MAIN_H +#include #include #include #include @@ -13,10 +14,11 @@ #include #include -#include "hqm_hw_types.h" - static const char hqm_driver_name[] = KBUILD_MODNAME; +#include "hqm_hw_types.h" +#include + #define HQM_NUM_FUNCS_PER_DEVICE (1 + HQM_MAX_NUM_VFS) #define HQM_MAX_NUM_DEVICES (HQM_NUM_FUNCS_PER_DEVICE) #define HQM_NUM_DEV_FILES_PER_DEVICE (HQM_MAX_NUM_DOMAINS + 1) @@ -31,12 +33,271 @@ static const char hqm_driver_name[] = KBUILD_MODNAME; #define IS_HQM_DEV_FILE(base, file) (HQM_FILE_ID_FROM_DEV_T(base, file) == \ HQM_MAX_NUM_DOMAINS) +#define HQM_DEFAULT_RESET_TIMEOUT_S 5 + +/* The notification entry is located in the same memory page as the popcount + * cache line. + */ +#define HQM_NOTIFY_ENTRY_OFFSET 2048 + +extern struct list_head hqm_dev_list; +extern struct mutex driver_lock; + +enum hqm_device_type { + HQM_1_PF, +}; + +struct hqm_dev; + +struct hqm_device_ops { + /* Device create? 
(maybe) */ + int (*map_pci_bar_space)(struct hqm_dev *dev, struct pci_dev *pdev); + void (*unmap_pci_bar_space)(struct hqm_dev *dev, struct pci_dev *pdev); + int (*mmap)(struct file *f, struct vm_area_struct *vma, u32 id); + void (*inc_pm_refcnt)(struct pci_dev *pdev, bool resume); + void (*dec_pm_refcnt)(struct pci_dev *pdev); + bool (*pm_refcnt_status_suspended)(struct pci_dev *pdev); + int (*init_driver_state)(struct hqm_dev *dev); + void (*free_driver_state)(struct hqm_dev *dev); + int (*device_create)(struct hqm_dev *hqm_dev, + struct pci_dev *pdev, + struct class *hqm_class); + void (*device_destroy)(struct hqm_dev *hqm_dev, + struct class *hqm_class); + int (*cdev_add)(struct hqm_dev *hqm_dev, + dev_t base, + const struct file_operations *fops); + void (*cdev_del)(struct hqm_dev *hqm_dev); + int (*sysfs_create)(struct hqm_dev *hqm_dev); + void (*sysfs_destroy)(struct hqm_dev *hqm_dev); + void (*sysfs_reapply)(struct hqm_dev *dev); + int (*init_interrupts)(struct hqm_dev *dev, struct pci_dev *pdev); + int (*enable_ldb_cq_interrupts)(struct hqm_dev *dev, + int port_id, + u16 thresh); + int (*enable_dir_cq_interrupts)(struct hqm_dev *dev, + int port_id, + u16 thresh); + int (*arm_cq_interrupt)(struct hqm_dev *dev, + int domain_id, + int port_id, + bool is_ldb); + void (*reinit_interrupts)(struct hqm_dev *dev); + void (*free_interrupts)(struct hqm_dev *dev, struct pci_dev *pdev); + void (*init_hardware)(struct hqm_dev *dev); + int (*create_sched_domain)(struct hqm_hw *hw, + struct hqm_create_sched_domain_args *args, + struct hqm_cmd_response *resp); + int (*create_ldb_pool)(struct hqm_hw *hw, + u32 domain_id, + struct hqm_create_ldb_pool_args *args, + struct hqm_cmd_response *resp); + int (*create_dir_pool)(struct hqm_hw *hw, + u32 domain_id, + struct hqm_create_dir_pool_args *args, + struct hqm_cmd_response *resp); + int (*create_ldb_queue)(struct hqm_hw *hw, + u32 domain_id, + struct hqm_create_ldb_queue_args *args, + struct hqm_cmd_response *resp); + int (*create_dir_queue)(struct hqm_hw *hw, + u32 domain_id, + struct hqm_create_dir_queue_args *args, + struct hqm_cmd_response *resp); + int (*create_ldb_port)(struct hqm_hw *hw, + u32 domain_id, + struct hqm_create_ldb_port_args *args, + uintptr_t pop_count_dma_base, + uintptr_t cq_dma_base, + struct hqm_cmd_response *resp); + int (*create_dir_port)(struct hqm_hw *hw, + u32 domain_id, + struct hqm_create_dir_port_args *args, + uintptr_t pop_count_dma_base, + uintptr_t cq_dma_base, + struct hqm_cmd_response *resp); + int (*start_domain)(struct hqm_hw *hw, + u32 domain_id, + struct hqm_start_domain_args *args, + struct hqm_cmd_response *resp); + int (*map_qid)(struct hqm_hw *hw, + u32 domain_id, + struct hqm_map_qid_args *args, + struct hqm_cmd_response *resp); + int (*unmap_qid)(struct hqm_hw *hw, + u32 domain_id, + struct hqm_unmap_qid_args *args, + struct hqm_cmd_response *resp); + int (*enable_ldb_port)(struct hqm_hw *hw, + u32 domain_id, + struct hqm_enable_ldb_port_args *args, + struct hqm_cmd_response *resp); + int (*disable_ldb_port)(struct hqm_hw *hw, + u32 domain_id, + struct hqm_disable_ldb_port_args *args, + struct hqm_cmd_response *resp); + int (*enable_dir_port)(struct hqm_hw *hw, + u32 domain_id, + struct hqm_enable_dir_port_args *args, + struct hqm_cmd_response *resp); + int (*disable_dir_port)(struct hqm_hw *hw, + u32 domain_id, + struct hqm_disable_dir_port_args *args, + struct hqm_cmd_response *resp); + int (*get_num_resources)(struct hqm_hw *hw, + struct hqm_get_num_resources_args *args); + int (*reset_domain)(struct 
hqm_dev *dev, u32 domain_id); + int (*measure_perf)(struct hqm_dev *dev, + struct hqm_sample_perf_counters_args *args, + struct hqm_cmd_response *response); + int (*measure_sched_count)(struct hqm_dev *dev, + struct hqm_measure_sched_count_args *args, + struct hqm_cmd_response *response); + int (*ldb_port_owned_by_domain)(struct hqm_hw *hw, + u32 domain_id, + u32 port_id); + int (*dir_port_owned_by_domain)(struct hqm_hw *hw, + u32 domain_id, + u32 port_id); + int (*get_sn_allocation)(struct hqm_hw *hw, u32 group_id); + int (*get_sn_occupancy)(struct hqm_hw *hw, u32 group_id); + int (*get_ldb_queue_depth)(struct hqm_hw *hw, + u32 domain_id, + struct hqm_get_ldb_queue_depth_args *args, + struct hqm_cmd_response *resp); + int (*get_dir_queue_depth)(struct hqm_hw *hw, + u32 domain_id, + struct hqm_get_dir_queue_depth_args *args, + struct hqm_cmd_response *resp); + int (*pending_port_unmaps)(struct hqm_hw *hw, + u32 domain_id, + struct hqm_pending_port_unmaps_args *args, + struct hqm_cmd_response *resp); + int (*query_cq_poll_mode)(struct hqm_dev *dev, + struct hqm_cmd_response *user_resp); +}; + +extern struct hqm_device_ops hqm_pf_ops; + +struct hqm_port_memory { + void *cq_base; + dma_addr_t cq_dma_base; + void *pc_base; + dma_addr_t pc_dma_base; + int domain_id; + u8 valid; +}; + +struct hqm_status { + u8 valid; + u32 refcnt; +}; + +#define HQM_DOMAIN_ALERT_RING_SIZE 256 + +struct hqm_domain_dev { + struct hqm_status *status; + struct hqm_domain_alert alerts[HQM_DOMAIN_ALERT_RING_SIZE]; + u8 alert_rd_idx; + u8 alert_wr_idx; + /* The alert mutex protects access to the alert ring and its read and + * write indexes. + */ + struct mutex alert_mutex; + wait_queue_head_t wq_head; +}; + +struct hqm_cq_intr { + u8 wake; + u8 configured; + /* vector is in the range [0,63] */ + u8 vector; + /* disabled is true if the port is disabled. In that + * case, the driver doesn't allow applications to block on the + * port's interrupt. + */ + u8 disabled; + wait_queue_head_t wq_head; + /* The CQ interrupt mutex guarantees one thread is blocking on a CQ's + * interrupt at a time. + */ + struct mutex mutex; +} __aligned(64); + +struct hqm_intr { + int num_vectors; + int base_vector; + int mode; + u64 packed_vector_bitmap; + /* The PF has more interrupt vectors than the VF, so we + * simply over-allocate in the case of the VF driver + */ + u8 isr_registered[HQM_PF_TOTAL_NUM_INTERRUPT_VECTORS]; + struct hqm_cq_intr ldb_cq_intr[HQM_MAX_NUM_LDB_PORTS]; + struct hqm_cq_intr dir_cq_intr[HQM_MAX_NUM_DIR_PORTS]; + int num_ldb_ports; + int num_dir_ports; +}; + +struct hqm_vma_node { + struct list_head list; + struct vm_area_struct *vma; +}; + struct hqm_dev { int id; struct pci_dev *pdev; + struct hqm_hw hw; + struct cdev cdev; + enum hqm_device_type type; + u8 revision; + struct hqm_device_ops *ops; + dev_t dev_number; struct list_head list; + struct device *hqm_device; + struct hqm_status *status; + struct hqm_domain_dev sched_domains[HQM_MAX_NUM_DOMAINS]; + struct hqm_port_memory ldb_port_mem[HQM_MAX_NUM_LDB_PORTS]; + struct hqm_port_memory dir_port_mem[HQM_MAX_NUM_DIR_PORTS]; + struct list_head vma_list; + /* Number of threads currently executing a file read, mmap, or ioctl. + * Protected by the resource_mutex. + */ + unsigned long active_users; + /* The enqueue_four function enqueues four HCWs (one cache-line worth) + * to the HQM, using whichever mechanism is supported by the platform + * on which this driver is running. 
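+	 * os_enqueue_four_hcws() in hqm_osdep.h calls through this pointer.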
+ */ + void (*enqueue_four)(void *qe4, void __iomem *pp_addr); + struct hqm_intr intr; + u8 domain_reset_failed; + u8 reset_active; + /* The resource mutex serializes access to driver data structures and + * hardware registers. + */ + struct mutex resource_mutex; + /* The measurement waitqueue holds threads sleeping in the measurement + * ioctls. + */ + wait_queue_head_t measurement_wq; + /* The measurement mutex serializes access to performance monitoring + * hardware. + */ + struct mutex measurement_mutex; + /* The alarm ISR mutex ensures only one bottom-half alarm + * handler executes at a time. + */ + struct mutex alarm_isr_mutex; + /* This workqueue thread is responsible for processing all CQ->QID unmap + * requests. + */ + struct workqueue_struct *wq; + struct work_struct work; + u8 worker_launched; }; +int hqm_add_domain_device_file(struct hqm_dev *hqm_dev, u32 domain_id); + /* Each subsequent log level expands on the previous level. */ #define HQM_LOG_LEVEL_NONE 0 #define HQM_LOG_LEVEL_ERR 1 @@ -56,4 +317,8 @@ extern unsigned int hqm_log_level; } \ } while (0) +bool hqm_in_use(struct hqm_dev *dev); +void hqm_stop_users(struct hqm_dev *hqm_dev); +void hqm_zap_vma_entries(struct hqm_dev *hqm_dev); + #endif /* __HQM_MAIN_H */ diff --git a/drivers/misc/hqm/hqm_mbox.h b/drivers/misc/hqm/hqm_mbox.h new file mode 100644 index 00000000000000..051aeae00ab189 --- /dev/null +++ b/drivers/misc/hqm/hqm_mbox.h @@ -0,0 +1,670 @@ +/* SPDX-License-Identifier: GPL-2.0-only + * Copyright(c) 2016-2019 Intel Corporation + */ + +#ifndef __HQM_BASE_HQM_MBOX_H +#define __HQM_BASE_HQM_MBOX_H + +#include "hqm_regs.h" +#include "hqm_osdep_types.h" + +#define HQM_MBOX_INTERFACE_VERSION 1 + +/* The PF uses its PF->VF mailbox to send responses to VF requests, as well as + * to send requests of its own (e.g. notifying a VF of an impending FLR). + * To avoid communication race conditions, e.g. the PF sends a response and then + * sends a request before the VF reads the response, the PF->VF mailbox is + * divided into two sections: + * - Bytes 0-47: PF responses + * - Bytes 48-63: PF requests + * + * Partitioning the PF->VF mailbox allows responses and requests to occupy the + * mailbox simultaneously. 
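+ * The *_BASE_WORD macros below express the same byte offsets as 4-byte
+ * word indexes.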
+ */ +#define HQM_PF2VF_RESP_BYTES 48 +#define HQM_PF2VF_RESP_BASE 0 +#define HQM_PF2VF_RESP_BASE_WORD (HQM_PF2VF_RESP_BASE / 4) + +#define HQM_PF2VF_REQ_BYTES \ + (HQM_FUNC_PF_PF2VF_MAILBOX_BYTES - HQM_PF2VF_RESP_BYTES) +#define HQM_PF2VF_REQ_BASE HQM_PF2VF_RESP_BYTES +#define HQM_PF2VF_REQ_BASE_WORD (HQM_PF2VF_REQ_BASE / 4) + +/* Similarly, the VF->PF mailbox is divided into two sections: + * - Bytes 0-239: VF requests + * - Bytes 240-255: VF responses + */ +#define HQM_VF2PF_REQ_BYTES 240 +#define HQM_VF2PF_REQ_BASE 0 +#define HQM_VF2PF_REQ_BASE_WORD (HQM_VF2PF_REQ_BASE / 4) + +#define HQM_VF2PF_RESP_BYTES \ + (HQM_FUNC_VF_VF2PF_MAILBOX_BYTES - HQM_VF2PF_REQ_BYTES) +#define HQM_VF2PF_RESP_BASE HQM_VF2PF_REQ_BYTES +#define HQM_VF2PF_RESP_BASE_WORD (HQM_VF2PF_RESP_BASE / 4) + +/* VF-initiated commands */ +enum hqm_mbox_cmd_type { + HQM_MBOX_CMD_REGISTER, + HQM_MBOX_CMD_UNREGISTER, + HQM_MBOX_CMD_GET_NUM_RESOURCES, + HQM_MBOX_CMD_CREATE_SCHED_DOMAIN, + HQM_MBOX_CMD_RESET_SCHED_DOMAIN, + HQM_MBOX_CMD_CREATE_LDB_POOL, + HQM_MBOX_CMD_CREATE_DIR_POOL, + HQM_MBOX_CMD_CREATE_LDB_QUEUE, + HQM_MBOX_CMD_CREATE_DIR_QUEUE, + HQM_MBOX_CMD_CREATE_LDB_PORT, + HQM_MBOX_CMD_CREATE_DIR_PORT, + HQM_MBOX_CMD_ENABLE_LDB_PORT, + HQM_MBOX_CMD_DISABLE_LDB_PORT, + HQM_MBOX_CMD_ENABLE_DIR_PORT, + HQM_MBOX_CMD_DISABLE_DIR_PORT, + HQM_MBOX_CMD_LDB_PORT_OWNED_BY_DOMAIN, + HQM_MBOX_CMD_DIR_PORT_OWNED_BY_DOMAIN, + HQM_MBOX_CMD_MAP_QID, + HQM_MBOX_CMD_UNMAP_QID, + HQM_MBOX_CMD_START_DOMAIN, + HQM_MBOX_CMD_ENABLE_LDB_PORT_INTR, + HQM_MBOX_CMD_ENABLE_DIR_PORT_INTR, + HQM_MBOX_CMD_ARM_CQ_INTR, + HQM_MBOX_CMD_GET_NUM_USED_RESOURCES, + HQM_MBOX_CMD_INIT_CQ_SCHED_COUNT, + HQM_MBOX_CMD_COLLECT_CQ_SCHED_COUNT, + HQM_MBOX_CMD_ACK_VF_FLR_DONE, + HQM_MBOX_CMD_GET_SN_ALLOCATION, + HQM_MBOX_CMD_GET_LDB_QUEUE_DEPTH, + HQM_MBOX_CMD_GET_DIR_QUEUE_DEPTH, + HQM_MBOX_CMD_PENDING_PORT_UNMAPS, + HQM_MBOX_CMD_QUERY_CQ_POLL_MODE, + HQM_MBOX_CMD_GET_SN_OCCUPANCY, + + /* NUM_QE_CMD_TYPES must be last */ + NUM_HQM_MBOX_CMD_TYPES, +}; + +static const char hqm_mbox_cmd_type_strings[][128] = { + "HQM_MBOX_CMD_REGISTER", + "HQM_MBOX_CMD_UNREGISTER", + "HQM_MBOX_CMD_GET_NUM_RESOURCES", + "HQM_MBOX_CMD_CREATE_SCHED_DOMAIN", + "HQM_MBOX_CMD_RESET_SCHED_DOMAIN", + "HQM_MBOX_CMD_CREATE_LDB_POOL", + "HQM_MBOX_CMD_CREATE_DIR_POOL", + "HQM_MBOX_CMD_CREATE_LDB_QUEUE", + "HQM_MBOX_CMD_CREATE_DIR_QUEUE", + "HQM_MBOX_CMD_CREATE_LDB_PORT", + "HQM_MBOX_CMD_CREATE_DIR_PORT", + "HQM_MBOX_CMD_ENABLE_LDB_PORT", + "HQM_MBOX_CMD_DISABLE_LDB_PORT", + "HQM_MBOX_CMD_ENABLE_DIR_PORT", + "HQM_MBOX_CMD_DISABLE_DIR_PORT", + "HQM_MBOX_CMD_LDB_PORT_OWNED_BY_DOMAIN", + "HQM_MBOX_CMD_DIR_PORT_OWNED_BY_DOMAIN", + "HQM_MBOX_CMD_MAP_QID", + "HQM_MBOX_CMD_UNMAP_QID", + "HQM_MBOX_CMD_START_DOMAIN", + "HQM_MBOX_CMD_ENABLE_LDB_PORT_INTR", + "HQM_MBOX_CMD_ENABLE_DIR_PORT_INTR", + "HQM_MBOX_CMD_ARM_CQ_INTR", + "HQM_MBOX_CMD_GET_NUM_USED_RESOURCES", + "HQM_MBOX_CMD_INIT_CQ_SCHED_COUNT", + "HQM_MBOX_CMD_COLLECT_CQ_SCHED_COUNT", + "HQM_MBOX_CMD_ACK_VF_FLR_DONE", + "HQM_MBOX_CMD_GET_SN_ALLOCATION", + "HQM_MBOX_CMD_GET_LDB_QUEUE_DEPTH", + "HQM_MBOX_CMD_GET_DIR_QUEUE_DEPTH", + "HQM_MBOX_CMD_PENDING_PORT_UNMAPS", + "HQM_MBOX_CMD_QUERY_CQ_POLL_MODE", + "HQM_MBOX_CMD_GET_SN_OCCUPANCY", +}; + +/* PF-initiated commands */ +enum hqm_mbox_vf_cmd_type { + HQM_MBOX_VF_CMD_DOMAIN_ALERT, + HQM_MBOX_VF_CMD_NOTIFICATION, + HQM_MBOX_VF_CMD_IN_USE, + + /* NUM_HQM_MBOX_VF_CMD_TYPES must be last */ + NUM_HQM_MBOX_VF_CMD_TYPES, +}; + +static const char hqm_mbox_vf_cmd_type_strings[][128] = { + 
"HQM_MBOX_VF_CMD_DOMAIN_ALERT", + "HQM_MBOX_VF_CMD_NOTIFICATION", + "HQM_MBOX_VF_CMD_IN_USE", +}; + +#define HQM_MBOX_CMD_TYPE(hdr) \ + (((struct hqm_mbox_req_hdr *)hdr)->type) +#define HQM_MBOX_CMD_STRING(hdr) \ + hqm_mbox_cmd_type_strings[HQM_MBOX_CMD_TYPE(hdr)] + +enum hqm_mbox_status_type { + HQM_MBOX_ST_SUCCESS, + HQM_MBOX_ST_INVALID_CMD_TYPE, + HQM_MBOX_ST_VERSION_MISMATCH, + HQM_MBOX_ST_EXPECTED_PHASE_ONE, + HQM_MBOX_ST_EXPECTED_PHASE_TWO, + HQM_MBOX_ST_INVALID_OWNER_VF, +}; + +static const char hqm_mbox_status_type_strings[][128] = { + "HQM_MBOX_ST_SUCCESS", + "HQM_MBOX_ST_INVALID_CMD_TYPE", + "HQM_MBOX_ST_VERSION_MISMATCH", + "HQM_MBOX_ST_EXPECTED_PHASE_ONE", + "HQM_MBOX_ST_EXPECTED_PHASE_TWO", + "HQM_MBOX_ST_INVALID_OWNER_VF", +}; + +#define HQM_MBOX_ST_TYPE(hdr) \ + (((struct hqm_mbox_resp_hdr *)hdr)->status) +#define HQM_MBOX_ST_STRING(hdr) \ + hqm_mbox_status_type_strings[HQM_MBOX_ST_TYPE(hdr)] + +/* This structure is always the first field in a request structure */ +struct hqm_mbox_req_hdr { + u32 type; +}; + +/* This structure is always the first field in a response structure */ +struct hqm_mbox_resp_hdr { + u32 status; +}; + +struct hqm_mbox_register_cmd_req { + struct hqm_mbox_req_hdr hdr; + u32 interface_version; +}; + +struct hqm_mbox_register_cmd_resp { + struct hqm_mbox_resp_hdr hdr; + u32 interface_version; + u8 pf_id; + u8 vf_id; + u8 is_auxiliary_vf; + u8 primary_vf_id; + u32 padding; +}; + +struct hqm_mbox_unregister_cmd_req { + struct hqm_mbox_req_hdr hdr; + u32 padding; +}; + +struct hqm_mbox_unregister_cmd_resp { + struct hqm_mbox_resp_hdr hdr; + u32 padding; +}; + +struct hqm_mbox_get_num_resources_cmd_req { + struct hqm_mbox_req_hdr hdr; + u32 padding; +}; + +struct hqm_mbox_get_num_resources_cmd_resp { + struct hqm_mbox_resp_hdr hdr; + u32 error_code; + u16 num_sched_domains; + u16 num_ldb_queues; + u16 num_ldb_ports; + u16 num_dir_ports; + u16 padding0; + u8 num_ldb_credit_pools; + u8 num_dir_credit_pools; + u32 num_atomic_inflights; + u32 max_contiguous_atomic_inflights; + u32 num_hist_list_entries; + u32 max_contiguous_hist_list_entries; + u16 num_ldb_credits; + u16 max_contiguous_ldb_credits; + u16 num_dir_credits; + u16 max_contiguous_dir_credits; + u32 padding1; +}; + +struct hqm_mbox_create_sched_domain_cmd_req { + struct hqm_mbox_req_hdr hdr; + u32 num_ldb_queues; + u32 num_ldb_ports; + u32 num_dir_ports; + u32 num_atomic_inflights; + u32 num_hist_list_entries; + u32 num_ldb_credits; + u32 num_dir_credits; + u32 num_ldb_credit_pools; + u32 num_dir_credit_pools; + char name[HQM_MAX_NAME_LEN]; +}; + +struct hqm_mbox_create_sched_domain_cmd_resp { + struct hqm_mbox_resp_hdr hdr; + u32 error_code; + u32 status; + u32 id; +}; + +struct hqm_mbox_reset_sched_domain_cmd_req { + struct hqm_mbox_req_hdr hdr; + u32 id; +}; + +struct hqm_mbox_reset_sched_domain_cmd_resp { + struct hqm_mbox_resp_hdr hdr; + u32 error_code; +}; + +struct hqm_mbox_create_credit_pool_cmd_req { + struct hqm_mbox_req_hdr hdr; + u32 domain_id; + u32 num_credits; + u32 padding; +}; + +struct hqm_mbox_create_credit_pool_cmd_resp { + struct hqm_mbox_resp_hdr hdr; + u32 error_code; + u32 status; + u32 id; +}; + +struct hqm_mbox_create_ldb_queue_cmd_req { + struct hqm_mbox_req_hdr hdr; + u32 domain_id; + u32 num_sequence_numbers; + u32 num_qid_inflights; + u32 num_atomic_inflights; + u32 padding; + char name[HQM_MAX_NAME_LEN]; +}; + +struct hqm_mbox_create_ldb_queue_cmd_resp { + struct hqm_mbox_resp_hdr hdr; + u32 error_code; + u32 status; + u32 id; +}; + +struct 
hqm_mbox_create_dir_queue_cmd_req { + struct hqm_mbox_req_hdr hdr; + u32 domain_id; + u32 port_id; + u32 padding0; +}; + +struct hqm_mbox_create_dir_queue_cmd_resp { + struct hqm_mbox_resp_hdr hdr; + u32 error_code; + u32 status; + u32 id; +}; + +struct hqm_mbox_create_ldb_port_cmd_req { + struct hqm_mbox_req_hdr hdr; + u32 domain_id; + u32 ldb_credit_pool_id; + u32 dir_credit_pool_id; + u64 pop_count_address; + u16 ldb_credit_high_watermark; + u16 ldb_credit_low_watermark; + u16 ldb_credit_quantum; + u16 dir_credit_high_watermark; + u16 dir_credit_low_watermark; + u16 dir_credit_quantum; + u32 padding0; + u16 cq_depth; + u16 cq_history_list_size; + u32 padding1; + u64 cq_base_address; + u64 nq_base_address; + u32 nq_size; + u32 padding2; +}; + +struct hqm_mbox_create_ldb_port_cmd_resp { + struct hqm_mbox_resp_hdr hdr; + u32 error_code; + u32 status; + u32 id; +}; + +struct hqm_mbox_create_dir_port_cmd_req { + struct hqm_mbox_req_hdr hdr; + u32 domain_id; + u32 ldb_credit_pool_id; + u32 dir_credit_pool_id; + u64 pop_count_address; + u16 ldb_credit_high_watermark; + u16 ldb_credit_low_watermark; + u16 ldb_credit_quantum; + u16 dir_credit_high_watermark; + u16 dir_credit_low_watermark; + u16 dir_credit_quantum; + u16 cq_depth; + u16 padding0; + u64 cq_base_address; + s32 queue_id; + u32 padding1; +}; + +struct hqm_mbox_create_dir_port_cmd_resp { + struct hqm_mbox_resp_hdr hdr; + u32 error_code; + u32 status; + u32 id; +}; + +struct hqm_mbox_enable_ldb_port_cmd_req { + struct hqm_mbox_req_hdr hdr; + u32 domain_id; + u32 port_id; + u32 padding; +}; + +struct hqm_mbox_enable_ldb_port_cmd_resp { + struct hqm_mbox_resp_hdr hdr; + u32 error_code; + u32 status; + u32 padding; +}; + +struct hqm_mbox_disable_ldb_port_cmd_req { + struct hqm_mbox_req_hdr hdr; + u32 domain_id; + u32 port_id; + u32 padding; +}; + +struct hqm_mbox_disable_ldb_port_cmd_resp { + struct hqm_mbox_resp_hdr hdr; + u32 error_code; + u32 status; + u32 padding; +}; + +struct hqm_mbox_enable_dir_port_cmd_req { + struct hqm_mbox_req_hdr hdr; + u32 domain_id; + u32 port_id; + u32 padding; +}; + +struct hqm_mbox_enable_dir_port_cmd_resp { + struct hqm_mbox_resp_hdr hdr; + u32 error_code; + u32 status; + u32 padding; +}; + +struct hqm_mbox_disable_dir_port_cmd_req { + struct hqm_mbox_req_hdr hdr; + u32 domain_id; + u32 port_id; + u32 padding; +}; + +struct hqm_mbox_disable_dir_port_cmd_resp { + struct hqm_mbox_resp_hdr hdr; + u32 error_code; + u32 status; + u32 padding; +}; + +struct hqm_mbox_ldb_port_owned_by_domain_cmd_req { + struct hqm_mbox_req_hdr hdr; + u32 domain_id; + u32 port_id; + u32 padding; +}; + +struct hqm_mbox_ldb_port_owned_by_domain_cmd_resp { + struct hqm_mbox_resp_hdr hdr; + s32 owned; +}; + +struct hqm_mbox_dir_port_owned_by_domain_cmd_req { + struct hqm_mbox_req_hdr hdr; + u32 domain_id; + u32 port_id; + u32 padding; +}; + +struct hqm_mbox_dir_port_owned_by_domain_cmd_resp { + struct hqm_mbox_resp_hdr hdr; + s32 owned; +}; + +struct hqm_mbox_map_qid_cmd_req { + struct hqm_mbox_req_hdr hdr; + u32 domain_id; + u32 port_id; + u32 qid; + u32 priority; + u32 padding0; +}; + +struct hqm_mbox_map_qid_cmd_resp { + struct hqm_mbox_resp_hdr hdr; + u32 error_code; + u32 status; + u32 id; +}; + +struct hqm_mbox_unmap_qid_cmd_req { + struct hqm_mbox_req_hdr hdr; + u32 domain_id; + u32 port_id; + u32 qid; +}; + +struct hqm_mbox_unmap_qid_cmd_resp { + struct hqm_mbox_resp_hdr hdr; + u32 error_code; + u32 status; + u32 padding; +}; + +struct hqm_mbox_start_domain_cmd_req { + struct hqm_mbox_req_hdr hdr; + u32 domain_id; +}; + 
+struct hqm_mbox_start_domain_cmd_resp { + struct hqm_mbox_resp_hdr hdr; + u32 error_code; + u32 status; + u32 padding; +}; + +struct hqm_mbox_enable_ldb_port_intr_cmd_req { + struct hqm_mbox_req_hdr hdr; + u16 port_id; + u16 thresh; + u16 vector; + u16 owner_vf; + u16 reserved[2]; +}; + +struct hqm_mbox_enable_ldb_port_intr_cmd_resp { + struct hqm_mbox_resp_hdr hdr; + u32 error_code; + u32 status; + u32 padding0; +}; + +struct hqm_mbox_enable_dir_port_intr_cmd_req { + struct hqm_mbox_req_hdr hdr; + u16 port_id; + u16 thresh; + u16 vector; + u16 owner_vf; + u16 reserved[2]; +}; + +struct hqm_mbox_enable_dir_port_intr_cmd_resp { + struct hqm_mbox_resp_hdr hdr; + u32 error_code; + u32 status; + u32 padding0; +}; + +struct hqm_mbox_arm_cq_intr_cmd_req { + struct hqm_mbox_req_hdr hdr; + u32 domain_id; + u32 port_id; + u32 is_ldb; +}; + +struct hqm_mbox_arm_cq_intr_cmd_resp { + struct hqm_mbox_resp_hdr hdr; + u32 error_code; + u32 status; + u32 padding0; +}; + +/* The alert_id and aux_alert_data follows the format of the alerts defined in + * hqm_types.h. The alert id contains an enum hqm_domain_alert_id value, and + * the aux_alert_data value varies depending on the alert. + */ +struct hqm_mbox_vf_alert_cmd_req { + struct hqm_mbox_req_hdr hdr; + u32 domain_id; + u32 alert_id; + u32 aux_alert_data; +}; + +enum hqm_mbox_vf_notification_type { + HQM_MBOX_VF_NOTIFICATION_PRE_RESET, + HQM_MBOX_VF_NOTIFICATION_POST_RESET, + + /* NUM_HQM_MBOX_VF_NOTIFICATION_TYPES must be last */ + NUM_HQM_MBOX_VF_NOTIFICATION_TYPES, +}; + +struct hqm_mbox_vf_notification_cmd_req { + struct hqm_mbox_req_hdr hdr; + u32 notification; +}; + +struct hqm_mbox_vf_in_use_cmd_req { + struct hqm_mbox_req_hdr hdr; + u32 padding; +}; + +struct hqm_mbox_vf_in_use_cmd_resp { + struct hqm_mbox_resp_hdr hdr; + u32 in_use; +}; + +struct hqm_mbox_init_cq_sched_count_measure_cmd_req { + struct hqm_mbox_req_hdr hdr; + u32 duration_us; +}; + +struct hqm_mbox_init_cq_sched_count_measure_cmd_resp { + struct hqm_mbox_resp_hdr hdr; + u32 error_code; +}; + +struct hqm_mbox_collect_cq_sched_count_cmd_req { + struct hqm_mbox_req_hdr hdr; + u16 cq_id; + u16 is_ldb; +}; + +struct hqm_mbox_collect_cq_sched_count_cmd_resp { + struct hqm_mbox_resp_hdr hdr; + u32 error_code; + u32 elapsed; + u32 padding; + u64 cq_sched_count; +}; + +struct hqm_mbox_ack_vf_flr_done_cmd_req { + struct hqm_mbox_req_hdr hdr; + u32 padding; +}; + +struct hqm_mbox_ack_vf_flr_done_cmd_resp { + struct hqm_mbox_resp_hdr hdr; + u32 error_code; + u32 status; + u32 padding; +}; + +struct hqm_mbox_get_sn_allocation_cmd_req { + struct hqm_mbox_req_hdr hdr; + u32 group_id; +}; + +struct hqm_mbox_get_sn_allocation_cmd_resp { + struct hqm_mbox_resp_hdr hdr; + u32 num; +}; + +struct hqm_mbox_get_ldb_queue_depth_cmd_req { + struct hqm_mbox_req_hdr hdr; + u32 domain_id; + u32 queue_id; + u32 padding; +}; + +struct hqm_mbox_get_ldb_queue_depth_cmd_resp { + struct hqm_mbox_resp_hdr hdr; + u32 error_code; + u32 status; + u32 depth; +}; + +struct hqm_mbox_get_dir_queue_depth_cmd_req { + struct hqm_mbox_req_hdr hdr; + u32 domain_id; + u32 queue_id; + u32 padding; +}; + +struct hqm_mbox_get_dir_queue_depth_cmd_resp { + struct hqm_mbox_resp_hdr hdr; + u32 error_code; + u32 status; + u32 depth; +}; + +struct hqm_mbox_pending_port_unmaps_cmd_req { + struct hqm_mbox_req_hdr hdr; + u32 domain_id; + u32 port_id; + u32 padding; +}; + +struct hqm_mbox_pending_port_unmaps_cmd_resp { + struct hqm_mbox_resp_hdr hdr; + u32 error_code; + u32 status; + u32 num; +}; + +struct 
hqm_mbox_query_cq_poll_mode_cmd_req { + struct hqm_mbox_req_hdr hdr; + u32 padding; +}; + +struct hqm_mbox_query_cq_poll_mode_cmd_resp { + struct hqm_mbox_resp_hdr hdr; + u32 error_code; + u32 status; + u32 mode; +}; + +struct hqm_mbox_get_sn_occupancy_cmd_req { + struct hqm_mbox_req_hdr hdr; + u32 group_id; +}; + +struct hqm_mbox_get_sn_occupancy_cmd_resp { + struct hqm_mbox_resp_hdr hdr; + u32 num; +}; + +#endif /* __HQM_BASE_HQM_MBOX_H */ diff --git a/drivers/misc/hqm/hqm_mem.c b/drivers/misc/hqm/hqm_mem.c new file mode 100644 index 00000000000000..3ab1b6dd2e4606 --- /dev/null +++ b/drivers/misc/hqm/hqm_mem.c @@ -0,0 +1,67 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright(c) 2017-2019 Intel Corporation */ + +#include +#include +#include + +#include "hqm_main.h" +#include + +#include "hqm_mem.h" + +void hqm_release_domain_memory(struct hqm_dev *dev, u32 domain_id) +{ + struct hqm_port_memory *port_mem; + struct hqm_domain_dev *domain; + int i; + + domain = &dev->sched_domains[domain_id]; + + HQM_INFO(dev->hqm_device, + "Releasing pages pinned for domain %d\n", domain_id); + + for (i = 0; i < HQM_MAX_NUM_LDB_PORTS; i++) { + port_mem = &dev->ldb_port_mem[i]; + + if (port_mem->domain_id == domain_id && port_mem->valid) { + dma_free_coherent(&dev->pdev->dev, + HQM_LDB_CQ_MAX_SIZE, + port_mem->cq_base, + port_mem->cq_dma_base); + + dma_free_coherent(&dev->pdev->dev, + PAGE_SIZE, + port_mem->pc_base, + port_mem->pc_dma_base); + + port_mem->valid = false; + } + } + + for (i = 0; i < HQM_MAX_NUM_DIR_PORTS; i++) { + port_mem = &dev->dir_port_mem[i]; + + if (port_mem->domain_id == domain_id && port_mem->valid) { + dma_free_coherent(&dev->pdev->dev, + HQM_DIR_CQ_MAX_SIZE, + port_mem->cq_base, + port_mem->cq_dma_base); + + dma_free_coherent(&dev->pdev->dev, + PAGE_SIZE, + port_mem->pc_base, + port_mem->pc_dma_base); + + port_mem->valid = false; + } + } +} + +void hqm_release_device_memory(struct hqm_dev *dev) +{ + int i; + + for (i = 0; i < HQM_MAX_NUM_DOMAINS; i++) + hqm_release_domain_memory(dev, i); +} diff --git a/drivers/misc/hqm/hqm_mem.h b/drivers/misc/hqm/hqm_mem.h new file mode 100644 index 00000000000000..74f87983ac759b --- /dev/null +++ b/drivers/misc/hqm/hqm_mem.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0-only + * Copyright(c) 2017-2019 Intel Corporation + */ + +#ifndef __HQM_MEM_H +#define __HQM_MEM_H + +#include "hqm_main.h" +#include + +void hqm_release_domain_memory(struct hqm_dev *dev, u32 domain_id); + +void hqm_release_device_memory(struct hqm_dev *dev); + +#endif /* __HQM_MEM_H */ diff --git a/drivers/misc/hqm/hqm_osdep.h b/drivers/misc/hqm/hqm_osdep.h new file mode 100644 index 00000000000000..57ee3ff60bd13f --- /dev/null +++ b/drivers/misc/hqm/hqm_osdep.h @@ -0,0 +1,320 @@ +/* SPDX-License-Identifier: GPL-2.0-only + * Copyright(c) 2017-2019 Intel Corporation + */ + +#ifndef __HQM_OSDEP_H +#define __HQM_OSDEP_H + +#include +#include +#include +#include +#include +#include +#include "hqm_main.h" +#include "hqm_resource.h" +#include + +#define HQM_PCI_REG_READ(addr) ioread32(addr) +#define HQM_PCI_REG_WRITE(reg, value) iowrite32(value, reg) + +/* Read/write register 'reg' in the CSR BAR space */ +#define HQM_CSR_REG_ADDR(a, reg) ((a)->csr_kva + (reg)) +#define HQM_CSR_RD(hw, reg) \ + HQM_PCI_REG_READ(HQM_CSR_REG_ADDR((hw), (reg))) +#define HQM_CSR_WR(hw, reg, value) \ + HQM_PCI_REG_WRITE(HQM_CSR_REG_ADDR((hw), (reg)), (value)) + +/* Read/write register 'reg' in the func BAR space */ +#define HQM_FUNC_REG_ADDR(a, reg) ((a)->func_kva + (reg)) +#define 
HQM_FUNC_RD(hw, reg) \ + HQM_PCI_REG_READ(HQM_FUNC_REG_ADDR((hw), (reg))) +#define HQM_FUNC_WR(hw, reg, value) \ + HQM_PCI_REG_WRITE(HQM_FUNC_REG_ADDR((hw), (reg)), (value)) + +/* Macros that prevent the compiler from optimizing away memory accesses */ +#define OS_READ_ONCE(x) READ_ONCE(x) +#define OS_WRITE_ONCE(x, y) WRITE_ONCE(x, y) + +/** + * os_udelay() - busy-wait for a number of microseconds + * @usecs: delay duration. + */ +static inline void os_udelay(int usecs) +{ + udelay(usecs); +} + +/** + * os_msleep() - sleep for a number of milliseconds + * @usecs: delay duration. + */ +static inline void os_msleep(int msecs) +{ + msleep(msecs); +} + +/** + * os_map_producer_port() - map a producer port into the caller's address space + * @hw: hqm_hw handle for a particular device. + * @port_id: port ID + * @is_ldb: true for load-balanced port, false for a directed port + * + * This function maps the requested producer port memory into the caller's + * address space. + * + * Return: + * Returns the base address at which the PP memory was mapped, else NULL. + */ +static inline void __iomem *os_map_producer_port(struct hqm_hw *hw, + u8 port_id, + bool is_ldb) +{ + struct hqm_dev *hqm_dev; + unsigned long size; + uintptr_t address; + + hqm_dev = container_of(hw, struct hqm_dev, hw); + + address = hqm_dev->hw.func_phys_addr; + + if (is_ldb) { + address += HQM_LDB_PP_BASE + (HQM_LDB_PP_STRIDE * port_id); + size = HQM_LDB_PP_STRIDE; + } else { + address += HQM_DIR_PP_BASE + (HQM_DIR_PP_STRIDE * port_id); + size = HQM_DIR_PP_STRIDE; + } + + return devm_ioremap(&hqm_dev->pdev->dev, address, size); +} + +/** + * os_unmap_producer_port() - unmap a producer port + * @addr: mapped producer port address + * + * This function undoes os_map_producer_port() by unmapping the producer port + * memory from the caller's address space. + * + * Return: + * Returns the base address at which the PP memory was mapped, else NULL. + */ +static inline void os_unmap_producer_port(struct hqm_hw *hw, void __iomem *addr) +{ + struct hqm_dev *hqm_dev; + + hqm_dev = container_of(hw, struct hqm_dev, hw); + + devm_iounmap(&hqm_dev->pdev->dev, addr); +} + +/** + * os_enqueue_four_hcws() - enqueue four HCWs to HQM + * @hw: hqm_hw handle for a particular device. + * @hcw: pointer to the 64B-aligned contiguous HCW memory + * @addr: producer port address + */ +static inline void os_enqueue_four_hcws(struct hqm_hw *hw, + struct hqm_hcw *hcw, + void __iomem *addr) +{ + struct hqm_dev *hqm_dev; + + hqm_dev = container_of(hw, struct hqm_dev, hw); + + hqm_dev->enqueue_four(hcw, addr); +} + +/** + * os_fence_hcw() - fence an HCW to ensure it arrives at the device + * @hw: hqm_hw handle for a particular device. + * @pp_addr: producer port address + */ +static inline void os_fence_hcw(struct hqm_hw *hw, void __iomem *pp_addr) +{ + /* To ensure outstanding HCWs reach the device, read the PP address. IA + * memory ordering prevents reads from passing older writes, and the + * mfence also ensures this. + */ + mb(); + + READ_ONCE(pp_addr); +} + +/** + * os_notify_user_space() - notify user space + * @hw: hqm_hw handle for a particular device. + * @domain_id: ID of domain to notify. + * @alert_id: alert ID. + * @aux_alert_data: additional alert data. + * + * This function notifies user space of an alert (such as a remote queue + * unregister or hardware alarm). + * + * Return: + * Returns 0 upon success, <0 otherwise. 
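+ * Returns -EINVAL for a VF-owned domain and -ERESTARTSYS if interrupted
+ * while waiting for the alert mutex.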
+ */ +static inline int os_notify_user_space(struct hqm_hw *hw, + u32 domain_id, + u64 alert_id, + u64 aux_alert_data) +{ + struct hqm_domain_dev *domain_dev; + struct hqm_domain_alert alert; + struct hqm_dev *hqm_dev; + + hqm_dev = container_of(hw, struct hqm_dev, hw); + + domain_dev = &hqm_dev->sched_domains[domain_id]; + + if (hw->domains[domain_id].id.vf_owned) { + WARN_ON(hw->domains[domain_id].id.vf_owned); + return -EINVAL; + } + + /* Grab the alert mutex to access the read and write indexes */ + if (mutex_lock_interruptible(&domain_dev->alert_mutex)) + return -ERESTARTSYS; + + /* If there's no space for this notification, return */ + if ((domain_dev->alert_wr_idx - domain_dev->alert_rd_idx) == + (HQM_DOMAIN_ALERT_RING_SIZE - 1)) { + mutex_unlock(&domain_dev->alert_mutex); + return 0; + } + + alert.alert_id = alert_id; + alert.aux_alert_data = aux_alert_data; + + domain_dev->alerts[domain_dev->alert_wr_idx++] = alert; + + mutex_unlock(&domain_dev->alert_mutex); + + /* Wake any blocked readers */ + wake_up_interruptible(&domain_dev->wq_head); + + return 0; +} + +/** + * HQM_BASE_ERR() - log an error message + * @hqm: hqm_hw handle for a particular device. + * @...: variable string args. + */ +#define HQM_BASE_ERR(hqm, ...) do { \ + struct hqm_dev *dev; \ + dev = container_of(hqm, struct hqm_dev, hw); \ + HQM_ERR(dev->hqm_device, __VA_ARGS__); \ +} while (0) + +/** + * HQM_BASE_INFO() - log an info message + * @hqm: hqm_hw handle for a particular device. + * @...: variable string args. + */ +#define HQM_BASE_INFO(hqm, ...) do { \ + struct hqm_dev *dev; \ + dev = container_of(hqm, struct hqm_dev, hw); \ + HQM_INFO(dev->hqm_device, __VA_ARGS__); \ +} while (0) + +/*** Workqueue scheduling functions ***/ + +/* The workqueue callback runs until it completes all outstanding QID->CQ + * map and unmap requests. To prevent deadlock, this function gives other + * threads a chance to grab the resource mutex and configure hardware. + */ +static void hqm_complete_queue_map_unmap(struct work_struct *work) +{ + struct hqm_dev *hqm_dev; + int ret; + + hqm_dev = container_of(work, struct hqm_dev, work); + + mutex_lock(&hqm_dev->resource_mutex); + + ret = hqm_finish_unmap_qid_procedures(&hqm_dev->hw); + ret += hqm_finish_map_qid_procedures(&hqm_dev->hw); + + if (ret != 0) + /* Relinquish the CPU so the application can process its CQs, + * so this function doesn't deadlock. + */ + queue_work(hqm_dev->wq, &hqm_dev->work); + else + hqm_dev->worker_launched = false; + + mutex_unlock(&hqm_dev->resource_mutex); +} + +/** + * os_schedule_work() - launch a thread to process pending map and unmap work + * @hw: hqm_hw handle for a particular device. + * + * This function launches a kernel thread that will run until all pending + * map and unmap procedures are complete. + */ +static inline void os_schedule_work(struct hqm_hw *hw) +{ + struct hqm_dev *hqm_dev; + + hqm_dev = container_of(hw, struct hqm_dev, hw); + + INIT_WORK(&hqm_dev->work, hqm_complete_queue_map_unmap); + + queue_work(hqm_dev->wq, &hqm_dev->work); + + hqm_dev->worker_launched = true; +} + +/** + * os_worker_active() - query whether the map/unmap worker thread is active + * @hw: hqm_hw handle for a particular device. + * + * This function returns a boolean indicating whether a thread (launched by + * os_schedule_work()) is active. This function is used to determine + * whether or not to launch a worker thread. 
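+ * The flag is set in os_schedule_work() and cleared by the work handler
+ * once all outstanding map/unmap requests have been processed.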
+ */ +static inline bool os_worker_active(struct hqm_hw *hw) +{ + struct hqm_dev *hqm_dev; + + hqm_dev = container_of(hw, struct hqm_dev, hw); + + return hqm_dev->worker_launched; +} + +enum hqm_dev_revision { + HQM_A0, + HQM_A1, + HQM_A2, + HQM_A3, + HQM_B0, +}; + +/** + * os_get_dev_revision() - query the device_revision + * @hw: hqm_hw handle for a particular device. + */ +static inline enum hqm_dev_revision os_get_dev_revision(struct hqm_hw *hw) +{ + struct hqm_dev *hqm_dev = container_of(hw, struct hqm_dev, hw); + + if (hqm_dev->revision == HQM_REV_A0) + return HQM_A0; + if (hqm_dev->revision == HQM_REV_A1) + return HQM_A1; + if (hqm_dev->revision == HQM_REV_A2) + return HQM_A2; + if (hqm_dev->revision == HQM_REV_A3) + return HQM_A3; + if (hqm_dev->revision == HQM_REV_B0) + return HQM_B0; + + /* Internal error. Report it and assume the most limited revision. */ + HQM_BASE_ERR(hw, "Internal error: invalid device revision\n"); + + return HQM_A0; +} + +#endif /* __HQM_OSDEP_H */ diff --git a/drivers/misc/hqm/hqm_osdep_bitmap.h b/drivers/misc/hqm/hqm_osdep_bitmap.h new file mode 100644 index 00000000000000..5f4f903092a34d --- /dev/null +++ b/drivers/misc/hqm/hqm_osdep_bitmap.h @@ -0,0 +1,403 @@ +/* SPDX-License-Identifier: GPL-2.0-only + * Copyright(c) 2017-2019 Intel Corporation + */ + +#ifndef __HQM_OSDEP_BITMAP_H +#define __HQM_OSDEP_BITMAP_H + +#include +#include +#include "hqm_main.h" + +/*************************/ +/*** Bitmap operations ***/ +/*************************/ +struct hqm_bitmap { + unsigned long *map; + unsigned int len; + struct hqm_hw *hw; +}; + +/** + * hqm_bitmap_alloc() - alloc a bitmap data structure + * @bitmap: pointer to hqm_bitmap structure pointer. + * @len: number of entries in the bitmap. + * + * This function allocates a bitmap and initializes it with length @len. All + * entries are initially zero. + * + * Return: + * Returns 0 upon success, < 0 otherwise. + * + * Errors: + * EINVAL - bitmap is NULL or len is 0. + * ENOMEM - could not allocate memory for the bitmap data structure. + */ +static inline int hqm_bitmap_alloc(struct hqm_hw *hw, + struct hqm_bitmap **bitmap, + unsigned int len) +{ + struct hqm_bitmap *bm; + struct hqm_dev *dev; + + dev = container_of(hw, struct hqm_dev, hw); + + if (!bitmap || len == 0) + return -EINVAL; + + bm = devm_kmalloc(&dev->pdev->dev, + sizeof(struct hqm_bitmap), + GFP_KERNEL); + if (!bm) + return -ENOMEM; + + bm->map = devm_kmalloc_array(&dev->pdev->dev, + BITS_TO_LONGS(len), + sizeof(unsigned long), + GFP_KERNEL); + if (!bm->map) + return -ENOMEM; + + bm->len = len; + bm->hw = hw; + + *bitmap = bm; + + return 0; +} + +/** + * hqm_bitmap_free() - free a previously allocated bitmap data structure + * @bitmap: pointer to hqm_bitmap structure. + * + * This function frees a bitmap that was allocated with hqm_bitmap_alloc(). + */ +static inline void hqm_bitmap_free(struct hqm_bitmap *bitmap) +{ + struct hqm_dev *dev; + + if (!bitmap) + return; + + dev = container_of(bitmap->hw, struct hqm_dev, hw); + + devm_kfree(&dev->pdev->dev, bitmap->map); + + devm_kfree(&dev->pdev->dev, bitmap); +} + +/** + * hqm_bitmap_fill() - fill a bitmap with all 1s + * @bitmap: pointer to hqm_bitmap structure. + * + * This function sets all bitmap values to 1. + * + * Return: + * Returns 0 upon success, < 0 otherwise. + * + * Errors: + * EINVAL - bitmap is NULL or is uninitialized. 
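+ *
+ * Example (illustrative sketch only): allocate a 64-entry bitmap, set every
+ * bit, then release it:
+ *
+ *	struct hqm_bitmap *bm;
+ *	int ret;
+ *
+ *	ret = hqm_bitmap_alloc(hw, &bm, 64);
+ *	if (ret)
+ *		return ret;
+ *	hqm_bitmap_fill(bm);
+ *	hqm_bitmap_free(bm);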
+ */
+static inline int hqm_bitmap_fill(struct hqm_bitmap *bitmap)
+{
+	if (!bitmap || !bitmap->map)
+		return -EINVAL;
+
+	bitmap_fill(bitmap->map, bitmap->len);
+
+	return 0;
+}
+
+/**
+ * hqm_bitmap_zero() - fill a bitmap with all 0s
+ * @bitmap: pointer to hqm_bitmap structure.
+ *
+ * This function sets all bitmap values to 0.
+ *
+ * Return:
+ * Returns 0 upon success, < 0 otherwise.
+ *
+ * Errors:
+ * EINVAL - bitmap is NULL or is uninitialized.
+ */
+static inline int hqm_bitmap_zero(struct hqm_bitmap *bitmap)
+{
+	if (!bitmap || !bitmap->map)
+		return -EINVAL;
+
+	bitmap_zero(bitmap->map, bitmap->len);
+
+	return 0;
+}
+
+/**
+ * hqm_bitmap_set() - set a bitmap entry
+ * @bitmap: pointer to hqm_bitmap structure.
+ * @bit: bit index.
+ *
+ * Return:
+ * Returns 0 upon success, < 0 otherwise.
+ *
+ * Errors:
+ * EINVAL - bitmap is NULL or is uninitialized, or bit is larger than the
+ *	    bitmap length.
+ */
+static inline int hqm_bitmap_set(struct hqm_bitmap *bitmap,
+				 unsigned int bit)
+{
+	if (!bitmap || !bitmap->map)
+		return -EINVAL;
+
+	if (bitmap->len <= bit)
+		return -EINVAL;
+
+	bitmap_set(bitmap->map, bit, 1);
+
+	return 0;
+}
+
+/**
+ * hqm_bitmap_set_range() - set a range of bitmap entries
+ * @bitmap: pointer to hqm_bitmap structure.
+ * @bit: starting bit index.
+ * @len: length of the range.
+ *
+ * Return:
+ * Returns 0 upon success, < 0 otherwise.
+ *
+ * Errors:
+ * EINVAL - bitmap is NULL or is uninitialized, or the range exceeds the bitmap
+ *	    length.
+ */
+static inline int hqm_bitmap_set_range(struct hqm_bitmap *bitmap,
+				       unsigned int bit,
+				       unsigned int len)
+{
+	if (!bitmap || !bitmap->map)
+		return -EINVAL;
+
+	if (bitmap->len <= bit)
+		return -EINVAL;
+
+	bitmap_set(bitmap->map, bit, len);
+
+	return 0;
+}
+
+/**
+ * hqm_bitmap_clear() - clear a bitmap entry
+ * @bitmap: pointer to hqm_bitmap structure.
+ * @bit: bit index.
+ *
+ * Return:
+ * Returns 0 upon success, < 0 otherwise.
+ *
+ * Errors:
+ * EINVAL - bitmap is NULL or is uninitialized, or bit is larger than the
+ *	    bitmap length.
+ */
+static inline int hqm_bitmap_clear(struct hqm_bitmap *bitmap,
+				   unsigned int bit)
+{
+	if (!bitmap || !bitmap->map)
+		return -EINVAL;
+
+	if (bitmap->len <= bit)
+		return -EINVAL;
+
+	bitmap_clear(bitmap->map, bit, 1);
+
+	return 0;
+}
+
+/**
+ * hqm_bitmap_clear_range() - clear a range of bitmap entries
+ * @bitmap: pointer to hqm_bitmap structure.
+ * @bit: starting bit index.
+ * @len: length of the range.
+ *
+ * Return:
+ * Returns 0 upon success, < 0 otherwise.
+ *
+ * Errors:
+ * EINVAL - bitmap is NULL or is uninitialized, or the range exceeds the bitmap
+ *	    length.
+ */
+static inline int hqm_bitmap_clear_range(struct hqm_bitmap *bitmap,
+					 unsigned int bit,
+					 unsigned int len)
+{
+	if (!bitmap || !bitmap->map)
+		return -EINVAL;
+
+	if (bitmap->len <= bit)
+		return -EINVAL;
+
+	bitmap_clear(bitmap->map, bit, len);
+
+	return 0;
+}
+
+/**
+ * hqm_bitmap_find_set_bit_range() - find a range of set bits
+ * @bitmap: pointer to hqm_bitmap structure.
+ * @len: length of the range.
+ *
+ * This function looks for a range of set bits of length @len.
+ *
+ * Return:
+ * Returns the base bit index upon success, < 0 otherwise.
+ *
+ * Errors:
+ * ENOENT - unable to find a length *len* range of set bits.
+ * EINVAL - bitmap is NULL or is uninitialized, or len is invalid.
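+ *
+ * Example (illustrative allocator-style sketch; "avail", "num", and "base"
+ * are hypothetical variables): find num contiguous free entries in an
+ * availability bitmap and mark them as used:
+ *
+ *	base = hqm_bitmap_find_set_bit_range(avail, num);
+ *	if (base < 0)
+ *		return base;
+ *	hqm_bitmap_clear_range(avail, base, num);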
+ */
+static inline int hqm_bitmap_find_set_bit_range(struct hqm_bitmap *bitmap,
+						unsigned int len)
+{
+	struct hqm_bitmap *complement_mask = NULL;
+	int ret;
+
+	if (!bitmap || !bitmap->map || len == 0)
+		return -EINVAL;
+
+	if (bitmap->len < len)
+		return -ENOENT;
+
+	ret = hqm_bitmap_alloc(bitmap->hw, &complement_mask, bitmap->len);
+	if (ret)
+		return ret;
+
+	hqm_bitmap_zero(complement_mask);
+
+	bitmap_complement(complement_mask->map, bitmap->map, bitmap->len);
+
+	ret = bitmap_find_next_zero_area(complement_mask->map,
+					 complement_mask->len,
+					 0,
+					 len,
+					 0);
+
+	hqm_bitmap_free(complement_mask);
+
+	/* No set bit range of length len? */
+	return (ret >= (int)bitmap->len) ? -ENOENT : ret;
+}
+
+/**
+ * hqm_bitmap_find_set_bit() - find the first set bit
+ * @bitmap: pointer to hqm_bitmap structure.
+ *
+ * This function looks for a single set bit.
+ *
+ * Return:
+ * Returns the base bit index upon success, < 0 otherwise.
+ *
+ * Errors:
+ * ENOENT - the bitmap contains no set bits.
+ * EINVAL - bitmap is NULL or is uninitialized.
+ */
+static inline int hqm_bitmap_find_set_bit(struct hqm_bitmap *bitmap)
+{
+	return hqm_bitmap_find_set_bit_range(bitmap, 1);
+}
+
+/**
+ * hqm_bitmap_count() - returns the number of set bits
+ * @bitmap: pointer to hqm_bitmap structure.
+ *
+ * This function returns the number of set bits in the bitmap.
+ *
+ * Return:
+ * Returns the number of set bits upon success, <0 otherwise.
+ *
+ * Errors:
+ * EINVAL - bitmap is NULL or is uninitialized.
+ */
+static inline int hqm_bitmap_count(struct hqm_bitmap *bitmap)
+{
+	if (!bitmap || !bitmap->map)
+		return -EINVAL;
+
+	return bitmap_weight(bitmap->map, bitmap->len);
+}
+
+/**
+ * hqm_bitmap_longest_set_range() - returns longest contiguous range of set bits
+ * @bitmap: pointer to hqm_bitmap structure.
+ *
+ * Return:
+ * Returns the bitmap's longest contiguous range of set bits upon success,
+ * <0 otherwise.
+ *
+ * Errors:
+ * EINVAL - bitmap is NULL or is uninitialized.
+ */
+static inline int hqm_bitmap_longest_set_range(struct hqm_bitmap *bitmap)
+{
+	unsigned int bits_per_long;
+	unsigned int i, j;
+	int max_len, len;
+
+	if (!bitmap || !bitmap->map)
+		return -EINVAL;
+
+	if (hqm_bitmap_count(bitmap) == 0)
+		return 0;
+
+	max_len = 0;
+	len = 0;
+	bits_per_long = sizeof(unsigned long) * BITS_PER_BYTE;
+
+	for (i = 0; i < BITS_TO_LONGS(bitmap->len); i++) {
+		for (j = 0; j < bits_per_long; j++) {
+			if ((i * bits_per_long + j) >= bitmap->len)
+				break;
+
+			len = (test_bit(j, &bitmap->map[i])) ? len + 1 : 0;
+
+			if (len > max_len)
+				max_len = len;
+		}
+	}
+
+	return max_len;
+}
+
+/**
+ * hqm_bitmap_or() - store the logical 'or' of two bitmaps into a third
+ * @dest: pointer to hqm_bitmap structure, which will contain the results of
+ *	  the 'or' of src1 and src2.
+ * @src1: pointer to hqm_bitmap structure, will be 'or'ed with src2.
+ * @src2: pointer to hqm_bitmap structure, will be 'or'ed with src1.
+ *
+ * This function 'or's two bitmaps together and stores the result in a third
+ * bitmap. The source and destination bitmaps can be the same.
+ *
+ * Return:
+ * Returns 0 upon success, <0 otherwise.
+ *
+ * Errors:
+ * EINVAL - One of the bitmaps is NULL or is uninitialized.
+ */
+static inline int hqm_bitmap_or(struct hqm_bitmap *dest,
+				struct hqm_bitmap *src1,
+				struct hqm_bitmap *src2)
+{
+	unsigned int min;
+
+	if (!dest || !dest->map ||
+	    !src1 || !src1->map ||
+	    !src2 || !src2->map)
+		return -EINVAL;
+
+	min = dest->len;
+	min = (min > src1->len) ?
src1->len : min; + min = (min > src2->len) ? src2->len : min; + + bitmap_or(dest->map, src1->map, src2->map, min); + + return 0; +} + +#endif /* __HQM_OSDEP_BITMAP_H */ diff --git a/drivers/misc/hqm/hqm_osdep_list.h b/drivers/misc/hqm/hqm_osdep_list.h new file mode 100644 index 00000000000000..26f022f1223941 --- /dev/null +++ b/drivers/misc/hqm/hqm_osdep_list.h @@ -0,0 +1,96 @@ +/* SPDX-License-Identifier: GPL-2.0-only + * Copyright(c) 2017-2019 Intel Corporation + */ + +#ifndef __HQM_OSDEP_LIST_H +#define __HQM_OSDEP_LIST_H + +#include + +/***********************/ +/*** List operations ***/ +/***********************/ +struct hqm_list_head { + struct list_head list_head; +}; + +struct hqm_list_entry { + struct list_head list_head; +}; + +/** + * hqm_list_init_head() - initialize the head of a list + * @head: list head + */ +static inline void hqm_list_init_head(struct hqm_list_head *head) +{ + INIT_LIST_HEAD(&head->list_head); +} + +/** + * hqm_list_add() - add an entry to a list + * @head: list head + * @entry: new list entry + */ +static inline void hqm_list_add(struct hqm_list_head *head, + struct hqm_list_entry *entry) +{ + list_add(&entry->list_head, &head->list_head); +} + +/** + * hqm_list_del() - delete an entry from a list + * @entry: list entry + * @head: list head + */ +static inline void hqm_list_del(struct hqm_list_head __always_unused *head, + struct hqm_list_entry *entry) +{ + list_del(&entry->list_head); +} + +/** + * hqm_list_empty() - check if a list is empty + * @head: list head + * + * Return: + * Returns 1 if empty, 0 if not. + */ +static inline int hqm_list_empty(struct hqm_list_head *head) +{ + return list_empty(&head->list_head); +} + +/** + * HQM_LIST_HEAD() - retrieve the head of the list + * @head: list head + * @type: type of the list variable + * @name: name of the hqm_list_entry field within the containing struct + */ +#define HQM_LIST_HEAD(head, type, name) \ + list_first_entry_or_null(&(head).list_head, type, name.list_head) + +/** + * HQM_LIST_FOR_EACH() - iterate over a list + * @head: list head + * @ptr: pointer to struct containing a struct hqm_list_entry + * @name: name of the hqm_list_entry field within the containing struct + * @iter: iterator variable + */ +#define HQM_LIST_FOR_EACH(head, ptr, name, iter) \ + list_for_each_entry(ptr, &(head).list_head, name.list_head) + +/** + * HQM_LIST_FOR_EACH_SAFE() - iterate over a list. This loop works even if + * an element is removed from the list while processing it. 
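+ *
+ * For example (illustrative only, with hypothetical types and names), entries
+ * can be freed or unlinked while walking the list:
+ *
+ *	struct foo {
+ *		struct hqm_list_entry list;
+ *	};
+ *	struct hqm_list_head head;	// initialized with hqm_list_init_head()
+ *	struct foo *f, *tmp;
+ *	int i, j;
+ *
+ *	HQM_LIST_FOR_EACH_SAFE(head, f, tmp, list, i, j)
+ *		hqm_list_del(&head, &f->list);
+ *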
+ * @ptr: pointer to struct containing a struct hqm_list_entry + * @ptr_tmp: pointer to struct containing a struct hqm_list_entry (temporary) + * @hd: list head + * @name: name of the hqm_list_entry field within the containing struct + * @iter: iterator variable + * @iter_tmp: iterator variable (temporary) + */ +#define HQM_LIST_FOR_EACH_SAFE(hd, ptr, ptr_tmp, name, iter, iter_tmp) \ + list_for_each_entry_safe(ptr, ptr_tmp, &(hd).list_head, name.list_head) + +#endif /* __HQM_OSDEP_LIST_H */ diff --git a/drivers/misc/hqm/hqm_osdep_types.h b/drivers/misc/hqm/hqm_osdep_types.h new file mode 100644 index 00000000000000..a5d5dae1ed19f7 --- /dev/null +++ b/drivers/misc/hqm/hqm_osdep_types.h @@ -0,0 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0-only + * Copyright(c) 2017-2019 Intel Corporation + */ + +#ifndef __HQM_OSDEP_TYPES_H +#define __HQM_OSDEP_TYPES_H + +#include + +#endif /* __HQM_OSDEP_TYPES_H */ diff --git a/drivers/misc/hqm/hqm_pf_ops.c b/drivers/misc/hqm/hqm_pf_ops.c new file mode 100644 index 00000000000000..81419d190e9d19 --- /dev/null +++ b/drivers/misc/hqm/hqm_pf_ops.c @@ -0,0 +1,1439 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright(c) 2017-2019 Intel Corporation */ + +#include +#include +#include +#include + +#include "hqm_resource.h" +#include "hqm_ioctl.h" +#include "hqm_intr.h" +#include "hqm_dp_ops.h" +#include "hqm_osdep.h" +#include "hqm_regs.h" + +/***********************************/ +/****** Runtime PM management ******/ +/***********************************/ + +static void hqm_pf_pm_inc_refcnt(struct pci_dev *pdev, bool resume) +{ + if (resume) + /* Increment the device's usage count and immediately wake it + * if it was suspended. + */ + pm_runtime_get_sync(&pdev->dev); + else + pm_runtime_get_noresume(&pdev->dev); +} + +static void hqm_pf_pm_dec_refcnt(struct pci_dev *pdev) +{ + /* Decrement the device's usage count and suspend it if the + * count reaches zero. 
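+	 *
+	 * Illustrative pairing (sketch only): every reference taken through
+	 * the ops table must eventually be dropped the same way:
+	 *
+	 *	dev->ops->inc_pm_refcnt(pdev, true);
+	 *	...
+	 *	dev->ops->dec_pm_refcnt(pdev);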
+ */ + pm_runtime_put_sync_suspend(&pdev->dev); +} + +static bool hqm_pf_pm_status_suspended(struct pci_dev *pdev) +{ + return pm_runtime_status_suspended(&pdev->dev); +} + +/********************************/ +/****** PCI BAR management ******/ +/********************************/ + +static void hqm_pf_unmap_pci_bar_space(struct hqm_dev *hqm_dev, + struct pci_dev *pdev) +{ + pci_iounmap(pdev, hqm_dev->hw.csr_kva); + pci_iounmap(pdev, hqm_dev->hw.func_kva); +} + +static int hqm_pf_map_pci_bar_space(struct hqm_dev *hqm_dev, + struct pci_dev *pdev) +{ + int ret; + u32 reg; + + /* BAR 0: PF FUNC BAR space */ + hqm_dev->hw.func_kva = pci_iomap(pdev, 0, 0); + hqm_dev->hw.func_phys_addr = pci_resource_start(pdev, 0); + + if (!hqm_dev->hw.func_kva) { + HQM_ERR(&pdev->dev, "Cannot iomap BAR 0 (size %llu)\n", + pci_resource_len(pdev, 0)); + + return -EIO; + } + + HQM_INFO(&pdev->dev, "BAR 0 iomem base: %p\n", hqm_dev->hw.func_kva); + HQM_INFO(&pdev->dev, "BAR 0 start: 0x%llx\n", + pci_resource_start(pdev, 0)); + HQM_INFO(&pdev->dev, "BAR 0 len: %llu\n", pci_resource_len(pdev, 0)); + + /* BAR 2: PF CSR BAR space */ + hqm_dev->hw.csr_kva = pci_iomap(pdev, 2, 0); + hqm_dev->hw.csr_phys_addr = pci_resource_start(pdev, 2); + + if (!hqm_dev->hw.csr_kva) { + HQM_ERR(&pdev->dev, "Cannot iomap BAR 2 (size %llu)\n", + pci_resource_len(pdev, 2)); + + ret = -EIO; + goto pci_iomap_bar2_fail; + } + + HQM_INFO(&pdev->dev, "BAR 2 iomem base: %p\n", hqm_dev->hw.csr_kva); + HQM_INFO(&pdev->dev, "BAR 2 start: 0x%llx\n", + pci_resource_start(pdev, 2)); + HQM_INFO(&pdev->dev, "BAR 2 len: %llu\n", pci_resource_len(pdev, 2)); + + /* Do a test register read. A failure here could be due to a BIOS or + * device enumeration issue. + */ + reg = HQM_CSR_RD(&hqm_dev->hw, HQM_SYS_TOTAL_VAS); + if (reg != HQM_MAX_NUM_DOMAINS) { + HQM_ERR(&pdev->dev, + "Test MMIO access error (read 0x%x, expected 0x%x)\n", + reg, HQM_MAX_NUM_DOMAINS); + ret = -EIO; + goto mmio_read_fail; + } + + return 0; + +mmio_read_fail: + pci_iounmap(pdev, hqm_dev->hw.csr_kva); +pci_iomap_bar2_fail: + pci_iounmap(pdev, hqm_dev->hw.func_kva); + + return ret; +} + +#define HQM_LDB_CQ_BOUND HQM_LDB_CQ_OFFS(HQM_MAX_NUM_LDB_PORTS) +#define HQM_DIR_CQ_BOUND HQM_DIR_CQ_OFFS(HQM_MAX_NUM_DIR_PORTS) +#define HQM_LDB_PC_BOUND HQM_LDB_PC_OFFS(HQM_MAX_NUM_LDB_PORTS) +#define HQM_DIR_PC_BOUND HQM_DIR_PC_OFFS(HQM_MAX_NUM_DIR_PORTS) + +static int hqm_pf_mmap(struct file *f, + struct vm_area_struct *vma, + u32 domain_id) +{ + unsigned long bar_pgoff; + unsigned long offset; + struct hqm_dev *dev; + struct page *page; + pgprot_t pgprot; + u32 port_id; + + dev = container_of(f->f_inode->i_cdev, struct hqm_dev, cdev); + + offset = vma->vm_pgoff << PAGE_SHIFT; + + if (offset >= HQM_LDB_CQ_BASE && offset < HQM_LDB_CQ_BOUND) { + if ((vma->vm_end - vma->vm_start) != HQM_LDB_CQ_MAX_SIZE) + return -EINVAL; + + bar_pgoff = dev->hw.func_phys_addr >> PAGE_SHIFT; + + port_id = (offset - HQM_LDB_CQ_BASE) / HQM_LDB_CQ_MAX_SIZE; + + if (dev->ops->ldb_port_owned_by_domain(&dev->hw, + domain_id, + port_id) != 1) + return -EINVAL; + + page = virt_to_page(dev->ldb_port_mem[port_id].cq_base); + + return remap_pfn_range(vma, + vma->vm_start, + page_to_pfn(page), + vma->vm_end - vma->vm_start, + vma->vm_page_prot); + + } else if (offset >= HQM_DIR_CQ_BASE && offset < HQM_DIR_CQ_BOUND) { + if ((vma->vm_end - vma->vm_start) != HQM_DIR_CQ_MAX_SIZE) + return -EINVAL; + + bar_pgoff = dev->hw.func_phys_addr >> PAGE_SHIFT; + + port_id = (offset - HQM_DIR_CQ_BASE) / HQM_DIR_CQ_MAX_SIZE; + + if 
(dev->ops->dir_port_owned_by_domain(&dev->hw, + domain_id, + port_id) != 1) + return -EINVAL; + + page = virt_to_page(dev->dir_port_mem[port_id].cq_base); + + return remap_pfn_range(vma, + vma->vm_start, + page_to_pfn(page), + vma->vm_end - vma->vm_start, + vma->vm_page_prot); + + } else if (offset >= HQM_LDB_PC_BASE && offset < HQM_LDB_PC_BOUND) { + if ((vma->vm_end - vma->vm_start) != HQM_LDB_PC_MAX_SIZE) + return -EINVAL; + + bar_pgoff = dev->hw.func_phys_addr >> PAGE_SHIFT; + + port_id = (offset - HQM_LDB_PC_BASE) / HQM_LDB_PC_MAX_SIZE; + + if (dev->ops->ldb_port_owned_by_domain(&dev->hw, + domain_id, + port_id) != 1) + return -EINVAL; + + page = virt_to_page(dev->ldb_port_mem[port_id].pc_base); + + return remap_pfn_range(vma, + vma->vm_start, + page_to_pfn(page), + vma->vm_end - vma->vm_start, + vma->vm_page_prot); + + } else if (offset >= HQM_DIR_PC_BASE && offset < HQM_DIR_PC_BOUND) { + if ((vma->vm_end - vma->vm_start) != HQM_DIR_PC_MAX_SIZE) + return -EINVAL; + + bar_pgoff = dev->hw.func_phys_addr >> PAGE_SHIFT; + + port_id = (offset - HQM_DIR_PC_BASE) / HQM_DIR_PC_MAX_SIZE; + + if (dev->ops->dir_port_owned_by_domain(&dev->hw, + domain_id, + port_id) != 1) + return -EINVAL; + + page = virt_to_page(dev->dir_port_mem[port_id].pc_base); + + return remap_pfn_range(vma, + vma->vm_start, + page_to_pfn(page), + vma->vm_end - vma->vm_start, + vma->vm_page_prot); + + } else if (offset >= HQM_LDB_PP_BASE && offset < HQM_LDB_PP_BOUND) { + if ((vma->vm_end - vma->vm_start) != HQM_LDB_PP_MAX_SIZE) + return -EINVAL; + + bar_pgoff = dev->hw.func_phys_addr >> PAGE_SHIFT; + + port_id = (offset - HQM_LDB_PP_BASE) / HQM_LDB_PP_MAX_SIZE; + + if (dev->ops->ldb_port_owned_by_domain(&dev->hw, + domain_id, + port_id) != 1) + return -EINVAL; + + pgprot = pgprot_noncached(vma->vm_page_prot); + + } else if (offset >= HQM_DIR_PP_BASE && offset < HQM_DIR_PP_BOUND) { + if ((vma->vm_end - vma->vm_start) != HQM_DIR_PP_MAX_SIZE) + return -EINVAL; + + bar_pgoff = dev->hw.func_phys_addr >> PAGE_SHIFT; + + port_id = (offset - HQM_DIR_PP_BASE) / HQM_DIR_PP_MAX_SIZE; + + if (dev->ops->dir_port_owned_by_domain(&dev->hw, + domain_id, + port_id) != 1) + return -EINVAL; + + pgprot = pgprot_noncached(vma->vm_page_prot); + + } else { + return -EINVAL; + } + + return io_remap_pfn_range(vma, + vma->vm_start, + bar_pgoff + vma->vm_pgoff, + vma->vm_end - vma->vm_start, + pgprot); +} + +/**********************************/ +/****** Interrupt management ******/ +/**********************************/ + +/* The alarm handler logs the alarm syndrome and, for user-caused errors, + * reports the alarm to user-space through the per-domain device file interface. + * + * This function runs as a bottom-half handler because it can call printk + * and/or acquire a mutex. These alarms don't need to be handled immediately -- + * they represent a serious, unexpected error (either in hardware or software) + * that can't be recovered without restarting the application or resetting the + * device. The VF->PF operations are also non-trivial and require running in a + * bottom-half handler. 
+ */ +static irqreturn_t hqm_alarm_handler(int irq, void *hdlr_ptr) +{ + struct hqm_dev *dev = (struct hqm_dev *)hdlr_ptr; + int id; + + id = irq - dev->intr.base_vector; + + mutex_lock(&dev->alarm_isr_mutex); + + HQM_INFO(dev->hqm_device, "HQM alarm %d fired\n", id); + + switch (id) { + case HQM_INT_ALARM: + hqm_process_alarm_interrupt(&dev->hw); + break; + case HQM_INT_VF_TO_PF_MBOX: + HQM_ERR(dev->hqm_device, + "[%s()] Internal error: unexpected IRQ", __func__); + break; + case HQM_INT_INGRESS_ERROR: + hqm_process_ingress_error_interrupt(&dev->hw); + break; + default: + HQM_ERR(dev->hqm_device, + "[%s()] Internal error: unexpected IRQ", __func__); + } + + mutex_unlock(&dev->alarm_isr_mutex); + + return IRQ_HANDLED; +} + +static const char *alarm_hdlr_names[HQM_PF_NUM_ALARM_INTERRUPT_VECTORS] = { + [HQM_INT_ALARM] = "hqm_alarm", + [HQM_INT_INGRESS_ERROR] = "hqm_ingress_err", +}; + +static int hqm_init_alarm_interrupts(struct hqm_dev *dev, struct pci_dev *pdev) +{ + int i, ret; + + for (i = 0; i < HQM_PF_NUM_ALARM_INTERRUPT_VECTORS; i++) { + ret = devm_request_threaded_irq(&pdev->dev, + pci_irq_vector(pdev, i), + NULL, + hqm_alarm_handler, + IRQF_ONESHOT, + alarm_hdlr_names[i], + dev); + if (ret) + return ret; + + dev->intr.isr_registered[i] = true; + } + + hqm_enable_alarm_interrupts(&dev->hw); + + return 0; +} + +static irqreturn_t hqm_compressed_cq_intr_handler(int irq, void *hdlr_ptr) +{ + struct hqm_dev *dev = (struct hqm_dev *)hdlr_ptr; + u32 ldb_cq_interrupts[HQM_MAX_NUM_LDB_PORTS / 32]; + u32 dir_cq_interrupts[HQM_MAX_NUM_DIR_PORTS / 32]; + int i; + + HQM_INFO(dev->hqm_device, "Entered ISR\n"); + + hqm_read_compressed_cq_intr_status(&dev->hw, + ldb_cq_interrupts, + dir_cq_interrupts); + + hqm_ack_compressed_cq_intr(&dev->hw, + ldb_cq_interrupts, + dir_cq_interrupts); + + for (i = 0; i < HQM_MAX_NUM_LDB_PORTS; i++) { + if (!(ldb_cq_interrupts[i / 32] & (1 << (i % 32)))) + continue; + + HQM_INFO(dev->hqm_device, "[%s()] Waking LDB port %d\n", + __func__, i); + + hqm_wake_thread(dev, &dev->intr.ldb_cq_intr[i], WAKE_CQ_INTR); + } + + for (i = 0; i < HQM_MAX_NUM_DIR_PORTS; i++) { + if (!(dir_cq_interrupts[i / 32] & (1 << (i % 32)))) + continue; + + HQM_INFO(dev->hqm_device, "[%s()] Waking DIR port %d\n", + __func__, i); + + hqm_wake_thread(dev, &dev->intr.dir_cq_intr[i], WAKE_CQ_INTR); + } + + return IRQ_HANDLED; +} + +static int hqm_init_compressed_mode_interrupts(struct hqm_dev *dev, + struct pci_dev *pdev) +{ + int ret, irq; + + irq = pci_irq_vector(pdev, HQM_PF_COMPRESSED_MODE_CQ_VECTOR_ID); + + ret = devm_request_irq(&pdev->dev, + irq, + hqm_compressed_cq_intr_handler, + 0, + "hqm_compressed_cq", + dev); + if (ret) + return ret; + + dev->intr.isr_registered[HQM_PF_COMPRESSED_MODE_CQ_VECTOR_ID] = true; + + dev->intr.mode = HQM_MSIX_MODE_COMPRESSED; + + hqm_set_msix_mode(&dev->hw, HQM_MSIX_MODE_COMPRESSED); + + return 0; +} + +static void hqm_pf_free_interrupts(struct hqm_dev *dev, struct pci_dev *pdev) +{ + int i; + + for (i = 0; i < dev->intr.num_vectors; i++) { + if (dev->intr.isr_registered[i]) + devm_free_irq(&pdev->dev, pci_irq_vector(pdev, i), dev); + } + + pci_free_irq_vectors(pdev); +} + +static int hqm_pf_init_interrupts(struct hqm_dev *dev, struct pci_dev *pdev) +{ + int ret, i, nvecs; + + /* HQM supports two modes for CQ interrupts: + * - "compressed mode": all CQ interrupts are packed into a single + * vector. The ISR reads six interrupt status registers to + * determine the source(s). + * - "packed mode" (unused): the hardware supports up to 64 vectors. 
If + * software requests more than 64 CQs to use interrupts, the + * driver will pack CQs onto the same vector. The application + * thread is responsible for checking the CQ depth when it is + * woken and returns to user-space, in case its CQ shares the + * vector and didn't cause the interrupt. + */ + + nvecs = HQM_PF_NUM_COMPRESSED_MODE_VECTORS; + + ret = pci_alloc_irq_vectors(pdev, nvecs, nvecs, PCI_IRQ_MSIX); + if (ret < 0) + return ret; + + dev->intr.num_vectors = ret; + dev->intr.base_vector = pci_irq_vector(pdev, 0); + + ret = hqm_init_alarm_interrupts(dev, pdev); + if (ret) { + hqm_pf_free_interrupts(dev, pdev); + return ret; + } + + ret = hqm_init_compressed_mode_interrupts(dev, pdev); + if (ret) { + hqm_pf_free_interrupts(dev, pdev); + return ret; + } + + /* Initialize per-CQ interrupt structures, such as wait queues + * that threads will wait on until the CQ's interrupt fires. + */ + for (i = 0; i < HQM_MAX_NUM_LDB_PORTS; i++) { + init_waitqueue_head(&dev->intr.ldb_cq_intr[i].wq_head); + mutex_init(&dev->intr.ldb_cq_intr[i].mutex); + } + + for (i = 0; i < HQM_MAX_NUM_DIR_PORTS; i++) { + init_waitqueue_head(&dev->intr.dir_cq_intr[i].wq_head); + mutex_init(&dev->intr.dir_cq_intr[i].mutex); + } + + return 0; +} + +/* If the device is reset during use, its interrupt registers need to be + * reinitialized. + */ +static void hqm_pf_reinit_interrupts(struct hqm_dev *dev) +{ + hqm_enable_alarm_interrupts(&dev->hw); + + hqm_set_msix_mode(&dev->hw, dev->intr.mode); +} + +static int hqm_pf_enable_ldb_cq_interrupts(struct hqm_dev *dev, + int id, + u16 thresh) +{ + int mode, vec; + + if (dev->intr.mode == HQM_MSIX_MODE_COMPRESSED) { + mode = HQM_CQ_ISR_MODE_MSIX; + vec = 0; + } else { + mode = HQM_CQ_ISR_MODE_MSIX; + vec = fls64(~dev->intr.packed_vector_bitmap) - 1; + dev->intr.packed_vector_bitmap |= (u64)1 << vec; + } + + dev->intr.ldb_cq_intr[id].disabled = false; + dev->intr.ldb_cq_intr[id].configured = true; + dev->intr.ldb_cq_intr[id].vector = vec; + + return hqm_configure_ldb_cq_interrupt(&dev->hw, id, vec, + mode, 0, 0, thresh); +} + +static int hqm_pf_enable_dir_cq_interrupts(struct hqm_dev *dev, + int id, + u16 thresh) +{ + int mode, vec; + + if (dev->intr.mode == HQM_MSIX_MODE_COMPRESSED) { + mode = HQM_CQ_ISR_MODE_MSIX; + vec = 0; + } else { + mode = HQM_CQ_ISR_MODE_MSIX; + vec = fls64(~dev->intr.packed_vector_bitmap) - 1; + dev->intr.packed_vector_bitmap |= (u64)1 << vec; + } + + dev->intr.dir_cq_intr[id].disabled = false; + dev->intr.dir_cq_intr[id].configured = true; + dev->intr.dir_cq_intr[id].vector = vec; + + return hqm_configure_dir_cq_interrupt(&dev->hw, id, vec, + mode, 0, 0, thresh); +} + +static int hqm_pf_arm_cq_interrupt(struct hqm_dev *dev, + int domain_id, + int port_id, + bool is_ldb) +{ + int ret; + + if (is_ldb) + ret = dev->ops->ldb_port_owned_by_domain(&dev->hw, + domain_id, + port_id); + else + ret = dev->ops->dir_port_owned_by_domain(&dev->hw, + domain_id, + port_id); + + if (ret != 1) + return -EINVAL; + + return hqm_arm_cq_interrupt(&dev->hw, port_id, is_ldb, false, 0); +} + +/*******************************/ +/****** Driver management ******/ +/*******************************/ + +static int hqm_pf_init_driver_state(struct hqm_dev *hqm_dev) +{ + int i; + + hqm_dev->wq = create_singlethread_workqueue("HQM queue remapper"); + if (!hqm_dev->wq) + return -EINVAL; + + if (movdir64b_supported()) { + hqm_dev->enqueue_four = hqm_movdir64b; + } else { +#ifdef CONFIG_AS_SSE2 + hqm_dev->enqueue_four = hqm_movntdq; +#else + HQM_ERR(hqm_dev->hqm_device, + "%s: 
Platforms without movdir64 must support SSE2\n", + hqm_driver_name); + goto enqueue_four_fail; +#endif + } + + /* Initialize software state */ + for (i = 0; i < HQM_MAX_NUM_DOMAINS; i++) { + struct hqm_domain_dev *domain = &hqm_dev->sched_domains[i]; + + mutex_init(&domain->alert_mutex); + init_waitqueue_head(&domain->wq_head); + } + + init_waitqueue_head(&hqm_dev->measurement_wq); + + mutex_init(&hqm_dev->resource_mutex); + mutex_init(&hqm_dev->measurement_mutex); + mutex_init(&hqm_dev->alarm_isr_mutex); + + return 0; + +#ifndef CONFIG_AS_SSE2 +enqueue_four_fail: + destroy_workqueue(hqm_dev->wq); + return -EINVAL; +#endif +} + +static void hqm_pf_free_driver_state(struct hqm_dev *hqm_dev) +{ + destroy_workqueue(hqm_dev->wq); +} + +static int hqm_pf_cdev_add(struct hqm_dev *hqm_dev, + dev_t base, + const struct file_operations *fops) +{ + int ret; + + hqm_dev->dev_number = MKDEV(MAJOR(base), + MINOR(base) + + (hqm_dev->id * + HQM_NUM_DEV_FILES_PER_DEVICE)); + + cdev_init(&hqm_dev->cdev, fops); + + hqm_dev->cdev.dev = hqm_dev->dev_number; + hqm_dev->cdev.owner = THIS_MODULE; + + ret = cdev_add(&hqm_dev->cdev, + hqm_dev->cdev.dev, + HQM_NUM_DEV_FILES_PER_DEVICE); + + if (ret < 0) + HQM_ERR(hqm_dev->hqm_device, + "%s: cdev_add() returned %d\n", + hqm_driver_name, ret); + + return ret; +} + +static void hqm_pf_cdev_del(struct hqm_dev *hqm_dev) +{ + cdev_del(&hqm_dev->cdev); +} + +static int hqm_pf_device_create(struct hqm_dev *hqm_dev, + struct pci_dev *pdev, + struct class *hqm_class) +{ + dev_t dev; + + dev = MKDEV(MAJOR(hqm_dev->dev_number), + MINOR(hqm_dev->dev_number) + HQM_MAX_NUM_DOMAINS); + + /* Create a new device in order to create a /dev/ hqm node. This device + * is a child of the HQM PCI device. + */ + hqm_dev->hqm_device = device_create(hqm_class, + &pdev->dev, + dev, + hqm_dev, + "hqm%d/hqm", + hqm_dev->id); + + if (IS_ERR_VALUE(PTR_ERR(hqm_dev->hqm_device))) { + HQM_ERR(hqm_dev->hqm_device, + "%s: device_create() returned %ld\n", + hqm_driver_name, PTR_ERR(hqm_dev->hqm_device)); + + return PTR_ERR(hqm_dev->hqm_device); + } + + return 0; +} + +static void hqm_pf_device_destroy(struct hqm_dev *hqm_dev, + struct class *hqm_class) +{ + device_destroy(hqm_class, + MKDEV(MAJOR(hqm_dev->dev_number), + MINOR(hqm_dev->dev_number) + + HQM_MAX_NUM_DOMAINS)); +} + +static bool hqm_sparse_cq_mode_enabled; + +static void hqm_pf_init_hardware(struct hqm_dev *hqm_dev) +{ + int i; + + hqm_disable_dp_vasr_feature(&hqm_dev->hw); + + if (hqm_dev->revision < HQM_REV_B0) { + for (i = 0; i < pci_num_vf(hqm_dev->pdev); i++) + hqm_set_vf_reset_in_progress(&hqm_dev->hw, i); + } + + hqm_enable_excess_tokens_alarm(&hqm_dev->hw); + + hqm_sparse_cq_mode_enabled = hqm_dev->revision >= HQM_REV_B0; + + if (hqm_sparse_cq_mode_enabled) { + hqm_hw_enable_sparse_ldb_cq_mode(&hqm_dev->hw); + hqm_hw_enable_sparse_dir_cq_mode(&hqm_dev->hw); + } +} + +/*****************************/ +/****** Sysfs callbacks ******/ +/*****************************/ + +#define HQM_TOTAL_SYSFS_SHOW(name, macro) \ +static ssize_t total_##name##_show( \ + struct device *dev, \ + struct device_attribute *attr, \ + char *buf) \ +{ \ + int val = HQM_MAX_NUM_##macro; \ + \ + return scnprintf(buf, PAGE_SIZE, "%d\n", val); \ +} + +HQM_TOTAL_SYSFS_SHOW(num_sched_domains, DOMAINS) +HQM_TOTAL_SYSFS_SHOW(num_ldb_queues, LDB_QUEUES) +HQM_TOTAL_SYSFS_SHOW(num_ldb_ports, LDB_PORTS) +HQM_TOTAL_SYSFS_SHOW(num_ldb_credit_pools, LDB_CREDIT_POOLS) +HQM_TOTAL_SYSFS_SHOW(num_dir_credit_pools, DIR_CREDIT_POOLS) +HQM_TOTAL_SYSFS_SHOW(num_ldb_credits, 
LDB_CREDITS) +HQM_TOTAL_SYSFS_SHOW(num_dir_credits, DIR_CREDITS) +HQM_TOTAL_SYSFS_SHOW(num_atomic_inflights, AQOS_ENTRIES) +HQM_TOTAL_SYSFS_SHOW(num_hist_list_entries, HIST_LIST_ENTRIES) + +static ssize_t total_num_dir_ports_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + int val = HQM_MAX_NUM_DIR_PORTS; + + return scnprintf(buf, PAGE_SIZE, "%d\n", val); +} + +#define HQM_AVAIL_SYSFS_SHOW(name) \ +static ssize_t avail_##name##_show( \ + struct device *dev, \ + struct device_attribute *attr, \ + char *buf) \ +{ \ + struct hqm_dev *hqm_dev = dev_get_drvdata(dev); \ + struct hqm_get_num_resources_args arg; \ + struct hqm_hw *hw = &hqm_dev->hw; \ + int val; \ + \ + mutex_lock(&hqm_dev->resource_mutex); \ + \ + val = hqm_hw_get_num_resources(hw, &arg, false, 0); \ + \ + mutex_unlock(&hqm_dev->resource_mutex); \ + \ + if (val) \ + return -1; \ + \ + val = arg.name; \ + \ + return scnprintf(buf, PAGE_SIZE, "%d\n", val); \ +} + +HQM_AVAIL_SYSFS_SHOW(num_sched_domains) +HQM_AVAIL_SYSFS_SHOW(num_ldb_queues) +HQM_AVAIL_SYSFS_SHOW(num_ldb_ports) +HQM_AVAIL_SYSFS_SHOW(num_dir_ports) +HQM_AVAIL_SYSFS_SHOW(num_ldb_credit_pools) +HQM_AVAIL_SYSFS_SHOW(num_dir_credit_pools) +HQM_AVAIL_SYSFS_SHOW(num_ldb_credits) +HQM_AVAIL_SYSFS_SHOW(num_dir_credits) +HQM_AVAIL_SYSFS_SHOW(num_atomic_inflights) +HQM_AVAIL_SYSFS_SHOW(num_hist_list_entries) + +static ssize_t max_ctg_atm_inflights_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct hqm_dev *hqm_dev = dev_get_drvdata(dev); + struct hqm_get_num_resources_args arg; + struct hqm_hw *hw = &hqm_dev->hw; + int val; + + mutex_lock(&hqm_dev->resource_mutex); + + val = hqm_hw_get_num_resources(hw, &arg, false, 0); + + mutex_unlock(&hqm_dev->resource_mutex); + + if (val) + return -1; + + val = arg.max_contiguous_atomic_inflights; + + return scnprintf(buf, PAGE_SIZE, "%d\n", val); +} + +static ssize_t max_ctg_hl_entries_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct hqm_dev *hqm_dev = dev_get_drvdata(dev); + struct hqm_get_num_resources_args arg; + struct hqm_hw *hw = &hqm_dev->hw; + int val; + + mutex_lock(&hqm_dev->resource_mutex); + + val = hqm_hw_get_num_resources(hw, &arg, false, 0); + + mutex_unlock(&hqm_dev->resource_mutex); + + if (val) + return -1; + + val = arg.max_contiguous_hist_list_entries; + + return scnprintf(buf, PAGE_SIZE, "%d\n", val); +} + +/* Device attribute name doesn't match the show function name, so we define our + * own DEVICE_ATTR macro. 
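+ *
+ * For example (illustrative expansion), HQM_DEVICE_ATTR_RO(total, num_ldb_ports)
+ * below produces roughly:
+ *
+ *	struct device_attribute dev_attr_total_num_ldb_ports = {
+ *		.attr = { .name = "num_ldb_ports", .mode = 0444 },
+ *		.show = total_num_ldb_ports_show,
+ *	};
+ *
+ * i.e. the sysfs file is named "num_ldb_ports" while the attribute variable
+ * and show callback keep the "total_" prefix, which plain DEVICE_ATTR_RO()
+ * cannot express.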
+ */ +#define HQM_DEVICE_ATTR_RO(_prefix, _name) \ +struct device_attribute dev_attr_##_prefix##_##_name = {\ + .attr = { .name = __stringify(_name), .mode = 0444 },\ + .show = _prefix##_##_name##_show,\ +} + +static HQM_DEVICE_ATTR_RO(total, num_sched_domains); +static HQM_DEVICE_ATTR_RO(total, num_ldb_queues); +static HQM_DEVICE_ATTR_RO(total, num_ldb_ports); +static HQM_DEVICE_ATTR_RO(total, num_dir_ports); +static HQM_DEVICE_ATTR_RO(total, num_ldb_credit_pools); +static HQM_DEVICE_ATTR_RO(total, num_dir_credit_pools); +static HQM_DEVICE_ATTR_RO(total, num_ldb_credits); +static HQM_DEVICE_ATTR_RO(total, num_dir_credits); +static HQM_DEVICE_ATTR_RO(total, num_atomic_inflights); +static HQM_DEVICE_ATTR_RO(total, num_hist_list_entries); + +static struct attribute *hqm_total_attrs[] = { + &dev_attr_total_num_sched_domains.attr, + &dev_attr_total_num_ldb_queues.attr, + &dev_attr_total_num_ldb_ports.attr, + &dev_attr_total_num_dir_ports.attr, + &dev_attr_total_num_ldb_credit_pools.attr, + &dev_attr_total_num_dir_credit_pools.attr, + &dev_attr_total_num_ldb_credits.attr, + &dev_attr_total_num_dir_credits.attr, + &dev_attr_total_num_atomic_inflights.attr, + &dev_attr_total_num_hist_list_entries.attr, + NULL +}; + +static const struct attribute_group hqm_total_attr_group = { + .attrs = hqm_total_attrs, + .name = "total_resources", +}; + +static HQM_DEVICE_ATTR_RO(avail, num_sched_domains); +static HQM_DEVICE_ATTR_RO(avail, num_ldb_queues); +static HQM_DEVICE_ATTR_RO(avail, num_ldb_ports); +static HQM_DEVICE_ATTR_RO(avail, num_dir_ports); +static HQM_DEVICE_ATTR_RO(avail, num_ldb_credit_pools); +static HQM_DEVICE_ATTR_RO(avail, num_dir_credit_pools); +static HQM_DEVICE_ATTR_RO(avail, num_ldb_credits); +static HQM_DEVICE_ATTR_RO(avail, num_dir_credits); +static HQM_DEVICE_ATTR_RO(avail, num_atomic_inflights); +static HQM_DEVICE_ATTR_RO(avail, num_hist_list_entries); +static DEVICE_ATTR_RO(max_ctg_atm_inflights); +static DEVICE_ATTR_RO(max_ctg_hl_entries); + +static struct attribute *hqm_avail_attrs[] = { + &dev_attr_avail_num_sched_domains.attr, + &dev_attr_avail_num_ldb_queues.attr, + &dev_attr_avail_num_ldb_ports.attr, + &dev_attr_avail_num_dir_ports.attr, + &dev_attr_avail_num_ldb_credit_pools.attr, + &dev_attr_avail_num_dir_credit_pools.attr, + &dev_attr_avail_num_ldb_credits.attr, + &dev_attr_avail_num_dir_credits.attr, + &dev_attr_avail_num_atomic_inflights.attr, + &dev_attr_avail_num_hist_list_entries.attr, + &dev_attr_max_ctg_atm_inflights.attr, + &dev_attr_max_ctg_hl_entries.attr, + NULL +}; + +static const struct attribute_group hqm_avail_attr_group = { + .attrs = hqm_avail_attrs, + .name = "avail_resources", +}; + +#define HQM_GROUP_SNS_PER_QUEUE_SHOW(id) \ +static ssize_t group##id##_sns_per_queue_show( \ + struct device *dev, \ + struct device_attribute *attr, \ + char *buf) \ +{ \ + struct hqm_dev *hqm_dev = dev_get_drvdata(dev); \ + struct hqm_hw *hw = &hqm_dev->hw; \ + int val; \ + \ + mutex_lock(&hqm_dev->resource_mutex); \ + \ + val = hqm_get_group_sequence_numbers(hw, id); \ + \ + mutex_unlock(&hqm_dev->resource_mutex); \ + \ + return scnprintf(buf, PAGE_SIZE, "%d\n", val);\ +} + +HQM_GROUP_SNS_PER_QUEUE_SHOW(0) +HQM_GROUP_SNS_PER_QUEUE_SHOW(1) +HQM_GROUP_SNS_PER_QUEUE_SHOW(2) +HQM_GROUP_SNS_PER_QUEUE_SHOW(3) + +#define HQM_GROUP_SNS_PER_QUEUE_STORE(id) \ +static ssize_t group##id##_sns_per_queue_store( \ + struct device *dev, \ + struct device_attribute *attr, \ + const char *buf, \ + size_t count) \ +{ \ + struct hqm_dev *hqm_dev = dev_get_drvdata(dev); \ + struct hqm_hw *hw = 
&hqm_dev->hw; \ + unsigned long val; \ + int err; \ + \ + err = kstrtoul(buf, 0, &val); \ + if (err) \ + return -1; \ + \ + mutex_lock(&hqm_dev->resource_mutex); \ + \ + hqm_set_group_sequence_numbers(hw, id, val); \ + \ + mutex_unlock(&hqm_dev->resource_mutex); \ + \ + return count; \ +} + +HQM_GROUP_SNS_PER_QUEUE_STORE(0) +HQM_GROUP_SNS_PER_QUEUE_STORE(1) +HQM_GROUP_SNS_PER_QUEUE_STORE(2) +HQM_GROUP_SNS_PER_QUEUE_STORE(3) + +/* RW sysfs files in the sequence_numbers/ subdirectory */ +static DEVICE_ATTR_RW(group0_sns_per_queue); +static DEVICE_ATTR_RW(group1_sns_per_queue); +static DEVICE_ATTR_RW(group2_sns_per_queue); +static DEVICE_ATTR_RW(group3_sns_per_queue); + +static struct attribute *hqm_sequence_number_attrs[] = { + &dev_attr_group0_sns_per_queue.attr, + &dev_attr_group1_sns_per_queue.attr, + &dev_attr_group2_sns_per_queue.attr, + &dev_attr_group3_sns_per_queue.attr, + NULL +}; + +static const struct attribute_group hqm_sequence_number_attr_group = { + .attrs = hqm_sequence_number_attrs, + .name = "sequence_numbers" +}; + +static ssize_t dev_id_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct hqm_dev *hqm_dev = dev_get_drvdata(dev); + + return scnprintf(buf, PAGE_SIZE, "%d\n", hqm_dev->id); +} + +static DEVICE_ATTR_RO(dev_id); + +static int hqm_pf_sysfs_create(struct hqm_dev *hqm_dev) +{ + struct kobject *kobj; + int ret; + + kobj = &hqm_dev->pdev->dev.kobj; + + ret = sysfs_create_file(kobj, &dev_attr_dev_id.attr); + if (ret) + goto hqm_dev_id_attr_group_fail; + + ret = sysfs_create_group(kobj, &hqm_total_attr_group); + if (ret) + goto hqm_total_attr_group_fail; + + ret = sysfs_create_group(kobj, &hqm_avail_attr_group); + if (ret) + goto hqm_avail_attr_group_fail; + + ret = sysfs_create_group(kobj, &hqm_sequence_number_attr_group); + if (ret) + goto hqm_sn_attr_group_fail; + + return 0; + +hqm_sn_attr_group_fail: + sysfs_remove_group(kobj, &hqm_avail_attr_group); +hqm_avail_attr_group_fail: + sysfs_remove_group(kobj, &hqm_total_attr_group); +hqm_total_attr_group_fail: + sysfs_remove_file(kobj, &dev_attr_dev_id.attr); +hqm_dev_id_attr_group_fail: + return ret; +} + +static void hqm_pf_sysfs_destroy(struct hqm_dev *hqm_dev) +{ + struct kobject *kobj; + + kobj = &hqm_dev->pdev->dev.kobj; + + sysfs_remove_group(kobj, &hqm_sequence_number_attr_group); + sysfs_remove_group(kobj, &hqm_avail_attr_group); + sysfs_remove_group(kobj, &hqm_total_attr_group); + sysfs_remove_file(kobj, &dev_attr_dev_id.attr); +} + +static void hqm_pf_sysfs_reapply_configuration(struct hqm_dev *dev) +{ + int i; + + for (i = 0; i < HQM_MAX_NUM_SEQUENCE_NUMBER_GROUPS; i++) { + int num_sns = hqm_get_group_sequence_numbers(&dev->hw, i); + + hqm_set_group_sequence_numbers(&dev->hw, i, num_sns); + } +} + +/*****************************/ +/****** IOCTL callbacks ******/ +/*****************************/ + +static int hqm_pf_create_sched_domain(struct hqm_hw *hw, + struct hqm_create_sched_domain_args *args, + struct hqm_cmd_response *resp) +{ + return hqm_hw_create_sched_domain(hw, args, resp, false, 0); +} + +static int hqm_pf_create_ldb_pool(struct hqm_hw *hw, + u32 id, + struct hqm_create_ldb_pool_args *args, + struct hqm_cmd_response *resp) +{ + return hqm_hw_create_ldb_pool(hw, id, args, resp, false, 0); +} + +static int hqm_pf_create_dir_pool(struct hqm_hw *hw, + u32 id, + struct hqm_create_dir_pool_args *args, + struct hqm_cmd_response *resp) +{ + return hqm_hw_create_dir_pool(hw, id, args, resp, false, 0); +} + +static int hqm_pf_create_ldb_queue(struct hqm_hw *hw, + u32 id, + 
struct hqm_create_ldb_queue_args *args, + struct hqm_cmd_response *resp) +{ + return hqm_hw_create_ldb_queue(hw, id, args, resp, false, 0); +} + +static int hqm_pf_create_dir_queue(struct hqm_hw *hw, + u32 id, + struct hqm_create_dir_queue_args *args, + struct hqm_cmd_response *resp) +{ + return hqm_hw_create_dir_queue(hw, id, args, resp, false, 0); +} + +static int hqm_pf_create_ldb_port(struct hqm_hw *hw, + u32 id, + struct hqm_create_ldb_port_args *args, + uintptr_t pop_count_dma_base, + uintptr_t cq_dma_base, + struct hqm_cmd_response *resp) +{ + return hqm_hw_create_ldb_port(hw, id, args, + pop_count_dma_base, + cq_dma_base, + resp, false, 0); +} + +static int hqm_pf_create_dir_port(struct hqm_hw *hw, + u32 id, + struct hqm_create_dir_port_args *args, + uintptr_t pop_count_dma_base, + uintptr_t cq_dma_base, + struct hqm_cmd_response *resp) +{ + return hqm_hw_create_dir_port(hw, id, args, + pop_count_dma_base, + cq_dma_base, + resp, false, 0); +} + +static int hqm_pf_start_domain(struct hqm_hw *hw, + u32 id, + struct hqm_start_domain_args *args, + struct hqm_cmd_response *resp) +{ + return hqm_hw_start_domain(hw, id, args, resp, false, 0); +} + +static int hqm_pf_map_qid(struct hqm_hw *hw, + u32 id, + struct hqm_map_qid_args *args, + struct hqm_cmd_response *resp) +{ + return hqm_hw_map_qid(hw, id, args, resp, false, 0); +} + +static int hqm_pf_unmap_qid(struct hqm_hw *hw, + u32 id, + struct hqm_unmap_qid_args *args, + struct hqm_cmd_response *resp) +{ + return hqm_hw_unmap_qid(hw, id, args, resp, false, 0); +} + +static int hqm_pf_enable_ldb_port(struct hqm_hw *hw, + u32 id, + struct hqm_enable_ldb_port_args *args, + struct hqm_cmd_response *resp) +{ + return hqm_hw_enable_ldb_port(hw, id, args, resp, false, 0); +} + +static int hqm_pf_disable_ldb_port(struct hqm_hw *hw, + u32 id, + struct hqm_disable_ldb_port_args *args, + struct hqm_cmd_response *resp) +{ + return hqm_hw_disable_ldb_port(hw, id, args, resp, false, 0); +} + +static int hqm_pf_enable_dir_port(struct hqm_hw *hw, + u32 id, + struct hqm_enable_dir_port_args *args, + struct hqm_cmd_response *resp) +{ + return hqm_hw_enable_dir_port(hw, id, args, resp, false, 0); +} + +static int hqm_pf_disable_dir_port(struct hqm_hw *hw, + u32 id, + struct hqm_disable_dir_port_args *args, + struct hqm_cmd_response *resp) +{ + return hqm_hw_disable_dir_port(hw, id, args, resp, false, 0); +} + +static int hqm_pf_get_num_resources(struct hqm_hw *hw, + struct hqm_get_num_resources_args *args) +{ + return hqm_hw_get_num_resources(hw, args, false, 0); +} + +static void hqm_reset_packed_interrupts(struct hqm_dev *dev, int id) +{ + struct hqm_hw *hw = &dev->hw; + u32 vec; + int i; + + for (i = 0; i < HQM_MAX_NUM_LDB_PORTS; i++) { + if (!hqm_ldb_port_owned_by_domain(hw, id, i, false, 0)) + continue; + + if (!dev->intr.ldb_cq_intr[i].configured) + continue; + + vec = dev->intr.ldb_cq_intr[i].vector; + + dev->intr.packed_vector_bitmap &= ~((u64)1 << vec); + } + + for (i = 0; i < HQM_MAX_NUM_DIR_PORTS; i++) { + if (!hqm_dir_port_owned_by_domain(hw, id, i, false, 0)) + continue; + + if (!dev->intr.dir_cq_intr[i].configured) + continue; + + vec = dev->intr.dir_cq_intr[i].vector; + + dev->intr.packed_vector_bitmap &= ~((u64)1 << vec); + } +} + +static int hqm_pf_reset_domain(struct hqm_dev *dev, u32 id) +{ + /* Unset the domain's packed vector bitmap entries */ + if (dev->intr.mode == HQM_MSIX_MODE_PACKED) + hqm_reset_packed_interrupts(dev, id); + + return hqm_reset_domain(&dev->hw, id, false, 0); +} + +static int hqm_pf_measure_perf(struct hqm_dev 
*dev, + struct hqm_sample_perf_counters_args *args, + struct hqm_cmd_response *response) +{ + union hqm_perf_metric_group_data data; + struct timespec start, end; + long timeout; + u32 elapsed; + + memset(&data, 0, sizeof(data)); + + /* Only one active measurement is allowed at a time. */ + if (!mutex_trylock(&dev->measurement_mutex)) + return -EBUSY; + + getnstimeofday(&start); + + hqm_init_perf_metric_measurement(&dev->hw, + args->perf_metric_group_id, + args->measurement_duration_us); + + timeout = usecs_to_jiffies(args->measurement_duration_us); + + wait_event_interruptible_timeout(dev->measurement_wq, + READ_ONCE(dev->reset_active), + timeout); + + getnstimeofday(&end); + + hqm_collect_perf_metric_data(&dev->hw, + args->perf_metric_group_id, + &data); + + mutex_unlock(&dev->measurement_mutex); + + /* Calculate the elapsed time in microseconds */ + elapsed = (end.tv_sec - start.tv_sec) * 1000000 + + (end.tv_nsec - start.tv_nsec) / 1000; + + if (copy_to_user((void __user *)args->elapsed_time_us, + &elapsed, + sizeof(elapsed))) { + pr_err("Invalid elapsed time pointer\n"); + return -EFAULT; + } + + if (copy_to_user((void __user *)args->perf_metric_group_data, + &data, + sizeof(data))) { + pr_err("Invalid performance metric group data pointer\n"); + return -EFAULT; + } + + return 0; +} + +static int hqm_pf_get_ldb_queue_depth(struct hqm_hw *hw, + u32 id, + struct hqm_get_ldb_queue_depth_args *args, + struct hqm_cmd_response *resp) +{ + return hqm_hw_get_ldb_queue_depth(hw, id, args, resp, false, 0); +} + +static int hqm_pf_get_dir_queue_depth(struct hqm_hw *hw, + u32 id, + struct hqm_get_dir_queue_depth_args *args, + struct hqm_cmd_response *resp) +{ + return hqm_hw_get_dir_queue_depth(hw, id, args, resp, false, 0); +} + +static int hqm_pf_pending_port_unmaps(struct hqm_hw *hw, + u32 id, + struct hqm_pending_port_unmaps_args *args, + struct hqm_cmd_response *resp) +{ + return hqm_hw_pending_port_unmaps(hw, id, args, resp, false, 0); +} + +static int hqm_pf_measure_sched_count(struct hqm_dev *dev, + struct hqm_measure_sched_count_args *args, + struct hqm_cmd_response *response) +{ + struct hqm_sched_counts *cnt = NULL; + struct timespec start, end; + int ret = 0, i; + long timeout; + u32 elapsed; + + cnt = devm_kmalloc(&dev->pdev->dev, 2 * sizeof(*cnt), GFP_KERNEL); + if (!cnt) { + ret = -ENOMEM; + goto done; + } + + getnstimeofday(&start); + + hqm_read_sched_counts(&dev->hw, &cnt[0], false, 0); + + timeout = usecs_to_jiffies(args->measurement_duration_us); + + wait_event_interruptible_timeout(dev->measurement_wq, + READ_ONCE(dev->reset_active), + timeout); + + getnstimeofday(&end); + + hqm_read_sched_counts(&dev->hw, &cnt[1], false, 0); + + /* Calculate the elapsed time in microseconds */ + elapsed = (end.tv_sec - start.tv_sec) * 1000000 + + (end.tv_nsec - start.tv_nsec) / 1000; + + if (copy_to_user((void __user *)args->elapsed_time_us, + &elapsed, + sizeof(elapsed))) { + pr_err("Invalid elapsed time pointer\n"); + ret = -EFAULT; + goto done; + } + + /* Calculate the scheduling count difference */ + cnt[1].ldb_sched_count -= cnt[0].ldb_sched_count; + cnt[1].dir_sched_count -= cnt[0].dir_sched_count; + + for (i = 0; i < HQM_MAX_NUM_LDB_PORTS; i++) + cnt[1].ldb_cq_sched_count[i] -= cnt[0].ldb_cq_sched_count[i]; + + for (i = 0; i < HQM_MAX_NUM_DIR_PORTS; i++) + cnt[1].dir_cq_sched_count[i] -= cnt[0].dir_cq_sched_count[i]; + + if (copy_to_user((void __user *)args->sched_count_data, + &cnt[1], + sizeof(cnt[1]))) { + pr_err("Invalid performance metric group data pointer\n"); + ret = -EFAULT; + 
goto done; + } + +done: + if (cnt) + devm_kfree(&dev->pdev->dev, cnt); + + return ret; +} + +static int hqm_pf_query_cq_poll_mode(struct hqm_dev *hqm_dev, + struct hqm_cmd_response *user_resp) +{ + user_resp->status = 0; + + if (hqm_sparse_cq_mode_enabled) { + hqm_hw_enable_sparse_ldb_cq_mode(&hqm_dev->hw); + hqm_hw_enable_sparse_dir_cq_mode(&hqm_dev->hw); + } + + if (hqm_sparse_cq_mode_enabled) + user_resp->id = HQM_CQ_POLL_MODE_SPARSE; + else + user_resp->id = HQM_CQ_POLL_MODE_STD; + + return 0; +} + +/**************************************/ +/****** Resource query callbacks ******/ +/**************************************/ + +static int hqm_pf_ldb_port_owned_by_domain(struct hqm_hw *hw, + u32 domain_id, + u32 port_id) +{ + return hqm_ldb_port_owned_by_domain(hw, domain_id, port_id, false, 0); +} + +static int hqm_pf_dir_port_owned_by_domain(struct hqm_hw *hw, + u32 domain_id, + u32 port_id) +{ + return hqm_dir_port_owned_by_domain(hw, domain_id, port_id, false, 0); +} + +static int hqm_pf_get_sn_allocation(struct hqm_hw *hw, u32 group_id) +{ + return hqm_get_group_sequence_numbers(hw, group_id); +} + +static int hqm_pf_get_sn_occupancy(struct hqm_hw *hw, u32 group_id) +{ + return hqm_get_group_sequence_number_occupancy(hw, group_id); +} + +/*******************************/ +/****** HQM PF Device Ops ******/ +/*******************************/ + +struct hqm_device_ops hqm_pf_ops = { + .map_pci_bar_space = hqm_pf_map_pci_bar_space, + .unmap_pci_bar_space = hqm_pf_unmap_pci_bar_space, + .mmap = hqm_pf_mmap, + .inc_pm_refcnt = hqm_pf_pm_inc_refcnt, + .dec_pm_refcnt = hqm_pf_pm_dec_refcnt, + .pm_refcnt_status_suspended = hqm_pf_pm_status_suspended, + .init_driver_state = hqm_pf_init_driver_state, + .free_driver_state = hqm_pf_free_driver_state, + .device_create = hqm_pf_device_create, + .device_destroy = hqm_pf_device_destroy, + .cdev_add = hqm_pf_cdev_add, + .cdev_del = hqm_pf_cdev_del, + .sysfs_create = hqm_pf_sysfs_create, + .sysfs_destroy = hqm_pf_sysfs_destroy, + .sysfs_reapply = hqm_pf_sysfs_reapply_configuration, + .init_interrupts = hqm_pf_init_interrupts, + .enable_ldb_cq_interrupts = hqm_pf_enable_ldb_cq_interrupts, + .enable_dir_cq_interrupts = hqm_pf_enable_dir_cq_interrupts, + .arm_cq_interrupt = hqm_pf_arm_cq_interrupt, + .reinit_interrupts = hqm_pf_reinit_interrupts, + .free_interrupts = hqm_pf_free_interrupts, + .init_hardware = hqm_pf_init_hardware, + .create_sched_domain = hqm_pf_create_sched_domain, + .create_ldb_pool = hqm_pf_create_ldb_pool, + .create_dir_pool = hqm_pf_create_dir_pool, + .create_ldb_queue = hqm_pf_create_ldb_queue, + .create_dir_queue = hqm_pf_create_dir_queue, + .create_ldb_port = hqm_pf_create_ldb_port, + .create_dir_port = hqm_pf_create_dir_port, + .start_domain = hqm_pf_start_domain, + .map_qid = hqm_pf_map_qid, + .unmap_qid = hqm_pf_unmap_qid, + .enable_ldb_port = hqm_pf_enable_ldb_port, + .enable_dir_port = hqm_pf_enable_dir_port, + .disable_ldb_port = hqm_pf_disable_ldb_port, + .disable_dir_port = hqm_pf_disable_dir_port, + .get_num_resources = hqm_pf_get_num_resources, + .reset_domain = hqm_pf_reset_domain, + .measure_perf = hqm_pf_measure_perf, + .measure_sched_count = hqm_pf_measure_sched_count, + .ldb_port_owned_by_domain = hqm_pf_ldb_port_owned_by_domain, + .dir_port_owned_by_domain = hqm_pf_dir_port_owned_by_domain, + .get_sn_allocation = hqm_pf_get_sn_allocation, + .get_sn_occupancy = hqm_pf_get_sn_occupancy, + .get_ldb_queue_depth = hqm_pf_get_ldb_queue_depth, + .get_dir_queue_depth = hqm_pf_get_dir_queue_depth, + .pending_port_unmaps = 
hqm_pf_pending_port_unmaps, + .query_cq_poll_mode = hqm_pf_query_cq_poll_mode, +}; diff --git a/drivers/misc/hqm/hqm_regs.h b/drivers/misc/hqm/hqm_regs.h new file mode 100644 index 00000000000000..8897785b8926d7 --- /dev/null +++ b/drivers/misc/hqm/hqm_regs.h @@ -0,0 +1,2621 @@ +/* SPDX-License-Identifier: GPL-2.0-only + * Copyright(c) 2016-2019 Intel Corporation + */ + +#ifndef __HQM_REGS_H +#define __HQM_REGS_H + +#include "hqm_osdep_types.h" + +#define HQM_FUNC_PF_VF2PF_MAILBOX_BYTES 256 +#define HQM_FUNC_PF_VF2PF_MAILBOX(vf_id, x) \ + (0x1000 + 0x4 * (x) + (vf_id) * 0x10000) +#define HQM_FUNC_PF_VF2PF_MAILBOX_RST 0x0 +union hqm_func_pf_vf2pf_mailbox { + struct { + u32 msg : 32; + } field; + u32 val; +}; + +#define HQM_FUNC_PF_VF2PF_MAILBOX_ISR(vf_id) \ + (0x1f00 + (vf_id) * 0x10000) +#define HQM_FUNC_PF_VF2PF_MAILBOX_ISR_RST 0x0 +union hqm_func_pf_vf2pf_mailbox_isr { + struct { + u32 vf_isr : 16; + u32 rsvd0 : 16; + } field; + u32 val; +}; + +#define HQM_FUNC_PF_VF2PF_FLR_ISR(vf_id) \ + (0x1f04 + (vf_id) * 0x10000) +#define HQM_FUNC_PF_VF2PF_FLR_ISR_RST 0x0 +union hqm_func_pf_vf2pf_flr_isr { + struct { + u32 vf_isr : 16; + u32 rsvd0 : 16; + } field; + u32 val; +}; + +#define HQM_FUNC_PF_VF2PF_ISR_PEND(vf_id) \ + (0x1f10 + (vf_id) * 0x10000) +#define HQM_FUNC_PF_VF2PF_ISR_PEND_RST 0x0 +union hqm_func_pf_vf2pf_isr_pend { + struct { + u32 isr_pend : 1; + u32 rsvd0 : 31; + } field; + u32 val; +}; + +#define HQM_FUNC_PF_PF2VF_MAILBOX_BYTES 64 +#define HQM_FUNC_PF_PF2VF_MAILBOX(vf_id, x) \ + (0x2000 + 0x4 * (x) + (vf_id) * 0x10000) +#define HQM_FUNC_PF_PF2VF_MAILBOX_RST 0x0 +union hqm_func_pf_pf2vf_mailbox { + struct { + u32 msg : 32; + } field; + u32 val; +}; + +#define HQM_FUNC_PF_PF2VF_MAILBOX_ISR(vf_id) \ + (0x2f00 + (vf_id) * 0x10000) +#define HQM_FUNC_PF_PF2VF_MAILBOX_ISR_RST 0x0 +union hqm_func_pf_pf2vf_mailbox_isr { + struct { + u32 isr : 16; + u32 rsvd0 : 16; + } field; + u32 val; +}; + +#define HQM_FUNC_PF_VF_RESET_IN_PROGRESS(vf_id) \ + (0x3000 + (vf_id) * 0x10000) +#define HQM_FUNC_PF_VF_RESET_IN_PROGRESS_RST 0xffff +union hqm_func_pf_vf_reset_in_progress { + struct { + u32 reset_in_progress : 16; + u32 rsvd0 : 16; + } field; + u32 val; +}; + +#define HQM_MSIX_MEM_VECTOR_CTRL(x) \ + (0x100000c + (x) * 0x10) +#define HQM_MSIX_MEM_VECTOR_CTRL_RST 0x1 +union hqm_msix_mem_vector_ctrl { + struct { + u32 vec_mask : 1; + u32 rsvd0 : 31; + } field; + u32 val; +}; + +#define HQM_SYS_TOTAL_VAS 0x124 +#define HQM_SYS_TOTAL_VAS_RST 0x20 +union hqm_sys_total_vas { + struct { + u32 total_vas : 32; + } field; + u32 val; +}; + +#define HQM_SYS_ALARM_PF_SYND2 0x508 +#define HQM_SYS_ALARM_PF_SYND2_RST 0x0 +union hqm_sys_alarm_pf_synd2 { + struct { + u32 lock_id : 16; + u32 meas : 1; + u32 debug : 7; + u32 cq_pop : 1; + u32 qe_uhl : 1; + u32 qe_orsp : 1; + u32 qe_valid : 1; + u32 cq_int_rearm : 1; + u32 dsi_error : 1; + u32 rsvd0 : 2; + } field; + u32 val; +}; + +#define HQM_SYS_ALARM_PF_SYND1 0x504 +#define HQM_SYS_ALARM_PF_SYND1_RST 0x0 +union hqm_sys_alarm_pf_synd1 { + struct { + u32 dsi : 16; + u32 qid : 8; + u32 qtype : 2; + u32 qpri : 3; + u32 msg_type : 3; + } field; + u32 val; +}; + +#define HQM_SYS_ALARM_PF_SYND0 0x500 +#define HQM_SYS_ALARM_PF_SYND0_RST 0x0 +union hqm_sys_alarm_pf_synd0 { + struct { + u32 syndrome : 8; + u32 rtype : 2; + u32 rsvd0 : 2; + u32 from_dmv : 1; + u32 is_ldb : 1; + u32 cls : 2; + u32 aid : 6; + u32 unit : 4; + u32 source : 4; + u32 more : 1; + u32 valid : 1; + } field; + u32 val; +}; + +#define HQM_SYS_VF_LDB_VPP_V(x) \ + (0xf00 + (x) * 0x1000) +#define 
HQM_SYS_VF_LDB_VPP_V_RST 0x0 +union hqm_sys_vf_ldb_vpp_v { + struct { + u32 vpp_v : 1; + u32 rsvd0 : 31; + } field; + u32 val; +}; + +#define HQM_SYS_VF_LDB_VPP2PP(x) \ + (0xf08 + (x) * 0x1000) +#define HQM_SYS_VF_LDB_VPP2PP_RST 0x0 +union hqm_sys_vf_ldb_vpp2pp { + struct { + u32 pp : 6; + u32 rsvd0 : 26; + } field; + u32 val; +}; + +#define HQM_SYS_VF_DIR_VPP_V(x) \ + (0xf10 + (x) * 0x1000) +#define HQM_SYS_VF_DIR_VPP_V_RST 0x0 +union hqm_sys_vf_dir_vpp_v { + struct { + u32 vpp_v : 1; + u32 rsvd0 : 31; + } field; + u32 val; +}; + +#define HQM_SYS_VF_DIR_VPP2PP(x) \ + (0xf18 + (x) * 0x1000) +#define HQM_SYS_VF_DIR_VPP2PP_RST 0x0 +union hqm_sys_vf_dir_vpp2pp { + struct { + u32 pp : 7; + u32 rsvd0 : 25; + } field; + u32 val; +}; + +#define HQM_SYS_VF_LDB_VQID_V(x) \ + (0xf20 + (x) * 0x1000) +#define HQM_SYS_VF_LDB_VQID_V_RST 0x0 +union hqm_sys_vf_ldb_vqid_v { + struct { + u32 vqid_v : 1; + u32 rsvd0 : 31; + } field; + u32 val; +}; + +#define HQM_SYS_VF_LDB_VQID2QID(x) \ + (0xf28 + (x) * 0x1000) +#define HQM_SYS_VF_LDB_VQID2QID_RST 0x0 +union hqm_sys_vf_ldb_vqid2qid { + struct { + u32 qid : 7; + u32 rsvd0 : 25; + } field; + u32 val; +}; + +#define HQM_SYS_LDB_QID2VQID(x) \ + (0xf2c + (x) * 0x1000) +#define HQM_SYS_LDB_QID2VQID_RST 0x0 +union hqm_sys_ldb_qid2vqid { + struct { + u32 vqid : 7; + u32 rsvd0 : 25; + } field; + u32 val; +}; + +#define HQM_SYS_VF_DIR_VQID_V(x) \ + (0xf30 + (x) * 0x1000) +#define HQM_SYS_VF_DIR_VQID_V_RST 0x0 +union hqm_sys_vf_dir_vqid_v { + struct { + u32 vqid_v : 1; + u32 rsvd0 : 31; + } field; + u32 val; +}; + +#define HQM_SYS_VF_DIR_VQID2QID(x) \ + (0xf38 + (x) * 0x1000) +#define HQM_SYS_VF_DIR_VQID2QID_RST 0x0 +union hqm_sys_vf_dir_vqid2qid { + struct { + u32 qid : 7; + u32 rsvd0 : 25; + } field; + u32 val; +}; + +#define HQM_SYS_LDB_VASQID_V(x) \ + (0xf60 + (x) * 0x1000) +#define HQM_SYS_LDB_VASQID_V_RST 0x0 +union hqm_sys_ldb_vasqid_v { + struct { + u32 vasqid_v : 1; + u32 rsvd0 : 31; + } field; + u32 val; +}; + +#define HQM_SYS_DIR_VASQID_V(x) \ + (0xf68 + (x) * 0x1000) +#define HQM_SYS_DIR_VASQID_V_RST 0x0 +union hqm_sys_dir_vasqid_v { + struct { + u32 vasqid_v : 1; + u32 rsvd0 : 31; + } field; + u32 val; +}; + +#define HQM_SYS_WBUF_DIR_FLAGS(x) \ + (0xf70 + (x) * 0x1000) +#define HQM_SYS_WBUF_DIR_FLAGS_RST 0x0 +union hqm_sys_wbuf_dir_flags { + struct { + u32 wb_v : 4; + u32 cl : 1; + u32 busy : 1; + u32 opt : 1; + u32 rsvd0 : 25; + } field; + u32 val; +}; + +#define HQM_SYS_WBUF_LDB_FLAGS(x) \ + (0xf78 + (x) * 0x1000) +#define HQM_SYS_WBUF_LDB_FLAGS_RST 0x0 +union hqm_sys_wbuf_ldb_flags { + struct { + u32 wb_v : 4; + u32 cl : 1; + u32 busy : 1; + u32 rsvd0 : 26; + } field; + u32 val; +}; + +#define HQM_SYS_SMON_COMP_MASK1 0x3024 +#define HQM_SYS_SMON_COMP_MASK1_RST 0xffffffff +union hqm_sys_smon_comp_mask1 { + struct { + u32 comp_mask1 : 32; + } field; + u32 val; +}; + +#define HQM_SYS_SMON_COMP_MASK0 0x3020 +#define HQM_SYS_SMON_COMP_MASK0_RST 0xffffffff +union hqm_sys_smon_comp_mask0 { + struct { + u32 comp_mask0 : 32; + } field; + u32 val; +}; + +#define HQM_SYS_SMON_MAX_TMR 0x301c +#define HQM_SYS_SMON_MAX_TMR_RST 0x0 +union hqm_sys_smon_max_tmr { + struct { + u32 maxvalue : 32; + } field; + u32 val; +}; + +#define HQM_SYS_SMON_TMR 0x3018 +#define HQM_SYS_SMON_TMR_RST 0x0 +union hqm_sys_smon_tmr { + struct { + u32 timer_val : 32; + } field; + u32 val; +}; + +#define HQM_SYS_SMON_ACTIVITYCNTR1 0x3014 +#define HQM_SYS_SMON_ACTIVITYCNTR1_RST 0x0 +union hqm_sys_smon_activitycntr1 { + struct { + u32 counter1 : 32; + } field; + u32 val; +}; + +#define 
HQM_SYS_SMON_ACTIVITYCNTR0 0x3010 +#define HQM_SYS_SMON_ACTIVITYCNTR0_RST 0x0 +union hqm_sys_smon_activitycntr0 { + struct { + u32 counter0 : 32; + } field; + u32 val; +}; + +#define HQM_SYS_SMON_COMPARE1 0x300c +#define HQM_SYS_SMON_COMPARE1_RST 0x0 +union hqm_sys_smon_compare1 { + struct { + u32 compare1 : 32; + } field; + u32 val; +}; + +#define HQM_SYS_SMON_COMPARE0 0x3008 +#define HQM_SYS_SMON_COMPARE0_RST 0x0 +union hqm_sys_smon_compare0 { + struct { + u32 compare0 : 32; + } field; + u32 val; +}; + +#define HQM_SYS_SMON_CFG1 0x3004 +#define HQM_SYS_SMON_CFG1_RST 0x0 +union hqm_sys_smon_cfg1 { + struct { + u32 mode0 : 8; + u32 mode1 : 8; + u32 rsvd : 16; + } field; + u32 val; +}; + +#define HQM_SYS_SMON_CFG0 0x3000 +#define HQM_SYS_SMON_CFG0_RST 0x40000000 +union hqm_sys_smon_cfg0 { + struct { + u32 smon_enable : 1; + u32 rsvd2 : 3; + u32 smon0_function : 3; + u32 smon0_function_compare : 1; + u32 smon1_function : 3; + u32 smon1_function_compare : 1; + u32 smon_mode : 4; + u32 stopcounterovfl : 1; + u32 intcounterovfl : 1; + u32 statcounter0ovfl : 1; + u32 statcounter1ovfl : 1; + u32 stoptimerovfl : 1; + u32 inttimerovfl : 1; + u32 stattimerovfl : 1; + u32 rsvd1 : 1; + u32 timer_prescale : 5; + u32 rsvd0 : 1; + u32 version : 2; + } field; + u32 val; +}; + +#define HQM_SYS_ALARM_VF_SYND2(x) \ + (0x8000018 + (x) * 0x1000) +#define HQM_SYS_ALARM_VF_SYND2_RST 0x0 +union hqm_sys_alarm_vf_synd2 { + struct { + u32 lock_id : 16; + u32 meas : 1; + u32 debug : 7; + u32 cq_pop : 1; + u32 qe_uhl : 1; + u32 qe_orsp : 1; + u32 qe_valid : 1; + u32 cq_int_rearm : 1; + u32 dsi_error : 1; + u32 rsvd0 : 2; + } field; + u32 val; +}; + +#define HQM_SYS_ALARM_VF_SYND1(x) \ + (0x8000014 + (x) * 0x1000) +#define HQM_SYS_ALARM_VF_SYND1_RST 0x0 +union hqm_sys_alarm_vf_synd1 { + struct { + u32 dsi : 16; + u32 qid : 8; + u32 qtype : 2; + u32 qpri : 3; + u32 msg_type : 3; + } field; + u32 val; +}; + +#define HQM_SYS_ALARM_VF_SYND0(x) \ + (0x8000010 + (x) * 0x1000) +#define HQM_SYS_ALARM_VF_SYND0_RST 0x0 +union hqm_sys_alarm_vf_synd0 { + struct { + u32 syndrome : 8; + u32 rtype : 2; + u32 rsvd0 : 2; + u32 from_dmv : 1; + u32 is_ldb : 1; + u32 cls : 2; + u32 aid : 6; + u32 unit : 4; + u32 source : 4; + u32 more : 1; + u32 valid : 1; + } field; + u32 val; +}; + +#define HQM_SYS_LDB_QID_V(x) \ + (0x8000034 + (x) * 0x1000) +#define HQM_SYS_LDB_QID_V_RST 0x0 +union hqm_sys_ldb_qid_v { + struct { + u32 qid_v : 1; + u32 rsvd0 : 31; + } field; + u32 val; +}; + +#define HQM_SYS_LDB_QID_CFG_V(x) \ + (0x8000030 + (x) * 0x1000) +#define HQM_SYS_LDB_QID_CFG_V_RST 0x0 +union hqm_sys_ldb_qid_cfg_v { + struct { + u32 sn_cfg_v : 1; + u32 fid_cfg_v : 1; + u32 rsvd0 : 30; + } field; + u32 val; +}; + +#define HQM_SYS_DIR_QID_V(x) \ + (0x8000040 + (x) * 0x1000) +#define HQM_SYS_DIR_QID_V_RST 0x0 +union hqm_sys_dir_qid_v { + struct { + u32 qid_v : 1; + u32 rsvd0 : 31; + } field; + u32 val; +}; + +#define HQM_SYS_LDB_POOL_ENBLD(x) \ + (0x8000070 + (x) * 0x1000) +#define HQM_SYS_LDB_POOL_ENBLD_RST 0x0 +union hqm_sys_ldb_pool_enbld { + struct { + u32 pool_enabled : 1; + u32 rsvd0 : 31; + } field; + u32 val; +}; + +#define HQM_SYS_DIR_POOL_ENBLD(x) \ + (0x8000080 + (x) * 0x1000) +#define HQM_SYS_DIR_POOL_ENBLD_RST 0x0 +union hqm_sys_dir_pool_enbld { + struct { + u32 pool_enabled : 1; + u32 rsvd0 : 31; + } field; + u32 val; +}; + +#define HQM_SYS_LDB_PP2VPP(x) \ + (0x8000090 + (x) * 0x1000) +#define HQM_SYS_LDB_PP2VPP_RST 0x0 +union hqm_sys_ldb_pp2vpp { + struct { + u32 vpp : 6; + u32 rsvd0 : 26; + } field; + u32 val; +}; + +#define 
HQM_SYS_DIR_PP2VPP(x) \ + (0x8000094 + (x) * 0x1000) +#define HQM_SYS_DIR_PP2VPP_RST 0x0 +union hqm_sys_dir_pp2vpp { + struct { + u32 vpp : 7; + u32 rsvd0 : 25; + } field; + u32 val; +}; + +#define HQM_SYS_LDB_PP_V(x) \ + (0x8000128 + (x) * 0x1000) +#define HQM_SYS_LDB_PP_V_RST 0x0 +union hqm_sys_ldb_pp_v { + struct { + u32 pp_v : 1; + u32 rsvd0 : 31; + } field; + u32 val; +}; + +#define HQM_SYS_LDB_CQ_ISR(x) \ + (0x8000124 + (x) * 0x1000) +#define HQM_SYS_LDB_CQ_ISR_RST 0x0 +/* CQ Interrupt Modes */ +#define HQM_CQ_ISR_MODE_DIS 0 +#define HQM_CQ_ISR_MODE_MSI 1 +#define HQM_CQ_ISR_MODE_MSIX 2 +union hqm_sys_ldb_cq_isr { + struct { + u32 vector : 6; + u32 vf : 4; + u32 en_code : 2; + u32 rsvd0 : 20; + } field; + u32 val; +}; + +#define HQM_SYS_LDB_CQ2VF_PF(x) \ + (0x8000120 + (x) * 0x1000) +#define HQM_SYS_LDB_CQ2VF_PF_RST 0x0 +union hqm_sys_ldb_cq2vf_pf { + struct { + u32 vf : 4; + u32 is_pf : 1; + u32 rsvd0 : 27; + } field; + u32 val; +}; + +#define HQM_SYS_LDB_PP2VAS(x) \ + (0x800011c + (x) * 0x1000) +#define HQM_SYS_LDB_PP2VAS_RST 0x0 +union hqm_sys_ldb_pp2vas { + struct { + u32 vas : 5; + u32 rsvd0 : 27; + } field; + u32 val; +}; + +#define HQM_SYS_LDB_PP2LDBPOOL(x) \ + (0x8000118 + (x) * 0x1000) +#define HQM_SYS_LDB_PP2LDBPOOL_RST 0x0 +union hqm_sys_ldb_pp2ldbpool { + struct { + u32 ldbpool : 6; + u32 rsvd0 : 26; + } field; + u32 val; +}; + +#define HQM_SYS_LDB_PP2DIRPOOL(x) \ + (0x8000114 + (x) * 0x1000) +#define HQM_SYS_LDB_PP2DIRPOOL_RST 0x0 +union hqm_sys_ldb_pp2dirpool { + struct { + u32 dirpool : 6; + u32 rsvd0 : 26; + } field; + u32 val; +}; + +#define HQM_SYS_LDB_PP2VF_PF(x) \ + (0x8000110 + (x) * 0x1000) +#define HQM_SYS_LDB_PP2VF_PF_RST 0x0 +union hqm_sys_ldb_pp2vf_pf { + struct { + u32 vf : 4; + u32 is_pf : 1; + u32 rsvd0 : 27; + } field; + u32 val; +}; + +#define HQM_SYS_LDB_PP_ADDR_U(x) \ + (0x800010c + (x) * 0x1000) +#define HQM_SYS_LDB_PP_ADDR_U_RST 0x0 +union hqm_sys_ldb_pp_addr_u { + struct { + u32 addr_u : 32; + } field; + u32 val; +}; + +#define HQM_SYS_LDB_PP_ADDR_L(x) \ + (0x8000108 + (x) * 0x1000) +#define HQM_SYS_LDB_PP_ADDR_L_RST 0x0 +union hqm_sys_ldb_pp_addr_l { + struct { + u32 rsvd0 : 7; + u32 addr_l : 25; + } field; + u32 val; +}; + +#define HQM_SYS_LDB_CQ_ADDR_U(x) \ + (0x8000104 + (x) * 0x1000) +#define HQM_SYS_LDB_CQ_ADDR_U_RST 0x0 +union hqm_sys_ldb_cq_addr_u { + struct { + u32 addr_u : 32; + } field; + u32 val; +}; + +#define HQM_SYS_LDB_CQ_ADDR_L(x) \ + (0x8000100 + (x) * 0x1000) +#define HQM_SYS_LDB_CQ_ADDR_L_RST 0x0 +union hqm_sys_ldb_cq_addr_l { + struct { + u32 rsvd0 : 6; + u32 addr_l : 26; + } field; + u32 val; +}; + +#define HQM_SYS_DIR_PP_V(x) \ + (0x8000228 + (x) * 0x1000) +#define HQM_SYS_DIR_PP_V_RST 0x0 +union hqm_sys_dir_pp_v { + struct { + u32 pp_v : 1; + u32 mb_dm : 1; + u32 rsvd0 : 30; + } field; + u32 val; +}; + +#define HQM_SYS_DIR_CQ_ISR(x) \ + (0x8000224 + (x) * 0x1000) +#define HQM_SYS_DIR_CQ_ISR_RST 0x0 +union hqm_sys_dir_cq_isr { + struct { + u32 vector : 6; + u32 vf : 4; + u32 en_code : 2; + u32 rsvd0 : 20; + } field; + u32 val; +}; + +#define HQM_SYS_DIR_CQ2VF_PF(x) \ + (0x8000220 + (x) * 0x1000) +#define HQM_SYS_DIR_CQ2VF_PF_RST 0x0 +union hqm_sys_dir_cq2vf_pf { + struct { + u32 vf : 4; + u32 is_pf : 1; + u32 rsvd0 : 27; + } field; + u32 val; +}; + +#define HQM_SYS_DIR_PP2VAS(x) \ + (0x800021c + (x) * 0x1000) +#define HQM_SYS_DIR_PP2VAS_RST 0x0 +union hqm_sys_dir_pp2vas { + struct { + u32 vas : 5; + u32 rsvd0 : 27; + } field; + u32 val; +}; + +#define HQM_SYS_DIR_PP2LDBPOOL(x) \ + (0x8000218 + (x) * 0x1000) +#define 
HQM_SYS_DIR_PP2LDBPOOL_RST 0x0 +union hqm_sys_dir_pp2ldbpool { + struct { + u32 ldbpool : 6; + u32 rsvd0 : 26; + } field; + u32 val; +}; + +#define HQM_SYS_DIR_PP2DIRPOOL(x) \ + (0x8000214 + (x) * 0x1000) +#define HQM_SYS_DIR_PP2DIRPOOL_RST 0x0 +union hqm_sys_dir_pp2dirpool { + struct { + u32 dirpool : 6; + u32 rsvd0 : 26; + } field; + u32 val; +}; + +#define HQM_SYS_DIR_PP2VF_PF(x) \ + (0x8000210 + (x) * 0x1000) +#define HQM_SYS_DIR_PP2VF_PF_RST 0x0 +union hqm_sys_dir_pp2vf_pf { + struct { + u32 vf : 4; + u32 is_pf : 1; + u32 is_hw_dsi : 1; + u32 rsvd0 : 26; + } field; + u32 val; +}; + +#define HQM_SYS_DIR_PP_ADDR_U(x) \ + (0x800020c + (x) * 0x1000) +#define HQM_SYS_DIR_PP_ADDR_U_RST 0x0 +union hqm_sys_dir_pp_addr_u { + struct { + u32 addr_u : 32; + } field; + u32 val; +}; + +#define HQM_SYS_DIR_PP_ADDR_L(x) \ + (0x8000208 + (x) * 0x1000) +#define HQM_SYS_DIR_PP_ADDR_L_RST 0x0 +union hqm_sys_dir_pp_addr_l { + struct { + u32 rsvd0 : 7; + u32 addr_l : 25; + } field; + u32 val; +}; + +#define HQM_SYS_DIR_CQ_ADDR_U(x) \ + (0x8000204 + (x) * 0x1000) +#define HQM_SYS_DIR_CQ_ADDR_U_RST 0x0 +union hqm_sys_dir_cq_addr_u { + struct { + u32 addr_u : 32; + } field; + u32 val; +}; + +#define HQM_SYS_DIR_CQ_ADDR_L(x) \ + (0x8000200 + (x) * 0x1000) +#define HQM_SYS_DIR_CQ_ADDR_L_RST 0x0 +union hqm_sys_dir_cq_addr_l { + struct { + u32 rsvd0 : 6; + u32 addr_l : 26; + } field; + u32 val; +}; + +#define HQM_SYS_INGRESS_ALARM_ENBL 0x300 +#define HQM_SYS_INGRESS_ALARM_ENBL_RST 0x0 +union hqm_sys_ingress_alarm_enbl { + struct { + u32 illegal_hcw : 1; + u32 illegal_pp : 1; + u32 disabled_pp : 1; + u32 illegal_qid : 1; + u32 disabled_qid : 1; + u32 illegal_ldb_qid_cfg : 1; + u32 illegal_cqid : 1; + u32 rsvd0 : 25; + } field; + u32 val; +}; + +#define HQM_SYS_CQ_MODE 0x30c +#define HQM_SYS_CQ_MODE_RST 0x0 +union hqm_sys_cq_mode { + struct { + u32 ldb_cq64 : 1; + u32 dir_cq64 : 1; + u32 rsvd0 : 30; + } field; + u32 val; +}; + +#define HQM_SYS_FUNC_VF_BAR_DSBL(x) \ + (0x310 + (x) * 0x4) +#define HQM_SYS_FUNC_VF_BAR_DSBL_RST 0x0 +union hqm_sys_func_vf_bar_dsbl { + struct { + u32 func_vf_bar_dis : 1; + u32 rsvd0 : 31; + } field; + u32 val; +}; + +#define HQM_SYS_MSIX_ACK 0x400 +#define HQM_SYS_MSIX_ACK_RST 0x0 +union hqm_sys_msix_ack { + struct { + u32 msix_0_ack : 1; + u32 msix_1_ack : 1; + u32 msix_2_ack : 1; + u32 msix_3_ack : 1; + u32 msix_4_ack : 1; + u32 msix_5_ack : 1; + u32 msix_6_ack : 1; + u32 msix_7_ack : 1; + u32 msix_8_ack : 1; + u32 rsvd0 : 23; + } field; + u32 val; +}; + +#define HQM_SYS_MSIX_PASSTHRU 0x404 +#define HQM_SYS_MSIX_PASSTHRU_RST 0x0 +union hqm_sys_msix_passthru { + struct { + u32 msix_0_passthru : 1; + u32 msix_1_passthru : 1; + u32 msix_2_passthru : 1; + u32 msix_3_passthru : 1; + u32 msix_4_passthru : 1; + u32 msix_5_passthru : 1; + u32 msix_6_passthru : 1; + u32 msix_7_passthru : 1; + u32 msix_8_passthru : 1; + u32 rsvd0 : 23; + } field; + u32 val; +}; + +#define HQM_SYS_MSIX_MODE 0x408 +#define HQM_SYS_MSIX_MODE_RST 0x0 +/* MSI-X Modes */ +#define HQM_MSIX_MODE_PACKED 0 +#define HQM_MSIX_MODE_COMPRESSED 1 +union hqm_sys_msix_mode { + struct { + u32 mode : 1; + u32 rsvd0 : 31; + } field; + u32 val; +}; + +#define HQM_SYS_DIR_CQ_31_0_OCC_INT_STS 0x440 +#define HQM_SYS_DIR_CQ_31_0_OCC_INT_STS_RST 0x0 +union hqm_sys_dir_cq_31_0_occ_int_sts { + struct { + u32 cq_0_occ_int : 1; + u32 cq_1_occ_int : 1; + u32 cq_2_occ_int : 1; + u32 cq_3_occ_int : 1; + u32 cq_4_occ_int : 1; + u32 cq_5_occ_int : 1; + u32 cq_6_occ_int : 1; + u32 cq_7_occ_int : 1; + u32 cq_8_occ_int : 1; + u32 cq_9_occ_int : 1; + 
u32 cq_10_occ_int : 1; + u32 cq_11_occ_int : 1; + u32 cq_12_occ_int : 1; + u32 cq_13_occ_int : 1; + u32 cq_14_occ_int : 1; + u32 cq_15_occ_int : 1; + u32 cq_16_occ_int : 1; + u32 cq_17_occ_int : 1; + u32 cq_18_occ_int : 1; + u32 cq_19_occ_int : 1; + u32 cq_20_occ_int : 1; + u32 cq_21_occ_int : 1; + u32 cq_22_occ_int : 1; + u32 cq_23_occ_int : 1; + u32 cq_24_occ_int : 1; + u32 cq_25_occ_int : 1; + u32 cq_26_occ_int : 1; + u32 cq_27_occ_int : 1; + u32 cq_28_occ_int : 1; + u32 cq_29_occ_int : 1; + u32 cq_30_occ_int : 1; + u32 cq_31_occ_int : 1; + } field; + u32 val; +}; + +#define HQM_SYS_DIR_CQ_63_32_OCC_INT_STS 0x444 +#define HQM_SYS_DIR_CQ_63_32_OCC_INT_STS_RST 0x0 +union hqm_sys_dir_cq_63_32_occ_int_sts { + struct { + u32 cq_32_occ_int : 1; + u32 cq_33_occ_int : 1; + u32 cq_34_occ_int : 1; + u32 cq_35_occ_int : 1; + u32 cq_36_occ_int : 1; + u32 cq_37_occ_int : 1; + u32 cq_38_occ_int : 1; + u32 cq_39_occ_int : 1; + u32 cq_40_occ_int : 1; + u32 cq_41_occ_int : 1; + u32 cq_42_occ_int : 1; + u32 cq_43_occ_int : 1; + u32 cq_44_occ_int : 1; + u32 cq_45_occ_int : 1; + u32 cq_46_occ_int : 1; + u32 cq_47_occ_int : 1; + u32 cq_48_occ_int : 1; + u32 cq_49_occ_int : 1; + u32 cq_50_occ_int : 1; + u32 cq_51_occ_int : 1; + u32 cq_52_occ_int : 1; + u32 cq_53_occ_int : 1; + u32 cq_54_occ_int : 1; + u32 cq_55_occ_int : 1; + u32 cq_56_occ_int : 1; + u32 cq_57_occ_int : 1; + u32 cq_58_occ_int : 1; + u32 cq_59_occ_int : 1; + u32 cq_60_occ_int : 1; + u32 cq_61_occ_int : 1; + u32 cq_62_occ_int : 1; + u32 cq_63_occ_int : 1; + } field; + u32 val; +}; + +#define HQM_SYS_DIR_CQ_95_64_OCC_INT_STS 0x448 +#define HQM_SYS_DIR_CQ_95_64_OCC_INT_STS_RST 0x0 +union hqm_sys_dir_cq_95_64_occ_int_sts { + struct { + u32 cq_64_occ_int : 1; + u32 cq_65_occ_int : 1; + u32 cq_66_occ_int : 1; + u32 cq_67_occ_int : 1; + u32 cq_68_occ_int : 1; + u32 cq_69_occ_int : 1; + u32 cq_70_occ_int : 1; + u32 cq_71_occ_int : 1; + u32 cq_72_occ_int : 1; + u32 cq_73_occ_int : 1; + u32 cq_74_occ_int : 1; + u32 cq_75_occ_int : 1; + u32 cq_76_occ_int : 1; + u32 cq_77_occ_int : 1; + u32 cq_78_occ_int : 1; + u32 cq_79_occ_int : 1; + u32 cq_80_occ_int : 1; + u32 cq_81_occ_int : 1; + u32 cq_82_occ_int : 1; + u32 cq_83_occ_int : 1; + u32 cq_84_occ_int : 1; + u32 cq_85_occ_int : 1; + u32 cq_86_occ_int : 1; + u32 cq_87_occ_int : 1; + u32 cq_88_occ_int : 1; + u32 cq_89_occ_int : 1; + u32 cq_90_occ_int : 1; + u32 cq_91_occ_int : 1; + u32 cq_92_occ_int : 1; + u32 cq_93_occ_int : 1; + u32 cq_94_occ_int : 1; + u32 cq_95_occ_int : 1; + } field; + u32 val; +}; + +#define HQM_SYS_DIR_CQ_127_96_OCC_INT_STS 0x44c +#define HQM_SYS_DIR_CQ_127_96_OCC_INT_STS_RST 0x0 +union hqm_sys_dir_cq_127_96_occ_int_sts { + struct { + u32 cq_96_occ_int : 1; + u32 cq_97_occ_int : 1; + u32 cq_98_occ_int : 1; + u32 cq_99_occ_int : 1; + u32 cq_100_occ_int : 1; + u32 cq_101_occ_int : 1; + u32 cq_102_occ_int : 1; + u32 cq_103_occ_int : 1; + u32 cq_104_occ_int : 1; + u32 cq_105_occ_int : 1; + u32 cq_106_occ_int : 1; + u32 cq_107_occ_int : 1; + u32 cq_108_occ_int : 1; + u32 cq_109_occ_int : 1; + u32 cq_110_occ_int : 1; + u32 cq_111_occ_int : 1; + u32 cq_112_occ_int : 1; + u32 cq_113_occ_int : 1; + u32 cq_114_occ_int : 1; + u32 cq_115_occ_int : 1; + u32 cq_116_occ_int : 1; + u32 cq_117_occ_int : 1; + u32 cq_118_occ_int : 1; + u32 cq_119_occ_int : 1; + u32 cq_120_occ_int : 1; + u32 cq_121_occ_int : 1; + u32 cq_122_occ_int : 1; + u32 cq_123_occ_int : 1; + u32 cq_124_occ_int : 1; + u32 cq_125_occ_int : 1; + u32 cq_126_occ_int : 1; + u32 cq_127_occ_int : 1; + } field; + u32 val; +}; + +#define 
HQM_SYS_LDB_CQ_31_0_OCC_INT_STS 0x460 +#define HQM_SYS_LDB_CQ_31_0_OCC_INT_STS_RST 0x0 +union hqm_sys_ldb_cq_31_0_occ_int_sts { + struct { + u32 cq_0_occ_int : 1; + u32 cq_1_occ_int : 1; + u32 cq_2_occ_int : 1; + u32 cq_3_occ_int : 1; + u32 cq_4_occ_int : 1; + u32 cq_5_occ_int : 1; + u32 cq_6_occ_int : 1; + u32 cq_7_occ_int : 1; + u32 cq_8_occ_int : 1; + u32 cq_9_occ_int : 1; + u32 cq_10_occ_int : 1; + u32 cq_11_occ_int : 1; + u32 cq_12_occ_int : 1; + u32 cq_13_occ_int : 1; + u32 cq_14_occ_int : 1; + u32 cq_15_occ_int : 1; + u32 cq_16_occ_int : 1; + u32 cq_17_occ_int : 1; + u32 cq_18_occ_int : 1; + u32 cq_19_occ_int : 1; + u32 cq_20_occ_int : 1; + u32 cq_21_occ_int : 1; + u32 cq_22_occ_int : 1; + u32 cq_23_occ_int : 1; + u32 cq_24_occ_int : 1; + u32 cq_25_occ_int : 1; + u32 cq_26_occ_int : 1; + u32 cq_27_occ_int : 1; + u32 cq_28_occ_int : 1; + u32 cq_29_occ_int : 1; + u32 cq_30_occ_int : 1; + u32 cq_31_occ_int : 1; + } field; + u32 val; +}; + +#define HQM_SYS_LDB_CQ_63_32_OCC_INT_STS 0x464 +#define HQM_SYS_LDB_CQ_63_32_OCC_INT_STS_RST 0x0 +union hqm_sys_ldb_cq_63_32_occ_int_sts { + struct { + u32 cq_32_occ_int : 1; + u32 cq_33_occ_int : 1; + u32 cq_34_occ_int : 1; + u32 cq_35_occ_int : 1; + u32 cq_36_occ_int : 1; + u32 cq_37_occ_int : 1; + u32 cq_38_occ_int : 1; + u32 cq_39_occ_int : 1; + u32 cq_40_occ_int : 1; + u32 cq_41_occ_int : 1; + u32 cq_42_occ_int : 1; + u32 cq_43_occ_int : 1; + u32 cq_44_occ_int : 1; + u32 cq_45_occ_int : 1; + u32 cq_46_occ_int : 1; + u32 cq_47_occ_int : 1; + u32 cq_48_occ_int : 1; + u32 cq_49_occ_int : 1; + u32 cq_50_occ_int : 1; + u32 cq_51_occ_int : 1; + u32 cq_52_occ_int : 1; + u32 cq_53_occ_int : 1; + u32 cq_54_occ_int : 1; + u32 cq_55_occ_int : 1; + u32 cq_56_occ_int : 1; + u32 cq_57_occ_int : 1; + u32 cq_58_occ_int : 1; + u32 cq_59_occ_int : 1; + u32 cq_60_occ_int : 1; + u32 cq_61_occ_int : 1; + u32 cq_62_occ_int : 1; + u32 cq_63_occ_int : 1; + } field; + u32 val; +}; + +#define HQM_SYS_ALARM_HW_SYND 0x50c +#define HQM_SYS_ALARM_HW_SYND_RST 0x0 +union hqm_sys_alarm_hw_synd { + struct { + u32 syndrome : 8; + u32 rtype : 2; + u32 rsvd0 : 2; + u32 from_dmv : 1; + u32 is_ldb : 1; + u32 cls : 2; + u32 aid : 6; + u32 unit : 4; + u32 source : 4; + u32 more : 1; + u32 valid : 1; + } field; + u32 val; +}; + +#define HQM_LSP_CQ_LDB_TOT_SCH_CNT_CTRL(x) \ + (0x20000000 + (x) * 0x1000) +#define HQM_LSP_CQ_LDB_TOT_SCH_CNT_CTRL_RST 0x0 +union hqm_lsp_cq_ldb_tot_sch_cnt_ctrl { + struct { + u32 count : 32; + } field; + u32 val; +}; + +#define HQM_LSP_CQ_LDB_DSBL(x) \ + (0x20000124 + (x) * 0x1000) +#define HQM_LSP_CQ_LDB_DSBL_RST 0x1 +union hqm_lsp_cq_ldb_dsbl { + struct { + u32 disabled : 1; + u32 rsvd0 : 31; + } field; + u32 val; +}; + +#define HQM_LSP_CQ_LDB_TOT_SCH_CNTH(x) \ + (0x20000120 + (x) * 0x1000) +#define HQM_LSP_CQ_LDB_TOT_SCH_CNTH_RST 0x0 +union hqm_lsp_cq_ldb_tot_sch_cnth { + struct { + u32 count : 32; + } field; + u32 val; +}; + +#define HQM_LSP_CQ_LDB_TOT_SCH_CNTL(x) \ + (0x2000011c + (x) * 0x1000) +#define HQM_LSP_CQ_LDB_TOT_SCH_CNTL_RST 0x0 +union hqm_lsp_cq_ldb_tot_sch_cntl { + struct { + u32 count : 32; + } field; + u32 val; +}; + +#define HQM_LSP_CQ_LDB_TKN_DEPTH_SEL(x) \ + (0x20000118 + (x) * 0x1000) +#define HQM_LSP_CQ_LDB_TKN_DEPTH_SEL_RST 0x0 +union hqm_lsp_cq_ldb_tkn_depth_sel { + struct { + u32 token_depth_select : 4; + u32 ignore_depth : 1; + u32 enab_shallow_cq : 1; + u32 rsvd0 : 26; + } field; + u32 val; +}; + +#define HQM_LSP_CQ_LDB_TKN_CNT(x) \ + (0x20000114 + (x) * 0x1000) +#define HQM_LSP_CQ_LDB_TKN_CNT_RST 0x0 +union hqm_lsp_cq_ldb_tkn_cnt 
{ + struct { + u32 token_count : 11; + u32 rsvd0 : 21; + } field; + u32 val; +}; + +#define HQM_LSP_CQ_LDB_INFL_LIM(x) \ + (0x20000110 + (x) * 0x1000) +#define HQM_LSP_CQ_LDB_INFL_LIM_RST 0x0 +union hqm_lsp_cq_ldb_infl_lim { + struct { + u32 limit : 13; + u32 rsvd0 : 19; + } field; + u32 val; +}; + +#define HQM_LSP_CQ_LDB_INFL_CNT(x) \ + (0x2000010c + (x) * 0x1000) +#define HQM_LSP_CQ_LDB_INFL_CNT_RST 0x0 +union hqm_lsp_cq_ldb_infl_cnt { + struct { + u32 count : 13; + u32 rsvd0 : 19; + } field; + u32 val; +}; + +#define HQM_LSP_CQ2QID(x, y) \ + (0x20000104 + (x) * 0x1000 + (y) * 0x4) +#define HQM_LSP_CQ2QID_RST 0x0 +union hqm_lsp_cq2qid { + struct { + u32 qid_p0 : 7; + u32 rsvd3 : 1; + u32 qid_p1 : 7; + u32 rsvd2 : 1; + u32 qid_p2 : 7; + u32 rsvd1 : 1; + u32 qid_p3 : 7; + u32 rsvd0 : 1; + } field; + u32 val; +}; + +#define HQM_LSP_CQ2PRIOV(x) \ + (0x20000100 + (x) * 0x1000) +#define HQM_LSP_CQ2PRIOV_RST 0x0 +union hqm_lsp_cq2priov { + struct { + u32 prio : 24; + u32 v : 8; + } field; + u32 val; +}; + +#define HQM_LSP_CQ_DIR_DSBL(x) \ + (0x20000310 + (x) * 0x1000) +#define HQM_LSP_CQ_DIR_DSBL_RST 0x1 +union hqm_lsp_cq_dir_dsbl { + struct { + u32 disabled : 1; + u32 rsvd0 : 31; + } field; + u32 val; +}; + +#define HQM_LSP_CQ_DIR_TKN_DEPTH_SEL_DSI(x) \ + (0x2000030c + (x) * 0x1000) +#define HQM_LSP_CQ_DIR_TKN_DEPTH_SEL_DSI_RST 0x0 +union hqm_lsp_cq_dir_tkn_depth_sel_dsi { + struct { + u32 token_depth_select : 4; + u32 disable_wb_opt : 1; + u32 ignore_depth : 1; + u32 rsvd0 : 26; + } field; + u32 val; +}; + +#define HQM_LSP_CQ_DIR_TOT_SCH_CNTH(x) \ + (0x20000308 + (x) * 0x1000) +#define HQM_LSP_CQ_DIR_TOT_SCH_CNTH_RST 0x0 +union hqm_lsp_cq_dir_tot_sch_cnth { + struct { + u32 count : 32; + } field; + u32 val; +}; + +#define HQM_LSP_CQ_DIR_TOT_SCH_CNTL(x) \ + (0x20000304 + (x) * 0x1000) +#define HQM_LSP_CQ_DIR_TOT_SCH_CNTL_RST 0x0 +union hqm_lsp_cq_dir_tot_sch_cntl { + struct { + u32 count : 32; + } field; + u32 val; +}; + +#define HQM_LSP_CQ_DIR_TKN_CNT(x) \ + (0x20000300 + (x) * 0x1000) +#define HQM_LSP_CQ_DIR_TKN_CNT_RST 0x0 +union hqm_lsp_cq_dir_tkn_cnt { + struct { + u32 count : 11; + u32 rsvd0 : 21; + } field; + u32 val; +}; + +#define HQM_LSP_QID_LDB_QID2CQIDX(x, y) \ + (0x20000400 + (x) * 0x1000 + (y) * 0x4) +#define HQM_LSP_QID_LDB_QID2CQIDX_RST 0x0 +union hqm_lsp_qid_ldb_qid2cqidx { + struct { + u32 cq_p0 : 8; + u32 cq_p1 : 8; + u32 cq_p2 : 8; + u32 cq_p3 : 8; + } field; + u32 val; +}; + +#define HQM_LSP_QID_LDB_QID2CQIDX2(x, y) \ + (0x20000500 + (x) * 0x1000 + (y) * 0x4) +#define HQM_LSP_QID_LDB_QID2CQIDX2_RST 0x0 +union hqm_lsp_qid_ldb_qid2cqidx2 { + struct { + u32 cq_p0 : 8; + u32 cq_p1 : 8; + u32 cq_p2 : 8; + u32 cq_p3 : 8; + } field; + u32 val; +}; + +#define HQM_LSP_QID_ATQ_ENQUEUE_CNT(x) \ + (0x2000066c + (x) * 0x1000) +#define HQM_LSP_QID_ATQ_ENQUEUE_CNT_RST 0x0 +union hqm_lsp_qid_atq_enqueue_cnt { + struct { + u32 count : 15; + u32 rsvd0 : 17; + } field; + u32 val; +}; + +#define HQM_LSP_QID_LDB_INFL_LIM(x) \ + (0x2000064c + (x) * 0x1000) +#define HQM_LSP_QID_LDB_INFL_LIM_RST 0x0 +union hqm_lsp_qid_ldb_infl_lim { + struct { + u32 limit : 13; + u32 rsvd0 : 19; + } field; + u32 val; +}; + +#define HQM_LSP_QID_LDB_INFL_CNT(x) \ + (0x2000062c + (x) * 0x1000) +#define HQM_LSP_QID_LDB_INFL_CNT_RST 0x0 +union hqm_lsp_qid_ldb_infl_cnt { + struct { + u32 count : 13; + u32 rsvd0 : 19; + } field; + u32 val; +}; + +#define HQM_LSP_QID_AQED_ACTIVE_LIM(x) \ + (0x20000628 + (x) * 0x1000) +#define HQM_LSP_QID_AQED_ACTIVE_LIM_RST 0x0 +union hqm_lsp_qid_aqed_active_lim { + struct { + u32 limit 
: 12; + u32 rsvd0 : 20; + } field; + u32 val; +}; + +#define HQM_LSP_QID_AQED_ACTIVE_CNT(x) \ + (0x20000624 + (x) * 0x1000) +#define HQM_LSP_QID_AQED_ACTIVE_CNT_RST 0x0 +union hqm_lsp_qid_aqed_active_cnt { + struct { + u32 count : 12; + u32 rsvd0 : 20; + } field; + u32 val; +}; + +#define HQM_LSP_QID_LDB_ENQUEUE_CNT(x) \ + (0x20000604 + (x) * 0x1000) +#define HQM_LSP_QID_LDB_ENQUEUE_CNT_RST 0x0 +union hqm_lsp_qid_ldb_enqueue_cnt { + struct { + u32 count : 15; + u32 rsvd0 : 17; + } field; + u32 val; +}; + +#define HQM_LSP_QID_LDB_REPLAY_CNT(x) \ + (0x20000600 + (x) * 0x1000) +#define HQM_LSP_QID_LDB_REPLAY_CNT_RST 0x0 +union hqm_lsp_qid_ldb_replay_cnt { + struct { + u32 count : 15; + u32 rsvd0 : 17; + } field; + u32 val; +}; + +#define HQM_LSP_QID_DIR_ENQUEUE_CNT(x) \ + (0x20000700 + (x) * 0x1000) +#define HQM_LSP_QID_DIR_ENQUEUE_CNT_RST 0x0 +union hqm_lsp_qid_dir_enqueue_cnt { + struct { + u32 count : 13; + u32 rsvd0 : 19; + } field; + u32 val; +}; + +#define HQM_LSP_CTRL_CONFIG_0 0x2800002c +#define HQM_LSP_CTRL_CONFIG_0_RST 0x12cc +union hqm_lsp_ctrl_config_0 { + struct { + u32 atm_cq_qid_priority_prot : 1; + u32 ldb_arb_ignore_empty : 1; + u32 ldb_arb_mode : 2; + u32 ldb_arb_threshold : 18; + u32 cfg_cq_sla_upd_always : 1; + u32 cfg_cq_wcn_upd_always : 1; + u32 spare : 8; + } field; + u32 val; +}; + +#define HQM_LSP_LDB_SCHED_CTRL 0x28100000 +#define HQM_LSP_LDB_SCHED_CTRL_RST 0x0 +union hqm_lsp_ldb_sched_ctrl { + struct { + u32 cq : 8; + u32 qidix : 3; + u32 value : 1; + u32 nalb_haswork_v : 1; + u32 rlist_haswork_v : 1; + u32 slist_haswork_v : 1; + u32 inflight_ok_v : 1; + u32 aqed_nfull_v : 1; + u32 spare0 : 15; + } field; + u32 val; +}; + +#define HQM_LSP_DIR_SCH_CNT_H 0x2820000c +#define HQM_LSP_DIR_SCH_CNT_H_RST 0x0 +union hqm_lsp_dir_sch_cnt_h { + struct { + u32 count : 32; + } field; + u32 val; +}; + +#define HQM_LSP_DIR_SCH_CNT_L 0x28200008 +#define HQM_LSP_DIR_SCH_CNT_L_RST 0x0 +union hqm_lsp_dir_sch_cnt_l { + struct { + u32 count : 32; + } field; + u32 val; +}; + +#define HQM_LSP_LDB_SCH_CNT_H 0x28200004 +#define HQM_LSP_LDB_SCH_CNT_H_RST 0x0 +union hqm_lsp_ldb_sch_cnt_h { + struct { + u32 count : 32; + } field; + u32 val; +}; + +#define HQM_LSP_LDB_SCH_CNT_L 0x28200000 +#define HQM_LSP_LDB_SCH_CNT_L_RST 0x0 +union hqm_lsp_ldb_sch_cnt_l { + struct { + u32 count : 32; + } field; + u32 val; +}; + +#define HQM_DP_DIR_CSR_CTRL 0x38000018 +#define HQM_DP_DIR_CSR_CTRL_RST 0xc0000000 +union hqm_dp_dir_csr_ctrl { + struct { + u32 cfg_int_dis : 1; + u32 cfg_int_dis_sbe : 1; + u32 cfg_int_dis_mbe : 1; + u32 spare0 : 27; + u32 cfg_vasr_dis : 1; + u32 cfg_int_dis_synd : 1; + } field; + u32 val; +}; + +#define HQM_ATM_PIPE_QID_LDB_QID2CQIDX(x, y) \ + (0x70000000 + (x) * 0x1000 + (y) * 0x4) +#define HQM_ATM_PIPE_QID_LDB_QID2CQIDX_RST 0x0 +union hqm_atm_pipe_qid_ldb_qid2cqidx { + struct { + u32 cq_p0 : 8; + u32 cq_p1 : 8; + u32 cq_p2 : 8; + u32 cq_p3 : 8; + } field; + u32 val; +}; + +#define HQM_AQED_PIPE_QID_FID_LIM(x) \ + (0x80000014 + (x) * 0x1000) +#define HQM_AQED_PIPE_QID_FID_LIM_RST 0x7ff +union hqm_aqed_pipe_qid_fid_lim { + struct { + u32 qid_fid_limit : 13; + u32 rsvd0 : 19; + } field; + u32 val; +}; + +#define HQM_AQED_PIPE_FL_POP_PTR(x) \ + (0x80000010 + (x) * 0x1000) +#define HQM_AQED_PIPE_FL_POP_PTR_RST 0x0 +union hqm_aqed_pipe_fl_pop_ptr { + struct { + u32 pop_ptr : 11; + u32 generation : 1; + u32 rsvd0 : 20; + } field; + u32 val; +}; + +#define HQM_AQED_PIPE_FL_PUSH_PTR(x) \ + (0x8000000c + (x) * 0x1000) +#define HQM_AQED_PIPE_FL_PUSH_PTR_RST 0x0 +union 
hqm_aqed_pipe_fl_push_ptr { + struct { + u32 push_ptr : 11; + u32 generation : 1; + u32 rsvd0 : 20; + } field; + u32 val; +}; + +#define HQM_AQED_PIPE_FL_BASE(x) \ + (0x80000008 + (x) * 0x1000) +#define HQM_AQED_PIPE_FL_BASE_RST 0x0 +union hqm_aqed_pipe_fl_base { + struct { + u32 base : 11; + u32 rsvd0 : 21; + } field; + u32 val; +}; + +#define HQM_AQED_PIPE_FL_LIM(x) \ + (0x80000004 + (x) * 0x1000) +#define HQM_AQED_PIPE_FL_LIM_RST 0x800 +union hqm_aqed_pipe_fl_lim { + struct { + u32 limit : 11; + u32 freelist_disable : 1; + u32 rsvd0 : 20; + } field; + u32 val; +}; + +#define HQM_RO_PIPE_QID2GRPSLT(x) \ + (0x90000000 + (x) * 0x1000) +#define HQM_RO_PIPE_QID2GRPSLT_RST 0x0 +union hqm_ro_pipe_qid2grpslt { + struct { + u32 slot : 5; + u32 rsvd1 : 3; + u32 group : 2; + u32 rsvd0 : 22; + } field; + u32 val; +}; + +#define HQM_RO_PIPE_GRP_SN_MODE 0x98000008 +#define HQM_RO_PIPE_GRP_SN_MODE_RST 0x0 +union hqm_ro_pipe_grp_sn_mode { + struct { + u32 sn_mode_0 : 3; + u32 reserved0 : 5; + u32 sn_mode_1 : 3; + u32 reserved1 : 5; + u32 sn_mode_2 : 3; + u32 reserved2 : 5; + u32 sn_mode_3 : 3; + u32 reserved3 : 5; + } field; + u32 val; +}; + +#define HQM_CHP_DIR_CQ_WD_ENB(x) \ + (0xa0000038 + (x) * 0x1000) +#define HQM_CHP_DIR_CQ_WD_ENB_RST 0x0 +union hqm_chp_dir_cq_wd_enb { + struct { + u32 wd_enable : 1; + u32 rsvd0 : 31; + } field; + u32 val; +}; + +#define HQM_CHP_DIR_LDB_PP2POOL(x) \ + (0xa0000034 + (x) * 0x1000) +#define HQM_CHP_DIR_LDB_PP2POOL_RST 0x0 +union hqm_chp_dir_ldb_pp2pool { + struct { + u32 pool : 6; + u32 rsvd0 : 26; + } field; + u32 val; +}; + +#define HQM_CHP_DIR_DIR_PP2POOL(x) \ + (0xa0000030 + (x) * 0x1000) +#define HQM_CHP_DIR_DIR_PP2POOL_RST 0x0 +union hqm_chp_dir_dir_pp2pool { + struct { + u32 pool : 6; + u32 rsvd0 : 26; + } field; + u32 val; +}; + +#define HQM_CHP_DIR_PP_LDB_CRD_CNT(x) \ + (0xa000002c + (x) * 0x1000) +#define HQM_CHP_DIR_PP_LDB_CRD_CNT_RST 0x0 +union hqm_chp_dir_pp_ldb_crd_cnt { + struct { + u32 count : 16; + u32 rsvd0 : 16; + } field; + u32 val; +}; + +#define HQM_CHP_DIR_PP_DIR_CRD_CNT(x) \ + (0xa0000028 + (x) * 0x1000) +#define HQM_CHP_DIR_PP_DIR_CRD_CNT_RST 0x0 +union hqm_chp_dir_pp_dir_crd_cnt { + struct { + u32 count : 14; + u32 rsvd0 : 18; + } field; + u32 val; +}; + +#define HQM_CHP_DIR_CQ_TMR_THRESHOLD(x) \ + (0xa0000024 + (x) * 0x1000) +#define HQM_CHP_DIR_CQ_TMR_THRESHOLD_RST 0x0 +union hqm_chp_dir_cq_tmr_threshold { + struct { + u32 timer_thrsh : 14; + u32 rsvd0 : 18; + } field; + u32 val; +}; + +#define HQM_CHP_DIR_CQ_INT_ENB(x) \ + (0xa0000020 + (x) * 0x1000) +#define HQM_CHP_DIR_CQ_INT_ENB_RST 0x0 +union hqm_chp_dir_cq_int_enb { + struct { + u32 en_tim : 1; + u32 en_depth : 1; + u32 rsvd0 : 30; + } field; + u32 val; +}; + +#define HQM_CHP_DIR_CQ_INT_DEPTH_THRSH(x) \ + (0xa000001c + (x) * 0x1000) +#define HQM_CHP_DIR_CQ_INT_DEPTH_THRSH_RST 0x0 +union hqm_chp_dir_cq_int_depth_thrsh { + struct { + u32 depth_threshold : 12; + u32 rsvd0 : 20; + } field; + u32 val; +}; + +#define HQM_CHP_DIR_CQ_TKN_DEPTH_SEL(x) \ + (0xa0000018 + (x) * 0x1000) +#define HQM_CHP_DIR_CQ_TKN_DEPTH_SEL_RST 0x0 +union hqm_chp_dir_cq_tkn_depth_sel { + struct { + u32 token_depth_select : 4; + u32 rsvd0 : 28; + } field; + u32 val; +}; + +#define HQM_CHP_DIR_PP_LDB_MIN_CRD_QNT(x) \ + (0xa0000014 + (x) * 0x1000) +#define HQM_CHP_DIR_PP_LDB_MIN_CRD_QNT_RST 0x1 +union hqm_chp_dir_pp_ldb_min_crd_qnt { + struct { + u32 quanta : 10; + u32 rsvd0 : 22; + } field; + u32 val; +}; + +#define HQM_CHP_DIR_PP_DIR_MIN_CRD_QNT(x) \ + (0xa0000010 + (x) * 0x1000) +#define 
HQM_CHP_DIR_PP_DIR_MIN_CRD_QNT_RST 0x1 +union hqm_chp_dir_pp_dir_min_crd_qnt { + struct { + u32 quanta : 10; + u32 rsvd0 : 22; + } field; + u32 val; +}; + +#define HQM_CHP_DIR_PP_LDB_CRD_LWM(x) \ + (0xa000000c + (x) * 0x1000) +#define HQM_CHP_DIR_PP_LDB_CRD_LWM_RST 0x0 +union hqm_chp_dir_pp_ldb_crd_lwm { + struct { + u32 lwm : 16; + u32 rsvd0 : 16; + } field; + u32 val; +}; + +#define HQM_CHP_DIR_PP_LDB_CRD_HWM(x) \ + (0xa0000008 + (x) * 0x1000) +#define HQM_CHP_DIR_PP_LDB_CRD_HWM_RST 0x0 +union hqm_chp_dir_pp_ldb_crd_hwm { + struct { + u32 hwm : 16; + u32 rsvd0 : 16; + } field; + u32 val; +}; + +#define HQM_CHP_DIR_PP_DIR_CRD_LWM(x) \ + (0xa0000004 + (x) * 0x1000) +#define HQM_CHP_DIR_PP_DIR_CRD_LWM_RST 0x0 +union hqm_chp_dir_pp_dir_crd_lwm { + struct { + u32 lwm : 14; + u32 rsvd0 : 18; + } field; + u32 val; +}; + +#define HQM_CHP_DIR_PP_DIR_CRD_HWM(x) \ + (0xa0000000 + (x) * 0x1000) +#define HQM_CHP_DIR_PP_DIR_CRD_HWM_RST 0x0 +union hqm_chp_dir_pp_dir_crd_hwm { + struct { + u32 hwm : 14; + u32 rsvd0 : 18; + } field; + u32 val; +}; + +#define HQM_CHP_LDB_CQ_WD_ENB(x) \ + (0xa0000144 + (x) * 0x1000) +#define HQM_CHP_LDB_CQ_WD_ENB_RST 0x0 +union hqm_chp_ldb_cq_wd_enb { + struct { + u32 wd_enable : 1; + u32 rsvd0 : 31; + } field; + u32 val; +}; + +#define HQM_CHP_SN_CHK_ENBL(x) \ + (0xa0000140 + (x) * 0x1000) +#define HQM_CHP_SN_CHK_ENBL_RST 0x0 +union hqm_chp_sn_chk_enbl { + struct { + u32 en : 1; + u32 rsvd0 : 31; + } field; + u32 val; +}; + +#define HQM_CHP_HIST_LIST_BASE(x) \ + (0xa000013c + (x) * 0x1000) +#define HQM_CHP_HIST_LIST_BASE_RST 0x0 +union hqm_chp_hist_list_base { + struct { + u32 base : 13; + u32 rsvd0 : 19; + } field; + u32 val; +}; + +#define HQM_CHP_HIST_LIST_LIM(x) \ + (0xa0000138 + (x) * 0x1000) +#define HQM_CHP_HIST_LIST_LIM_RST 0x0 +union hqm_chp_hist_list_lim { + struct { + u32 limit : 13; + u32 rsvd0 : 19; + } field; + u32 val; +}; + +#define HQM_CHP_LDB_LDB_PP2POOL(x) \ + (0xa0000134 + (x) * 0x1000) +#define HQM_CHP_LDB_LDB_PP2POOL_RST 0x0 +union hqm_chp_ldb_ldb_pp2pool { + struct { + u32 pool : 6; + u32 rsvd0 : 26; + } field; + u32 val; +}; + +#define HQM_CHP_LDB_DIR_PP2POOL(x) \ + (0xa0000130 + (x) * 0x1000) +#define HQM_CHP_LDB_DIR_PP2POOL_RST 0x0 +union hqm_chp_ldb_dir_pp2pool { + struct { + u32 pool : 6; + u32 rsvd0 : 26; + } field; + u32 val; +}; + +#define HQM_CHP_LDB_PP_LDB_CRD_CNT(x) \ + (0xa000012c + (x) * 0x1000) +#define HQM_CHP_LDB_PP_LDB_CRD_CNT_RST 0x0 +union hqm_chp_ldb_pp_ldb_crd_cnt { + struct { + u32 count : 16; + u32 rsvd0 : 16; + } field; + u32 val; +}; + +#define HQM_CHP_LDB_PP_DIR_CRD_CNT(x) \ + (0xa0000128 + (x) * 0x1000) +#define HQM_CHP_LDB_PP_DIR_CRD_CNT_RST 0x0 +union hqm_chp_ldb_pp_dir_crd_cnt { + struct { + u32 count : 14; + u32 rsvd0 : 18; + } field; + u32 val; +}; + +#define HQM_CHP_LDB_CQ_TMR_THRESHOLD(x) \ + (0xa0000124 + (x) * 0x1000) +#define HQM_CHP_LDB_CQ_TMR_THRESHOLD_RST 0x0 +union hqm_chp_ldb_cq_tmr_threshold { + struct { + u32 thrsh : 14; + u32 rsvd0 : 18; + } field; + u32 val; +}; + +#define HQM_CHP_LDB_CQ_INT_ENB(x) \ + (0xa0000120 + (x) * 0x1000) +#define HQM_CHP_LDB_CQ_INT_ENB_RST 0x0 +union hqm_chp_ldb_cq_int_enb { + struct { + u32 en_tim : 1; + u32 en_depth : 1; + u32 rsvd0 : 30; + } field; + u32 val; +}; + +#define HQM_CHP_LDB_CQ_INT_DEPTH_THRSH(x) \ + (0xa000011c + (x) * 0x1000) +#define HQM_CHP_LDB_CQ_INT_DEPTH_THRSH_RST 0x0 +union hqm_chp_ldb_cq_int_depth_thrsh { + struct { + u32 depth_threshold : 12; + u32 rsvd0 : 20; + } field; + u32 val; +}; + +#define HQM_CHP_LDB_CQ_TKN_DEPTH_SEL(x) \ + (0xa0000118 + (x) * 
0x1000) +#define HQM_CHP_LDB_CQ_TKN_DEPTH_SEL_RST 0x0 +union hqm_chp_ldb_cq_tkn_depth_sel { + struct { + u32 token_depth_select : 4; + u32 rsvd0 : 28; + } field; + u32 val; +}; + +#define HQM_CHP_LDB_PP_LDB_MIN_CRD_QNT(x) \ + (0xa0000114 + (x) * 0x1000) +#define HQM_CHP_LDB_PP_LDB_MIN_CRD_QNT_RST 0x1 +union hqm_chp_ldb_pp_ldb_min_crd_qnt { + struct { + u32 quanta : 10; + u32 rsvd0 : 22; + } field; + u32 val; +}; + +#define HQM_CHP_LDB_PP_DIR_MIN_CRD_QNT(x) \ + (0xa0000110 + (x) * 0x1000) +#define HQM_CHP_LDB_PP_DIR_MIN_CRD_QNT_RST 0x1 +union hqm_chp_ldb_pp_dir_min_crd_qnt { + struct { + u32 quanta : 10; + u32 rsvd0 : 22; + } field; + u32 val; +}; + +#define HQM_CHP_LDB_PP_LDB_CRD_LWM(x) \ + (0xa000010c + (x) * 0x1000) +#define HQM_CHP_LDB_PP_LDB_CRD_LWM_RST 0x0 +union hqm_chp_ldb_pp_ldb_crd_lwm { + struct { + u32 lwm : 16; + u32 rsvd0 : 16; + } field; + u32 val; +}; + +#define HQM_CHP_LDB_PP_LDB_CRD_HWM(x) \ + (0xa0000108 + (x) * 0x1000) +#define HQM_CHP_LDB_PP_LDB_CRD_HWM_RST 0x0 +union hqm_chp_ldb_pp_ldb_crd_hwm { + struct { + u32 hwm : 16; + u32 rsvd0 : 16; + } field; + u32 val; +}; + +#define HQM_CHP_LDB_PP_DIR_CRD_LWM(x) \ + (0xa0000104 + (x) * 0x1000) +#define HQM_CHP_LDB_PP_DIR_CRD_LWM_RST 0x0 +union hqm_chp_ldb_pp_dir_crd_lwm { + struct { + u32 lwm : 14; + u32 rsvd0 : 18; + } field; + u32 val; +}; + +#define HQM_CHP_LDB_PP_DIR_CRD_HWM(x) \ + (0xa0000100 + (x) * 0x1000) +#define HQM_CHP_LDB_PP_DIR_CRD_HWM_RST 0x0 +union hqm_chp_ldb_pp_dir_crd_hwm { + struct { + u32 hwm : 14; + u32 rsvd0 : 18; + } field; + u32 val; +}; + +#define HQM_CHP_DIR_CQ_DEPTH(x) \ + (0xa0000218 + (x) * 0x1000) +#define HQM_CHP_DIR_CQ_DEPTH_RST 0x0 +union hqm_chp_dir_cq_depth { + struct { + u32 cq_depth : 11; + u32 rsvd0 : 21; + } field; + u32 val; +}; + +#define HQM_CHP_DIR_CQ_WPTR(x) \ + (0xa0000214 + (x) * 0x1000) +#define HQM_CHP_DIR_CQ_WPTR_RST 0x0 +union hqm_chp_dir_cq_wptr { + struct { + u32 write_pointer : 10; + u32 rsvd0 : 22; + } field; + u32 val; +}; + +#define HQM_CHP_DIR_PP_LDB_PUSH_PTR(x) \ + (0xa0000210 + (x) * 0x1000) +#define HQM_CHP_DIR_PP_LDB_PUSH_PTR_RST 0x0 +union hqm_chp_dir_pp_ldb_push_ptr { + struct { + u32 push_pointer : 16; + u32 rsvd0 : 16; + } field; + u32 val; +}; + +#define HQM_CHP_DIR_PP_DIR_PUSH_PTR(x) \ + (0xa000020c + (x) * 0x1000) +#define HQM_CHP_DIR_PP_DIR_PUSH_PTR_RST 0x0 +union hqm_chp_dir_pp_dir_push_ptr { + struct { + u32 push_pointer : 16; + u32 rsvd0 : 16; + } field; + u32 val; +}; + +#define HQM_CHP_DIR_PP_STATE_RESET(x) \ + (0xa0000204 + (x) * 0x1000) +#define HQM_CHP_DIR_PP_STATE_RESET_RST 0x0 +union hqm_chp_dir_pp_state_reset { + struct { + u32 rsvd1 : 7; + u32 dir_type : 1; + u32 rsvd0 : 23; + u32 reset_pp_state : 1; + } field; + u32 val; +}; + +#define HQM_CHP_DIR_PP_CRD_REQ_STATE(x) \ + (0xa0000200 + (x) * 0x1000) +#define HQM_CHP_DIR_PP_CRD_REQ_STATE_RST 0x0 +union hqm_chp_dir_pp_crd_req_state { + struct { + u32 dir_crd_req_active_valid : 1; + u32 dir_crd_req_active_check : 1; + u32 dir_crd_req_active_busy : 1; + u32 rsvd1 : 1; + u32 ldb_crd_req_active_valid : 1; + u32 ldb_crd_req_active_check : 1; + u32 ldb_crd_req_active_busy : 1; + u32 rsvd0 : 1; + u32 no_pp_credit_update : 1; + u32 crd_req_state : 23; + } field; + u32 val; +}; + +#define HQM_CHP_LDB_CQ_DEPTH(x) \ + (0xa0000320 + (x) * 0x1000) +#define HQM_CHP_LDB_CQ_DEPTH_RST 0x0 +union hqm_chp_ldb_cq_depth { + struct { + u32 depth : 11; + u32 reserved : 2; + u32 rsvd0 : 19; + } field; + u32 val; +}; + +#define HQM_CHP_LDB_CQ_WPTR(x) \ + (0xa000031c + (x) * 0x1000) +#define HQM_CHP_LDB_CQ_WPTR_RST 0x0 
+union hqm_chp_ldb_cq_wptr { + struct { + u32 write_pointer : 10; + u32 rsvd0 : 22; + } field; + u32 val; +}; + +#define HQM_CHP_LDB_PP_LDB_PUSH_PTR(x) \ + (0xa0000318 + (x) * 0x1000) +#define HQM_CHP_LDB_PP_LDB_PUSH_PTR_RST 0x0 +union hqm_chp_ldb_pp_ldb_push_ptr { + struct { + u32 push_pointer : 16; + u32 rsvd0 : 16; + } field; + u32 val; +}; + +#define HQM_CHP_LDB_PP_DIR_PUSH_PTR(x) \ + (0xa0000314 + (x) * 0x1000) +#define HQM_CHP_LDB_PP_DIR_PUSH_PTR_RST 0x0 +union hqm_chp_ldb_pp_dir_push_ptr { + struct { + u32 push_pointer : 16; + u32 rsvd0 : 16; + } field; + u32 val; +}; + +#define HQM_CHP_HIST_LIST_POP_PTR(x) \ + (0xa000030c + (x) * 0x1000) +#define HQM_CHP_HIST_LIST_POP_PTR_RST 0x0 +union hqm_chp_hist_list_pop_ptr { + struct { + u32 pop_ptr : 13; + u32 generation : 1; + u32 rsvd0 : 18; + } field; + u32 val; +}; + +#define HQM_CHP_HIST_LIST_PUSH_PTR(x) \ + (0xa0000308 + (x) * 0x1000) +#define HQM_CHP_HIST_LIST_PUSH_PTR_RST 0x0 +union hqm_chp_hist_list_push_ptr { + struct { + u32 push_ptr : 13; + u32 generation : 1; + u32 rsvd0 : 18; + } field; + u32 val; +}; + +#define HQM_CHP_LDB_PP_STATE_RESET(x) \ + (0xa0000304 + (x) * 0x1000) +#define HQM_CHP_LDB_PP_STATE_RESET_RST 0x0 +union hqm_chp_ldb_pp_state_reset { + struct { + u32 rsvd1 : 7; + u32 dir_type : 1; + u32 rsvd0 : 23; + u32 reset_pp_state : 1; + } field; + u32 val; +}; + +#define HQM_CHP_LDB_PP_CRD_REQ_STATE(x) \ + (0xa0000300 + (x) * 0x1000) +#define HQM_CHP_LDB_PP_CRD_REQ_STATE_RST 0x0 +union hqm_chp_ldb_pp_crd_req_state { + struct { + u32 dir_crd_req_active_valid : 1; + u32 dir_crd_req_active_check : 1; + u32 dir_crd_req_active_busy : 1; + u32 rsvd1 : 1; + u32 ldb_crd_req_active_valid : 1; + u32 ldb_crd_req_active_check : 1; + u32 ldb_crd_req_active_busy : 1; + u32 rsvd0 : 1; + u32 no_pp_credit_update : 1; + u32 crd_req_state : 23; + } field; + u32 val; +}; + +#define HQM_CHP_ORD_QID_SN(x) \ + (0xa0000408 + (x) * 0x1000) +#define HQM_CHP_ORD_QID_SN_RST 0x0 +union hqm_chp_ord_qid_sn { + struct { + u32 sn : 12; + u32 rsvd0 : 20; + } field; + u32 val; +}; + +#define HQM_CHP_ORD_QID_SN_MAP(x) \ + (0xa0000404 + (x) * 0x1000) +#define HQM_CHP_ORD_QID_SN_MAP_RST 0x0 +union hqm_chp_ord_qid_sn_map { + struct { + u32 mode : 3; + u32 slot : 5; + u32 grp : 2; + u32 rsvd0 : 22; + } field; + u32 val; +}; + +#define HQM_CHP_LDB_POOL_CRD_CNT(x) \ + (0xa000050c + (x) * 0x1000) +#define HQM_CHP_LDB_POOL_CRD_CNT_RST 0x0 +union hqm_chp_ldb_pool_crd_cnt { + struct { + u32 count : 16; + u32 rsvd0 : 16; + } field; + u32 val; +}; + +#define HQM_CHP_QED_FL_BASE(x) \ + (0xa0000508 + (x) * 0x1000) +#define HQM_CHP_QED_FL_BASE_RST 0x0 +union hqm_chp_qed_fl_base { + struct { + u32 base : 14; + u32 rsvd0 : 18; + } field; + u32 val; +}; + +#define HQM_CHP_QED_FL_LIM(x) \ + (0xa0000504 + (x) * 0x1000) +#define HQM_CHP_QED_FL_LIM_RST 0x8000 +union hqm_chp_qed_fl_lim { + struct { + u32 limit : 14; + u32 rsvd1 : 1; + u32 freelist_disable : 1; + u32 rsvd0 : 16; + } field; + u32 val; +}; + +#define HQM_CHP_LDB_POOL_CRD_LIM(x) \ + (0xa0000500 + (x) * 0x1000) +#define HQM_CHP_LDB_POOL_CRD_LIM_RST 0x0 +union hqm_chp_ldb_pool_crd_lim { + struct { + u32 limit : 16; + u32 rsvd0 : 16; + } field; + u32 val; +}; + +#define HQM_CHP_QED_FL_POP_PTR(x) \ + (0xa0000604 + (x) * 0x1000) +#define HQM_CHP_QED_FL_POP_PTR_RST 0x0 +union hqm_chp_qed_fl_pop_ptr { + struct { + u32 pop_ptr : 14; + u32 reserved0 : 1; + u32 generation : 1; + u32 rsvd0 : 16; + } field; + u32 val; +}; + +#define HQM_CHP_QED_FL_PUSH_PTR(x) \ + (0xa0000600 + (x) * 0x1000) +#define HQM_CHP_QED_FL_PUSH_PTR_RST 
0x0 +union hqm_chp_qed_fl_push_ptr { + struct { + u32 push_ptr : 14; + u32 reserved0 : 1; + u32 generation : 1; + u32 rsvd0 : 16; + } field; + u32 val; +}; + +#define HQM_CHP_DIR_POOL_CRD_CNT(x) \ + (0xa000070c + (x) * 0x1000) +#define HQM_CHP_DIR_POOL_CRD_CNT_RST 0x0 +union hqm_chp_dir_pool_crd_cnt { + struct { + u32 count : 14; + u32 rsvd0 : 18; + } field; + u32 val; +}; + +#define HQM_CHP_DQED_FL_BASE(x) \ + (0xa0000708 + (x) * 0x1000) +#define HQM_CHP_DQED_FL_BASE_RST 0x0 +union hqm_chp_dqed_fl_base { + struct { + u32 base : 12; + u32 rsvd0 : 20; + } field; + u32 val; +}; + +#define HQM_CHP_DQED_FL_LIM(x) \ + (0xa0000704 + (x) * 0x1000) +#define HQM_CHP_DQED_FL_LIM_RST 0x2000 +union hqm_chp_dqed_fl_lim { + struct { + u32 limit : 12; + u32 rsvd1 : 1; + u32 freelist_disable : 1; + u32 rsvd0 : 18; + } field; + u32 val; +}; + +#define HQM_CHP_DIR_POOL_CRD_LIM(x) \ + (0xa0000700 + (x) * 0x1000) +#define HQM_CHP_DIR_POOL_CRD_LIM_RST 0x0 +union hqm_chp_dir_pool_crd_lim { + struct { + u32 limit : 14; + u32 rsvd0 : 18; + } field; + u32 val; +}; + +#define HQM_CHP_DQED_FL_POP_PTR(x) \ + (0xa0000804 + (x) * 0x1000) +#define HQM_CHP_DQED_FL_POP_PTR_RST 0x0 +union hqm_chp_dqed_fl_pop_ptr { + struct { + u32 pop_ptr : 12; + u32 reserved0 : 1; + u32 generation : 1; + u32 rsvd0 : 18; + } field; + u32 val; +}; + +#define HQM_CHP_DQED_FL_PUSH_PTR(x) \ + (0xa0000800 + (x) * 0x1000) +#define HQM_CHP_DQED_FL_PUSH_PTR_RST 0x0 +union hqm_chp_dqed_fl_push_ptr { + struct { + u32 push_ptr : 12; + u32 reserved0 : 1; + u32 generation : 1; + u32 rsvd0 : 18; + } field; + u32 val; +}; + +#define HQM_CHP_CTRL_DIAG_02 0xa8000154 +#define HQM_CHP_CTRL_DIAG_02_RST 0x0 +union hqm_chp_ctrl_diag_02 { + struct { + u32 control : 32; + } field; + u32 val; +}; + +#define HQM_CHP_SMON_MAX_TMR 0xa8000150 +#define HQM_CHP_SMON_MAX_TMR_RST 0x0 +union hqm_chp_smon_max_tmr { + struct { + u32 maxvalue : 32; + } field; + u32 val; +}; + +#define HQM_CHP_SMON_TMR 0xa800014c +#define HQM_CHP_SMON_TMR_RST 0x0 +union hqm_chp_smon_tmr { + struct { + u32 timer : 32; + } field; + u32 val; +}; + +#define HQM_CHP_SMON_CNTR1 0xa8000148 +#define HQM_CHP_SMON_CNTR1_RST 0x0 +union hqm_chp_smon_cntr1 { + struct { + u32 counter1 : 32; + } field; + u32 val; +}; + +#define HQM_CHP_SMON_CNTR0 0xa8000144 +#define HQM_CHP_SMON_CNTR0_RST 0x0 +union hqm_chp_smon_cntr0 { + struct { + u32 counter0 : 32; + } field; + u32 val; +}; + +#define HQM_CHP_SMON_COMPARE1 0xa8000140 +#define HQM_CHP_SMON_COMPARE1_RST 0x0 +union hqm_chp_smon_compare1 { + struct { + u32 compare1 : 32; + } field; + u32 val; +}; + +#define HQM_CHP_SMON_COMPARE0 0xa800013c +#define HQM_CHP_SMON_COMPARE0_RST 0x0 +union hqm_chp_smon_compare0 { + struct { + u32 compare0 : 32; + } field; + u32 val; +}; + +#define HQM_CHP_SMON_CFG1 0xa8000138 +#define HQM_CHP_SMON_CFG1_RST 0x0 +union hqm_chp_smon_cfg1 { + struct { + u32 mode0 : 8; + u32 mode1 : 8; + u32 spare0 : 16; + } field; + u32 val; +}; + +#define HQM_CHP_SMON_CFG0 0xa8000134 +#define HQM_CHP_SMON_CFG0_RST 0x40000000 +union hqm_chp_smon_cfg0 { + struct { + u32 smon_enable : 1; + u32 smon_0trigger_enable : 1; + u32 spare0 : 2; + u32 smon0_function : 3; + u32 smon0_function_compare : 1; + u32 smon1_function : 3; + u32 smon1_function_compare : 1; + u32 smon_mode : 4; + u32 stopcounterovfl : 1; + u32 intcounterovfl : 1; + u32 statcounter0ovfl : 1; + u32 statcounter1ovfl : 1; + u32 stoptimerovfl : 1; + u32 inttimerovfl : 1; + u32 stattimerovfl : 1; + u32 spare1 : 1; + u32 timer_prescale : 5; + u32 spare2 : 1; + u32 version : 2; + } field; + u32 
val; +}; + +#define HQM_CHP_CFG_CHP_CSR_CTRL 0xa8000130 +#define HQM_CHP_CFG_CHP_CSR_CTRL_RST 0xc0003fff +#define HQM_CHP_CFG_EXCESS_TOKENS_SHIFT 12 +union hqm_chp_cfg_chp_csr_ctrl { + struct { + u32 int_inf_alarm_enable_0 : 1; + u32 int_inf_alarm_enable_1 : 1; + u32 int_inf_alarm_enable_2 : 1; + u32 int_inf_alarm_enable_3 : 1; + u32 int_inf_alarm_enable_4 : 1; + u32 int_inf_alarm_enable_5 : 1; + u32 int_inf_alarm_enable_6 : 1; + u32 int_inf_alarm_enable_7 : 1; + u32 int_inf_alarm_enable_8 : 1; + u32 int_inf_alarm_enable_9 : 1; + u32 int_inf_alarm_enable_10 : 1; + u32 int_inf_alarm_enable_11 : 1; + u32 int_inf_alarm_enable_12 : 1; + u32 int_cor_alarm_enable : 1; + u32 csr_control_spare : 14; + u32 cfg_vasr_dis : 1; + u32 counter_clear : 1; + u32 blk_cor_report : 1; + u32 blk_cor_synd : 1; + } field; + u32 val; +}; + +#define HQM_CHP_LDB_CQ_INTR_ARMED1 0xa8000068 +#define HQM_CHP_LDB_CQ_INTR_ARMED1_RST 0x0 +union hqm_chp_ldb_cq_intr_armed1 { + struct { + u32 armed : 32; + } field; + u32 val; +}; + +#define HQM_CHP_LDB_CQ_INTR_ARMED0 0xa8000064 +#define HQM_CHP_LDB_CQ_INTR_ARMED0_RST 0x0 +union hqm_chp_ldb_cq_intr_armed0 { + struct { + u32 armed : 32; + } field; + u32 val; +}; + +#define HQM_CHP_DIR_CQ_INTR_ARMED3 0xa8000024 +#define HQM_CHP_DIR_CQ_INTR_ARMED3_RST 0x0 +union hqm_chp_dir_cq_intr_armed3 { + struct { + u32 armed : 32; + } field; + u32 val; +}; + +#define HQM_CHP_DIR_CQ_INTR_ARMED2 0xa8000020 +#define HQM_CHP_DIR_CQ_INTR_ARMED2_RST 0x0 +union hqm_chp_dir_cq_intr_armed2 { + struct { + u32 armed : 32; + } field; + u32 val; +}; + +#define HQM_CHP_DIR_CQ_INTR_ARMED1 0xa800001c +#define HQM_CHP_DIR_CQ_INTR_ARMED1_RST 0x0 +union hqm_chp_dir_cq_intr_armed1 { + struct { + u32 armed : 32; + } field; + u32 val; +}; + +#define HQM_CHP_DIR_CQ_INTR_ARMED0 0xa8000018 +#define HQM_CHP_DIR_CQ_INTR_ARMED0_RST 0x0 +union hqm_chp_dir_cq_intr_armed0 { + struct { + u32 armed : 32; + } field; + u32 val; +}; + +#define HQM_CFG_MSTR_DIAG_RESET_STS 0xb8000004 +#define HQM_CFG_MSTR_DIAG_RESET_STS_RST 0x1ff +union hqm_cfg_mstr_diag_reset_sts { + struct { + u32 chp_pf_reset_done : 1; + u32 rop_pf_reset_done : 1; + u32 lsp_pf_reset_done : 1; + u32 nalb_pf_reset_done : 1; + u32 ap_pf_reset_done : 1; + u32 dp_pf_reset_done : 1; + u32 qed_pf_reset_done : 1; + u32 dqed_pf_reset_done : 1; + u32 aqed_pf_reset_done : 1; + u32 rsvd1 : 6; + u32 pf_reset_active : 1; + u32 chp_vf_reset_done : 1; + u32 rop_vf_reset_done : 1; + u32 lsp_vf_reset_done : 1; + u32 nalb_vf_reset_done : 1; + u32 ap_vf_reset_done : 1; + u32 dp_vf_reset_done : 1; + u32 qed_vf_reset_done : 1; + u32 dqed_vf_reset_done : 1; + u32 aqed_vf_reset_done : 1; + u32 rsvd0 : 6; + u32 vf_reset_active : 1; + } field; + u32 val; +}; + +#define HQM_CFG_MSTR_BCAST_RESET_VF_START 0xc8100000 +#define HQM_CFG_MSTR_BCAST_RESET_VF_START_RST 0x0 +/* HW Reset Types */ +#define VF_RST_TYPE_CQ_LDB 0 +#define VF_RST_TYPE_QID_LDB 1 +#define VF_RST_TYPE_POOL_LDB 2 +#define VF_RST_TYPE_CQ_DIR 8 +#define VF_RST_TYPE_QID_DIR 9 +#define VF_RST_TYPE_POOL_DIR 10 +union hqm_cfg_mstr_bcast_reset_vf_start { + struct { + u32 vf_reset_start : 1; + u32 reserved : 3; + u32 vf_reset_type : 4; + u32 vf_reset_id : 24; + } field; + u32 val; +}; + +#define HQM_FUNC_VF_VF2PF_MAILBOX_BYTES 256 +#define HQM_FUNC_VF_VF2PF_MAILBOX(x) \ + (0x1000 + (x) * 0x4) +#define HQM_FUNC_VF_VF2PF_MAILBOX_RST 0x0 +union hqm_func_vf_vf2pf_mailbox { + struct { + u32 msg : 32; + } field; + u32 val; +}; + +#define HQM_FUNC_VF_VF2PF_MAILBOX_ISR 0x1f00 +#define HQM_FUNC_VF_VF2PF_MAILBOX_ISR_RST 0x0 +union 
hqm_func_vf_vf2pf_mailbox_isr { + struct { + u32 isr : 1; + u32 rsvd0 : 31; + } field; + u32 val; +}; + +#define HQM_FUNC_VF_PF2VF_MAILBOX_BYTES 64 +#define HQM_FUNC_VF_PF2VF_MAILBOX(x) \ + (0x2000 + (x) * 0x4) +#define HQM_FUNC_VF_PF2VF_MAILBOX_RST 0x0 +union hqm_func_vf_pf2vf_mailbox { + struct { + u32 msg : 32; + } field; + u32 val; +}; + +#define HQM_FUNC_VF_PF2VF_MAILBOX_ISR 0x2f00 +#define HQM_FUNC_VF_PF2VF_MAILBOX_ISR_RST 0x0 +union hqm_func_vf_pf2vf_mailbox_isr { + struct { + u32 pf_isr : 1; + u32 rsvd0 : 31; + } field; + u32 val; +}; + +#define HQM_FUNC_VF_VF_MSI_ISR_PEND 0x2f10 +#define HQM_FUNC_VF_VF_MSI_ISR_PEND_RST 0x0 +union hqm_func_vf_vf_msi_isr_pend { + struct { + u32 isr_pend : 32; + } field; + u32 val; +}; + +#define HQM_FUNC_VF_VF_RESET_IN_PROGRESS 0x3000 +#define HQM_FUNC_VF_VF_RESET_IN_PROGRESS_RST 0x1 +union hqm_func_vf_vf_reset_in_progress { + struct { + u32 reset_in_progress : 1; + u32 rsvd0 : 31; + } field; + u32 val; +}; + +#define HQM_FUNC_VF_VF_MSI_ISR 0x4000 +#define HQM_FUNC_VF_VF_MSI_ISR_RST 0x0 +union hqm_func_vf_vf_msi_isr { + struct { + u32 vf_msi_isr : 32; + } field; + u32 val; +}; + +#endif /* __HQM_REGS_H */ diff --git a/drivers/misc/hqm/hqm_resource.c b/drivers/misc/hqm/hqm_resource.c new file mode 100644 index 00000000000000..a6f5660341b51c --- /dev/null +++ b/drivers/misc/hqm/hqm_resource.c @@ -0,0 +1,9691 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright(c) 2016-2019 Intel Corporation */ + +#include "hqm_hw_types.h" +#include +#include "hqm_resource.h" +#include "hqm_osdep.h" +#include "hqm_osdep_bitmap.h" +#include "hqm_osdep_types.h" +#include "hqm_smon.h" +#include "hqm_regs.h" +#include "hqm_mbox.h" + +#define HQM_DOM_LIST_HEAD(head, type) \ + HQM_LIST_HEAD((head), type, domain_list) + +#define HQM_FUNC_LIST_HEAD(head, type) \ + HQM_LIST_HEAD((head), type, func_list) + +#define HQM_DOM_LIST_FOR(head, ptr, iter) \ + HQM_LIST_FOR_EACH(head, ptr, domain_list, iter) + +#define HQM_FUNC_LIST_FOR(head, ptr, iter) \ + HQM_LIST_FOR_EACH(head, ptr, func_list, iter) + +#define HQM_DOM_LIST_FOR_SAFE(head, ptr, ptr_tmp, it, it_tmp) \ + HQM_LIST_FOR_EACH_SAFE((head), ptr, ptr_tmp, domain_list, it, it_tmp) + +#define HQM_FUNC_LIST_FOR_SAFE(head, ptr, ptr_tmp, it, it_tmp) \ + HQM_LIST_FOR_EACH_SAFE((head), ptr, ptr_tmp, func_list, it, it_tmp) + +/* The PF driver cannot assume that a register write will affect subsequent HCW + * writes. To ensure a write completes, the driver must read back a CSR. This + * function only need be called for configuration that can occur after the + * domain has started; prior to starting, applications can't send HCWs. 
+ */ +static inline void hqm_flush_csr(struct hqm_hw *hw) +{ + HQM_CSR_RD(hw, HQM_SYS_TOTAL_VAS); +} + +static void hqm_init_fn_rsrc_lists(struct hqm_function_resources *rsrc) +{ + hqm_list_init_head(&rsrc->avail_domains); + hqm_list_init_head(&rsrc->used_domains); + hqm_list_init_head(&rsrc->avail_ldb_queues); + hqm_list_init_head(&rsrc->avail_ldb_ports); + hqm_list_init_head(&rsrc->avail_dir_pq_pairs); + hqm_list_init_head(&rsrc->avail_ldb_credit_pools); + hqm_list_init_head(&rsrc->avail_dir_credit_pools); +} + +static void hqm_init_domain_rsrc_lists(struct hqm_domain *domain) +{ + hqm_list_init_head(&domain->used_ldb_queues); + hqm_list_init_head(&domain->used_ldb_ports); + hqm_list_init_head(&domain->used_dir_pq_pairs); + hqm_list_init_head(&domain->used_ldb_credit_pools); + hqm_list_init_head(&domain->used_dir_credit_pools); + hqm_list_init_head(&domain->avail_ldb_queues); + hqm_list_init_head(&domain->avail_ldb_ports); + hqm_list_init_head(&domain->avail_dir_pq_pairs); + hqm_list_init_head(&domain->avail_ldb_credit_pools); + hqm_list_init_head(&domain->avail_dir_credit_pools); +} + +int hqm_resource_init(struct hqm_hw *hw) +{ + struct hqm_list_entry *list; + unsigned int i; + + /* For optimal load-balancing, ports that map to one or more QIDs in + * common should not be in numerical sequence. This is application + * dependent, but the driver interleaves port IDs as much as possible + * to reduce the likelihood of this. This initial allocation maximizes + * the average distance between an ID and its immediate neighbors (i.e. + * the distance from 1 to 0 and to 2, the distance from 2 to 1 and to + * 3, etc.). + */ + u32 init_ldb_port_allocation[HQM_MAX_NUM_LDB_PORTS] = { + 0, 31, 62, 29, 60, 27, 58, 25, 56, 23, 54, 21, 52, 19, 50, 17, + 48, 15, 46, 13, 44, 11, 42, 9, 40, 7, 38, 5, 36, 3, 34, 1, + 32, 63, 30, 61, 28, 59, 26, 57, 24, 55, 22, 53, 20, 51, 18, 49, + 16, 47, 14, 45, 12, 43, 10, 41, 8, 39, 6, 37, 4, 35, 2, 33 + }; + + /* Zero-out resource tracking data structures */ + memset(&hw->rsrcs, 0, sizeof(hw->rsrcs)); + memset(&hw->pf, 0, sizeof(hw->pf)); + + hqm_init_fn_rsrc_lists(&hw->pf); + + for (i = 0; i < HQM_MAX_NUM_VFS; i++) { + memset(&hw->vf[i], 0, sizeof(hw->vf[i])); + hqm_init_fn_rsrc_lists(&hw->vf[i]); + } + + for (i = 0; i < HQM_MAX_NUM_DOMAINS; i++) { + memset(&hw->domains[i], 0, sizeof(hw->domains[i])); + hqm_init_domain_rsrc_lists(&hw->domains[i]); + hw->domains[i].parent_func = &hw->pf; + } + + /* Give all resources to the PF driver */ + hw->pf.num_avail_domains = HQM_MAX_NUM_DOMAINS; + for (i = 0; i < hw->pf.num_avail_domains; i++) { + list = &hw->domains[i].func_list; + + hqm_list_add(&hw->pf.avail_domains, list); + } + + hw->pf.num_avail_ldb_queues = HQM_MAX_NUM_LDB_QUEUES; + for (i = 0; i < hw->pf.num_avail_ldb_queues; i++) { + list = &hw->rsrcs.ldb_queues[i].func_list; + + hqm_list_add(&hw->pf.avail_ldb_queues, list); + } + + hw->pf.num_avail_ldb_ports = HQM_MAX_NUM_LDB_PORTS; + for (i = 0; i < hw->pf.num_avail_ldb_ports; i++) { + struct hqm_ldb_port *port; + + port = &hw->rsrcs.ldb_ports[init_ldb_port_allocation[i]]; + + hqm_list_add(&hw->pf.avail_ldb_ports, &port->func_list); + } + + hw->pf.num_avail_dir_pq_pairs = HQM_MAX_NUM_DIR_PORTS; + for (i = 0; i < hw->pf.num_avail_dir_pq_pairs; i++) { + list = &hw->rsrcs.dir_pq_pairs[i].func_list; + + hqm_list_add(&hw->pf.avail_dir_pq_pairs, list); + } + + hw->pf.num_avail_ldb_credit_pools = HQM_MAX_NUM_LDB_CREDIT_POOLS; + for (i = 0; i < hw->pf.num_avail_ldb_credit_pools; i++) { + list = 
&hw->rsrcs.ldb_credit_pools[i].func_list; + + hqm_list_add(&hw->pf.avail_ldb_credit_pools, list); + } + + hw->pf.num_avail_dir_credit_pools = HQM_MAX_NUM_DIR_CREDIT_POOLS; + for (i = 0; i < hw->pf.num_avail_dir_credit_pools; i++) { + list = &hw->rsrcs.dir_credit_pools[i].func_list; + + hqm_list_add(&hw->pf.avail_dir_credit_pools, list); + } + + /* There are 5120 history list entries, which allows us to overprovision + * the inflight limit (4096) by 1k. + */ + if (hqm_bitmap_alloc(hw, + &hw->pf.avail_hist_list_entries, + HQM_MAX_NUM_HIST_LIST_ENTRIES)) + return -1; + + hqm_bitmap_fill(hw->pf.avail_hist_list_entries); + + if (hqm_bitmap_alloc(hw, + &hw->pf.avail_qed_freelist_entries, + HQM_MAX_NUM_LDB_CREDITS)) + return -1; + + hqm_bitmap_fill(hw->pf.avail_qed_freelist_entries); + + if (hqm_bitmap_alloc(hw, + &hw->pf.avail_dqed_freelist_entries, + HQM_MAX_NUM_DIR_CREDITS)) + return -1; + + hqm_bitmap_fill(hw->pf.avail_dqed_freelist_entries); + + if (hqm_bitmap_alloc(hw, + &hw->pf.avail_aqed_freelist_entries, + HQM_MAX_NUM_AQOS_ENTRIES)) + return -1; + + hqm_bitmap_fill(hw->pf.avail_aqed_freelist_entries); + + for (i = 0; i < HQM_MAX_NUM_VFS; i++) { + if (hqm_bitmap_alloc(hw, + &hw->vf[i].avail_hist_list_entries, + HQM_MAX_NUM_HIST_LIST_ENTRIES)) + return -1; + if (hqm_bitmap_alloc(hw, + &hw->vf[i].avail_qed_freelist_entries, + HQM_MAX_NUM_LDB_CREDITS)) + return -1; + if (hqm_bitmap_alloc(hw, + &hw->vf[i].avail_dqed_freelist_entries, + HQM_MAX_NUM_DIR_CREDITS)) + return -1; + if (hqm_bitmap_alloc(hw, + &hw->vf[i].avail_aqed_freelist_entries, + HQM_MAX_NUM_AQOS_ENTRIES)) + return -1; + + hqm_bitmap_zero(hw->vf[i].avail_hist_list_entries); + hqm_bitmap_zero(hw->vf[i].avail_qed_freelist_entries); + hqm_bitmap_zero(hw->vf[i].avail_dqed_freelist_entries); + hqm_bitmap_zero(hw->vf[i].avail_aqed_freelist_entries); + } + + /* Initialize the hardware resource IDs */ + for (i = 0; i < HQM_MAX_NUM_DOMAINS; i++) { + hw->domains[i].id.phys_id = i; + hw->domains[i].id.vf_owned = false; + } + + for (i = 0; i < HQM_MAX_NUM_LDB_QUEUES; i++) { + hw->rsrcs.ldb_queues[i].id.phys_id = i; + hw->rsrcs.ldb_queues[i].id.vf_owned = false; + } + + for (i = 0; i < HQM_MAX_NUM_LDB_PORTS; i++) { + hw->rsrcs.ldb_ports[i].id.phys_id = i; + hw->rsrcs.ldb_ports[i].id.vf_owned = false; + } + + for (i = 0; i < HQM_MAX_NUM_DIR_PORTS; i++) { + hw->rsrcs.dir_pq_pairs[i].id.phys_id = i; + hw->rsrcs.dir_pq_pairs[i].id.vf_owned = false; + } + + for (i = 0; i < HQM_MAX_NUM_LDB_CREDIT_POOLS; i++) { + hw->rsrcs.ldb_credit_pools[i].id.phys_id = i; + hw->rsrcs.ldb_credit_pools[i].id.vf_owned = false; + } + + for (i = 0; i < HQM_MAX_NUM_DIR_CREDIT_POOLS; i++) { + hw->rsrcs.dir_credit_pools[i].id.phys_id = i; + hw->rsrcs.dir_credit_pools[i].id.vf_owned = false; + } + + for (i = 0; i < HQM_MAX_NUM_SEQUENCE_NUMBER_GROUPS; i++) { + hw->rsrcs.sn_groups[i].id = i; + /* Default mode (0) is 32 sequence numbers per queue */ + hw->rsrcs.sn_groups[i].mode = 0; + hw->rsrcs.sn_groups[i].sequence_numbers_per_queue = 32; + hw->rsrcs.sn_groups[i].slot_use_bitmap = 0; + } + + return 0; +} + +void hqm_resource_free(struct hqm_hw *hw) +{ + int i; + + hqm_bitmap_free(hw->pf.avail_hist_list_entries); + + hqm_bitmap_free(hw->pf.avail_qed_freelist_entries); + + hqm_bitmap_free(hw->pf.avail_dqed_freelist_entries); + + hqm_bitmap_free(hw->pf.avail_aqed_freelist_entries); + + for (i = 0; i < HQM_MAX_NUM_VFS; i++) { + hqm_bitmap_free(hw->vf[i].avail_hist_list_entries); + hqm_bitmap_free(hw->vf[i].avail_qed_freelist_entries); + 
hqm_bitmap_free(hw->vf[i].avail_dqed_freelist_entries); + hqm_bitmap_free(hw->vf[i].avail_aqed_freelist_entries); + } +} + +static struct hqm_domain *hqm_get_domain_from_id(struct hqm_hw *hw, + u32 id, + bool vf_request, + unsigned int vf_id) +{ + struct hqm_list_entry *iter __attribute__((unused)); + struct hqm_function_resources *rsrcs; + struct hqm_domain *domain; + + if (id >= HQM_MAX_NUM_DOMAINS) + return NULL; + + if (!vf_request) + return &hw->domains[id]; + + rsrcs = &hw->vf[vf_id]; + + HQM_FUNC_LIST_FOR(rsrcs->used_domains, domain, iter) + if (domain->id.virt_id == id) + return domain; + + return NULL; +} + +static struct hqm_credit_pool * +hqm_get_domain_ldb_pool(u32 id, + bool vf_request, + struct hqm_domain *domain) +{ + struct hqm_list_entry *iter __attribute__((unused)); + struct hqm_credit_pool *pool; + + if (id >= HQM_MAX_NUM_LDB_CREDIT_POOLS) + return NULL; + + HQM_DOM_LIST_FOR(domain->used_ldb_credit_pools, pool, iter) + if ((!vf_request && pool->id.phys_id == id) || + (vf_request && pool->id.virt_id == id)) + return pool; + + return NULL; +} + +static struct hqm_credit_pool * +hqm_get_domain_dir_pool(u32 id, + bool vf_request, + struct hqm_domain *domain) +{ + struct hqm_list_entry *iter __attribute__((unused)); + struct hqm_credit_pool *pool; + + if (id >= HQM_MAX_NUM_DIR_CREDIT_POOLS) + return NULL; + + HQM_DOM_LIST_FOR(domain->used_dir_credit_pools, pool, iter) + if ((!vf_request && pool->id.phys_id == id) || + (vf_request && pool->id.virt_id == id)) + return pool; + + return NULL; +} + +static struct hqm_ldb_port *hqm_get_ldb_port_from_id(struct hqm_hw *hw, + u32 id, + bool vf_request, + unsigned int vf_id) +{ + struct hqm_list_entry *iter1 __attribute__((unused)); + struct hqm_list_entry *iter2 __attribute__((unused)); + struct hqm_function_resources *rsrcs; + struct hqm_ldb_port *port; + struct hqm_domain *domain; + + if (id >= HQM_MAX_NUM_LDB_PORTS) + return NULL; + + rsrcs = (vf_request) ? 
&hw->vf[vf_id] : &hw->pf; + + if (!vf_request) + return &hw->rsrcs.ldb_ports[id]; + + HQM_FUNC_LIST_FOR(rsrcs->used_domains, domain, iter1) { + HQM_DOM_LIST_FOR(domain->used_ldb_ports, port, iter2) + if (port->id.virt_id == id) + return port; + } + + HQM_FUNC_LIST_FOR(rsrcs->avail_ldb_ports, port, iter1) + if (port->id.virt_id == id) + return port; + + return NULL; +} + +static struct hqm_ldb_port * +hqm_get_domain_used_ldb_port(u32 id, + bool vf_request, + struct hqm_domain *domain) +{ + struct hqm_list_entry *iter __attribute__((unused)); + struct hqm_ldb_port *port; + + if (id >= HQM_MAX_NUM_LDB_PORTS) + return NULL; + + HQM_DOM_LIST_FOR(domain->used_ldb_ports, port, iter) + if ((!vf_request && port->id.phys_id == id) || + (vf_request && port->id.virt_id == id)) + return port; + + HQM_DOM_LIST_FOR(domain->avail_ldb_ports, port, iter) + if ((!vf_request && port->id.phys_id == id) || + (vf_request && port->id.virt_id == id)) + return port; + + return NULL; +} + +static struct hqm_ldb_port *hqm_get_domain_ldb_port(u32 id, + bool vf_request, + struct hqm_domain *domain) +{ + struct hqm_list_entry *iter __attribute__((unused)); + struct hqm_ldb_port *port; + + if (id >= HQM_MAX_NUM_LDB_PORTS) + return NULL; + + HQM_DOM_LIST_FOR(domain->used_ldb_ports, port, iter) + if ((!vf_request && port->id.phys_id == id) || + (vf_request && port->id.virt_id == id)) + return port; + + HQM_DOM_LIST_FOR(domain->avail_ldb_ports, port, iter) + if ((!vf_request && port->id.phys_id == id) || + (vf_request && port->id.virt_id == id)) + return port; + + return NULL; +} + +static struct hqm_dir_pq_pair *hqm_get_dir_pq_from_id(struct hqm_hw *hw, + u32 id, + bool vf_request, + unsigned int vf_id) +{ + struct hqm_list_entry *iter1 __attribute__((unused)); + struct hqm_list_entry *iter2 __attribute__((unused)); + struct hqm_function_resources *rsrcs; + struct hqm_dir_pq_pair *port; + struct hqm_domain *domain; + + if (id >= HQM_MAX_NUM_DIR_PORTS) + return NULL; + + rsrcs = (vf_request) ? 
&hw->vf[vf_id] : &hw->pf; + + if (!vf_request) + return &hw->rsrcs.dir_pq_pairs[id]; + + HQM_FUNC_LIST_FOR(rsrcs->used_domains, domain, iter1) { + HQM_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter2) + if (port->id.virt_id == id) + return port; + } + + HQM_FUNC_LIST_FOR(rsrcs->avail_dir_pq_pairs, port, iter1) + if (port->id.virt_id == id) + return port; + + return NULL; +} + +static struct hqm_dir_pq_pair * +hqm_get_domain_used_dir_pq(u32 id, + bool vf_request, + struct hqm_domain *domain) +{ + struct hqm_list_entry *iter __attribute__((unused)); + struct hqm_dir_pq_pair *port; + + if (id >= HQM_MAX_NUM_DIR_PORTS) + return NULL; + + HQM_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter) + if ((!vf_request && port->id.phys_id == id) || + (vf_request && port->id.virt_id == id)) + return port; + + return NULL; +} + +static struct hqm_dir_pq_pair *hqm_get_domain_dir_pq(u32 id, + bool vf_request, + struct hqm_domain *domain) +{ + struct hqm_list_entry *iter __attribute__((unused)); + struct hqm_dir_pq_pair *port; + + if (id >= HQM_MAX_NUM_DIR_PORTS) + return NULL; + + HQM_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter) + if ((!vf_request && port->id.phys_id == id) || + (vf_request && port->id.virt_id == id)) + return port; + + HQM_DOM_LIST_FOR(domain->avail_dir_pq_pairs, port, iter) + if ((!vf_request && port->id.phys_id == id) || + (vf_request && port->id.virt_id == id)) + return port; + + return NULL; +} + +static struct hqm_ldb_queue *hqm_get_ldb_queue_from_id(struct hqm_hw *hw, + u32 id, + bool vf_request, + unsigned int vf_id) +{ + struct hqm_list_entry *iter1 __attribute__((unused)); + struct hqm_list_entry *iter2 __attribute__((unused)); + struct hqm_function_resources *rsrcs; + struct hqm_ldb_queue *queue; + struct hqm_domain *domain; + + if (id >= HQM_MAX_NUM_LDB_QUEUES) + return NULL; + + rsrcs = (vf_request) ? 
&hw->vf[vf_id] : &hw->pf; + + if (!vf_request) + return &hw->rsrcs.ldb_queues[id]; + + HQM_FUNC_LIST_FOR(rsrcs->used_domains, domain, iter1) { + HQM_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter2) + if (queue->id.virt_id == id) + return queue; + } + + HQM_FUNC_LIST_FOR(rsrcs->avail_ldb_queues, queue, iter1) + if (queue->id.virt_id == id) + return queue; + + return NULL; +} + +static struct hqm_ldb_queue *hqm_get_domain_ldb_queue(u32 id, + bool vf_request, + struct hqm_domain *domain) +{ + struct hqm_list_entry *iter __attribute__((unused)); + struct hqm_ldb_queue *queue; + + if (id >= HQM_MAX_NUM_LDB_QUEUES) + return NULL; + + HQM_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter) + if ((!vf_request && queue->id.phys_id == id) || + (vf_request && queue->id.virt_id == id)) + return queue; + + return NULL; +} + +#define HQM_XFER_LL_RSRC(dst, src, num, type_t, name) ({ \ + struct hqm_list_entry *it1 __attribute__((unused)); \ + struct hqm_list_entry *it2 __attribute__((unused)); \ + struct hqm_function_resources *_src = src; \ + struct hqm_function_resources *_dst = dst; \ + type_t *ptr, *tmp __attribute__((unused)); \ + unsigned int i = 0; \ + \ + HQM_FUNC_LIST_FOR_SAFE(_src->avail_##name##s, ptr, tmp, it1, it2) { \ + if (i++ == (num)) \ + break; \ + \ + hqm_list_del(&_src->avail_##name##s, &ptr->func_list); \ + hqm_list_add(&_dst->avail_##name##s, &ptr->func_list); \ + _src->num_avail_##name##s--; \ + _dst->num_avail_##name##s++; \ + } \ +}) + +#define HQM_VF_ID_CLEAR(head, type_t) ({ \ + struct hqm_list_entry *iter __attribute__((unused)); \ + type_t *var; \ + \ + HQM_FUNC_LIST_FOR(head, var, iter) \ + var->id.vf_owned = false; \ +}) + +int hqm_update_vf_sched_domains(struct hqm_hw *hw, u32 vf_id, u32 num) +{ + struct hqm_list_entry *iter __attribute__((unused)); + struct hqm_function_resources *src, *dst; + struct hqm_domain *domain; + unsigned int orig; + int ret; + + if (vf_id >= HQM_MAX_NUM_VFS) + return -EINVAL; + + src = &hw->pf; + dst = &hw->vf[vf_id]; + + /* If the VF is locked, its resource assignment can't be changed */ + if (hqm_vf_is_locked(hw, vf_id)) + return -EPERM; + + orig = dst->num_avail_domains; + + /* Detach the destination VF's current resources before checking if + * enough are available, and set their IDs accordingly. + */ + HQM_VF_ID_CLEAR(dst->avail_domains, struct hqm_domain); + + HQM_XFER_LL_RSRC(src, dst, orig, struct hqm_domain, domain); + + /* Are there enough available resources to satisfy the request? */ + if (num > src->num_avail_domains) { + num = orig; + ret = -EINVAL; + } else { + ret = 0; + } + + HQM_XFER_LL_RSRC(dst, src, num, struct hqm_domain, domain); + + /* Set the domains' VF backpointer */ + HQM_FUNC_LIST_FOR(dst->avail_domains, domain, iter) + domain->parent_func = dst; + + return ret; +} + +int hqm_update_vf_ldb_queues(struct hqm_hw *hw, u32 vf_id, u32 num) +{ + struct hqm_function_resources *src, *dst; + unsigned int orig; + int ret; + + if (vf_id >= HQM_MAX_NUM_VFS) + return -EINVAL; + + src = &hw->pf; + dst = &hw->vf[vf_id]; + + /* If the VF is locked, its resource assignment can't be changed */ + if (hqm_vf_is_locked(hw, vf_id)) + return -EPERM; + + orig = dst->num_avail_ldb_queues; + + /* Detach the destination VF's current resources before checking if + * enough are available, and set their IDs accordingly. + */ + HQM_VF_ID_CLEAR(dst->avail_ldb_queues, struct hqm_ldb_queue); + + HQM_XFER_LL_RSRC(src, dst, orig, struct hqm_ldb_queue, ldb_queue); + + /* Are there enough available resources to satisfy the request? 
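+	 * At this point src (the PF) temporarily holds both its own free
+	 * queues and the VF's previous allocation, so the check is against
+	 * the combined total. Illustrative example (numbers chosen for the
+	 * example, not taken from the hardware spec): if the VF previously
+	 * owned 4 queues and the PF had 8 free, any request up to 12
+	 * succeeds; a request for 13 restores the original 4 queues to the
+	 * VF and returns -EINVAL.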
*/ + if (num > src->num_avail_ldb_queues) { + num = orig; + ret = -EINVAL; + } else { + ret = 0; + } + + HQM_XFER_LL_RSRC(dst, src, num, struct hqm_ldb_queue, ldb_queue); + + return ret; +} + +int hqm_update_vf_ldb_ports(struct hqm_hw *hw, u32 vf_id, u32 num) +{ + struct hqm_function_resources *src, *dst; + unsigned int orig; + int ret; + + if (vf_id >= HQM_MAX_NUM_VFS) + return -EINVAL; + + src = &hw->pf; + dst = &hw->vf[vf_id]; + + /* If the VF is locked, its resource assignment can't be changed */ + if (hqm_vf_is_locked(hw, vf_id)) + return -EPERM; + + orig = dst->num_avail_ldb_ports; + + /* Detach the destination VF's current resources before checking if + * enough are available, and set their IDs accordingly. + */ + HQM_VF_ID_CLEAR(dst->avail_ldb_ports, struct hqm_ldb_port); + + HQM_XFER_LL_RSRC(src, dst, orig, struct hqm_ldb_port, ldb_port); + + /* Are there enough available resources to satisfy the request? */ + if (num > src->num_avail_ldb_ports) { + num = orig; + ret = -EINVAL; + } else { + ret = 0; + } + + HQM_XFER_LL_RSRC(dst, src, num, struct hqm_ldb_port, ldb_port); + + return ret; +} + +int hqm_update_vf_dir_ports(struct hqm_hw *hw, u32 vf_id, u32 num) +{ + struct hqm_function_resources *src, *dst; + unsigned int orig; + int ret; + + if (vf_id >= HQM_MAX_NUM_VFS) + return -EINVAL; + + src = &hw->pf; + dst = &hw->vf[vf_id]; + + /* If the VF is locked, its resource assignment can't be changed */ + if (hqm_vf_is_locked(hw, vf_id)) + return -EPERM; + + orig = dst->num_avail_dir_pq_pairs; + + /* Detach the destination VF's current resources before checking if + * enough are available, and set their IDs accordingly. + */ + HQM_VF_ID_CLEAR(dst->avail_dir_pq_pairs, struct hqm_dir_pq_pair); + + HQM_XFER_LL_RSRC(src, dst, orig, struct hqm_dir_pq_pair, dir_pq_pair); + + /* Are there enough available resources to satisfy the request? */ + if (num > src->num_avail_dir_pq_pairs) { + num = orig; + ret = -EINVAL; + } else { + ret = 0; + } + + HQM_XFER_LL_RSRC(dst, src, num, struct hqm_dir_pq_pair, dir_pq_pair); + + return ret; +} + +int hqm_update_vf_ldb_credit_pools(struct hqm_hw *hw, + u32 vf_id, + u32 num) +{ + struct hqm_function_resources *src, *dst; + unsigned int orig; + int ret; + + if (vf_id >= HQM_MAX_NUM_VFS) + return -EINVAL; + + src = &hw->pf; + dst = &hw->vf[vf_id]; + + /* If the VF is locked, its resource assignment can't be changed */ + if (hqm_vf_is_locked(hw, vf_id)) + return -EPERM; + + orig = dst->num_avail_ldb_credit_pools; + + /* Detach the destination VF's current resources before checking if + * enough are available, and set their IDs accordingly. + */ + HQM_VF_ID_CLEAR(dst->avail_ldb_credit_pools, struct hqm_credit_pool); + + HQM_XFER_LL_RSRC(src, + dst, + orig, + struct hqm_credit_pool, + ldb_credit_pool); + + /* Are there enough available resources to satisfy the request? 
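+	 * As with the other per-VF resource updates above, a failed request
+	 * is a no-op from the VF's point of view: num is reset to the
+	 * original pool count, the original pools are transferred back
+	 * below, and -EINVAL is returned.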
*/ + if (num > src->num_avail_ldb_credit_pools) { + num = orig; + ret = -EINVAL; + } else { + ret = 0; + } + + HQM_XFER_LL_RSRC(dst, + src, + num, + struct hqm_credit_pool, + ldb_credit_pool); + + return ret; +} + +int hqm_update_vf_dir_credit_pools(struct hqm_hw *hw, + u32 vf_id, + u32 num) +{ + struct hqm_function_resources *src, *dst; + unsigned int orig; + int ret; + + if (vf_id >= HQM_MAX_NUM_VFS) + return -EINVAL; + + src = &hw->pf; + dst = &hw->vf[vf_id]; + + /* If the VF is locked, its resource assignment can't be changed */ + if (hqm_vf_is_locked(hw, vf_id)) + return -EPERM; + + orig = dst->num_avail_dir_credit_pools; + + /* Detach the VF's current resources before checking if enough are + * available, and set their IDs accordingly. + */ + HQM_VF_ID_CLEAR(dst->avail_dir_credit_pools, struct hqm_credit_pool); + + HQM_XFER_LL_RSRC(src, + dst, + orig, + struct hqm_credit_pool, + dir_credit_pool); + + /* Are there enough available resources to satisfy the request? */ + if (num > src->num_avail_dir_credit_pools) { + num = orig; + ret = -EINVAL; + } else { + ret = 0; + } + + HQM_XFER_LL_RSRC(dst, + src, + num, + struct hqm_credit_pool, + dir_credit_pool); + + return ret; +} + +static int hqm_transfer_bitmap_resources(struct hqm_bitmap *src, + struct hqm_bitmap *dst, + u32 num) +{ + int orig, ret, base; + + /* Reassign the dest's bitmap entries to the source's before checking + * if a contiguous chunk of size 'num' is available. The reassignment + * may be necessary to create a sufficiently large contiguous chunk. + */ + orig = hqm_bitmap_count(dst); + + hqm_bitmap_or(src, src, dst); + + hqm_bitmap_zero(dst); + + /* Are there enough available resources to satisfy the request? */ + base = hqm_bitmap_find_set_bit_range(src, num); + + if (base == -ENOENT) { + num = orig; + base = hqm_bitmap_find_set_bit_range(src, num); + ret = -EINVAL; + } else { + ret = 0; + } + + hqm_bitmap_set_range(dst, base, num); + + hqm_bitmap_clear_range(src, base, num); + + return ret; +} + +int hqm_update_vf_ldb_credits(struct hqm_hw *hw, u32 vf_id, u32 num) +{ + struct hqm_function_resources *src, *dst; + + if (vf_id >= HQM_MAX_NUM_VFS) + return -EINVAL; + + src = &hw->pf; + dst = &hw->vf[vf_id]; + + /* If the VF is locked, its resource assignment can't be changed */ + if (hqm_vf_is_locked(hw, vf_id)) + return -EPERM; + + return hqm_transfer_bitmap_resources(src->avail_qed_freelist_entries, + dst->avail_qed_freelist_entries, + num); +} + +int hqm_update_vf_dir_credits(struct hqm_hw *hw, u32 vf_id, u32 num) +{ + struct hqm_function_resources *src, *dst; + + if (vf_id >= HQM_MAX_NUM_VFS) + return -EINVAL; + + src = &hw->pf; + dst = &hw->vf[vf_id]; + + /* If the VF is locked, its resource assignment can't be changed */ + if (hqm_vf_is_locked(hw, vf_id)) + return -EPERM; + + return hqm_transfer_bitmap_resources(src->avail_dqed_freelist_entries, + dst->avail_dqed_freelist_entries, + num); +} + +int hqm_update_vf_hist_list_entries(struct hqm_hw *hw, + u32 vf_id, + u32 num) +{ + struct hqm_function_resources *src, *dst; + + if (vf_id >= HQM_MAX_NUM_VFS) + return -EINVAL; + + src = &hw->pf; + dst = &hw->vf[vf_id]; + + /* If the VF is locked, its resource assignment can't be changed */ + if (hqm_vf_is_locked(hw, vf_id)) + return -EPERM; + + return hqm_transfer_bitmap_resources(src->avail_hist_list_entries, + dst->avail_hist_list_entries, + num); +} + +int hqm_update_vf_atomic_inflights(struct hqm_hw *hw, + u32 vf_id, + u32 num) +{ + struct hqm_function_resources *src, *dst; + + if (vf_id >= HQM_MAX_NUM_VFS) + return 
-EINVAL; + + src = &hw->pf; + dst = &hw->vf[vf_id]; + + /* If the VF is locked, its resource assignment can't be changed */ + if (hqm_vf_is_locked(hw, vf_id)) + return -EPERM; + + return hqm_transfer_bitmap_resources(src->avail_aqed_freelist_entries, + dst->avail_aqed_freelist_entries, + num); +} + +static int hqm_attach_ldb_queues(struct hqm_hw *hw, + struct hqm_function_resources *rsrcs, + struct hqm_domain *domain, + u32 num_queues, + struct hqm_cmd_response *resp) +{ + unsigned int i, j; + + if (rsrcs->num_avail_ldb_queues < num_queues) { + resp->status = HQM_ST_LDB_QUEUES_UNAVAILABLE; + return -1; + } + + for (i = 0; i < num_queues; i++) { + struct hqm_ldb_queue *queue; + + queue = HQM_FUNC_LIST_HEAD(rsrcs->avail_ldb_queues, + typeof(*queue)); + if (!queue) { + HQM_BASE_ERR(hw, + "[%s()] Internal error: domain validation failed\n", + __func__); + goto cleanup; + } + + hqm_list_del(&rsrcs->avail_ldb_queues, &queue->func_list); + + queue->domain_id = domain->id; + queue->owned = true; + + hqm_list_add(&domain->avail_ldb_queues, &queue->domain_list); + } + + rsrcs->num_avail_ldb_queues -= num_queues; + + return 0; + +cleanup: + + /* Return the assigned queues */ + for (j = 0; j < i; j++) { + struct hqm_ldb_queue *queue; + + queue = HQM_FUNC_LIST_HEAD(domain->avail_ldb_queues, + typeof(*queue)); + /* Unrecoverable internal error */ + if (!queue) + break; + + queue->owned = false; + + hqm_list_del(&domain->avail_ldb_queues, &queue->domain_list); + + hqm_list_add(&rsrcs->avail_ldb_queues, &queue->func_list); + } + + return -EFAULT; +} + +static struct hqm_ldb_port * +hqm_get_next_ldb_port(struct hqm_hw *hw, + struct hqm_function_resources *rsrcs, + u32 domain_id) +{ + struct hqm_list_entry *iter __attribute__((unused)); + struct hqm_ldb_port *port; + + /* To reduce the odds of consecutive load-balanced ports mapping to the + * same queue(s), the driver attempts to allocate ports whose neighbors + * are owned by a different domain. + */ + HQM_FUNC_LIST_FOR(rsrcs->avail_ldb_ports, port, iter) { + u32 next, prev; + u32 phys_id; + + phys_id = port->id.phys_id; + next = phys_id + 1; + prev = phys_id - 1; + + if (phys_id == HQM_MAX_NUM_LDB_PORTS - 1) + next = 0; + if (phys_id == 0) + prev = HQM_MAX_NUM_LDB_PORTS - 1; + + if (!hw->rsrcs.ldb_ports[next].owned || + hw->rsrcs.ldb_ports[next].domain_id.phys_id == domain_id) + continue; + + if (!hw->rsrcs.ldb_ports[prev].owned || + hw->rsrcs.ldb_ports[prev].domain_id.phys_id == domain_id) + continue; + + return port; + } + + /* Failing that, the driver looks for a port with one neighbor owned by + * a different domain and the other unallocated. + */ + HQM_FUNC_LIST_FOR(rsrcs->avail_ldb_ports, port, iter) { + u32 next, prev; + u32 phys_id; + + phys_id = port->id.phys_id; + next = phys_id + 1; + prev = phys_id - 1; + + if (phys_id == HQM_MAX_NUM_LDB_PORTS - 1) + next = 0; + if (phys_id == 0) + prev = HQM_MAX_NUM_LDB_PORTS - 1; + + if (!hw->rsrcs.ldb_ports[prev].owned && + hw->rsrcs.ldb_ports[next].owned && + hw->rsrcs.ldb_ports[next].domain_id.phys_id != domain_id) + return port; + + if (!hw->rsrcs.ldb_ports[next].owned && + hw->rsrcs.ldb_ports[prev].owned && + hw->rsrcs.ldb_ports[prev].domain_id.phys_id != domain_id) + return port; + } + + /* Failing that, the driver looks for a port with both neighbors + * unallocated. 
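+	 * This is the third pass of the allocation policy: the first pass
+	 * above prefers a port whose physical neighbors (phys_id +/- 1,
+	 * with wrap-around at the ends of the port range) are both owned by
+	 * other domains, and the second pass accepts one neighbor owned by
+	 * another domain with the other unallocated. If this pass also
+	 * fails, the head of the available-port list is returned as a last
+	 * resort.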
+ */ + HQM_FUNC_LIST_FOR(rsrcs->avail_ldb_ports, port, iter) { + u32 next, prev; + u32 phys_id; + + phys_id = port->id.phys_id; + next = phys_id + 1; + prev = phys_id - 1; + + if (phys_id == HQM_MAX_NUM_LDB_PORTS - 1) + next = 0; + if (phys_id == 0) + prev = HQM_MAX_NUM_LDB_PORTS - 1; + + if (!hw->rsrcs.ldb_ports[prev].owned && + !hw->rsrcs.ldb_ports[next].owned) + return port; + } + + /* If all else fails, the driver returns the next available port. */ + return HQM_FUNC_LIST_HEAD(rsrcs->avail_ldb_ports, typeof(*port)); +} + +static int hqm_attach_ldb_ports(struct hqm_hw *hw, + struct hqm_function_resources *rsrcs, + struct hqm_domain *domain, + u32 num_ports, + struct hqm_cmd_response *resp) +{ + unsigned int i, j; + + if (rsrcs->num_avail_ldb_ports < num_ports) { + resp->status = HQM_ST_LDB_PORTS_UNAVAILABLE; + return -1; + } + + for (i = 0; i < num_ports; i++) { + struct hqm_ldb_port *port; + + port = hqm_get_next_ldb_port(hw, rsrcs, domain->id.phys_id); + + if (!port) { + HQM_BASE_ERR(hw, + "[%s()] Internal error: domain validation failed\n", + __func__); + goto cleanup; + } + + hqm_list_del(&rsrcs->avail_ldb_ports, &port->func_list); + + port->domain_id = domain->id; + port->owned = true; + + hqm_list_add(&domain->avail_ldb_ports, &port->domain_list); + } + + rsrcs->num_avail_ldb_ports -= num_ports; + + return 0; + +cleanup: + + /* Return the assigned ports */ + for (j = 0; j < i; j++) { + struct hqm_ldb_port *port; + + port = HQM_FUNC_LIST_HEAD(domain->avail_ldb_ports, + typeof(*port)); + /* Unrecoverable internal error */ + if (!port) + break; + + port->owned = false; + + hqm_list_del(&domain->avail_ldb_ports, &port->domain_list); + + hqm_list_add(&rsrcs->avail_ldb_ports, &port->func_list); + } + + return -EFAULT; +} + +static int hqm_attach_dir_ports(struct hqm_hw *hw, + struct hqm_function_resources *rsrcs, + struct hqm_domain *domain, + u32 num_ports, + struct hqm_cmd_response *resp) +{ + unsigned int i, j; + + if (rsrcs->num_avail_dir_pq_pairs < num_ports) { + resp->status = HQM_ST_DIR_PORTS_UNAVAILABLE; + return -1; + } + + for (i = 0; i < num_ports; i++) { + struct hqm_dir_pq_pair *port; + + port = HQM_FUNC_LIST_HEAD(rsrcs->avail_dir_pq_pairs, + typeof(*port)); + if (!port) { + HQM_BASE_ERR(hw, + "[%s()] Internal error: domain validation failed\n", + __func__); + goto cleanup; + } + + hqm_list_del(&rsrcs->avail_dir_pq_pairs, &port->func_list); + + port->domain_id = domain->id; + port->owned = true; + + hqm_list_add(&domain->avail_dir_pq_pairs, &port->domain_list); + } + + rsrcs->num_avail_dir_pq_pairs -= num_ports; + + return 0; + +cleanup: + + /* Return the assigned ports */ + for (j = 0; j < i; j++) { + struct hqm_dir_pq_pair *port; + + port = HQM_FUNC_LIST_HEAD(domain->avail_dir_pq_pairs, + typeof(*port)); + /* Unrecoverable internal error */ + if (!port) + break; + + port->owned = false; + + hqm_list_del(&domain->avail_dir_pq_pairs, &port->domain_list); + + hqm_list_add(&rsrcs->avail_dir_pq_pairs, &port->func_list); + } + + return -EFAULT; +} + +static int hqm_attach_ldb_credits(struct hqm_function_resources *rsrcs, + struct hqm_domain *domain, + u32 num_credits, + struct hqm_cmd_response *resp) +{ + struct hqm_bitmap *bitmap = rsrcs->avail_qed_freelist_entries; + + if (hqm_bitmap_count(bitmap) < (int)num_credits) { + resp->status = HQM_ST_LDB_CREDITS_UNAVAILABLE; + return -1; + } + + if (num_credits) { + int base; + + base = hqm_bitmap_find_set_bit_range(bitmap, num_credits); + if (base < 0) + goto error; + + domain->qed_freelist.base = base; + domain->qed_freelist.bound 
= base + num_credits; + domain->qed_freelist.offset = 0; + + hqm_bitmap_clear_range(bitmap, base, num_credits); + } + + return 0; + +error: + resp->status = HQM_ST_QED_FREELIST_ENTRIES_UNAVAILABLE; + return -1; +} + +static int hqm_attach_dir_credits(struct hqm_function_resources *rsrcs, + struct hqm_domain *domain, + u32 num_credits, + struct hqm_cmd_response *resp) +{ + struct hqm_bitmap *bitmap = rsrcs->avail_dqed_freelist_entries; + + if (hqm_bitmap_count(bitmap) < (int)num_credits) { + resp->status = HQM_ST_DIR_CREDITS_UNAVAILABLE; + return -1; + } + + if (num_credits) { + int base; + + base = hqm_bitmap_find_set_bit_range(bitmap, num_credits); + if (base < 0) + goto error; + + domain->dqed_freelist.base = base; + domain->dqed_freelist.bound = base + num_credits; + domain->dqed_freelist.offset = 0; + + hqm_bitmap_clear_range(bitmap, base, num_credits); + } + + return 0; + +error: + resp->status = HQM_ST_DQED_FREELIST_ENTRIES_UNAVAILABLE; + return -1; +} + +static int hqm_attach_ldb_credit_pools(struct hqm_hw *hw, + struct hqm_function_resources *rsrcs, + struct hqm_domain *domain, + u32 num_credit_pools, + struct hqm_cmd_response *resp) +{ + unsigned int i, j; + + if (rsrcs->num_avail_ldb_credit_pools < num_credit_pools) { + resp->status = HQM_ST_LDB_CREDIT_POOLS_UNAVAILABLE; + return -1; + } + + for (i = 0; i < num_credit_pools; i++) { + struct hqm_credit_pool *pool; + + pool = HQM_FUNC_LIST_HEAD(rsrcs->avail_ldb_credit_pools, + typeof(*pool)); + if (!pool) { + HQM_BASE_ERR(hw, + "[%s()] Internal error: domain validation failed\n", + __func__); + goto cleanup; + } + + hqm_list_del(&rsrcs->avail_ldb_credit_pools, + &pool->func_list); + + pool->domain_id = domain->id; + pool->owned = true; + + hqm_list_add(&domain->avail_ldb_credit_pools, + &pool->domain_list); + } + + rsrcs->num_avail_ldb_credit_pools -= num_credit_pools; + + return 0; + +cleanup: + + /* Return the assigned credit pools */ + for (j = 0; j < i; j++) { + struct hqm_credit_pool *pool; + + pool = HQM_FUNC_LIST_HEAD(domain->avail_ldb_credit_pools, + typeof(*pool)); + /* Unrecoverable internal error */ + if (!pool) + break; + + pool->owned = false; + + hqm_list_del(&domain->avail_ldb_credit_pools, + &pool->domain_list); + + hqm_list_add(&rsrcs->avail_ldb_credit_pools, + &pool->func_list); + } + + return -EFAULT; +} + +static int hqm_attach_dir_credit_pools(struct hqm_hw *hw, + struct hqm_function_resources *rsrcs, + struct hqm_domain *domain, + u32 num_credit_pools, + struct hqm_cmd_response *resp) +{ + unsigned int i, j; + + if (rsrcs->num_avail_dir_credit_pools < num_credit_pools) { + resp->status = HQM_ST_DIR_CREDIT_POOLS_UNAVAILABLE; + return -1; + } + + for (i = 0; i < num_credit_pools; i++) { + struct hqm_credit_pool *pool; + + pool = HQM_FUNC_LIST_HEAD(rsrcs->avail_dir_credit_pools, + typeof(*pool)); + if (!pool) { + HQM_BASE_ERR(hw, + "[%s()] Internal error: domain validation failed\n", + __func__); + goto cleanup; + } + + hqm_list_del(&rsrcs->avail_dir_credit_pools, + &pool->func_list); + + pool->domain_id = domain->id; + pool->owned = true; + + hqm_list_add(&domain->avail_dir_credit_pools, + &pool->domain_list); + } + + rsrcs->num_avail_dir_credit_pools -= num_credit_pools; + + return 0; + +cleanup: + + /* Return the assigned credit pools */ + for (j = 0; j < i; j++) { + struct hqm_credit_pool *pool; + + pool = HQM_FUNC_LIST_HEAD(domain->avail_dir_credit_pools, + typeof(*pool)); + /* Unrecoverable internal error */ + if (!pool) + break; + + pool->owned = false; + + hqm_list_del(&domain->avail_dir_credit_pools, + 
&pool->domain_list); + + hqm_list_add(&rsrcs->avail_dir_credit_pools, + &pool->func_list); + } + + return -EFAULT; +} + +static int hqm_attach_atomic_inflights(struct hqm_function_resources *rsrcs, + struct hqm_domain *domain, + u32 num_atomic_inflights, + struct hqm_cmd_response *resp) +{ + if (num_atomic_inflights) { + struct hqm_bitmap *bitmap = + rsrcs->avail_aqed_freelist_entries; + int base; + + base = hqm_bitmap_find_set_bit_range(bitmap, + num_atomic_inflights); + if (base < 0) + goto error; + + domain->aqed_freelist.base = base; + domain->aqed_freelist.bound = base + num_atomic_inflights; + domain->aqed_freelist.offset = 0; + + hqm_bitmap_clear_range(bitmap, base, num_atomic_inflights); + } + + return 0; + +error: + resp->status = HQM_ST_ATOMIC_INFLIGHTS_UNAVAILABLE; + return -1; +} + +static int +hqm_attach_domain_hist_list_entries(struct hqm_function_resources *rsrcs, + struct hqm_domain *domain, + u32 num_hist_list_entries, + struct hqm_cmd_response *resp) +{ + struct hqm_bitmap *bitmap; + int base; + + if (num_hist_list_entries) { + bitmap = rsrcs->avail_hist_list_entries; + + base = hqm_bitmap_find_set_bit_range(bitmap, + num_hist_list_entries); + if (base < 0) + goto error; + + domain->total_hist_list_entries = num_hist_list_entries; + domain->avail_hist_list_entries = num_hist_list_entries; + domain->hist_list_entry_base = base; + domain->hist_list_entry_offset = 0; + + hqm_bitmap_clear_range(bitmap, base, num_hist_list_entries); + } + return 0; + +error: + resp->status = HQM_ST_HIST_LIST_ENTRIES_UNAVAILABLE; + return -1; +} + +static unsigned int +hqm_get_num_ports_in_use(struct hqm_hw *hw) +{ + unsigned int i, n = 0; + + for (i = 0; i < HQM_MAX_NUM_LDB_PORTS; i++) + if (hw->rsrcs.ldb_ports[i].owned) + n++; + + for (i = 0; i < HQM_MAX_NUM_DIR_PORTS; i++) + if (hw->rsrcs.dir_pq_pairs[i].owned) + n++; + + return n; +} + +static int +hqm_verify_create_sched_domain_args(struct hqm_hw *hw, + struct hqm_function_resources *rsrcs, + struct hqm_create_sched_domain_args *args, + struct hqm_cmd_response *resp) +{ + struct hqm_list_entry *iter __attribute__((unused)); + struct hqm_bitmap *ldb_credit_freelist; + struct hqm_bitmap *dir_credit_freelist; + unsigned int ldb_credit_freelist_count; + unsigned int dir_credit_freelist_count; + unsigned int max_contig_aqed_entries; + unsigned int max_contig_dqed_entries; + unsigned int max_contig_qed_entries; + unsigned int max_contig_hl_entries; + struct hqm_bitmap *aqed_freelist; + enum hqm_dev_revision revision; + + ldb_credit_freelist = rsrcs->avail_qed_freelist_entries; + dir_credit_freelist = rsrcs->avail_dqed_freelist_entries; + aqed_freelist = rsrcs->avail_aqed_freelist_entries; + + ldb_credit_freelist_count = hqm_bitmap_count(ldb_credit_freelist); + dir_credit_freelist_count = hqm_bitmap_count(dir_credit_freelist); + + max_contig_hl_entries = + hqm_bitmap_longest_set_range(rsrcs->avail_hist_list_entries); + max_contig_aqed_entries = + hqm_bitmap_longest_set_range(aqed_freelist); + max_contig_qed_entries = + hqm_bitmap_longest_set_range(ldb_credit_freelist); + max_contig_dqed_entries = + hqm_bitmap_longest_set_range(dir_credit_freelist); + + if (rsrcs->num_avail_domains < 1) + resp->status = HQM_ST_DOMAIN_UNAVAILABLE; + else if (rsrcs->num_avail_ldb_queues < args->num_ldb_queues) + resp->status = HQM_ST_LDB_QUEUES_UNAVAILABLE; + else if (rsrcs->num_avail_ldb_ports < args->num_ldb_ports) + resp->status = HQM_ST_LDB_PORTS_UNAVAILABLE; + else if (args->num_ldb_queues > 0 && args->num_ldb_ports == 0) + resp->status = 
HQM_ST_LDB_PORT_REQUIRED_FOR_LDB_QUEUES; + else if (rsrcs->num_avail_dir_pq_pairs < args->num_dir_ports) + resp->status = HQM_ST_DIR_PORTS_UNAVAILABLE; + else if (ldb_credit_freelist_count < args->num_ldb_credits) + resp->status = HQM_ST_LDB_CREDITS_UNAVAILABLE; + else if (dir_credit_freelist_count < args->num_dir_credits) + resp->status = HQM_ST_DIR_CREDITS_UNAVAILABLE; + else if (rsrcs->num_avail_ldb_credit_pools < args->num_ldb_credit_pools) + resp->status = HQM_ST_LDB_CREDIT_POOLS_UNAVAILABLE; + else if (rsrcs->num_avail_dir_credit_pools < args->num_dir_credit_pools) + resp->status = HQM_ST_DIR_CREDIT_POOLS_UNAVAILABLE; + else if (max_contig_hl_entries < args->num_hist_list_entries) + resp->status = HQM_ST_HIST_LIST_ENTRIES_UNAVAILABLE; + else if (max_contig_aqed_entries < args->num_atomic_inflights) + resp->status = HQM_ST_ATOMIC_INFLIGHTS_UNAVAILABLE; + else if (max_contig_qed_entries < args->num_ldb_credits) + resp->status = HQM_ST_QED_FREELIST_ENTRIES_UNAVAILABLE; + else if (max_contig_dqed_entries < args->num_dir_credits) + resp->status = HQM_ST_DQED_FREELIST_ENTRIES_UNAVAILABLE; + + /* HQM A-stepping workaround for hardware write buffer lock up issue: + * limit the maximum configured ports to less than 128 and disable CQ + * occupancy interrupts. + */ + revision = os_get_dev_revision(hw); + + if (revision < HQM_B0) { + u32 n = hqm_get_num_ports_in_use(hw); + + n += args->num_ldb_ports + args->num_dir_ports; + + if (n >= HQM_A_STEP_MAX_PORTS) + resp->status = args->num_ldb_ports ? + HQM_ST_LDB_PORTS_UNAVAILABLE : + HQM_ST_DIR_PORTS_UNAVAILABLE; + } + + if (resp->status) + return -1; + + return 0; +} + +static int +hqm_verify_create_ldb_pool_args(struct hqm_hw *hw, + u32 domain_id, + struct hqm_create_ldb_pool_args *args, + struct hqm_cmd_response *resp, + bool vf_request, + unsigned int vf_id) +{ + struct hqm_freelist *qed_freelist; + struct hqm_domain *domain; + + domain = hqm_get_domain_from_id(hw, domain_id, vf_request, vf_id); + + if (!domain) { + resp->status = HQM_ST_INVALID_DOMAIN_ID; + return -1; + } + + if (!domain->configured) { + resp->status = HQM_ST_DOMAIN_NOT_CONFIGURED; + return -1; + } + + qed_freelist = &domain->qed_freelist; + + if (hqm_freelist_count(qed_freelist) < args->num_ldb_credits) { + resp->status = HQM_ST_LDB_CREDITS_UNAVAILABLE; + return -1; + } + + if (hqm_list_empty(&domain->avail_ldb_credit_pools)) { + resp->status = HQM_ST_LDB_CREDIT_POOLS_UNAVAILABLE; + return -1; + } + + if (domain->started) { + resp->status = HQM_ST_DOMAIN_STARTED; + return -1; + } + + return 0; +} + +static void +hqm_configure_ldb_credit_pool(struct hqm_hw *hw, + struct hqm_domain *domain, + struct hqm_create_ldb_pool_args *args, + struct hqm_credit_pool *pool) +{ + union hqm_sys_ldb_pool_enbld r0 = { {0} }; + union hqm_chp_ldb_pool_crd_lim r1 = { {0} }; + union hqm_chp_ldb_pool_crd_cnt r2 = { {0} }; + union hqm_chp_qed_fl_base r3 = { {0} }; + union hqm_chp_qed_fl_lim r4 = { {0} }; + union hqm_chp_qed_fl_push_ptr r5 = { {0} }; + union hqm_chp_qed_fl_pop_ptr r6 = { {0} }; + + r1.field.limit = args->num_ldb_credits; + + HQM_CSR_WR(hw, HQM_CHP_LDB_POOL_CRD_LIM(pool->id.phys_id), r1.val); + + r2.field.count = args->num_ldb_credits; + + HQM_CSR_WR(hw, HQM_CHP_LDB_POOL_CRD_CNT(pool->id.phys_id), r2.val); + + r3.field.base = domain->qed_freelist.base + domain->qed_freelist.offset; + + HQM_CSR_WR(hw, HQM_CHP_QED_FL_BASE(pool->id.phys_id), r3.val); + + r4.field.freelist_disable = 0; + r4.field.limit = r3.field.base + args->num_ldb_credits - 1; + + HQM_CSR_WR(hw, 
HQM_CHP_QED_FL_LIM(pool->id.phys_id), r4.val); + + r5.field.push_ptr = r3.field.base; + r5.field.generation = 1; + + HQM_CSR_WR(hw, HQM_CHP_QED_FL_PUSH_PTR(pool->id.phys_id), r5.val); + + r6.field.pop_ptr = r3.field.base; + r6.field.generation = 0; + + HQM_CSR_WR(hw, HQM_CHP_QED_FL_POP_PTR(pool->id.phys_id), r6.val); + + r0.field.pool_enabled = 1; + + HQM_CSR_WR(hw, HQM_SYS_LDB_POOL_ENBLD(pool->id.phys_id), r0.val); + + pool->avail_credits = args->num_ldb_credits; + pool->total_credits = args->num_ldb_credits; + domain->qed_freelist.offset += args->num_ldb_credits; + + pool->configured = true; +} + +static int +hqm_verify_create_dir_pool_args(struct hqm_hw *hw, + u32 domain_id, + struct hqm_create_dir_pool_args *args, + struct hqm_cmd_response *resp, + bool vf_request, + unsigned int vf_id) +{ + struct hqm_freelist *dqed_freelist; + struct hqm_domain *domain; + + domain = hqm_get_domain_from_id(hw, domain_id, vf_request, vf_id); + + if (!domain) { + resp->status = HQM_ST_INVALID_DOMAIN_ID; + return -1; + } + + if (!domain->configured) { + resp->status = HQM_ST_DOMAIN_NOT_CONFIGURED; + return -1; + } + + dqed_freelist = &domain->dqed_freelist; + + if (hqm_freelist_count(dqed_freelist) < args->num_dir_credits) { + resp->status = HQM_ST_DIR_CREDITS_UNAVAILABLE; + return -1; + } + + if (hqm_list_empty(&domain->avail_dir_credit_pools)) { + resp->status = HQM_ST_DIR_CREDIT_POOLS_UNAVAILABLE; + return -1; + } + + if (domain->started) { + resp->status = HQM_ST_DOMAIN_STARTED; + return -1; + } + + return 0; +} + +static void +hqm_configure_dir_credit_pool(struct hqm_hw *hw, + struct hqm_domain *domain, + struct hqm_create_dir_pool_args *args, + struct hqm_credit_pool *pool) +{ + union hqm_sys_dir_pool_enbld r0 = { {0} }; + union hqm_chp_dir_pool_crd_lim r1 = { {0} }; + union hqm_chp_dir_pool_crd_cnt r2 = { {0} }; + union hqm_chp_dqed_fl_base r3 = { {0} }; + union hqm_chp_dqed_fl_lim r4 = { {0} }; + union hqm_chp_dqed_fl_push_ptr r5 = { {0} }; + union hqm_chp_dqed_fl_pop_ptr r6 = { {0} }; + + r1.field.limit = args->num_dir_credits; + + HQM_CSR_WR(hw, HQM_CHP_DIR_POOL_CRD_LIM(pool->id.phys_id), r1.val); + + r2.field.count = args->num_dir_credits; + + HQM_CSR_WR(hw, HQM_CHP_DIR_POOL_CRD_CNT(pool->id.phys_id), r2.val); + + r3.field.base = domain->dqed_freelist.base + + domain->dqed_freelist.offset; + + HQM_CSR_WR(hw, HQM_CHP_DQED_FL_BASE(pool->id.phys_id), r3.val); + + r4.field.freelist_disable = 0; + r4.field.limit = r3.field.base + args->num_dir_credits - 1; + + HQM_CSR_WR(hw, HQM_CHP_DQED_FL_LIM(pool->id.phys_id), r4.val); + + r5.field.push_ptr = r3.field.base; + r5.field.generation = 1; + + HQM_CSR_WR(hw, HQM_CHP_DQED_FL_PUSH_PTR(pool->id.phys_id), r5.val); + + r6.field.pop_ptr = r3.field.base; + r6.field.generation = 0; + + HQM_CSR_WR(hw, HQM_CHP_DQED_FL_POP_PTR(pool->id.phys_id), r6.val); + + r0.field.pool_enabled = 1; + + HQM_CSR_WR(hw, HQM_SYS_DIR_POOL_ENBLD(pool->id.phys_id), r0.val); + + pool->avail_credits = args->num_dir_credits; + pool->total_credits = args->num_dir_credits; + domain->dqed_freelist.offset += args->num_dir_credits; + + pool->configured = true; +} + +static int +hqm_verify_create_ldb_queue_args(struct hqm_hw *hw, + u32 domain_id, + struct hqm_create_ldb_queue_args *args, + struct hqm_cmd_response *resp, + bool vf_request, + unsigned int vf_id) +{ + struct hqm_freelist *aqed_freelist; + struct hqm_domain *domain; + int i; + + domain = hqm_get_domain_from_id(hw, domain_id, vf_request, vf_id); + + if (!domain) { + resp->status = HQM_ST_INVALID_DOMAIN_ID; + return -1; + } + + if 
(!domain->configured) { + resp->status = HQM_ST_DOMAIN_NOT_CONFIGURED; + return -1; + } + + if (domain->started) { + resp->status = HQM_ST_DOMAIN_STARTED; + return -1; + } + + if (hqm_list_empty(&domain->avail_ldb_queues)) { + resp->status = HQM_ST_LDB_QUEUES_UNAVAILABLE; + return -1; + } + + if (args->num_sequence_numbers) { + for (i = 0; i < HQM_MAX_NUM_SEQUENCE_NUMBER_GROUPS; i++) { + struct hqm_sn_group *group = &hw->rsrcs.sn_groups[i]; + + if (group->sequence_numbers_per_queue == + args->num_sequence_numbers && + !hqm_sn_group_full(group)) + break; + } + + if (i == HQM_MAX_NUM_SEQUENCE_NUMBER_GROUPS) { + resp->status = HQM_ST_SEQUENCE_NUMBERS_UNAVAILABLE; + return -1; + } + } + + if (args->num_qid_inflights > 4096) { + resp->status = HQM_ST_INVALID_QID_INFLIGHT_ALLOCATION; + return -1; + } + + /* Inflights must be <= number of sequence numbers if ordered */ + if (args->num_sequence_numbers != 0 && + args->num_qid_inflights > args->num_sequence_numbers) { + resp->status = HQM_ST_INVALID_QID_INFLIGHT_ALLOCATION; + return -1; + } + + aqed_freelist = &domain->aqed_freelist; + + if (hqm_freelist_count(aqed_freelist) < args->num_atomic_inflights) { + resp->status = HQM_ST_ATOMIC_INFLIGHTS_UNAVAILABLE; + return -1; + } + + return 0; +} + +static int +hqm_verify_create_dir_queue_args(struct hqm_hw *hw, + u32 domain_id, + struct hqm_create_dir_queue_args *args, + struct hqm_cmd_response *resp, + bool vf_request, + unsigned int vf_id) +{ + struct hqm_domain *domain; + + domain = hqm_get_domain_from_id(hw, domain_id, vf_request, vf_id); + + if (!domain) { + resp->status = HQM_ST_INVALID_DOMAIN_ID; + return -1; + } + + if (!domain->configured) { + resp->status = HQM_ST_DOMAIN_NOT_CONFIGURED; + return -1; + } + + if (domain->started) { + resp->status = HQM_ST_DOMAIN_STARTED; + return -1; + } + + /* If the user claims the port is already configured, validate the port + * ID, its domain, and whether the port is configured. + */ + if (args->port_id != -1) { + struct hqm_dir_pq_pair *port; + + port = hqm_get_domain_used_dir_pq(args->port_id, + vf_request, + domain); + + if (!port || port->domain_id.phys_id != domain->id.phys_id || + !port->port_configured) { + resp->status = HQM_ST_INVALID_PORT_ID; + return -1; + } + } + + /* If the queue's port is not configured, validate that a free + * port-queue pair is available. 
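+	 * (Directed ports and queues are allocated as pairs, represented by
+	 * struct hqm_dir_pq_pair, so a queue created without a
+	 * pre-configured port consumes one of the domain's available
+	 * port-queue pairs.)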
+ */ + if (args->port_id == -1 && + hqm_list_empty(&domain->avail_dir_pq_pairs)) { + resp->status = HQM_ST_DIR_QUEUES_UNAVAILABLE; + return -1; + } + + return 0; +} + +static void hqm_configure_ldb_queue(struct hqm_hw *hw, + struct hqm_domain *domain, + struct hqm_ldb_queue *queue, + struct hqm_create_ldb_queue_args *args, + bool vf_request, + unsigned int vf_id) +{ + union hqm_sys_vf_ldb_vqid_v r0 = { {0} }; + union hqm_sys_vf_ldb_vqid2qid r1 = { {0} }; + union hqm_sys_ldb_qid2vqid r2 = { {0} }; + union hqm_sys_ldb_vasqid_v r3 = { {0} }; + union hqm_lsp_qid_ldb_infl_lim r4 = { {0} }; + union hqm_lsp_qid_aqed_active_lim r5 = { {0} }; + union hqm_aqed_pipe_fl_lim r6 = { {0} }; + union hqm_aqed_pipe_fl_base r7 = { {0} }; + union hqm_chp_ord_qid_sn_map r11 = { {0} }; + union hqm_sys_ldb_qid_cfg_v r12 = { {0} }; + union hqm_sys_ldb_qid_v r13 = { {0} }; + union hqm_aqed_pipe_fl_push_ptr r14 = { {0} }; + union hqm_aqed_pipe_fl_pop_ptr r15 = { {0} }; + union hqm_aqed_pipe_qid_fid_lim r16 = { {0} }; + union hqm_ro_pipe_qid2grpslt r17 = { {0} }; + struct hqm_sn_group *sn_group; + unsigned int offs; + + /* QID write permissions are turned on when the domain is started */ + r3.field.vasqid_v = 0; + + offs = domain->id.phys_id * HQM_MAX_NUM_LDB_QUEUES + queue->id.phys_id; + + HQM_CSR_WR(hw, HQM_SYS_LDB_VASQID_V(offs), r3.val); + + /* Unordered QIDs get 4K inflights, ordered get as many as the number + * of sequence numbers. + */ + r4.field.limit = args->num_qid_inflights; + + HQM_CSR_WR(hw, HQM_LSP_QID_LDB_INFL_LIM(queue->id.phys_id), r4.val); + + r5.field.limit = queue->aqed_freelist.bound - + queue->aqed_freelist.base; + + if (r5.field.limit > HQM_MAX_NUM_AQOS_ENTRIES) + r5.field.limit = HQM_MAX_NUM_AQOS_ENTRIES; + + /* AQOS */ + HQM_CSR_WR(hw, HQM_LSP_QID_AQED_ACTIVE_LIM(queue->id.phys_id), r5.val); + + r6.field.freelist_disable = 0; + r6.field.limit = queue->aqed_freelist.bound - 1; + + HQM_CSR_WR(hw, HQM_AQED_PIPE_FL_LIM(queue->id.phys_id), r6.val); + + r7.field.base = queue->aqed_freelist.base; + + HQM_CSR_WR(hw, HQM_AQED_PIPE_FL_BASE(queue->id.phys_id), r7.val); + + r14.field.push_ptr = r7.field.base; + r14.field.generation = 1; + + HQM_CSR_WR(hw, HQM_AQED_PIPE_FL_PUSH_PTR(queue->id.phys_id), r14.val); + + r15.field.pop_ptr = r7.field.base; + r15.field.generation = 0; + + HQM_CSR_WR(hw, HQM_AQED_PIPE_FL_POP_PTR(queue->id.phys_id), r15.val); + + /* Configure SNs */ + sn_group = &hw->rsrcs.sn_groups[queue->sn_group]; + r11.field.mode = sn_group->mode; + r11.field.slot = queue->sn_slot; + r11.field.grp = sn_group->id; + + HQM_CSR_WR(hw, HQM_CHP_ORD_QID_SN_MAP(queue->id.phys_id), r11.val); + + /* This register limits the number of inflight flows a queue can have + * at one time. It has an upper bound of 2048, but can be + * over-subscribed. 512 is chosen so that a single queue doesn't use + * the entire atomic storage, but can use a substantial portion if + * needed. 
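+	 * Over-subscription here means that the per-queue limits, summed
+	 * across all configured queues, are allowed to exceed the device's
+	 * total atomic inflight storage.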
+ */ + r16.field.qid_fid_limit = 512; + + HQM_CSR_WR(hw, HQM_AQED_PIPE_QID_FID_LIM(queue->id.phys_id), r16.val); + + r17.field.group = sn_group->id; + r17.field.slot = queue->sn_slot; + + HQM_CSR_WR(hw, HQM_RO_PIPE_QID2GRPSLT(queue->id.phys_id), r17.val); + + r12.field.sn_cfg_v = (args->num_sequence_numbers != 0); + r12.field.fid_cfg_v = (args->num_atomic_inflights != 0); + + HQM_CSR_WR(hw, HQM_SYS_LDB_QID_CFG_V(queue->id.phys_id), r12.val); + + if (vf_request) { + unsigned int offs; + + r0.field.vqid_v = 1; + + offs = vf_id * HQM_MAX_NUM_LDB_QUEUES + queue->id.virt_id; + + HQM_CSR_WR(hw, HQM_SYS_VF_LDB_VQID_V(offs), r0.val); + + r1.field.qid = queue->id.phys_id; + + HQM_CSR_WR(hw, HQM_SYS_VF_LDB_VQID2QID(offs), r1.val); + + r2.field.vqid = queue->id.virt_id; + + offs = vf_id * HQM_MAX_NUM_LDB_QUEUES + queue->id.phys_id; + + HQM_CSR_WR(hw, HQM_SYS_LDB_QID2VQID(offs), r2.val); + } + + r13.field.qid_v = 1; + + HQM_CSR_WR(hw, HQM_SYS_LDB_QID_V(queue->id.phys_id), r13.val); +} + +static void hqm_configure_dir_queue(struct hqm_hw *hw, + struct hqm_domain *domain, + struct hqm_dir_pq_pair *queue, + bool vf_request, + unsigned int vf_id) +{ + union hqm_sys_dir_vasqid_v r0 = { {0} }; + unsigned int offs; + + /* QID write permissions are turned on when the domain is started */ + r0.field.vasqid_v = 0; + + offs = (domain->id.phys_id * HQM_MAX_NUM_DIR_PORTS) + queue->id.phys_id; + + HQM_CSR_WR(hw, HQM_SYS_DIR_VASQID_V(offs), r0.val); + + if (vf_request) { + union hqm_sys_vf_dir_vqid_v r1 = { {0} }; + union hqm_sys_vf_dir_vqid2qid r2 = { {0} }; + + r1.field.vqid_v = 1; + + offs = (vf_id * HQM_MAX_NUM_DIR_PORTS) + queue->id.virt_id; + + HQM_CSR_WR(hw, HQM_SYS_VF_DIR_VQID_V(offs), r1.val); + + r2.field.qid = queue->id.phys_id; + + HQM_CSR_WR(hw, HQM_SYS_VF_DIR_VQID2QID(offs), r2.val); + } else { + union hqm_sys_dir_qid_v r3 = { {0} }; + + r3.field.qid_v = 1; + + HQM_CSR_WR(hw, HQM_SYS_DIR_QID_V(queue->id.phys_id), r3.val); + } + + queue->queue_configured = true; +} + +static int +hqm_verify_create_ldb_port_args(struct hqm_hw *hw, + u32 domain_id, + uintptr_t pop_count_dma_base, + uintptr_t cq_dma_base, + struct hqm_create_ldb_port_args *args, + struct hqm_cmd_response *resp, + bool vf_request, + unsigned int vf_id) +{ + struct hqm_domain *domain; + struct hqm_credit_pool *pool; + + domain = hqm_get_domain_from_id(hw, domain_id, vf_request, vf_id); + + if (!domain) { + resp->status = HQM_ST_INVALID_DOMAIN_ID; + return -1; + } + + if (!domain->configured) { + resp->status = HQM_ST_DOMAIN_NOT_CONFIGURED; + return -1; + } + + if (domain->started) { + resp->status = HQM_ST_DOMAIN_STARTED; + return -1; + } + + if (hqm_list_empty(&domain->avail_ldb_ports)) { + resp->status = HQM_ST_LDB_PORTS_UNAVAILABLE; + return -1; + } + + /* If the scheduling domain has no LDB queues, we configure the + * hardware to not supply the port with any LDB credits. In that + * case, ignore the LDB credit arguments. 
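+	 * A domain with neither used nor available LDB queues cannot
+	 * enqueue to load-balanced queues, so its ports never need LDB
+	 * credits and the pool ID, watermark, and quantum checks below are
+	 * skipped.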
+ */ + if (!hqm_list_empty(&domain->used_ldb_queues) || + !hqm_list_empty(&domain->avail_ldb_queues)) { + pool = hqm_get_domain_ldb_pool(args->ldb_credit_pool_id, + vf_request, + domain); + + if (!pool || !pool->configured || + pool->domain_id.phys_id != domain->id.phys_id) { + resp->status = HQM_ST_INVALID_LDB_CREDIT_POOL_ID; + return -1; + } + + if (args->ldb_credit_high_watermark > pool->avail_credits) { + resp->status = HQM_ST_LDB_CREDITS_UNAVAILABLE; + return -1; + } + + if (args->ldb_credit_low_watermark >= + args->ldb_credit_high_watermark) { + resp->status = HQM_ST_INVALID_LDB_CREDIT_LOW_WATERMARK; + return -1; + } + + if (args->ldb_credit_quantum >= + args->ldb_credit_high_watermark) { + resp->status = HQM_ST_INVALID_LDB_CREDIT_QUANTUM; + return -1; + } + + if (args->ldb_credit_quantum > HQM_MAX_PORT_CREDIT_QUANTUM) { + resp->status = HQM_ST_INVALID_LDB_CREDIT_QUANTUM; + return -1; + } + } + + /* Likewise, if the scheduling domain has no DIR queues, we configure + * the hardware to not supply the port with any DIR credits. In that + * case, ignore the DIR credit arguments. + */ + if (!hqm_list_empty(&domain->used_dir_pq_pairs) || + !hqm_list_empty(&domain->avail_dir_pq_pairs)) { + pool = hqm_get_domain_dir_pool(args->dir_credit_pool_id, + vf_request, + domain); + + if (!pool || !pool->configured || + pool->domain_id.phys_id != domain->id.phys_id) { + resp->status = HQM_ST_INVALID_DIR_CREDIT_POOL_ID; + return -1; + } + + if (args->dir_credit_high_watermark > pool->avail_credits) { + resp->status = HQM_ST_DIR_CREDITS_UNAVAILABLE; + return -1; + } + + if (args->dir_credit_low_watermark >= + args->dir_credit_high_watermark) { + resp->status = HQM_ST_INVALID_DIR_CREDIT_LOW_WATERMARK; + return -1; + } + + if (args->dir_credit_quantum >= + args->dir_credit_high_watermark) { + resp->status = HQM_ST_INVALID_DIR_CREDIT_QUANTUM; + return -1; + } + + if (args->dir_credit_quantum > HQM_MAX_PORT_CREDIT_QUANTUM) { + resp->status = HQM_ST_INVALID_DIR_CREDIT_QUANTUM; + return -1; + } + } + + /* Check cache-line alignment */ + if ((pop_count_dma_base & 0x3F) != 0) { + resp->status = HQM_ST_INVALID_POP_COUNT_VIRT_ADDR; + return -1; + } + + if ((cq_dma_base & 0x3F) != 0) { + resp->status = HQM_ST_INVALID_CQ_VIRT_ADDR; + return -1; + } + + if (args->cq_depth != 1 && + args->cq_depth != 2 && + args->cq_depth != 4 && + args->cq_depth != 8 && + args->cq_depth != 16 && + args->cq_depth != 32 && + args->cq_depth != 64 && + args->cq_depth != 128 && + args->cq_depth != 256 && + args->cq_depth != 512 && + args->cq_depth != 1024) { + resp->status = HQM_ST_INVALID_CQ_DEPTH; + return -1; + } + + /* The history list size must be >= 1 */ + if (!args->cq_history_list_size) { + resp->status = HQM_ST_INVALID_HIST_LIST_DEPTH; + return -1; + } + + if (args->cq_history_list_size > domain->avail_hist_list_entries) { + resp->status = HQM_ST_HIST_LIST_ENTRIES_UNAVAILABLE; + return -1; + } + + return 0; +} + +static int +hqm_verify_create_dir_port_args(struct hqm_hw *hw, + u32 domain_id, + uintptr_t pop_count_dma_base, + uintptr_t cq_dma_base, + struct hqm_create_dir_port_args *args, + struct hqm_cmd_response *resp, + bool vf_request, + unsigned int vf_id) +{ + struct hqm_domain *domain; + struct hqm_credit_pool *pool; + + domain = hqm_get_domain_from_id(hw, domain_id, vf_request, vf_id); + + if (!domain) { + resp->status = HQM_ST_INVALID_DOMAIN_ID; + return -1; + } + + if (!domain->configured) { + resp->status = HQM_ST_DOMAIN_NOT_CONFIGURED; + return -1; + } + + if (domain->started) { + resp->status = 
HQM_ST_DOMAIN_STARTED; + return -1; + } + + /* If the user claims the queue is already configured, validate + * the queue ID, its domain, and whether the queue is configured. + */ + if (args->queue_id != -1) { + struct hqm_dir_pq_pair *queue; + + queue = hqm_get_domain_used_dir_pq(args->queue_id, + vf_request, + domain); + + if (!queue || queue->domain_id.phys_id != domain->id.phys_id || + !queue->queue_configured) { + resp->status = HQM_ST_INVALID_DIR_QUEUE_ID; + return -1; + } + } + + /* If the port's queue is not configured, validate that a free + * port-queue pair is available. + */ + if (args->queue_id == -1 && + hqm_list_empty(&domain->avail_dir_pq_pairs)) { + resp->status = HQM_ST_DIR_PORTS_UNAVAILABLE; + return -1; + } + + /* If the scheduling domain has no LDB queues, we configure the + * hardware to not supply the port with any LDB credits. In that + * case, ignore the LDB credit arguments. + */ + if (!hqm_list_empty(&domain->used_ldb_queues) || + !hqm_list_empty(&domain->avail_ldb_queues)) { + pool = hqm_get_domain_ldb_pool(args->ldb_credit_pool_id, + vf_request, + domain); + + if (!pool || !pool->configured || + pool->domain_id.phys_id != domain->id.phys_id) { + resp->status = HQM_ST_INVALID_LDB_CREDIT_POOL_ID; + return -1; + } + + if (args->ldb_credit_high_watermark > pool->avail_credits) { + resp->status = HQM_ST_LDB_CREDITS_UNAVAILABLE; + return -1; + } + + if (args->ldb_credit_low_watermark >= + args->ldb_credit_high_watermark) { + resp->status = HQM_ST_INVALID_LDB_CREDIT_LOW_WATERMARK; + return -1; + } + + if (args->ldb_credit_quantum >= + args->ldb_credit_high_watermark) { + resp->status = HQM_ST_INVALID_LDB_CREDIT_QUANTUM; + return -1; + } + + if (args->ldb_credit_quantum > HQM_MAX_PORT_CREDIT_QUANTUM) { + resp->status = HQM_ST_INVALID_LDB_CREDIT_QUANTUM; + return -1; + } + } + + pool = hqm_get_domain_dir_pool(args->dir_credit_pool_id, + vf_request, + domain); + + if (!pool || !pool->configured || + pool->domain_id.phys_id != domain->id.phys_id) { + resp->status = HQM_ST_INVALID_DIR_CREDIT_POOL_ID; + return -1; + } + + if (args->dir_credit_high_watermark > pool->avail_credits) { + resp->status = HQM_ST_DIR_CREDITS_UNAVAILABLE; + return -1; + } + + if (args->dir_credit_low_watermark >= args->dir_credit_high_watermark) { + resp->status = HQM_ST_INVALID_DIR_CREDIT_LOW_WATERMARK; + return -1; + } + + if (args->dir_credit_quantum >= args->dir_credit_high_watermark) { + resp->status = HQM_ST_INVALID_DIR_CREDIT_QUANTUM; + return -1; + } + + if (args->dir_credit_quantum > HQM_MAX_PORT_CREDIT_QUANTUM) { + resp->status = HQM_ST_INVALID_DIR_CREDIT_QUANTUM; + return -1; + } + + /* Check cache-line alignment */ + if ((pop_count_dma_base & 0x3F) != 0) { + resp->status = HQM_ST_INVALID_POP_COUNT_VIRT_ADDR; + return -1; + } + + if ((cq_dma_base & 0x3F) != 0) { + resp->status = HQM_ST_INVALID_CQ_VIRT_ADDR; + return -1; + } + + if (args->cq_depth != 8 && + args->cq_depth != 16 && + args->cq_depth != 32 && + args->cq_depth != 64 && + args->cq_depth != 128 && + args->cq_depth != 256 && + args->cq_depth != 512 && + args->cq_depth != 1024) { + resp->status = HQM_ST_INVALID_CQ_DEPTH; + return -1; + } + + return 0; +} + +static int hqm_verify_start_domain_args(struct hqm_hw *hw, + u32 domain_id, + struct hqm_cmd_response *resp, + bool vf_request, + unsigned int vf_id) +{ + struct hqm_domain *domain; + + domain = hqm_get_domain_from_id(hw, domain_id, vf_request, vf_id); + + if (!domain) { + resp->status = HQM_ST_INVALID_DOMAIN_ID; + return -1; + } + + if (!domain->configured) { + resp->status = 
HQM_ST_DOMAIN_NOT_CONFIGURED; + return -1; + } + + if (domain->started) { + resp->status = HQM_ST_DOMAIN_STARTED; + return -1; + } + + return 0; +} + +static int hqm_verify_map_qid_args(struct hqm_hw *hw, + u32 domain_id, + struct hqm_map_qid_args *args, + struct hqm_cmd_response *resp, + bool vf_request, + unsigned int vf_id) +{ + struct hqm_domain *domain; + struct hqm_ldb_port *port; + struct hqm_ldb_queue *queue; + int id; + + domain = hqm_get_domain_from_id(hw, domain_id, vf_request, vf_id); + + if (!domain) { + resp->status = HQM_ST_INVALID_DOMAIN_ID; + return -1; + } + + if (!domain->configured) { + resp->status = HQM_ST_DOMAIN_NOT_CONFIGURED; + return -1; + } + + id = args->port_id; + + port = hqm_get_domain_used_ldb_port(id, vf_request, domain); + + if (!port || !port->configured) { + resp->status = HQM_ST_INVALID_PORT_ID; + return -1; + } + + if (args->priority >= HQM_QID_PRIORITIES) { + resp->status = HQM_ST_INVALID_PRIORITY; + return -1; + } + + queue = hqm_get_domain_ldb_queue(args->qid, vf_request, domain); + + if (!queue || !queue->configured) { + resp->status = HQM_ST_INVALID_QID; + return -1; + } + + if (queue->domain_id.phys_id != domain->id.phys_id) { + resp->status = HQM_ST_INVALID_QID; + return -1; + } + + if (port->domain_id.phys_id != domain->id.phys_id) { + resp->status = HQM_ST_INVALID_PORT_ID; + return -1; + } + + return 0; +} + +static bool hqm_port_find_slot(struct hqm_ldb_port *port, + enum hqm_qid_map_state state, + int *slot) +{ + int i; + + for (i = 0; i < HQM_MAX_NUM_QIDS_PER_LDB_CQ; i++) { + if (port->qid_map[i].state == state) + break; + } + + *slot = i; + + return (i < HQM_MAX_NUM_QIDS_PER_LDB_CQ); +} + +static bool hqm_port_find_slot_queue(struct hqm_ldb_port *port, + enum hqm_qid_map_state state, + struct hqm_ldb_queue *queue, + int *slot) +{ + int i; + + for (i = 0; i < HQM_MAX_NUM_QIDS_PER_LDB_CQ; i++) { + if (port->qid_map[i].state == state && + port->qid_map[i].qid == queue->id.phys_id) + break; + } + + *slot = i; + + return (i < HQM_MAX_NUM_QIDS_PER_LDB_CQ); +} + +static bool +hqm_port_find_slot_with_pending_map_queue(struct hqm_ldb_port *port, + struct hqm_ldb_queue *queue, + int *slot) +{ + int i; + + for (i = 0; i < HQM_MAX_NUM_QIDS_PER_LDB_CQ; i++) { + struct hqm_ldb_port_qid_map *map = &port->qid_map[i]; + + if (map->state == HQM_QUEUE_UNMAP_IN_PROGRESS_PENDING_MAP && + map->pending_qid == queue->id.phys_id) + break; + } + + *slot = i; + + return (i < HQM_MAX_NUM_QIDS_PER_LDB_CQ); +} + +static int hqm_port_slot_state_transition(struct hqm_hw *hw, + struct hqm_ldb_port *port, + struct hqm_ldb_queue *queue, + int slot, + enum hqm_qid_map_state new_state) +{ + enum hqm_qid_map_state curr_state = port->qid_map[slot].state; + struct hqm_domain *domain; + + domain = hqm_get_domain_from_id(hw, port->domain_id.phys_id, false, 0); + if (!domain) { + HQM_BASE_ERR(hw, + "[%s()] Internal error: unable to find domain %d\n", + __func__, port->domain_id.phys_id); + return -EFAULT; + } + + switch (curr_state) { + case HQM_QUEUE_UNMAPPED: + switch (new_state) { + case HQM_QUEUE_MAPPED: + queue->num_mappings++; + port->num_mappings++; + break; + case HQM_QUEUE_MAP_IN_PROGRESS: + queue->num_pending_additions++; + domain->num_pending_additions++; + break; + default: + goto error; + } + break; + case HQM_QUEUE_MAPPED: + switch (new_state) { + case HQM_QUEUE_UNMAPPED: + queue->num_mappings--; + port->num_mappings--; + break; + case HQM_QUEUE_UNMAP_IN_PROGRESS: + port->num_pending_removals++; + domain->num_pending_removals++; + break; + case HQM_QUEUE_MAPPED: + /* 
Priority change, nothing to update */ + break; + default: + goto error; + } + break; + case HQM_QUEUE_MAP_IN_PROGRESS: + switch (new_state) { + case HQM_QUEUE_UNMAPPED: + queue->num_pending_additions--; + domain->num_pending_additions--; + break; + case HQM_QUEUE_MAPPED: + queue->num_mappings++; + port->num_mappings++; + queue->num_pending_additions--; + domain->num_pending_additions--; + break; + default: + goto error; + } + break; + case HQM_QUEUE_UNMAP_IN_PROGRESS: + switch (new_state) { + case HQM_QUEUE_UNMAPPED: + port->num_pending_removals--; + domain->num_pending_removals--; + queue->num_mappings--; + port->num_mappings--; + break; + case HQM_QUEUE_MAPPED: + port->num_pending_removals--; + domain->num_pending_removals--; + break; + case HQM_QUEUE_UNMAP_IN_PROGRESS_PENDING_MAP: + /* Nothing to update */ + break; + default: + goto error; + } + break; + case HQM_QUEUE_UNMAP_IN_PROGRESS_PENDING_MAP: + switch (new_state) { + case HQM_QUEUE_UNMAP_IN_PROGRESS: + /* Nothing to update */ + break; + case HQM_QUEUE_UNMAPPED: + /* An UNMAP_IN_PROGRESS_PENDING_MAP slot briefly + * becomes UNMAPPED before it transitions to + * MAP_IN_PROGRESS. + */ + queue->num_mappings--; + port->num_mappings--; + port->num_pending_removals--; + domain->num_pending_removals--; + break; + default: + goto error; + } + break; + default: + goto error; + } + + port->qid_map[slot].state = new_state; + + HQM_BASE_INFO(hw, + "[%s()] queue %d -> port %d state transition (%d -> %d)\n", + __func__, queue->id.phys_id, port->id.phys_id, curr_state, + new_state); + return 0; + +error: + HQM_BASE_ERR(hw, + "[%s()] Internal error: invalid queue %d -> port %d state transition (%d -> %d)\n", + __func__, queue->id.phys_id, port->id.phys_id, curr_state, + new_state); + return -EFAULT; +} + +static int hqm_verify_map_qid_slot_available(struct hqm_ldb_port *port, + struct hqm_ldb_queue *queue, + struct hqm_cmd_response *resp) +{ + enum hqm_qid_map_state state; + int i; + + /* Unused slot available? */ + if (port->num_mappings < HQM_MAX_NUM_QIDS_PER_LDB_CQ) + return 0; + + /* If the queue is already mapped (from the application's perspective), + * this is simply a priority update. + */ + state = HQM_QUEUE_MAPPED; + if (hqm_port_find_slot_queue(port, state, queue, &i)) + return 0; + + state = HQM_QUEUE_MAP_IN_PROGRESS; + if (hqm_port_find_slot_queue(port, state, queue, &i)) + return 0; + + if (hqm_port_find_slot_with_pending_map_queue(port, queue, &i)) + return 0; + + /* If the slot contains an unmap in progress, it's considered + * available. 
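+	 * In that case the new mapping is recorded as pending and completed
+	 * once the unmap finishes (see the UNMAP_IN_PROGRESS_PENDING_MAP
+	 * handling in hqm_port_slot_state_transition() above).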
+ */ + state = HQM_QUEUE_UNMAP_IN_PROGRESS; + if (hqm_port_find_slot(port, state, &i)) + return 0; + + state = HQM_QUEUE_UNMAPPED; + if (hqm_port_find_slot(port, state, &i)) + return 0; + + resp->status = HQM_ST_NO_QID_SLOTS_AVAILABLE; + return -EINVAL; +} + +static int hqm_verify_unmap_qid_args(struct hqm_hw *hw, + u32 domain_id, + struct hqm_unmap_qid_args *args, + struct hqm_cmd_response *resp, + bool vf_request, + unsigned int vf_id) +{ + enum hqm_qid_map_state state; + struct hqm_domain *domain; + struct hqm_ldb_port *port; + struct hqm_ldb_queue *queue; + int slot; + int id; + + domain = hqm_get_domain_from_id(hw, domain_id, vf_request, vf_id); + + if (!domain) { + resp->status = HQM_ST_INVALID_DOMAIN_ID; + return -1; + } + + if (!domain->configured) { + resp->status = HQM_ST_DOMAIN_NOT_CONFIGURED; + return -1; + } + + id = args->port_id; + + port = hqm_get_domain_used_ldb_port(id, vf_request, domain); + + if (!port || !port->configured) { + resp->status = HQM_ST_INVALID_PORT_ID; + return -1; + } + + if (port->domain_id.phys_id != domain->id.phys_id) { + resp->status = HQM_ST_INVALID_PORT_ID; + return -1; + } + + queue = hqm_get_domain_ldb_queue(args->qid, vf_request, domain); + + if (!queue || !queue->configured) { + HQM_BASE_ERR(hw, "[%s()] Can't unmap unconfigured queue %d\n", + __func__, args->qid); + resp->status = HQM_ST_INVALID_QID; + return -1; + } + + /* Verify that the port has the queue mapped. From the application's + * perspective a queue is mapped if it is actually mapped, the map is + * in progress, or the map is blocked pending an unmap. + */ + state = HQM_QUEUE_MAPPED; + if (hqm_port_find_slot_queue(port, state, queue, &slot)) + return 0; + + state = HQM_QUEUE_MAP_IN_PROGRESS; + if (hqm_port_find_slot_queue(port, state, queue, &slot)) + return 0; + + if (hqm_port_find_slot_with_pending_map_queue(port, queue, &slot)) + return 0; + + resp->status = HQM_ST_INVALID_QID; + return -1; +} + +static int +hqm_verify_enable_ldb_port_args(struct hqm_hw *hw, + u32 domain_id, + struct hqm_enable_ldb_port_args *args, + struct hqm_cmd_response *resp, + bool vf_request, + unsigned int vf_id) +{ + struct hqm_domain *domain; + struct hqm_ldb_port *port; + int id; + + domain = hqm_get_domain_from_id(hw, domain_id, vf_request, vf_id); + + if (!domain) { + resp->status = HQM_ST_INVALID_DOMAIN_ID; + return -1; + } + + if (!domain->configured) { + resp->status = HQM_ST_DOMAIN_NOT_CONFIGURED; + return -1; + } + + id = args->port_id; + + port = hqm_get_domain_used_ldb_port(id, vf_request, domain); + + if (!port || !port->configured) { + resp->status = HQM_ST_INVALID_PORT_ID; + return -1; + } + + return 0; +} + +static int +hqm_verify_enable_dir_port_args(struct hqm_hw *hw, + u32 domain_id, + struct hqm_enable_dir_port_args *args, + struct hqm_cmd_response *resp, + bool vf_request, + unsigned int vf_id) +{ + struct hqm_domain *domain; + struct hqm_dir_pq_pair *port; + int id; + + domain = hqm_get_domain_from_id(hw, domain_id, vf_request, vf_id); + + if (!domain) { + resp->status = HQM_ST_INVALID_DOMAIN_ID; + return -1; + } + + if (!domain->configured) { + resp->status = HQM_ST_DOMAIN_NOT_CONFIGURED; + return -1; + } + + id = args->port_id; + + port = hqm_get_domain_used_dir_pq(id, vf_request, domain); + + if (!port || !port->port_configured) { + resp->status = HQM_ST_INVALID_PORT_ID; + return -1; + } + + return 0; +} + +static int +hqm_verify_disable_ldb_port_args(struct hqm_hw *hw, + u32 domain_id, + struct hqm_disable_ldb_port_args *args, + struct hqm_cmd_response *resp, + bool vf_request, 
+ unsigned int vf_id) +{ + struct hqm_domain *domain; + struct hqm_ldb_port *port; + int id; + + domain = hqm_get_domain_from_id(hw, domain_id, vf_request, vf_id); + + if (!domain) { + resp->status = HQM_ST_INVALID_DOMAIN_ID; + return -1; + } + + if (!domain->configured) { + resp->status = HQM_ST_DOMAIN_NOT_CONFIGURED; + return -1; + } + + id = args->port_id; + + port = hqm_get_domain_used_ldb_port(id, vf_request, domain); + + if (!port || !port->configured) { + resp->status = HQM_ST_INVALID_PORT_ID; + return -1; + } + + return 0; +} + +static int +hqm_verify_disable_dir_port_args(struct hqm_hw *hw, + u32 domain_id, + struct hqm_disable_dir_port_args *args, + struct hqm_cmd_response *resp, + bool vf_request, + unsigned int vf_id) +{ + struct hqm_domain *domain; + struct hqm_dir_pq_pair *port; + int id; + + domain = hqm_get_domain_from_id(hw, domain_id, vf_request, vf_id); + + if (!domain) { + resp->status = HQM_ST_INVALID_DOMAIN_ID; + return -1; + } + + if (!domain->configured) { + resp->status = HQM_ST_DOMAIN_NOT_CONFIGURED; + return -1; + } + + id = args->port_id; + + port = hqm_get_domain_used_dir_pq(id, vf_request, domain); + + if (!port || !port->port_configured) { + resp->status = HQM_ST_INVALID_PORT_ID; + return -1; + } + + return 0; +} + +static int +hqm_domain_attach_resources(struct hqm_hw *hw, + struct hqm_function_resources *rsrcs, + struct hqm_domain *domain, + struct hqm_create_sched_domain_args *args, + struct hqm_cmd_response *resp) +{ + int ret; + + ret = hqm_attach_ldb_queues(hw, + rsrcs, + domain, + args->num_ldb_queues, + resp); + if (ret < 0) + return ret; + + ret = hqm_attach_ldb_ports(hw, + rsrcs, + domain, + args->num_ldb_ports, + resp); + if (ret < 0) + return ret; + + ret = hqm_attach_dir_ports(hw, + rsrcs, + domain, + args->num_dir_ports, + resp); + if (ret < 0) + return ret; + + ret = hqm_attach_ldb_credits(rsrcs, + domain, + args->num_ldb_credits, + resp); + if (ret < 0) + return ret; + + ret = hqm_attach_dir_credits(rsrcs, + domain, + args->num_dir_credits, + resp); + if (ret < 0) + return ret; + + ret = hqm_attach_ldb_credit_pools(hw, + rsrcs, + domain, + args->num_ldb_credit_pools, + resp); + if (ret < 0) + return ret; + + ret = hqm_attach_dir_credit_pools(hw, + rsrcs, + domain, + args->num_dir_credit_pools, + resp); + if (ret < 0) + return ret; + + ret = hqm_attach_domain_hist_list_entries(rsrcs, + domain, + args->num_hist_list_entries, + resp); + if (ret < 0) + return ret; + + ret = hqm_attach_atomic_inflights(rsrcs, + domain, + args->num_atomic_inflights, + resp); + if (ret < 0) + return ret; + + domain->configured = true; + + domain->started = false; + + rsrcs->num_avail_domains--; + + return 0; +} + +static int +hqm_ldb_queue_attach_to_sn_group(struct hqm_hw *hw, + struct hqm_ldb_queue *queue, + struct hqm_create_ldb_queue_args *args) +{ + int slot = -1; + int i; + + queue->sn_cfg_valid = false; + + if (args->num_sequence_numbers == 0) + return 0; + + for (i = 0; i < HQM_MAX_NUM_SEQUENCE_NUMBER_GROUPS; i++) { + struct hqm_sn_group *group = &hw->rsrcs.sn_groups[i]; + + if (group->sequence_numbers_per_queue == + args->num_sequence_numbers && + !hqm_sn_group_full(group)) { + slot = hqm_sn_group_alloc_slot(group); + if (slot >= 0) + break; + } + } + + if (slot == -1) { + HQM_BASE_ERR(hw, + "[%s():%d] Internal error: no sequence number slots available\n", + __func__, __LINE__); + return -EFAULT; + } + + queue->sn_cfg_valid = true; + queue->sn_group = i; + queue->sn_slot = slot; + return 0; +} + +static int +hqm_ldb_queue_attach_resources(struct hqm_hw *hw, + 
struct hqm_domain *domain, + struct hqm_ldb_queue *queue, + struct hqm_create_ldb_queue_args *args) +{ + int ret; + + ret = hqm_ldb_queue_attach_to_sn_group(hw, queue, args); + if (ret) + return ret; + + /* Attach QID inflights */ + queue->num_qid_inflights = args->num_qid_inflights; + + /* Attach atomic inflights */ + queue->aqed_freelist.base = domain->aqed_freelist.base + + domain->aqed_freelist.offset; + queue->aqed_freelist.bound = queue->aqed_freelist.base + + args->num_atomic_inflights; + domain->aqed_freelist.offset += args->num_atomic_inflights; + + return 0; +} + +static void hqm_ldb_port_cq_enable(struct hqm_hw *hw, + struct hqm_ldb_port *port) +{ + union hqm_lsp_cq_ldb_dsbl reg; + + /* Don't re-enable the port if a removal is pending. The caller should + * mark this port as enabled (if it isn't already), and when the + * removal completes the port will be enabled. + */ + if (port->num_pending_removals) + return; + + reg.field.disabled = 0; + + HQM_CSR_WR(hw, HQM_LSP_CQ_LDB_DSBL(port->id.phys_id), reg.val); + + hqm_flush_csr(hw); +} + +static void hqm_ldb_port_cq_disable(struct hqm_hw *hw, + struct hqm_ldb_port *port) +{ + union hqm_lsp_cq_ldb_dsbl reg; + + reg.field.disabled = 1; + + HQM_CSR_WR(hw, HQM_LSP_CQ_LDB_DSBL(port->id.phys_id), reg.val); + + hqm_flush_csr(hw); +} + +static void hqm_dir_port_cq_enable(struct hqm_hw *hw, + struct hqm_dir_pq_pair *port) +{ + union hqm_lsp_cq_dir_dsbl reg; + + reg.field.disabled = 0; + + HQM_CSR_WR(hw, HQM_LSP_CQ_DIR_DSBL(port->id.phys_id), reg.val); + + hqm_flush_csr(hw); +} + +static void hqm_dir_port_cq_disable(struct hqm_hw *hw, + struct hqm_dir_pq_pair *port) +{ + union hqm_lsp_cq_dir_dsbl reg; + + reg.field.disabled = 1; + + HQM_CSR_WR(hw, HQM_LSP_CQ_DIR_DSBL(port->id.phys_id), reg.val); + + hqm_flush_csr(hw); +} + +static int hqm_ldb_port_configure_pp(struct hqm_hw *hw, + struct hqm_domain *domain, + struct hqm_ldb_port *port, + struct hqm_create_ldb_port_args *args, + bool vf_request, + unsigned int vf_id) +{ + union hqm_sys_ldb_pp2ldbpool r0 = { {0} }; + union hqm_sys_ldb_pp2dirpool r1 = { {0} }; + union hqm_sys_ldb_pp2vf_pf r2 = { {0} }; + union hqm_sys_ldb_pp2vas r3 = { {0} }; + union hqm_sys_ldb_pp_v r4 = { {0} }; + union hqm_sys_ldb_pp2vpp r5 = { {0} }; + union hqm_chp_ldb_pp_ldb_crd_hwm r6 = { {0} }; + union hqm_chp_ldb_pp_dir_crd_hwm r7 = { {0} }; + union hqm_chp_ldb_pp_ldb_crd_lwm r8 = { {0} }; + union hqm_chp_ldb_pp_dir_crd_lwm r9 = { {0} }; + union hqm_chp_ldb_pp_ldb_min_crd_qnt r10 = { {0} }; + union hqm_chp_ldb_pp_dir_min_crd_qnt r11 = { {0} }; + union hqm_chp_ldb_pp_ldb_crd_cnt r12 = { {0} }; + union hqm_chp_ldb_pp_dir_crd_cnt r13 = { {0} }; + union hqm_chp_ldb_ldb_pp2pool r14 = { {0} }; + union hqm_chp_ldb_dir_pp2pool r15 = { {0} }; + union hqm_chp_ldb_pp_crd_req_state r16 = { {0} }; + union hqm_chp_ldb_pp_ldb_push_ptr r17 = { {0} }; + union hqm_chp_ldb_pp_dir_push_ptr r18 = { {0} }; + + struct hqm_credit_pool *ldb_pool = NULL; + struct hqm_credit_pool *dir_pool = NULL; + unsigned int offs; + + if (port->ldb_pool_used) { + ldb_pool = hqm_get_domain_ldb_pool(args->ldb_credit_pool_id, + vf_request, + domain); + if (!ldb_pool) { + HQM_BASE_ERR(hw, + "[%s()] Internal error: port validation failed\n", + __func__); + return -EFAULT; + } + } + + if (port->dir_pool_used) { + dir_pool = hqm_get_domain_dir_pool(args->dir_credit_pool_id, + vf_request, + domain); + if (!dir_pool) { + HQM_BASE_ERR(hw, + "[%s()] Internal error: port validation failed\n", + __func__); + return -EFAULT; + } + } + + r0.field.ldbpool = 
(port->ldb_pool_used) ? ldb_pool->id.phys_id : 0; + + HQM_CSR_WR(hw, HQM_SYS_LDB_PP2LDBPOOL(port->id.phys_id), r0.val); + + r1.field.dirpool = (port->dir_pool_used) ? dir_pool->id.phys_id : 0; + + HQM_CSR_WR(hw, HQM_SYS_LDB_PP2DIRPOOL(port->id.phys_id), r1.val); + + r2.field.vf = vf_id; + r2.field.is_pf = !vf_request; + + HQM_CSR_WR(hw, HQM_SYS_LDB_PP2VF_PF(port->id.phys_id), r2.val); + + r3.field.vas = domain->id.phys_id; + + HQM_CSR_WR(hw, HQM_SYS_LDB_PP2VAS(port->id.phys_id), r3.val); + + r5.field.vpp = port->id.virt_id; + + offs = (vf_id * HQM_MAX_NUM_LDB_PORTS) + port->id.phys_id; + + HQM_CSR_WR(hw, HQM_SYS_LDB_PP2VPP(offs), r5.val); + + r6.field.hwm = args->ldb_credit_high_watermark; + + HQM_CSR_WR(hw, HQM_CHP_LDB_PP_LDB_CRD_HWM(port->id.phys_id), r6.val); + + r7.field.hwm = args->dir_credit_high_watermark; + + HQM_CSR_WR(hw, HQM_CHP_LDB_PP_DIR_CRD_HWM(port->id.phys_id), r7.val); + + r8.field.lwm = args->ldb_credit_low_watermark; + + HQM_CSR_WR(hw, HQM_CHP_LDB_PP_LDB_CRD_LWM(port->id.phys_id), r8.val); + + r9.field.lwm = args->dir_credit_low_watermark; + + HQM_CSR_WR(hw, HQM_CHP_LDB_PP_DIR_CRD_LWM(port->id.phys_id), r9.val); + + r10.field.quanta = args->ldb_credit_quantum; + + HQM_CSR_WR(hw, + HQM_CHP_LDB_PP_LDB_MIN_CRD_QNT(port->id.phys_id), + r10.val); + + r11.field.quanta = args->dir_credit_quantum; + + HQM_CSR_WR(hw, + HQM_CHP_LDB_PP_DIR_MIN_CRD_QNT(port->id.phys_id), + r11.val); + + r12.field.count = args->ldb_credit_high_watermark; + + HQM_CSR_WR(hw, HQM_CHP_LDB_PP_LDB_CRD_CNT(port->id.phys_id), r12.val); + + r13.field.count = args->dir_credit_high_watermark; + + HQM_CSR_WR(hw, HQM_CHP_LDB_PP_DIR_CRD_CNT(port->id.phys_id), r13.val); + + r14.field.pool = (port->ldb_pool_used) ? ldb_pool->id.phys_id : 0; + + HQM_CSR_WR(hw, HQM_CHP_LDB_LDB_PP2POOL(port->id.phys_id), r14.val); + + r15.field.pool = (port->dir_pool_used) ? 
dir_pool->id.phys_id : 0; + + HQM_CSR_WR(hw, HQM_CHP_LDB_DIR_PP2POOL(port->id.phys_id), r15.val); + + r16.field.no_pp_credit_update = 0; + + HQM_CSR_WR(hw, HQM_CHP_LDB_PP_CRD_REQ_STATE(port->id.phys_id), r16.val); + + r17.field.push_pointer = 0; + + HQM_CSR_WR(hw, HQM_CHP_LDB_PP_LDB_PUSH_PTR(port->id.phys_id), r17.val); + + r18.field.push_pointer = 0; + + HQM_CSR_WR(hw, HQM_CHP_LDB_PP_DIR_PUSH_PTR(port->id.phys_id), r18.val); + + if (vf_request) { + union hqm_sys_vf_ldb_vpp2pp r16 = { {0} }; + union hqm_sys_vf_ldb_vpp_v r17 = { {0} }; + + r16.field.pp = port->id.phys_id; + + offs = vf_id * HQM_MAX_NUM_LDB_PORTS + port->id.virt_id; + + HQM_CSR_WR(hw, HQM_SYS_VF_LDB_VPP2PP(offs), r16.val); + + r17.field.vpp_v = 1; + + HQM_CSR_WR(hw, HQM_SYS_VF_LDB_VPP_V(offs), r17.val); + } + + r4.field.pp_v = 1; + + HQM_CSR_WR(hw, + HQM_SYS_LDB_PP_V(port->id.phys_id), + r4.val); + + return 0; +} + +static int hqm_ldb_port_configure_cq(struct hqm_hw *hw, + struct hqm_ldb_port *port, + uintptr_t pop_count_dma_base, + uintptr_t cq_dma_base, + struct hqm_create_ldb_port_args *args, + bool vf_request, + unsigned int vf_id) +{ + int i; + + union hqm_sys_ldb_cq_addr_l r0 = { {0} }; + union hqm_sys_ldb_cq_addr_u r1 = { {0} }; + union hqm_sys_ldb_cq2vf_pf r2 = { {0} }; + union hqm_chp_ldb_cq_tkn_depth_sel r3 = { {0} }; + union hqm_chp_hist_list_lim r4 = { {0} }; + union hqm_chp_hist_list_base r5 = { {0} }; + union hqm_lsp_cq_ldb_infl_lim r6 = { {0} }; + union hqm_lsp_cq2priov r7 = { {0} }; + union hqm_chp_hist_list_push_ptr r8 = { {0} }; + union hqm_chp_hist_list_pop_ptr r9 = { {0} }; + union hqm_lsp_cq_ldb_tkn_depth_sel r10 = { {0} }; + union hqm_sys_ldb_pp_addr_l r11 = { {0} }; + union hqm_sys_ldb_pp_addr_u r12 = { {0} }; + + /* The CQ address is 64B-aligned, and the HQM only wants bits [63:6] */ + r0.field.addr_l = cq_dma_base >> 6; + + HQM_CSR_WR(hw, + HQM_SYS_LDB_CQ_ADDR_L(port->id.phys_id), + r0.val); + + r1.field.addr_u = cq_dma_base >> 32; + + HQM_CSR_WR(hw, + HQM_SYS_LDB_CQ_ADDR_U(port->id.phys_id), + r1.val); + + r2.field.vf = vf_id; + r2.field.is_pf = !vf_request; + + HQM_CSR_WR(hw, + HQM_SYS_LDB_CQ2VF_PF(port->id.phys_id), + r2.val); + + if (args->cq_depth <= 8) { + r3.field.token_depth_select = 1; + } else if (args->cq_depth == 16) { + r3.field.token_depth_select = 2; + } else if (args->cq_depth == 32) { + r3.field.token_depth_select = 3; + } else if (args->cq_depth == 64) { + r3.field.token_depth_select = 4; + } else if (args->cq_depth == 128) { + r3.field.token_depth_select = 5; + } else if (args->cq_depth == 256) { + r3.field.token_depth_select = 6; + } else if (args->cq_depth == 512) { + r3.field.token_depth_select = 7; + } else if (args->cq_depth == 1024) { + r3.field.token_depth_select = 8; + } else { + HQM_BASE_ERR(hw, "[%s():%d] Internal error: invalid CQ depth\n", + __func__, __LINE__); + return -EFAULT; + } + + HQM_CSR_WR(hw, + HQM_CHP_LDB_CQ_TKN_DEPTH_SEL(port->id.phys_id), + r3.val); + + r10.field.token_depth_select = r3.field.token_depth_select; + r10.field.ignore_depth = 0; + r10.field.enab_shallow_cq = args->cq_depth < 8; + + HQM_CSR_WR(hw, + HQM_LSP_CQ_LDB_TKN_DEPTH_SEL(port->id.phys_id), + r10.val); + + /* To support CQs with depth less than 8, program the token count + * register with a non-zero initial value. Operations such as domain + * reset must take this initial value into account when quiescing the + * CQ. 
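+ * For example, a CQ created with depth 6 is programmed with an initial
+ * token count of 2 (8 - 6) by the code below.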
+ */ + port->init_tkn_cnt = 0; + + if (args->cq_depth < 8) { + union hqm_lsp_cq_ldb_tkn_cnt r12 = { {0} }; + + port->init_tkn_cnt = 8 - args->cq_depth; + + r12.field.token_count = port->init_tkn_cnt; + + HQM_CSR_WR(hw, + HQM_LSP_CQ_LDB_TKN_CNT(port->id.phys_id), + r12.val); + } + + r4.field.limit = port->hist_list_entry_limit - 1; + + HQM_CSR_WR(hw, HQM_CHP_HIST_LIST_LIM(port->id.phys_id), r4.val); + + r5.field.base = port->hist_list_entry_base; + + HQM_CSR_WR(hw, HQM_CHP_HIST_LIST_BASE(port->id.phys_id), r5.val); + + r8.field.push_ptr = r5.field.base; + r8.field.generation = 0; + + HQM_CSR_WR(hw, HQM_CHP_HIST_LIST_PUSH_PTR(port->id.phys_id), r8.val); + + r9.field.pop_ptr = r5.field.base; + r9.field.generation = 0; + + HQM_CSR_WR(hw, HQM_CHP_HIST_LIST_POP_PTR(port->id.phys_id), r9.val); + + /* The inflight limit sets a cap on the number of QEs for which this CQ + * can owe completions at one time. + */ + r6.field.limit = args->cq_history_list_size; + + HQM_CSR_WR(hw, HQM_LSP_CQ_LDB_INFL_LIM(port->id.phys_id), r6.val); + + /* Disable the port's QID mappings */ + r7.field.v = 0; + + HQM_CSR_WR(hw, HQM_LSP_CQ2PRIOV(port->id.phys_id), r7.val); + + /* Two cache lines (128B) are dedicated for the port's pop counts */ + r11.field.addr_l = pop_count_dma_base >> 7; + + HQM_CSR_WR(hw, HQM_SYS_LDB_PP_ADDR_L(port->id.phys_id), r11.val); + + r12.field.addr_u = pop_count_dma_base >> 32; + + HQM_CSR_WR(hw, HQM_SYS_LDB_PP_ADDR_U(port->id.phys_id), r12.val); + + for (i = 0; i < HQM_MAX_NUM_QIDS_PER_LDB_CQ; i++) + port->qid_map[i].state = HQM_QUEUE_UNMAPPED; + + return 0; +} + +static void hqm_update_ldb_arb_threshold(struct hqm_hw *hw) +{ + union hqm_lsp_ctrl_config_0 r0 = { {0} }; + + /* From the hardware spec: + * "The optimal value for ldb_arb_threshold is in the region of {8 * + * #CQs}. It is expected therefore that the PF will change this value + * dynamically as the number of active ports changes." 
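+ *
+ * For example, with four enabled load-balanced ports the threshold
+ * below is programmed to 32 (4 * 8).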
+ */ + r0.val = HQM_CSR_RD(hw, HQM_LSP_CTRL_CONFIG_0); + + r0.field.ldb_arb_threshold = hw->pf.num_enabled_ldb_ports * 8; + r0.field.ldb_arb_ignore_empty = 1; + r0.field.ldb_arb_mode = 1; + + HQM_CSR_WR(hw, HQM_LSP_CTRL_CONFIG_0, r0.val); + + hqm_flush_csr(hw); +} + +static void hqm_ldb_pool_update_credit_count(struct hqm_hw *hw, + u32 pool_id, + u32 count) +{ + hw->rsrcs.ldb_credit_pools[pool_id].avail_credits -= count; +} + +static void hqm_dir_pool_update_credit_count(struct hqm_hw *hw, + u32 pool_id, + u32 count) +{ + hw->rsrcs.dir_credit_pools[pool_id].avail_credits -= count; +} + +static void hqm_ldb_pool_write_credit_count_reg(struct hqm_hw *hw, + u32 pool_id) +{ + union hqm_chp_ldb_pool_crd_cnt r0 = { {0} }; + struct hqm_credit_pool *pool; + + pool = &hw->rsrcs.ldb_credit_pools[pool_id]; + + r0.field.count = pool->avail_credits; + + HQM_CSR_WR(hw, + HQM_CHP_LDB_POOL_CRD_CNT(pool->id.phys_id), + r0.val); +} + +static void hqm_dir_pool_write_credit_count_reg(struct hqm_hw *hw, + u32 pool_id) +{ + union hqm_chp_dir_pool_crd_cnt r0 = { {0} }; + struct hqm_credit_pool *pool; + + pool = &hw->rsrcs.dir_credit_pools[pool_id]; + + r0.field.count = pool->avail_credits; + + HQM_CSR_WR(hw, + HQM_CHP_DIR_POOL_CRD_CNT(pool->id.phys_id), + r0.val); +} + +static int hqm_configure_ldb_port(struct hqm_hw *hw, + struct hqm_domain *domain, + struct hqm_ldb_port *port, + uintptr_t pop_count_dma_base, + uintptr_t cq_dma_base, + struct hqm_create_ldb_port_args *args, + bool vf_request, + unsigned int vf_id) +{ + struct hqm_credit_pool *ldb_pool, *dir_pool; + int ret; + + port->hist_list_entry_base = domain->hist_list_entry_base + + domain->hist_list_entry_offset; + port->hist_list_entry_limit = port->hist_list_entry_base + + args->cq_history_list_size; + + domain->hist_list_entry_offset += args->cq_history_list_size; + domain->avail_hist_list_entries -= args->cq_history_list_size; + + port->ldb_pool_used = !hqm_list_empty(&domain->used_ldb_queues) || + !hqm_list_empty(&domain->avail_ldb_queues); + port->dir_pool_used = !hqm_list_empty(&domain->used_dir_pq_pairs) || + !hqm_list_empty(&domain->avail_dir_pq_pairs); + + if (port->ldb_pool_used) { + u32 cnt = args->ldb_credit_high_watermark; + + ldb_pool = hqm_get_domain_ldb_pool(args->ldb_credit_pool_id, + vf_request, + domain); + if (!ldb_pool) { + HQM_BASE_ERR(hw, + "[%s()] Internal error: port validation failed\n", + __func__); + return -EFAULT; + } + + hqm_ldb_pool_update_credit_count(hw, ldb_pool->id.phys_id, cnt); + } else { + args->ldb_credit_high_watermark = 0; + args->ldb_credit_low_watermark = 0; + args->ldb_credit_quantum = 0; + } + + if (port->dir_pool_used) { + u32 cnt = args->dir_credit_high_watermark; + + dir_pool = hqm_get_domain_dir_pool(args->dir_credit_pool_id, + vf_request, + domain); + if (!dir_pool) { + HQM_BASE_ERR(hw, + "[%s()] Internal error: port validation failed\n", + __func__); + return -EFAULT; + } + + hqm_dir_pool_update_credit_count(hw, dir_pool->id.phys_id, cnt); + } else { + args->dir_credit_high_watermark = 0; + args->dir_credit_low_watermark = 0; + args->dir_credit_quantum = 0; + } + + ret = hqm_ldb_port_configure_cq(hw, + port, + pop_count_dma_base, + cq_dma_base, + args, + vf_request, + vf_id); + if (ret < 0) + return ret; + + ret = hqm_ldb_port_configure_pp(hw, + domain, + port, + args, + vf_request, + vf_id); + if (ret < 0) + return ret; + + hqm_ldb_port_cq_enable(hw, port); + + port->num_mappings = 0; + + port->enabled = true; + + hw->pf.num_enabled_ldb_ports++; + + hqm_update_ldb_arb_threshold(hw); + + 
port->configured = true; + + return 0; +} + +static int hqm_dir_port_configure_pp(struct hqm_hw *hw, + struct hqm_domain *domain, + struct hqm_dir_pq_pair *port, + struct hqm_create_dir_port_args *args, + bool vf_request, + unsigned int vf_id) +{ + union hqm_sys_dir_pp2ldbpool r0 = { {0} }; + union hqm_sys_dir_pp2dirpool r1 = { {0} }; + union hqm_sys_dir_pp2vf_pf r2 = { {0} }; + union hqm_sys_dir_pp2vas r3 = { {0} }; + union hqm_sys_dir_pp_v r4 = { {0} }; + union hqm_sys_dir_pp2vpp r5 = { {0} }; + union hqm_chp_dir_pp_ldb_crd_hwm r6 = { {0} }; + union hqm_chp_dir_pp_dir_crd_hwm r7 = { {0} }; + union hqm_chp_dir_pp_ldb_crd_lwm r8 = { {0} }; + union hqm_chp_dir_pp_dir_crd_lwm r9 = { {0} }; + union hqm_chp_dir_pp_ldb_min_crd_qnt r10 = { {0} }; + union hqm_chp_dir_pp_dir_min_crd_qnt r11 = { {0} }; + union hqm_chp_dir_pp_ldb_crd_cnt r12 = { {0} }; + union hqm_chp_dir_pp_dir_crd_cnt r13 = { {0} }; + union hqm_chp_dir_ldb_pp2pool r14 = { {0} }; + union hqm_chp_dir_dir_pp2pool r15 = { {0} }; + union hqm_chp_dir_pp_crd_req_state r16 = { {0} }; + union hqm_chp_dir_pp_ldb_push_ptr r17 = { {0} }; + union hqm_chp_dir_pp_dir_push_ptr r18 = { {0} }; + + struct hqm_credit_pool *ldb_pool = NULL; + struct hqm_credit_pool *dir_pool = NULL; + + if (port->ldb_pool_used) { + ldb_pool = hqm_get_domain_ldb_pool(args->ldb_credit_pool_id, + vf_request, + domain); + if (!ldb_pool) { + HQM_BASE_ERR(hw, + "[%s()] Internal error: port validation failed\n", + __func__); + return -EFAULT; + } + } + + if (port->dir_pool_used) { + dir_pool = hqm_get_domain_dir_pool(args->dir_credit_pool_id, + vf_request, + domain); + if (!dir_pool) { + HQM_BASE_ERR(hw, + "[%s()] Internal error: port validation failed\n", + __func__); + return -EFAULT; + } + } + + r0.field.ldbpool = (port->ldb_pool_used) ? ldb_pool->id.phys_id : 0; + + HQM_CSR_WR(hw, + HQM_SYS_DIR_PP2LDBPOOL(port->id.phys_id), + r0.val); + + r1.field.dirpool = (port->dir_pool_used) ? dir_pool->id.phys_id : 0; + + HQM_CSR_WR(hw, + HQM_SYS_DIR_PP2DIRPOOL(port->id.phys_id), + r1.val); + + r2.field.vf = vf_id; + r2.field.is_pf = !vf_request; + r2.field.is_hw_dsi = 0; + + HQM_CSR_WR(hw, + HQM_SYS_DIR_PP2VF_PF(port->id.phys_id), + r2.val); + + r3.field.vas = domain->id.phys_id; + + HQM_CSR_WR(hw, + HQM_SYS_DIR_PP2VAS(port->id.phys_id), + r3.val); + + r5.field.vpp = port->id.virt_id; + + HQM_CSR_WR(hw, + HQM_SYS_DIR_PP2VPP((vf_id * HQM_MAX_NUM_DIR_PORTS) + + port->id.phys_id), + r5.val); + + r6.field.hwm = args->ldb_credit_high_watermark; + + HQM_CSR_WR(hw, + HQM_CHP_DIR_PP_LDB_CRD_HWM(port->id.phys_id), + r6.val); + + r7.field.hwm = args->dir_credit_high_watermark; + + HQM_CSR_WR(hw, + HQM_CHP_DIR_PP_DIR_CRD_HWM(port->id.phys_id), + r7.val); + + r8.field.lwm = args->ldb_credit_low_watermark; + + HQM_CSR_WR(hw, + HQM_CHP_DIR_PP_LDB_CRD_LWM(port->id.phys_id), + r8.val); + + r9.field.lwm = args->dir_credit_low_watermark; + + HQM_CSR_WR(hw, + HQM_CHP_DIR_PP_DIR_CRD_LWM(port->id.phys_id), + r9.val); + + r10.field.quanta = args->ldb_credit_quantum; + + HQM_CSR_WR(hw, + HQM_CHP_DIR_PP_LDB_MIN_CRD_QNT(port->id.phys_id), + r10.val); + + r11.field.quanta = args->dir_credit_quantum; + + HQM_CSR_WR(hw, + HQM_CHP_DIR_PP_DIR_MIN_CRD_QNT(port->id.phys_id), + r11.val); + + r12.field.count = args->ldb_credit_high_watermark; + + HQM_CSR_WR(hw, + HQM_CHP_DIR_PP_LDB_CRD_CNT(port->id.phys_id), + r12.val); + + r13.field.count = args->dir_credit_high_watermark; + + HQM_CSR_WR(hw, + HQM_CHP_DIR_PP_DIR_CRD_CNT(port->id.phys_id), + r13.val); + + r14.field.pool = (port->ldb_pool_used) ? 
ldb_pool->id.phys_id : 0; + + HQM_CSR_WR(hw, + HQM_CHP_DIR_LDB_PP2POOL(port->id.phys_id), + r14.val); + + r15.field.pool = (port->dir_pool_used) ? dir_pool->id.phys_id : 0; + + HQM_CSR_WR(hw, + HQM_CHP_DIR_DIR_PP2POOL(port->id.phys_id), + r15.val); + + r16.field.no_pp_credit_update = 0; + + HQM_CSR_WR(hw, + HQM_CHP_DIR_PP_CRD_REQ_STATE(port->id.phys_id), + r16.val); + + r17.field.push_pointer = 0; + + HQM_CSR_WR(hw, + HQM_CHP_DIR_PP_LDB_PUSH_PTR(port->id.phys_id), + r17.val); + + r18.field.push_pointer = 0; + + HQM_CSR_WR(hw, + HQM_CHP_DIR_PP_DIR_PUSH_PTR(port->id.phys_id), + r18.val); + + if (vf_request) { + union hqm_sys_vf_dir_vpp2pp r16 = { {0} }; + union hqm_sys_vf_dir_vpp_v r17 = { {0} }; + unsigned int offs; + + r16.field.pp = port->id.phys_id; + + offs = vf_id * HQM_MAX_NUM_DIR_PORTS + port->id.virt_id; + + HQM_CSR_WR(hw, HQM_SYS_VF_DIR_VPP2PP(offs), r16.val); + + r17.field.vpp_v = 1; + + HQM_CSR_WR(hw, HQM_SYS_VF_DIR_VPP_V(offs), r17.val); + } + + r4.field.pp_v = 1; + r4.field.mb_dm = 0; + + HQM_CSR_WR(hw, HQM_SYS_DIR_PP_V(port->id.phys_id), r4.val); + + return 0; +} + +static int hqm_dir_port_configure_cq(struct hqm_hw *hw, + struct hqm_dir_pq_pair *port, + uintptr_t pop_count_dma_base, + uintptr_t cq_dma_base, + struct hqm_create_dir_port_args *args, + bool vf_request, + unsigned int vf_id) +{ + union hqm_sys_dir_cq_addr_l r0 = { {0} }; + union hqm_sys_dir_cq_addr_u r1 = { {0} }; + union hqm_sys_dir_cq2vf_pf r2 = { {0} }; + union hqm_chp_dir_cq_tkn_depth_sel r3 = { {0} }; + union hqm_lsp_cq_dir_tkn_depth_sel_dsi r4 = { {0} }; + union hqm_sys_dir_pp_addr_l r5 = { {0} }; + union hqm_sys_dir_pp_addr_u r6 = { {0} }; + + /* The CQ address is 64B-aligned, and the HQM only wants bits [63:6] */ + r0.field.addr_l = cq_dma_base >> 6; + + HQM_CSR_WR(hw, HQM_SYS_DIR_CQ_ADDR_L(port->id.phys_id), r0.val); + + r1.field.addr_u = cq_dma_base >> 32; + + HQM_CSR_WR(hw, HQM_SYS_DIR_CQ_ADDR_U(port->id.phys_id), r1.val); + + r2.field.vf = vf_id; + r2.field.is_pf = !vf_request; + + HQM_CSR_WR(hw, HQM_SYS_DIR_CQ2VF_PF(port->id.phys_id), r2.val); + + if (args->cq_depth == 8) { + r3.field.token_depth_select = 1; + } else if (args->cq_depth == 16) { + r3.field.token_depth_select = 2; + } else if (args->cq_depth == 32) { + r3.field.token_depth_select = 3; + } else if (args->cq_depth == 64) { + r3.field.token_depth_select = 4; + } else if (args->cq_depth == 128) { + r3.field.token_depth_select = 5; + } else if (args->cq_depth == 256) { + r3.field.token_depth_select = 6; + } else if (args->cq_depth == 512) { + r3.field.token_depth_select = 7; + } else if (args->cq_depth == 1024) { + r3.field.token_depth_select = 8; + } else { + HQM_BASE_ERR(hw, "[%s():%d] Internal error: invalid CQ depth\n", + __func__, __LINE__); + return -EFAULT; + } + + HQM_CSR_WR(hw, + HQM_CHP_DIR_CQ_TKN_DEPTH_SEL(port->id.phys_id), + r3.val); + + r4.field.token_depth_select = r3.field.token_depth_select; + r4.field.disable_wb_opt = 0; + + HQM_CSR_WR(hw, + HQM_LSP_CQ_DIR_TKN_DEPTH_SEL_DSI(port->id.phys_id), + r4.val); + + /* Two cache lines (128B) are dedicated for the port's pop counts */ + r5.field.addr_l = pop_count_dma_base >> 7; + + HQM_CSR_WR(hw, HQM_SYS_DIR_PP_ADDR_L(port->id.phys_id), r5.val); + + r6.field.addr_u = pop_count_dma_base >> 32; + + HQM_CSR_WR(hw, HQM_SYS_DIR_PP_ADDR_U(port->id.phys_id), r6.val); + + return 0; +} + +static int hqm_configure_dir_port(struct hqm_hw *hw, + struct hqm_domain *domain, + struct hqm_dir_pq_pair *port, + uintptr_t pop_count_dma_base, + uintptr_t cq_dma_base, + struct hqm_create_dir_port_args 
*args, + bool vf_request, + unsigned int vf_id) +{ + struct hqm_credit_pool *ldb_pool, *dir_pool; + int ret; + + port->ldb_pool_used = !hqm_list_empty(&domain->used_ldb_queues) || + !hqm_list_empty(&domain->avail_ldb_queues); + + /* Each directed port has a directed queue, hence this port requires + * directed credits. + */ + port->dir_pool_used = true; + + if (port->ldb_pool_used) { + u32 cnt = args->ldb_credit_high_watermark; + + ldb_pool = hqm_get_domain_ldb_pool(args->ldb_credit_pool_id, + vf_request, + domain); + if (!ldb_pool) { + HQM_BASE_ERR(hw, + "[%s()] Internal error: port validation failed\n", + __func__); + return -EFAULT; + } + + hqm_ldb_pool_update_credit_count(hw, ldb_pool->id.phys_id, cnt); + } else { + args->ldb_credit_high_watermark = 0; + args->ldb_credit_low_watermark = 0; + args->ldb_credit_quantum = 0; + } + + dir_pool = hqm_get_domain_dir_pool(args->dir_credit_pool_id, + vf_request, + domain); + if (!dir_pool) { + HQM_BASE_ERR(hw, + "[%s()] Internal error: port validation failed\n", + __func__); + return -EFAULT; + } + + hqm_dir_pool_update_credit_count(hw, + dir_pool->id.phys_id, + args->dir_credit_high_watermark); + + ret = hqm_dir_port_configure_cq(hw, + port, + pop_count_dma_base, + cq_dma_base, + args, + vf_request, + vf_id); + + if (ret < 0) + return ret; + + ret = hqm_dir_port_configure_pp(hw, + domain, + port, + args, + vf_request, + vf_id); + if (ret < 0) + return ret; + + hqm_dir_port_cq_enable(hw, port); + + port->enabled = true; + + port->port_configured = true; + + return 0; +} + +static int hqm_ldb_port_map_qid_static(struct hqm_hw *hw, + struct hqm_ldb_port *p, + struct hqm_ldb_queue *q, + u8 priority) +{ + union hqm_lsp_cq2priov r0; + union hqm_lsp_cq2qid r1; + union hqm_atm_pipe_qid_ldb_qid2cqidx r2; + union hqm_lsp_qid_ldb_qid2cqidx r3; + union hqm_lsp_qid_ldb_qid2cqidx2 r4; + enum hqm_qid_map_state state; + int i; + + /* Look for a pending or already mapped slot, else an unused slot */ + if (!hqm_port_find_slot_queue(p, HQM_QUEUE_MAP_IN_PROGRESS, q, &i) && + !hqm_port_find_slot_queue(p, HQM_QUEUE_MAPPED, q, &i) && + !hqm_port_find_slot(p, HQM_QUEUE_UNMAPPED, &i)) { + HQM_BASE_ERR(hw, + "[%s():%d] Internal error: CQ has no available QID mapping slots\n", + __func__, __LINE__); + return -EFAULT; + } + + if (i >= HQM_MAX_NUM_QIDS_PER_LDB_CQ) { + HQM_BASE_ERR(hw, + "[%s():%d] Internal error: port slot tracking failed\n", + __func__, __LINE__); + return -EFAULT; + } + + /* Read-modify-write the priority and valid bit register */ + r0.val = HQM_CSR_RD(hw, HQM_LSP_CQ2PRIOV(p->id.phys_id)); + + r0.field.v |= 1 << i; + r0.field.prio |= (priority & 0x7) << i * 3; + + HQM_CSR_WR(hw, HQM_LSP_CQ2PRIOV(p->id.phys_id), r0.val); + + /* Read-modify-write the QID map register */ + r1.val = HQM_CSR_RD(hw, HQM_LSP_CQ2QID(p->id.phys_id, i / 4)); + + if (i == 0 || i == 4) + r1.field.qid_p0 = q->id.phys_id; + if (i == 1 || i == 5) + r1.field.qid_p1 = q->id.phys_id; + if (i == 2 || i == 6) + r1.field.qid_p2 = q->id.phys_id; + if (i == 3 || i == 7) + r1.field.qid_p3 = q->id.phys_id; + + HQM_CSR_WR(hw, HQM_LSP_CQ2QID(p->id.phys_id, i / 4), r1.val); + + r2.val = HQM_CSR_RD(hw, + HQM_ATM_PIPE_QID_LDB_QID2CQIDX(q->id.phys_id, + p->id.phys_id / 4)); + + r3.val = HQM_CSR_RD(hw, + HQM_LSP_QID_LDB_QID2CQIDX(q->id.phys_id, + p->id.phys_id / 4)); + + r4.val = HQM_CSR_RD(hw, + HQM_LSP_QID_LDB_QID2CQIDX2(q->id.phys_id, + p->id.phys_id / 4)); + + switch (p->id.phys_id % 4) { + case 0: + r2.field.cq_p0 |= 1 << i; + r3.field.cq_p0 |= 1 << i; + r4.field.cq_p0 |= 1 << i; + break; + + case 
1: + r2.field.cq_p1 |= 1 << i; + r3.field.cq_p1 |= 1 << i; + r4.field.cq_p1 |= 1 << i; + break; + + case 2: + r2.field.cq_p2 |= 1 << i; + r3.field.cq_p2 |= 1 << i; + r4.field.cq_p2 |= 1 << i; + break; + + case 3: + r2.field.cq_p3 |= 1 << i; + r3.field.cq_p3 |= 1 << i; + r4.field.cq_p3 |= 1 << i; + break; + } + + HQM_CSR_WR(hw, + HQM_ATM_PIPE_QID_LDB_QID2CQIDX(q->id.phys_id, + p->id.phys_id / 4), + r2.val); + + HQM_CSR_WR(hw, + HQM_LSP_QID_LDB_QID2CQIDX(q->id.phys_id, + p->id.phys_id / 4), + r3.val); + + HQM_CSR_WR(hw, + HQM_LSP_QID_LDB_QID2CQIDX2(q->id.phys_id, + p->id.phys_id / 4), + r4.val); + + hqm_flush_csr(hw); + + p->qid_map[i].qid = q->id.phys_id; + p->qid_map[i].priority = priority; + + state = HQM_QUEUE_MAPPED; + + return hqm_port_slot_state_transition(hw, p, q, i, state); +} + +static void hqm_ldb_port_change_qid_priority(struct hqm_hw *hw, + struct hqm_ldb_port *port, + int slot, + struct hqm_map_qid_args *args) +{ + union hqm_lsp_cq2priov r0; + + /* Read-modify-write the priority and valid bit register */ + r0.val = HQM_CSR_RD(hw, HQM_LSP_CQ2PRIOV(port->id.phys_id)); + + r0.field.v |= 1 << slot; + r0.field.prio |= (args->priority & 0x7) << slot * 3; + + HQM_CSR_WR(hw, HQM_LSP_CQ2PRIOV(port->id.phys_id), r0.val); + + hqm_flush_csr(hw); + + port->qid_map[slot].priority = args->priority; +} + +static int hqm_ldb_port_set_has_work_bits(struct hqm_hw *hw, + struct hqm_ldb_port *port, + struct hqm_ldb_queue *queue, + int slot) +{ + union hqm_lsp_qid_aqed_active_cnt r0; + union hqm_lsp_qid_ldb_enqueue_cnt r1; + union hqm_lsp_ldb_sched_ctrl r2 = { {0} }; + + /* Set the atomic scheduling haswork bit */ + r0.val = HQM_CSR_RD(hw, HQM_LSP_QID_AQED_ACTIVE_CNT(queue->id.phys_id)); + + r2.field.cq = port->id.phys_id; + r2.field.qidix = slot; + r2.field.value = 1; + r2.field.rlist_haswork_v = r0.field.count > 0; + + /* Set the non-atomic scheduling haswork bit */ + HQM_CSR_WR(hw, HQM_LSP_LDB_SCHED_CTRL, r2.val); + + r1.val = HQM_CSR_RD(hw, HQM_LSP_QID_LDB_ENQUEUE_CNT(queue->id.phys_id)); + + memset(&r2, 0, sizeof(r2)); + + r2.field.cq = port->id.phys_id; + r2.field.qidix = slot; + r2.field.value = 1; + r2.field.nalb_haswork_v = (r1.field.count > 0); + + HQM_CSR_WR(hw, HQM_LSP_LDB_SCHED_CTRL, r2.val); + + hqm_flush_csr(hw); + + return 0; +} + +static void hqm_ldb_port_clear_has_work_bits(struct hqm_hw *hw, + struct hqm_ldb_port *port, + u8 slot) +{ + union hqm_lsp_ldb_sched_ctrl r2 = { {0} }; + + r2.field.cq = port->id.phys_id; + r2.field.qidix = slot; + r2.field.value = 0; + r2.field.rlist_haswork_v = 1; + + HQM_CSR_WR(hw, HQM_LSP_LDB_SCHED_CTRL, r2.val); + + memset(&r2, 0, sizeof(r2)); + + r2.field.cq = port->id.phys_id; + r2.field.qidix = slot; + r2.field.value = 0; + r2.field.nalb_haswork_v = 1; + + HQM_CSR_WR(hw, HQM_LSP_LDB_SCHED_CTRL, r2.val); + + hqm_flush_csr(hw); +} + +static void hqm_ldb_port_clear_queue_if_status(struct hqm_hw *hw, + struct hqm_ldb_port *port, + int slot) +{ + union hqm_lsp_ldb_sched_ctrl r0 = { {0} }; + + r0.field.cq = port->id.phys_id; + r0.field.qidix = slot; + r0.field.value = 0; + r0.field.inflight_ok_v = 1; + + HQM_CSR_WR(hw, HQM_LSP_LDB_SCHED_CTRL, r0.val); + + hqm_flush_csr(hw); +} + +static void hqm_ldb_port_set_queue_if_status(struct hqm_hw *hw, + struct hqm_ldb_port *port, + int slot) +{ + union hqm_lsp_ldb_sched_ctrl r0 = { {0} }; + + r0.field.cq = port->id.phys_id; + r0.field.qidix = slot; + r0.field.value = 1; + r0.field.inflight_ok_v = 1; + + HQM_CSR_WR(hw, HQM_LSP_LDB_SCHED_CTRL, r0.val); + + hqm_flush_csr(hw); +} + +static void 
hqm_ldb_queue_set_inflight_limit(struct hqm_hw *hw, + struct hqm_ldb_queue *queue) +{ + union hqm_lsp_qid_ldb_infl_lim r0 = { {0} }; + + r0.field.limit = queue->num_qid_inflights; + + HQM_CSR_WR(hw, HQM_LSP_QID_LDB_INFL_LIM(queue->id.phys_id), r0.val); +} + +static void hqm_ldb_queue_clear_inflight_limit(struct hqm_hw *hw, + struct hqm_ldb_queue *queue) +{ + HQM_CSR_WR(hw, + HQM_LSP_QID_LDB_INFL_LIM(queue->id.phys_id), + HQM_LSP_QID_LDB_INFL_LIM_RST); +} + +/* hqm_ldb_queue_{enable, disable}_mapped_cqs() don't operate exactly as their + * function names imply, and should only be called by the dynamic CQ mapping + * code. + */ +static void hqm_ldb_queue_disable_mapped_cqs(struct hqm_hw *hw, + struct hqm_domain *domain, + struct hqm_ldb_queue *queue) +{ + struct hqm_list_entry *iter __attribute__((unused)); + struct hqm_ldb_port *port; + int slot; + + HQM_DOM_LIST_FOR(domain->used_ldb_ports, port, iter) { + enum hqm_qid_map_state state = HQM_QUEUE_MAPPED; + + if (!hqm_port_find_slot_queue(port, state, queue, &slot)) + continue; + + if (port->enabled) + hqm_ldb_port_cq_disable(hw, port); + } +} + +static void hqm_ldb_queue_enable_mapped_cqs(struct hqm_hw *hw, + struct hqm_domain *domain, + struct hqm_ldb_queue *queue) +{ + struct hqm_list_entry *iter __attribute__((unused)); + struct hqm_ldb_port *port; + int slot; + + HQM_DOM_LIST_FOR(domain->used_ldb_ports, port, iter) { + enum hqm_qid_map_state state = HQM_QUEUE_MAPPED; + + if (!hqm_port_find_slot_queue(port, state, queue, &slot)) + continue; + + if (port->enabled) + hqm_ldb_port_cq_enable(hw, port); + } +} + +static int hqm_ldb_port_finish_map_qid_dynamic(struct hqm_hw *hw, + struct hqm_domain *domain, + struct hqm_ldb_port *port, + struct hqm_ldb_queue *queue) +{ + struct hqm_list_entry *iter __attribute__((unused)); + union hqm_lsp_qid_ldb_infl_cnt r0; + enum hqm_qid_map_state state; + int slot, ret; + u8 prio; + + r0.val = HQM_CSR_RD(hw, HQM_LSP_QID_LDB_INFL_CNT(queue->id.phys_id)); + + if (r0.field.count) { + HQM_BASE_ERR(hw, + "[%s()] Internal error: non-zero QID inflight count\n", + __func__); + return -EFAULT; + } + + /* For each port with a pending mapping to this queue, perform the + * static mapping and set the corresponding has_work bits. + */ + state = HQM_QUEUE_MAP_IN_PROGRESS; + if (!hqm_port_find_slot_queue(port, state, queue, &slot)) + return -EINVAL; + + if (slot >= HQM_MAX_NUM_QIDS_PER_LDB_CQ) { + HQM_BASE_ERR(hw, + "[%s():%d] Internal error: port slot tracking failed\n", + __func__, __LINE__); + return -EFAULT; + } + + prio = port->qid_map[slot].priority; + + /* Update the CQ2QID, CQ2PRIOV, and QID2CQIDX registers, and + * the port's qid_map state. + */ + ret = hqm_ldb_port_map_qid_static(hw, port, queue, prio); + if (ret) + return ret; + + ret = hqm_ldb_port_set_has_work_bits(hw, port, queue, slot); + if (ret) + return ret; + + /* Ensure IF_status(cq,qid) is 0 before enabling the port to + * prevent spurious schedules to cause the queue's inflight + * count to increase. 
+ */ + hqm_ldb_port_clear_queue_if_status(hw, port, slot); + + /* Reset the queue's inflight status */ + HQM_DOM_LIST_FOR(domain->used_ldb_ports, port, iter) { + state = HQM_QUEUE_MAPPED; + if (!hqm_port_find_slot_queue(port, state, queue, &slot)) + continue; + + hqm_ldb_port_set_queue_if_status(hw, port, slot); + } + + hqm_ldb_queue_set_inflight_limit(hw, queue); + + /* Re-enable CQs mapped to this queue */ + hqm_ldb_queue_enable_mapped_cqs(hw, domain, queue); + + /* If this queue has other mappings pending, clear its inflight limit */ + if (queue->num_pending_additions > 0) + hqm_ldb_queue_clear_inflight_limit(hw, queue); + + return 0; +} + +/** + * hqm_ldb_port_map_qid_dynamic() - perform a "dynamic" QID->CQ mapping + * @hw: hqm_hw handle for a particular device. + * @port: load-balanced port + * @queue: load-balanced queue + * @priority: queue servicing priority + * + * Returns 0 if the queue was mapped, 1 if the mapping is scheduled to occur + * at a later point, and <0 if an error occurred. + */ +static int hqm_ldb_port_map_qid_dynamic(struct hqm_hw *hw, + struct hqm_ldb_port *port, + struct hqm_ldb_queue *queue, + u8 priority) +{ + union hqm_lsp_qid_ldb_infl_cnt r0 = { {0} }; + enum hqm_qid_map_state state; + struct hqm_domain *domain; + int slot, ret; + + domain = hqm_get_domain_from_id(hw, port->domain_id.phys_id, false, 0); + if (!domain) { + HQM_BASE_ERR(hw, + "[%s()] Internal error: unable to find domain %d\n", + __func__, port->domain_id.phys_id); + return -EFAULT; + } + + /* Set the QID inflight limit to 0 to prevent further scheduling of the + * queue. + */ + HQM_CSR_WR(hw, HQM_LSP_QID_LDB_INFL_LIM(queue->id.phys_id), 0); + + if (!hqm_port_find_slot(port, HQM_QUEUE_UNMAPPED, &slot)) { + HQM_BASE_ERR(hw, + "Internal error: No available unmapped slots\n"); + return -EFAULT; + } + + if (slot >= HQM_MAX_NUM_QIDS_PER_LDB_CQ) { + HQM_BASE_ERR(hw, + "[%s():%d] Internal error: port slot tracking failed\n", + __func__, __LINE__); + return -EFAULT; + } + + port->qid_map[slot].qid = queue->id.phys_id; + port->qid_map[slot].priority = priority; + + state = HQM_QUEUE_MAP_IN_PROGRESS; + ret = hqm_port_slot_state_transition(hw, port, queue, slot, state); + if (ret) + return ret; + + r0.val = HQM_CSR_RD(hw, HQM_LSP_QID_LDB_INFL_CNT(queue->id.phys_id)); + + if (r0.field.count) { + /* The queue is owed completions so it's not safe to map it + * yet. Schedule a kernel thread to complete the mapping later, + * once software has completed all the queue's inflight events. + */ + if (!os_worker_active(hw)) + os_schedule_work(hw); + + return 1; + } + + /* Disable the affected CQ, and the CQs already mapped to the QID, + * before reading the QID's inflight count a second time. There is an + * unlikely race in which the QID may schedule one more QE after we + * read an inflight count of 0, and disabling the CQs guarantees that + * the race will not occur after a re-read of the inflight count + * register. + */ + if (port->enabled) + hqm_ldb_port_cq_disable(hw, port); + + hqm_ldb_queue_disable_mapped_cqs(hw, domain, queue); + + r0.val = HQM_CSR_RD(hw, HQM_LSP_QID_LDB_INFL_CNT(queue->id.phys_id)); + + if (r0.field.count) { + if (port->enabled) + hqm_ldb_port_cq_enable(hw, port); + + hqm_ldb_queue_enable_mapped_cqs(hw, domain, queue); + + /* The queue is owed completions so it's not safe to map it + * yet. Schedule a kernel thread to complete the mapping later, + * once software has completed all the queue's inflight events. 
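+ * See hqm_ldb_port_finish_map_qid_dynamic(), which performs the
+ * deferred map once the queue's inflight count has drained to zero.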
+ */ + if (!os_worker_active(hw)) + os_schedule_work(hw); + + return 1; + } + + return hqm_ldb_port_finish_map_qid_dynamic(hw, domain, port, queue); +} + +static int hqm_ldb_port_map_qid(struct hqm_hw *hw, + struct hqm_domain *domain, + struct hqm_ldb_port *port, + struct hqm_ldb_queue *queue, + u8 prio) +{ + if (domain->started) + return hqm_ldb_port_map_qid_dynamic(hw, port, queue, prio); + else + return hqm_ldb_port_map_qid_static(hw, port, queue, prio); +} + +static int hqm_ldb_port_unmap_qid(struct hqm_hw *hw, + struct hqm_ldb_port *port, + struct hqm_ldb_queue *queue) +{ + enum hqm_qid_map_state mapped, in_progress, pending_map, unmapped; + union hqm_lsp_cq2priov r0; + union hqm_atm_pipe_qid_ldb_qid2cqidx r1; + union hqm_lsp_qid_ldb_qid2cqidx r2; + union hqm_lsp_qid_ldb_qid2cqidx2 r3; + u32 queue_id; + u32 port_id; + int i; + + /* Find the queue's slot */ + mapped = HQM_QUEUE_MAPPED; + in_progress = HQM_QUEUE_UNMAP_IN_PROGRESS; + pending_map = HQM_QUEUE_UNMAP_IN_PROGRESS_PENDING_MAP; + + if (!hqm_port_find_slot_queue(port, mapped, queue, &i) && + !hqm_port_find_slot_queue(port, in_progress, queue, &i) && + !hqm_port_find_slot_queue(port, pending_map, queue, &i)) { + HQM_BASE_ERR(hw, + "[%s():%d] Internal error: QID %d isn't mapped\n", + __func__, __LINE__, queue->id.phys_id); + return -EFAULT; + } + + if (i >= HQM_MAX_NUM_QIDS_PER_LDB_CQ) { + HQM_BASE_ERR(hw, + "[%s():%d] Internal error: port slot tracking failed\n", + __func__, __LINE__); + return -EFAULT; + } + + port_id = port->id.phys_id; + queue_id = queue->id.phys_id; + + /* Read-modify-write the priority and valid bit register */ + r0.val = HQM_CSR_RD(hw, HQM_LSP_CQ2PRIOV(port_id)); + + r0.field.v &= ~(1 << i); + + HQM_CSR_WR(hw, HQM_LSP_CQ2PRIOV(port_id), r0.val); + + r1.val = HQM_CSR_RD(hw, + HQM_ATM_PIPE_QID_LDB_QID2CQIDX(queue_id, + port_id / 4)); + + r2.val = HQM_CSR_RD(hw, + HQM_LSP_QID_LDB_QID2CQIDX(queue_id, + port_id / 4)); + + r3.val = HQM_CSR_RD(hw, + HQM_LSP_QID_LDB_QID2CQIDX2(queue_id, + port_id / 4)); + + switch (port_id % 4) { + case 0: + r1.field.cq_p0 &= ~(1 << i); + r2.field.cq_p0 &= ~(1 << i); + r3.field.cq_p0 &= ~(1 << i); + break; + + case 1: + r1.field.cq_p1 &= ~(1 << i); + r2.field.cq_p1 &= ~(1 << i); + r3.field.cq_p1 &= ~(1 << i); + break; + + case 2: + r1.field.cq_p2 &= ~(1 << i); + r2.field.cq_p2 &= ~(1 << i); + r3.field.cq_p2 &= ~(1 << i); + break; + + case 3: + r1.field.cq_p3 &= ~(1 << i); + r2.field.cq_p3 &= ~(1 << i); + r3.field.cq_p3 &= ~(1 << i); + break; + } + + HQM_CSR_WR(hw, + HQM_ATM_PIPE_QID_LDB_QID2CQIDX(queue_id, port_id / 4), + r1.val); + + HQM_CSR_WR(hw, + HQM_LSP_QID_LDB_QID2CQIDX(queue_id, port_id / 4), + r2.val); + + HQM_CSR_WR(hw, + HQM_LSP_QID_LDB_QID2CQIDX2(queue_id, port_id / 4), + r3.val); + + hqm_flush_csr(hw); + + unmapped = HQM_QUEUE_UNMAPPED; + + return hqm_port_slot_state_transition(hw, port, queue, i, unmapped); +} + +static void +hqm_log_create_sched_domain_args(struct hqm_hw *hw, + struct hqm_create_sched_domain_args *args, + bool vf_request, + unsigned int vf_id) +{ + HQM_BASE_INFO(hw, "HQM create sched domain arguments:\n"); + if (vf_request) + HQM_BASE_INFO(hw, "(Request from VF %d)\n", vf_id); + HQM_BASE_INFO(hw, "\tNumber of LDB queues: %d\n", + args->num_ldb_queues); + HQM_BASE_INFO(hw, "\tNumber of LDB ports: %d\n", + args->num_ldb_ports); + HQM_BASE_INFO(hw, "\tNumber of DIR ports: %d\n", + args->num_dir_ports); + HQM_BASE_INFO(hw, "\tNumber of ATM inflights: %d\n", + args->num_atomic_inflights); + HQM_BASE_INFO(hw, "\tNumber of hist list entries: %d\n", + 
args->num_hist_list_entries); + HQM_BASE_INFO(hw, "\tNumber of LDB credits: %d\n", + args->num_ldb_credits); + HQM_BASE_INFO(hw, "\tNumber of DIR credits: %d\n", + args->num_dir_credits); + HQM_BASE_INFO(hw, "\tNumber of LDB credit pools: %d\n", + args->num_ldb_credit_pools); + HQM_BASE_INFO(hw, "\tNumber of DIR credit pools: %d\n", + args->num_dir_credit_pools); +} + +/** + * hqm_hw_create_sched_domain() - Allocate and initialize an HQM scheduling + * domain and its resources. + * @hw: Contains the current state of the HQM hardware. + * @args: User-provided arguments. + * @resp: Response to user. + * + * Return: returns < 0 on error, 0 otherwise. If the driver is unable to + * satisfy a request, resp->status will be set accordingly. + */ +int hqm_hw_create_sched_domain(struct hqm_hw *hw, + struct hqm_create_sched_domain_args *args, + struct hqm_cmd_response *resp, + bool vf_request, + unsigned int vf_id) +{ + struct hqm_domain *domain; + struct hqm_function_resources *rsrcs; + int ret; + + rsrcs = (vf_request) ? &hw->vf[vf_id] : &hw->pf; + + hqm_log_create_sched_domain_args(hw, args, vf_request, vf_id); + + /* Verify that hardware resources are available before attempting to + * satisfy the request. This simplifies the error unwinding code. + */ + if (hqm_verify_create_sched_domain_args(hw, rsrcs, args, resp)) + return -EINVAL; + + domain = HQM_FUNC_LIST_HEAD(rsrcs->avail_domains, typeof(*domain)); + + /* Verification should catch this. */ + if (!domain) { + HQM_BASE_ERR(hw, + "[%s():%d] Internal error: no available domains\n", + __func__, __LINE__); + return -EFAULT; + } + + if (domain->configured) { + HQM_BASE_ERR(hw, + "[%s()] Internal error: avail_domains contains configured domains.\n", + __func__); + return -EFAULT; + } + + hqm_init_domain_rsrc_lists(domain); + + /* Verification should catch this too. */ + ret = hqm_domain_attach_resources(hw, rsrcs, domain, args, resp); + if (ret < 0) { + HQM_BASE_ERR(hw, + "[%s()] Internal error: failed to verify args.\n", + __func__); + + return -EFAULT; + } + + hqm_list_del(&rsrcs->avail_domains, &domain->func_list); + + hqm_list_add(&rsrcs->used_domains, &domain->func_list); + + resp->id = (vf_request) ? domain->id.virt_id : domain->id.phys_id; + resp->status = 0; + + return 0; +} + +static void +hqm_log_create_ldb_pool_args(struct hqm_hw *hw, + u32 domain_id, + struct hqm_create_ldb_pool_args *args, + bool vf_request, + unsigned int vf_id) +{ + HQM_BASE_INFO(hw, "HQM create load-balanced credit pool arguments:\n"); + if (vf_request) + HQM_BASE_INFO(hw, "(Request from VF %d)\n", vf_id); + HQM_BASE_INFO(hw, "\tDomain ID: %d\n", domain_id); + HQM_BASE_INFO(hw, "\tNumber of LDB credits: %d\n", + args->num_ldb_credits); +} + +/** + * hqm_hw_create_ldb_pool() - Allocate and initialize an HQM credit pool. + * @hw: Contains the current state of the HQM hardware. + * @args: User-provided arguments. + * @resp: Response to user. + * + * Return: returns < 0 on error, 0 otherwise. If the driver is unable to + * satisfy a request, resp->status will be set accordingly. + */ +int hqm_hw_create_ldb_pool(struct hqm_hw *hw, + u32 domain_id, + struct hqm_create_ldb_pool_args *args, + struct hqm_cmd_response *resp, + bool vf_request, + unsigned int vf_id) +{ + struct hqm_credit_pool *pool; + struct hqm_domain *domain; + + hqm_log_create_ldb_pool_args(hw, domain_id, args, vf_request, vf_id); + + /* Verify that hardware resources are available before attempting to + * satisfy the request. This simplifies the error unwinding code. 
+ */ + if (hqm_verify_create_ldb_pool_args(hw, + domain_id, + args, + resp, + vf_request, + vf_id)) + return -EINVAL; + + domain = hqm_get_domain_from_id(hw, domain_id, vf_request, vf_id); + if (!domain) { + HQM_BASE_ERR(hw, + "[%s():%d] Internal error: domain not found\n", + __func__, __LINE__); + return -EFAULT; + } + + pool = HQM_DOM_LIST_HEAD(domain->avail_ldb_credit_pools, typeof(*pool)); + + /* Verification should catch this. */ + if (!pool) { + HQM_BASE_ERR(hw, + "[%s():%d] Internal error: no available ldb credit pools\n", + __func__, __LINE__); + return -EFAULT; + } + + hqm_configure_ldb_credit_pool(hw, domain, args, pool); + + /* Configuration succeeded, so move the resource from the 'avail' to + * the 'used' list. + */ + hqm_list_del(&domain->avail_ldb_credit_pools, &pool->domain_list); + + hqm_list_add(&domain->used_ldb_credit_pools, &pool->domain_list); + + resp->status = 0; + resp->id = (vf_request) ? pool->id.virt_id : pool->id.phys_id; + + return 0; +} + +static void +hqm_log_create_dir_pool_args(struct hqm_hw *hw, + u32 domain_id, + struct hqm_create_dir_pool_args *args, + bool vf_request, + unsigned int vf_id) +{ + HQM_BASE_INFO(hw, "HQM create directed credit pool arguments:\n"); + if (vf_request) + HQM_BASE_INFO(hw, "(Request from VF %d)\n", vf_id); + HQM_BASE_INFO(hw, "\tDomain ID: %d\n", domain_id); + HQM_BASE_INFO(hw, "\tNumber of DIR credits: %d\n", + args->num_dir_credits); +} + +/** + * hqm_hw_create_dir_pool() - Allocate and initialize an HQM credit pool. + * @hw: Contains the current state of the HQM hardware. + * @args: User-provided arguments. + * @resp: Response to user. + * + * Return: returns < 0 on error, 0 otherwise. If the driver is unable to + * satisfy a request, resp->status will be set accordingly. + */ +int hqm_hw_create_dir_pool(struct hqm_hw *hw, + u32 domain_id, + struct hqm_create_dir_pool_args *args, + struct hqm_cmd_response *resp, + bool vf_request, + unsigned int vf_id) +{ + struct hqm_credit_pool *pool; + struct hqm_domain *domain; + + hqm_log_create_dir_pool_args(hw, domain_id, args, vf_request, vf_id); + + /* Verify that hardware resources are available before attempting to + * satisfy the request. This simplifies the error unwinding code. + */ + /* At least one available pool */ + if (hqm_verify_create_dir_pool_args(hw, + domain_id, + args, + resp, + vf_request, + vf_id)) + return -EINVAL; + + domain = hqm_get_domain_from_id(hw, domain_id, vf_request, vf_id); + if (!domain) { + HQM_BASE_ERR(hw, + "[%s():%d] Internal error: domain not found\n", + __func__, __LINE__); + return -EFAULT; + } + + pool = HQM_DOM_LIST_HEAD(domain->avail_dir_credit_pools, typeof(*pool)); + + /* Verification should catch this. */ + if (!pool) { + HQM_BASE_ERR(hw, + "[%s():%d] Internal error: no available dir credit pools\n", + __func__, __LINE__); + return -EFAULT; + } + + hqm_configure_dir_credit_pool(hw, domain, args, pool); + + /* Configuration succeeded, so move the resource from the 'avail' to + * the 'used' list. + */ + hqm_list_del(&domain->avail_dir_credit_pools, &pool->domain_list); + + hqm_list_add(&domain->used_dir_credit_pools, &pool->domain_list); + + resp->status = 0; + resp->id = (vf_request) ? 
pool->id.virt_id : pool->id.phys_id; + + return 0; +} + +static void +hqm_log_create_ldb_queue_args(struct hqm_hw *hw, + u32 domain_id, + struct hqm_create_ldb_queue_args *args, + bool vf_request, + unsigned int vf_id) +{ + HQM_BASE_INFO(hw, "HQM create load-balanced queue arguments:\n"); + if (vf_request) + HQM_BASE_INFO(hw, "(Request from VF %d)\n", vf_id); + HQM_BASE_INFO(hw, "\tDomain ID: %d\n", + domain_id); + HQM_BASE_INFO(hw, "\tNumber of sequence numbers: %d\n", + args->num_sequence_numbers); + HQM_BASE_INFO(hw, "\tNumber of QID inflights: %d\n", + args->num_qid_inflights); + HQM_BASE_INFO(hw, "\tNumber of ATM inflights: %d\n", + args->num_atomic_inflights); +} + +/** + * hqm_hw_create_ldb_queue() - Allocate and initialize an HQM LDB queue. + * @hw: Contains the current state of the HQM hardware. + * @args: User-provided arguments. + * @resp: Response to user. + * + * Return: returns < 0 on error, 0 otherwise. If the driver is unable to + * satisfy a request, resp->status will be set accordingly. + */ +int hqm_hw_create_ldb_queue(struct hqm_hw *hw, + u32 domain_id, + struct hqm_create_ldb_queue_args *args, + struct hqm_cmd_response *resp, + bool vf_request, + unsigned int vf_id) +{ + struct hqm_ldb_queue *queue; + struct hqm_domain *domain; + int ret; + + hqm_log_create_ldb_queue_args(hw, domain_id, args, vf_request, vf_id); + + /* Verify that hardware resources are available before attempting to + * satisfy the request. This simplifies the error unwinding code. + */ + /* At least one available queue */ + if (hqm_verify_create_ldb_queue_args(hw, + domain_id, + args, + resp, + vf_request, + vf_id)) + return -EINVAL; + + domain = hqm_get_domain_from_id(hw, domain_id, vf_request, vf_id); + if (!domain) { + HQM_BASE_ERR(hw, + "[%s():%d] Internal error: domain not found\n", + __func__, __LINE__); + return -EFAULT; + } + + queue = HQM_DOM_LIST_HEAD(domain->avail_ldb_queues, typeof(*queue)); + + /* Verification should catch this. */ + if (!queue) { + HQM_BASE_ERR(hw, + "[%s():%d] Internal error: no available ldb queues\n", + __func__, __LINE__); + return -EFAULT; + } + + ret = hqm_ldb_queue_attach_resources(hw, domain, queue, args); + if (ret < 0) { + HQM_BASE_ERR(hw, + "[%s():%d] Internal error: failed to attach the ldb queue resources\n", + __func__, __LINE__); + return ret; + } + + hqm_configure_ldb_queue(hw, domain, queue, args, vf_request, vf_id); + + queue->num_mappings = 0; + + queue->configured = true; + + /* Configuration succeeded, so move the resource from the 'avail' to + * the 'used' list. + */ + hqm_list_del(&domain->avail_ldb_queues, &queue->domain_list); + + hqm_list_add(&domain->used_ldb_queues, &queue->domain_list); + + resp->status = 0; + resp->id = (vf_request) ? queue->id.virt_id : queue->id.phys_id; + + return 0; +} + +static void +hqm_log_create_dir_queue_args(struct hqm_hw *hw, + u32 domain_id, + struct hqm_create_dir_queue_args *args, + bool vf_request, + unsigned int vf_id) +{ + HQM_BASE_INFO(hw, "HQM create directed queue arguments:\n"); + if (vf_request) + HQM_BASE_INFO(hw, "(Request from VF %d)\n", vf_id); + HQM_BASE_INFO(hw, "\tDomain ID: %d\n", domain_id); + HQM_BASE_INFO(hw, "\tPort ID: %d\n", args->port_id); +} + +/** + * hqm_hw_create_dir_queue() - Allocate and initialize an HQM DIR queue. + * @hw: Contains the current state of the HQM hardware. + * @args: User-provided arguments. + * @resp: Response to user. + * + * Return: returns < 0 on error, 0 otherwise. If the driver is unable to + * satisfy a request, resp->status will be set accordingly. 
+ */ +int hqm_hw_create_dir_queue(struct hqm_hw *hw, + u32 domain_id, + struct hqm_create_dir_queue_args *args, + struct hqm_cmd_response *resp, + bool vf_request, + unsigned int vf_id) +{ + struct hqm_dir_pq_pair *queue; + struct hqm_domain *domain; + + hqm_log_create_dir_queue_args(hw, domain_id, args, vf_request, vf_id); + + /* Verify that hardware resources are available before attempting to + * satisfy the request. This simplifies the error unwinding code. + */ + if (hqm_verify_create_dir_queue_args(hw, + domain_id, + args, + resp, + vf_request, + vf_id)) + return -EINVAL; + + domain = hqm_get_domain_from_id(hw, domain_id, vf_request, vf_id); + if (!domain) { + HQM_BASE_ERR(hw, + "[%s():%d] Internal error: domain not found\n", + __func__, __LINE__); + return -EFAULT; + } + + if (args->port_id != -1) + queue = hqm_get_domain_used_dir_pq(args->port_id, + vf_request, + domain); + else + queue = HQM_DOM_LIST_HEAD(domain->avail_dir_pq_pairs, + typeof(*queue)); + + /* Verification should catch this. */ + if (!queue) { + HQM_BASE_ERR(hw, + "[%s():%d] Internal error: no available dir queues\n", + __func__, __LINE__); + return -EFAULT; + } + + hqm_configure_dir_queue(hw, domain, queue, vf_request, vf_id); + + /* Configuration succeeded, so move the resource from the 'avail' to + * the 'used' list (if it's not already there). + */ + if (args->port_id == -1) { + hqm_list_del(&domain->avail_dir_pq_pairs, &queue->domain_list); + + hqm_list_add(&domain->used_dir_pq_pairs, &queue->domain_list); + } + + resp->status = 0; + + resp->id = (vf_request) ? queue->id.virt_id : queue->id.phys_id; + + return 0; +} + +static void hqm_log_create_ldb_port_args(struct hqm_hw *hw, + u32 domain_id, + uintptr_t pop_count_dma_base, + uintptr_t cq_dma_base, + struct hqm_create_ldb_port_args *args, + bool vf_request, + unsigned int vf_id) +{ + HQM_BASE_INFO(hw, "HQM create load-balanced port arguments:\n"); + if (vf_request) + HQM_BASE_INFO(hw, "(Request from VF %d)\n", vf_id); + HQM_BASE_INFO(hw, "\tDomain ID: %d\n", + domain_id); + HQM_BASE_INFO(hw, "\tLDB credit pool ID: %d\n", + args->ldb_credit_pool_id); + HQM_BASE_INFO(hw, "\tLDB credit high watermark: %d\n", + args->ldb_credit_high_watermark); + HQM_BASE_INFO(hw, "\tLDB credit low watermark: %d\n", + args->ldb_credit_low_watermark); + HQM_BASE_INFO(hw, "\tLDB credit quantum: %d\n", + args->ldb_credit_quantum); + HQM_BASE_INFO(hw, "\tDIR credit pool ID: %d\n", + args->dir_credit_pool_id); + HQM_BASE_INFO(hw, "\tDIR credit high watermark: %d\n", + args->dir_credit_high_watermark); + HQM_BASE_INFO(hw, "\tDIR credit low watermark: %d\n", + args->dir_credit_low_watermark); + HQM_BASE_INFO(hw, "\tDIR credit quantum: %d\n", + args->dir_credit_quantum); + HQM_BASE_INFO(hw, "\tpop_count_address: 0x%lx\n", + pop_count_dma_base); + HQM_BASE_INFO(hw, "\tCQ depth: %d\n", + args->cq_depth); + HQM_BASE_INFO(hw, "\tCQ hist list size: %d\n", + args->cq_history_list_size); + HQM_BASE_INFO(hw, "\tCQ base address: 0x%lx\n", + cq_dma_base); +} + +/** + * hqm_hw_create_ldb_port() - Allocate and initialize a load-balanced port and + * its resources. + * @hw: Contains the current state of the HQM hardware. + * @args: User-provided arguments. + * @resp: Response to user. + * + * Return: returns < 0 on error, 0 otherwise. If the driver is unable to + * satisfy a request, resp->status will be set accordingly. 
+ */ +int hqm_hw_create_ldb_port(struct hqm_hw *hw, + u32 domain_id, + struct hqm_create_ldb_port_args *args, + uintptr_t pop_count_dma_base, + uintptr_t cq_dma_base, + struct hqm_cmd_response *resp, + bool vf_request, + unsigned int vf_id) +{ + struct hqm_ldb_port *port; + struct hqm_domain *domain; + int ret; + + hqm_log_create_ldb_port_args(hw, + domain_id, + pop_count_dma_base, + cq_dma_base, + args, + vf_request, + vf_id); + + /* Verify that hardware resources are available before attempting to + * satisfy the request. This simplifies the error unwinding code. + */ + if (hqm_verify_create_ldb_port_args(hw, + domain_id, + pop_count_dma_base, + cq_dma_base, + args, + resp, + vf_request, + vf_id)) + return -EINVAL; + + domain = hqm_get_domain_from_id(hw, domain_id, vf_request, vf_id); + if (!domain) { + HQM_BASE_ERR(hw, + "[%s():%d] Internal error: domain not found\n", + __func__, __LINE__); + return -EFAULT; + } + + port = HQM_DOM_LIST_HEAD(domain->avail_ldb_ports, typeof(*port)); + + /* Verification should catch this. */ + if (!port) { + HQM_BASE_ERR(hw, + "[%s():%d] Internal error: no available ldb ports\n", + __func__, __LINE__); + return -EFAULT; + } + + if (port->configured) { + HQM_BASE_ERR(hw, + "[%s()] Internal error: avail_ldb_ports contains configured ports.\n", + __func__); + return -EFAULT; + } + + ret = hqm_configure_ldb_port(hw, + domain, + port, + pop_count_dma_base, + cq_dma_base, + args, + vf_request, + vf_id); + if (ret < 0) + return ret; + + /* Configuration succeeded, so move the resource from the 'avail' to + * the 'used' list. + */ + hqm_list_del(&domain->avail_ldb_ports, &port->domain_list); + + hqm_list_add(&domain->used_ldb_ports, &port->domain_list); + + resp->status = 0; + resp->id = (vf_request) ? port->id.virt_id : port->id.phys_id; + + return 0; +} + +static void hqm_log_create_dir_port_args(struct hqm_hw *hw, + u32 domain_id, + uintptr_t pop_count_dma_base, + uintptr_t cq_dma_base, + struct hqm_create_dir_port_args *args, + bool vf_request, + unsigned int vf_id) +{ + HQM_BASE_INFO(hw, "HQM create directed port arguments:\n"); + if (vf_request) + HQM_BASE_INFO(hw, "(Request from VF %d)\n", vf_id); + HQM_BASE_INFO(hw, "\tDomain ID: %d\n", + domain_id); + HQM_BASE_INFO(hw, "\tLDB credit pool ID: %d\n", + args->ldb_credit_pool_id); + HQM_BASE_INFO(hw, "\tLDB credit high watermark: %d\n", + args->ldb_credit_high_watermark); + HQM_BASE_INFO(hw, "\tLDB credit low watermark: %d\n", + args->ldb_credit_low_watermark); + HQM_BASE_INFO(hw, "\tLDB credit quantum: %d\n", + args->ldb_credit_quantum); + HQM_BASE_INFO(hw, "\tDIR credit pool ID: %d\n", + args->dir_credit_pool_id); + HQM_BASE_INFO(hw, "\tDIR credit high watermark: %d\n", + args->dir_credit_high_watermark); + HQM_BASE_INFO(hw, "\tDIR credit low watermark: %d\n", + args->dir_credit_low_watermark); + HQM_BASE_INFO(hw, "\tDIR credit quantum: %d\n", + args->dir_credit_quantum); + HQM_BASE_INFO(hw, "\tpop_count_address: 0x%lx\n", + pop_count_dma_base); + HQM_BASE_INFO(hw, "\tCQ depth: %d\n", + args->cq_depth); + HQM_BASE_INFO(hw, "\tCQ base address: 0x%lx\n", + cq_dma_base); +} + +/** + * hqm_hw_create_dir_port() - Allocate and initialize an HQM directed port and + * queue. The port/queue pair have the same ID and name. + * @hw: Contains the current state of the HQM hardware. + * @args: User-provided arguments. + * @resp: Response to user. + * + * Return: returns < 0 on error, 0 otherwise. If the driver is unable to + * satisfy a request, resp->status will be set accordingly. 
+ */ +int hqm_hw_create_dir_port(struct hqm_hw *hw, + u32 domain_id, + struct hqm_create_dir_port_args *args, + uintptr_t pop_count_dma_base, + uintptr_t cq_dma_base, + struct hqm_cmd_response *resp, + bool vf_request, + unsigned int vf_id) +{ + struct hqm_dir_pq_pair *port; + struct hqm_domain *domain; + + hqm_log_create_dir_port_args(hw, + domain_id, + pop_count_dma_base, + cq_dma_base, + args, + vf_request, + vf_id); + + /* Verify that hardware resources are available before attempting to + * satisfy the request. This simplifies the error unwinding code. + */ + if (hqm_verify_create_dir_port_args(hw, + domain_id, + pop_count_dma_base, + cq_dma_base, + args, + resp, + vf_request, + vf_id)) + return -EINVAL; + + domain = hqm_get_domain_from_id(hw, domain_id, vf_request, vf_id); + if (!domain) { + HQM_BASE_ERR(hw, + "[%s():%d] Internal error: domain not found\n", + __func__, __LINE__); + return -EFAULT; + } + + if (args->queue_id != -1) + port = hqm_get_domain_used_dir_pq(args->queue_id, + vf_request, + domain); + else + port = HQM_DOM_LIST_HEAD(domain->avail_dir_pq_pairs, + typeof(*port)); + + /* Verification should catch this. */ + if (!port) { + HQM_BASE_ERR(hw, + "[%s():%d] Internal error: no available dir ports\n", + __func__, __LINE__); + return -EFAULT; + } + + hqm_configure_dir_port(hw, + domain, + port, + pop_count_dma_base, + cq_dma_base, + args, + vf_request, + vf_id); + + /* Configuration succeeded, so move the resource from the 'avail' to + * the 'used' list (if it's not already there). + */ + if (args->queue_id == -1) { + hqm_list_del(&domain->avail_dir_pq_pairs, &port->domain_list); + + hqm_list_add(&domain->used_dir_pq_pairs, &port->domain_list); + } + + resp->status = 0; + resp->id = (vf_request) ? port->id.virt_id : port->id.phys_id; + + return 0; +} + +static void hqm_log_start_domain(struct hqm_hw *hw, + u32 domain_id, + bool vf_request, + unsigned int vf_id) +{ + HQM_BASE_INFO(hw, "HQM start domain arguments:\n"); + if (vf_request) + HQM_BASE_INFO(hw, "(Request from VF %d)\n", vf_id); + HQM_BASE_INFO(hw, "\tDomain ID: %d\n", domain_id); +} + +/** + * hqm_hw_start_domain() - Lock the domain configuration + * @hw: Contains the current state of the HQM hardware. + * @args: User-provided arguments. + * @resp: Response to user. + * + * Return: returns < 0 on error, 0 otherwise. If the driver is unable to + * satisfy a request, resp->status will be set accordingly. + */ +int hqm_hw_start_domain(struct hqm_hw *hw, + u32 domain_id, + __attribute((unused)) struct hqm_start_domain_args *arg, + struct hqm_cmd_response *resp, + bool vf_request, + unsigned int vf_id) +{ + struct hqm_list_entry *iter __attribute__((unused)); + struct hqm_dir_pq_pair *dir_queue; + struct hqm_ldb_queue *ldb_queue; + struct hqm_credit_pool *pool; + struct hqm_domain *domain; + + hqm_log_start_domain(hw, domain_id, vf_request, vf_id); + + if (hqm_verify_start_domain_args(hw, + domain_id, + resp, + vf_request, + vf_id)) + return -EINVAL; + + domain = hqm_get_domain_from_id(hw, domain_id, vf_request, vf_id); + if (!domain) { + HQM_BASE_ERR(hw, + "[%s():%d] Internal error: domain not found\n", + __func__, __LINE__); + return -EFAULT; + } + + /* Write the domain's pool credit counts, which have been updated + * during port configuration. The sum of the pool credit count plus + * each producer port's credit count must equal the pool's credit + * allocation *before* traffic is sent. 
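The comment above states an invariant that must hold before traffic is sent: the credits cached by each producer port plus whatever remains in the pool must add up to the pool's total allocation. A minimal check of that invariant might look like the following; the structure and field names are illustrative only, not the driver's.

#include <stdbool.h>
#include <stddef.h>

struct credit_pool_state {
	unsigned int total_allocation;	/* credits assigned at pool creation */
	unsigned int pool_count;	/* credits currently held by the pool */
};

/* The pool's own count plus every port's cached count must equal the
 * allocation before traffic flows, otherwise credits have leaked.
 */
static bool pool_credits_balanced(const struct credit_pool_state *pool,
				  const unsigned int *port_counts,
				  size_t num_ports)
{
	unsigned int sum = pool->pool_count;
	size_t i;

	for (i = 0; i < num_ports; i++)
		sum += port_counts[i];

	return sum == pool->total_allocation;
}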
+ */ + HQM_DOM_LIST_FOR(domain->used_ldb_credit_pools, pool, iter) + hqm_ldb_pool_write_credit_count_reg(hw, pool->id.phys_id); + + HQM_DOM_LIST_FOR(domain->used_dir_credit_pools, pool, iter) + hqm_dir_pool_write_credit_count_reg(hw, pool->id.phys_id); + + /* Enable load-balanced and directed queue write permissions for the + * queues this domain owns. Without this, the HQM will drop all + * incoming traffic to those queues. + */ + HQM_DOM_LIST_FOR(domain->used_ldb_queues, ldb_queue, iter) { + union hqm_sys_ldb_vasqid_v r0 = { {0} }; + unsigned int offs; + + r0.field.vasqid_v = 1; + + offs = domain->id.phys_id * HQM_MAX_NUM_LDB_QUEUES + + ldb_queue->id.phys_id; + + HQM_CSR_WR(hw, HQM_SYS_LDB_VASQID_V(offs), r0.val); + } + + HQM_DOM_LIST_FOR(domain->used_dir_pq_pairs, dir_queue, iter) { + union hqm_sys_dir_vasqid_v r0 = { {0} }; + unsigned int offs; + + r0.field.vasqid_v = 1; + + offs = domain->id.phys_id * HQM_MAX_NUM_DIR_PORTS + + dir_queue->id.phys_id; + + HQM_CSR_WR(hw, HQM_SYS_DIR_VASQID_V(offs), r0.val); + } + + hqm_flush_csr(hw); + + domain->started = true; + + resp->status = 0; + + return 0; +} + +static void hqm_domain_finish_unmap_port_slot(struct hqm_hw *hw, + struct hqm_domain *domain, + struct hqm_ldb_port *port, + int slot) +{ + enum hqm_qid_map_state state; + struct hqm_ldb_queue *queue; + + queue = &hw->rsrcs.ldb_queues[port->qid_map[slot].qid]; + + state = port->qid_map[slot].state; + + /* Update the QID2CQIDX and CQ2QID vectors */ + hqm_ldb_port_unmap_qid(hw, port, queue); + + /* Ensure the QID will not be serviced by this {CQ, slot} by clearing + * the has_work bits + */ + hqm_ldb_port_clear_has_work_bits(hw, port, slot); + + /* Reset the {CQ, slot} to its default state */ + hqm_ldb_port_set_queue_if_status(hw, port, slot); + + /* Re-enable the CQ if it wasn't manually disabled by the user */ + if (port->enabled) + hqm_ldb_port_cq_enable(hw, port); + + /* If there is a mapping that is pending this slot's removal, perform + * the mapping now. + */ + if (state == HQM_QUEUE_UNMAP_IN_PROGRESS_PENDING_MAP) { + struct hqm_ldb_port_qid_map *map; + struct hqm_ldb_queue *map_queue; + u8 prio; + + map = &port->qid_map[slot]; + + map->qid = map->pending_qid; + map->priority = map->pending_priority; + + map_queue = &hw->rsrcs.ldb_queues[map->qid]; + prio = map->priority; + + hqm_ldb_port_map_qid(hw, domain, port, map_queue, prio); + } +} + +static bool hqm_domain_finish_unmap_port(struct hqm_hw *hw, + struct hqm_domain *domain, + struct hqm_ldb_port *port) +{ + union hqm_lsp_cq_ldb_infl_cnt r0; + int i; + + if (port->num_pending_removals == 0) + return false; + + /* The unmap requires all the CQ's outstanding inflights to be + * completed. 
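Starting the domain writes one VASQID valid bit per owned queue, and the register offset is a row-major index over (domain, queue), as computed above. A stand-alone helper that performs the same kind of indexing is shown below; the queue-count constant is a placeholder, not the hardware's real dimension.

/* Row-major index for per-(domain, queue) permission registers, mirroring
 * offs = domain_id * MAX_QUEUES + queue_id above. The size is made up for
 * the example.
 */
#define EXAMPLE_MAX_LDB_QUEUES 128

static unsigned int vasqid_reg_index(unsigned int domain_phys_id,
				     unsigned int queue_phys_id)
{
	return domain_phys_id * EXAMPLE_MAX_LDB_QUEUES + queue_phys_id;
}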
+ */ + r0.val = HQM_CSR_RD(hw, HQM_LSP_CQ_LDB_INFL_CNT(port->id.phys_id)); + if (r0.field.count > 0) + return false; + + for (i = 0; i < HQM_MAX_NUM_QIDS_PER_LDB_CQ; i++) { + struct hqm_ldb_port_qid_map *map; + + map = &port->qid_map[i]; + + if (map->state != HQM_QUEUE_UNMAP_IN_PROGRESS && + map->state != HQM_QUEUE_UNMAP_IN_PROGRESS_PENDING_MAP) + continue; + + hqm_domain_finish_unmap_port_slot(hw, domain, port, i); + } + + return true; +} + +static unsigned int +hqm_domain_finish_unmap_qid_procedures(struct hqm_hw *hw, + struct hqm_domain *domain) +{ + struct hqm_list_entry *iter __attribute__((unused)); + struct hqm_ldb_port *port; + + if (!domain->configured || domain->num_pending_removals == 0) + return 0; + + HQM_DOM_LIST_FOR(domain->used_ldb_ports, port, iter) + hqm_domain_finish_unmap_port(hw, domain, port); + + return domain->num_pending_removals; +} + +unsigned int hqm_finish_unmap_qid_procedures(struct hqm_hw *hw) +{ + int i, num = 0; + + /* Finish queue unmap jobs for any domain that needs it */ + for (i = 0; i < HQM_MAX_NUM_DOMAINS; i++) { + struct hqm_domain *domain = &hw->domains[i]; + + num += hqm_domain_finish_unmap_qid_procedures(hw, domain); + } + + return num; +} + +static void hqm_domain_finish_map_port(struct hqm_hw *hw, + struct hqm_domain *domain, + struct hqm_ldb_port *port) +{ + int i; + + for (i = 0; i < HQM_MAX_NUM_QIDS_PER_LDB_CQ; i++) { + union hqm_lsp_qid_ldb_infl_cnt r0; + struct hqm_ldb_queue *queue; + int qid; + + if (port->qid_map[i].state != HQM_QUEUE_MAP_IN_PROGRESS) + continue; + + qid = port->qid_map[i].qid; + + queue = hqm_get_ldb_queue_from_id(hw, qid, false, 0); + + if (!queue) { + HQM_BASE_ERR(hw, + "[%s()] Internal error: unable to find queue %d\n", + __func__, qid); + continue; + } + + r0.val = HQM_CSR_RD(hw, HQM_LSP_QID_LDB_INFL_CNT(qid)); + + if (r0.field.count) + continue; + + /* Disable the affected CQ, and the CQs already mapped to the + * QID, before reading the QID's inflight count a second time. + * There is an unlikely race in which the QID may schedule one + * more QE after we read an inflight count of 0, and disabling + * the CQs guarantees that the race will not occur after a + * re-read of the inflight count register. 
+ */ + if (port->enabled) + hqm_ldb_port_cq_disable(hw, port); + + hqm_ldb_queue_disable_mapped_cqs(hw, domain, queue); + + r0.val = HQM_CSR_RD(hw, HQM_LSP_QID_LDB_INFL_CNT(qid)); + + if (r0.field.count) { + if (port->enabled) + hqm_ldb_port_cq_enable(hw, port); + + hqm_ldb_queue_enable_mapped_cqs(hw, domain, queue); + + continue; + } + + hqm_ldb_port_finish_map_qid_dynamic(hw, domain, port, queue); + } +} + +static unsigned int +hqm_domain_finish_map_qid_procedures(struct hqm_hw *hw, + struct hqm_domain *domain) +{ + struct hqm_list_entry *iter __attribute__((unused)); + struct hqm_ldb_port *port; + + if (!domain->configured || domain->num_pending_additions == 0) + return 0; + + HQM_DOM_LIST_FOR(domain->used_ldb_ports, port, iter) + hqm_domain_finish_map_port(hw, domain, port); + + return domain->num_pending_additions; +} + +unsigned int hqm_finish_map_qid_procedures(struct hqm_hw *hw) +{ + int i, num = 0; + + /* Finish queue map jobs for any domain that needs it */ + for (i = 0; i < HQM_MAX_NUM_DOMAINS; i++) { + struct hqm_domain *domain = &hw->domains[i]; + + num += hqm_domain_finish_map_qid_procedures(hw, domain); + } + + return num; +} + +static void hqm_log_map_qid(struct hqm_hw *hw, + u32 domain_id, + struct hqm_map_qid_args *args, + bool vf_request, + unsigned int vf_id) +{ + HQM_BASE_INFO(hw, "HQM map QID arguments:\n"); + if (vf_request) + HQM_BASE_INFO(hw, "(Request from VF %d)\n", vf_id); + HQM_BASE_INFO(hw, "\tDomain ID: %d\n", + domain_id); + HQM_BASE_INFO(hw, "\tPort ID: %d\n", + args->port_id); + HQM_BASE_INFO(hw, "\tQueue ID: %d\n", + args->qid); + HQM_BASE_INFO(hw, "\tPriority: %d\n", + args->priority); +} + +int hqm_hw_map_qid(struct hqm_hw *hw, + u32 domain_id, + struct hqm_map_qid_args *args, + struct hqm_cmd_response *resp, + bool vf_request, + unsigned int vf_id) +{ + enum hqm_qid_map_state state; + struct hqm_ldb_queue *queue; + struct hqm_ldb_port *port; + struct hqm_domain *domain; + int ret, i, id; + u8 prio; + + hqm_log_map_qid(hw, domain_id, args, vf_request, vf_id); + + /* Verify that hardware resources are available before attempting to + * satisfy the request. This simplifies the error unwinding code. + */ + if (hqm_verify_map_qid_args(hw, + domain_id, + args, + resp, + vf_request, + vf_id)) + return -EINVAL; + + prio = args->priority; + + domain = hqm_get_domain_from_id(hw, domain_id, vf_request, vf_id); + if (!domain) { + HQM_BASE_ERR(hw, + "[%s():%d] Internal error: domain not found\n", + __func__, __LINE__); + return -EFAULT; + } + + id = args->port_id; + + port = hqm_get_domain_used_ldb_port(id, vf_request, domain); + if (!port) { + HQM_BASE_ERR(hw, + "[%s():%d] Internal error: port not found\n", + __func__, __LINE__); + return -EFAULT; + } + + queue = hqm_get_domain_ldb_queue(args->qid, vf_request, domain); + if (!queue) { + HQM_BASE_ERR(hw, + "[%s():%d] Internal error: queue not found\n", + __func__, __LINE__); + return -EFAULT; + } + + /* If there are any outstanding detach operations for this port, + * attempt to complete them. This may be necessary to free up a QID + * slot for this requested mapping. + */ + if (port->num_pending_removals) + hqm_domain_finish_unmap_port(hw, domain, port); + + ret = hqm_verify_map_qid_slot_available(port, queue, resp); + if (ret) + return ret; + + /* Hardware requires disabling the CQ before mapping QIDs. 
*/ + if (port->enabled) + hqm_ldb_port_cq_disable(hw, port); + + /* If this is only a priority change, don't perform the full QID->CQ + * mapping procedure + */ + state = HQM_QUEUE_MAPPED; + if (hqm_port_find_slot_queue(port, state, queue, &i)) { + if (i >= HQM_MAX_NUM_QIDS_PER_LDB_CQ) { + HQM_BASE_ERR(hw, + "[%s():%d] Internal error: port slot tracking failed\n", + __func__, __LINE__); + return -EFAULT; + } + + if (prio != port->qid_map[i].priority) { + hqm_ldb_port_change_qid_priority(hw, port, i, args); + HQM_BASE_INFO(hw, "HQM map: priority change only\n"); + } + + state = HQM_QUEUE_MAPPED; + ret = hqm_port_slot_state_transition(hw, port, queue, i, state); + if (ret) + return ret; + + goto map_qid_done; + } + + state = HQM_QUEUE_UNMAP_IN_PROGRESS; + if (hqm_port_find_slot_queue(port, state, queue, &i)) { + if (i >= HQM_MAX_NUM_QIDS_PER_LDB_CQ) { + HQM_BASE_ERR(hw, + "[%s():%d] Internal error: port slot tracking failed\n", + __func__, __LINE__); + return -EFAULT; + } + + if (prio != port->qid_map[i].priority) { + hqm_ldb_port_change_qid_priority(hw, port, i, args); + HQM_BASE_INFO(hw, "HQM map: priority change only\n"); + } + + state = HQM_QUEUE_MAPPED; + ret = hqm_port_slot_state_transition(hw, port, queue, i, state); + if (ret) + return ret; + + goto map_qid_done; + } + + /* If this is a priority change on an in-progress mapping, don't + * perform the full QID->CQ mapping procedure. + */ + state = HQM_QUEUE_MAP_IN_PROGRESS; + if (hqm_port_find_slot_queue(port, state, queue, &i)) { + if (i >= HQM_MAX_NUM_QIDS_PER_LDB_CQ) { + HQM_BASE_ERR(hw, + "[%s():%d] Internal error: port slot tracking failed\n", + __func__, __LINE__); + return -EFAULT; + } + + port->qid_map[i].priority = prio; + + HQM_BASE_INFO(hw, "HQM map: priority change only\n"); + + goto map_qid_done; + } + + /* If this is a priority change on a pending mapping, update the + * pending priority + */ + if (hqm_port_find_slot_with_pending_map_queue(port, queue, &i)) { + if (i >= HQM_MAX_NUM_QIDS_PER_LDB_CQ) { + HQM_BASE_ERR(hw, + "[%s():%d] Internal error: port slot tracking failed\n", + __func__, __LINE__); + return -EFAULT; + } + + port->qid_map[i].pending_priority = prio; + + HQM_BASE_INFO(hw, "HQM map: priority change only\n"); + + goto map_qid_done; + } + + /* If all the CQ's slots are in use, then there's an unmap in progress + * (guaranteed by hqm_verify_map_qid_slot_available()), so add this + * mapping to pending_map and return. When the removal is completed for + * the slot's current occupant, this mapping will be performed. + */ + if (!hqm_port_find_slot(port, HQM_QUEUE_UNMAPPED, &i)) { + if (hqm_port_find_slot(port, HQM_QUEUE_UNMAP_IN_PROGRESS, &i)) { + enum hqm_qid_map_state state; + + if (i >= HQM_MAX_NUM_QIDS_PER_LDB_CQ) { + HQM_BASE_ERR(hw, + "[%s():%d] Internal error: port slot tracking failed\n", + __func__, __LINE__); + return -EFAULT; + } + + port->qid_map[i].pending_qid = queue->id.phys_id; + port->qid_map[i].pending_priority = prio; + + state = HQM_QUEUE_UNMAP_IN_PROGRESS_PENDING_MAP; + + ret = hqm_port_slot_state_transition(hw, port, queue, + i, state); + if (ret) + return ret; + + HQM_BASE_INFO(hw, "HQM map: map pending removal\n"); + + goto map_qid_done; + } + } + + /* If the domain has started, a special "dynamic" CQ->queue mapping + * procedure is required in order to safely update the CQ<->QID tables. 
+ * The "static" procedure cannot be used when traffic is flowing, + * because the CQ<->QID tables cannot be updated atomically and the + * scheduler won't see the new mapping unless the queue's if_status + * changes, which isn't guaranteed. + */ + ret = hqm_ldb_port_map_qid(hw, domain, port, queue, prio); + + /* If ret is less than zero, it's due to an internal error */ + if (ret < 0) + return ret; + +map_qid_done: + if (port->enabled) + hqm_ldb_port_cq_enable(hw, port); + + resp->status = 0; + + return 0; +} + +static void hqm_log_unmap_qid(struct hqm_hw *hw, + u32 domain_id, + struct hqm_unmap_qid_args *args, + bool vf_request, + unsigned int vf_id) +{ + HQM_BASE_INFO(hw, "HQM unmap QID arguments:\n"); + if (vf_request) + HQM_BASE_INFO(hw, "(Request from VF %d)\n", vf_id); + HQM_BASE_INFO(hw, "\tDomain ID: %d\n", + domain_id); + HQM_BASE_INFO(hw, "\tPort ID: %d\n", + args->port_id); + HQM_BASE_INFO(hw, "\tQueue ID: %d\n", + args->qid); + if (args->qid < HQM_MAX_NUM_LDB_QUEUES) + HQM_BASE_INFO(hw, "\tQueue's num mappings: %d\n", + hw->rsrcs.ldb_queues[args->qid].num_mappings); +} + +int hqm_hw_unmap_qid(struct hqm_hw *hw, + u32 domain_id, + struct hqm_unmap_qid_args *args, + struct hqm_cmd_response *resp, + bool vf_request, + unsigned int vf_id) +{ + enum hqm_qid_map_state state; + struct hqm_ldb_queue *queue; + struct hqm_ldb_port *port; + struct hqm_domain *domain; + bool unmap_complete; + int i, ret, id; + + hqm_log_unmap_qid(hw, domain_id, args, vf_request, vf_id); + + /* Verify that hardware resources are available before attempting to + * satisfy the request. This simplifies the error unwinding code. + */ + if (hqm_verify_unmap_qid_args(hw, + domain_id, + args, + resp, + vf_request, + vf_id)) + return -EINVAL; + + domain = hqm_get_domain_from_id(hw, domain_id, vf_request, vf_id); + if (!domain) { + HQM_BASE_ERR(hw, + "[%s():%d] Internal error: domain not found\n", + __func__, __LINE__); + return -EFAULT; + } + + id = args->port_id; + + port = hqm_get_domain_used_ldb_port(id, vf_request, domain); + if (!port) { + HQM_BASE_ERR(hw, + "[%s():%d] Internal error: port not found\n", + __func__, __LINE__); + return -EFAULT; + } + + queue = hqm_get_domain_ldb_queue(args->qid, vf_request, domain); + if (!queue) { + HQM_BASE_ERR(hw, + "[%s():%d] Internal error: queue not found\n", + __func__, __LINE__); + return -EFAULT; + } + + /* If the queue hasn't been mapped yet, we need to update the slot's + * state and re-enable the queue's inflights. + */ + state = HQM_QUEUE_MAP_IN_PROGRESS; + if (hqm_port_find_slot_queue(port, state, queue, &i)) { + if (i >= HQM_MAX_NUM_QIDS_PER_LDB_CQ) { + HQM_BASE_ERR(hw, + "[%s():%d] Internal error: port slot tracking failed\n", + __func__, __LINE__); + return -EFAULT; + } + + /* Since the in-progress map was aborted, re-enable the QID's + * inflights. + */ + if (queue->num_pending_additions == 0) + hqm_ldb_queue_set_inflight_limit(hw, queue); + + state = HQM_QUEUE_UNMAPPED; + ret = hqm_port_slot_state_transition(hw, port, queue, i, state); + if (ret) + return ret; + + goto unmap_qid_done; + } + + /* If the queue mapping is on hold pending an unmap, we simply need to + * update the slot's state. 
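Both the map and unmap paths are driven by a per-slot state machine (unmapped, map in progress, mapped, unmap in progress, and unmap in progress with a pending map). The sketch below captures the transitions those paths appear to rely on; the enum names shadow the driver's, but the checker itself is only an illustration and is not exhaustive.

#include <stdbool.h>

enum slot_state {
	SLOT_UNMAPPED,
	SLOT_MAP_IN_PROGRESS,
	SLOT_MAPPED,
	SLOT_UNMAP_IN_PROGRESS,
	SLOT_UNMAP_IN_PROGRESS_PENDING_MAP,
};

/* Returns true if a slot may plausibly move from 'from' to 'to': a map can
 * complete or be aborted, an unmap can finish, revert to mapped, or pick up
 * a pending map, and a pending map either resumes the map flow or is
 * cancelled back to an unmap in progress.
 */
static bool slot_transition_ok(enum slot_state from, enum slot_state to)
{
	switch (from) {
	case SLOT_UNMAPPED:
		return to == SLOT_MAP_IN_PROGRESS || to == SLOT_MAPPED;
	case SLOT_MAP_IN_PROGRESS:
		return to == SLOT_MAPPED || to == SLOT_UNMAPPED;
	case SLOT_MAPPED:
		return to == SLOT_MAPPED || to == SLOT_UNMAP_IN_PROGRESS;
	case SLOT_UNMAP_IN_PROGRESS:
		return to == SLOT_UNMAPPED || to == SLOT_MAPPED ||
		       to == SLOT_UNMAP_IN_PROGRESS_PENDING_MAP;
	case SLOT_UNMAP_IN_PROGRESS_PENDING_MAP:
		return to == SLOT_MAP_IN_PROGRESS || to == SLOT_MAPPED ||
		       to == SLOT_UNMAP_IN_PROGRESS;
	}

	return false;
}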
+ */ + if (hqm_port_find_slot_with_pending_map_queue(port, queue, &i)) { + if (i >= HQM_MAX_NUM_QIDS_PER_LDB_CQ) { + HQM_BASE_ERR(hw, + "[%s():%d] Internal error: port slot tracking failed\n", + __func__, __LINE__); + return -EFAULT; + } + + state = HQM_QUEUE_UNMAP_IN_PROGRESS; + ret = hqm_port_slot_state_transition(hw, port, queue, i, state); + if (ret) + return ret; + + goto unmap_qid_done; + } + + state = HQM_QUEUE_MAPPED; + if (!hqm_port_find_slot_queue(port, state, queue, &i)) { + HQM_BASE_ERR(hw, + "[%s()] Internal error: no available CQ slots\n", + __func__); + return -EFAULT; + } + + if (i >= HQM_MAX_NUM_QIDS_PER_LDB_CQ) { + HQM_BASE_ERR(hw, + "[%s():%d] Internal error: port slot tracking failed\n", + __func__, __LINE__); + return -EFAULT; + } + + /* QID->CQ mapping removal is an asychronous procedure. It requires + * stopping the HQM from scheduling this CQ, draining all inflights + * from the CQ, then unmapping the queue from the CQ. This function + * simply marks the port as needing the queue unmapped, and (if + * necessary) starts the unmapping worker thread. + */ + hqm_ldb_port_cq_disable(hw, port); + + state = HQM_QUEUE_UNMAP_IN_PROGRESS; + ret = hqm_port_slot_state_transition(hw, port, queue, i, state); + if (ret) + return ret; + + /* Attempt to finish the unmapping now, in case the port has no + * outstanding inflights. If that's not the case, this will fail and + * the unmapping will be completed at a later time. + */ + unmap_complete = hqm_domain_finish_unmap_port(hw, domain, port); + + /* If the unmapping couldn't complete immediately, launch the worker + * thread (if it isn't already launched) to finish it later. + */ + if (!unmap_complete && !os_worker_active(hw)) + os_schedule_work(hw); + +unmap_qid_done: + resp->status = 0; + + return 0; +} + +static void hqm_log_enable_port(struct hqm_hw *hw, + u32 domain_id, + u32 port_id, + bool vf_request, + unsigned int vf_id) +{ + HQM_BASE_INFO(hw, "HQM enable port arguments:\n"); + if (vf_request) + HQM_BASE_INFO(hw, "(Request from VF %d)\n", vf_id); + HQM_BASE_INFO(hw, "\tDomain ID: %d\n", + domain_id); + HQM_BASE_INFO(hw, "\tPort ID: %d\n", + port_id); +} + +int hqm_hw_enable_ldb_port(struct hqm_hw *hw, + u32 domain_id, + struct hqm_enable_ldb_port_args *args, + struct hqm_cmd_response *resp, + bool vf_request, + unsigned int vf_id) +{ + struct hqm_ldb_port *port; + struct hqm_domain *domain; + int id; + + hqm_log_enable_port(hw, domain_id, args->port_id, vf_request, vf_id); + + /* Verify that hardware resources are available before attempting to + * satisfy the request. This simplifies the error unwinding code. + */ + if (hqm_verify_enable_ldb_port_args(hw, + domain_id, + args, + resp, + vf_request, + vf_id)) + return -EINVAL; + + domain = hqm_get_domain_from_id(hw, domain_id, vf_request, vf_id); + if (!domain) { + HQM_BASE_ERR(hw, + "[%s():%d] Internal error: domain not found\n", + __func__, __LINE__); + return -EFAULT; + } + + id = args->port_id; + + port = hqm_get_domain_used_ldb_port(id, vf_request, domain); + if (!port) { + HQM_BASE_ERR(hw, + "[%s():%d] Internal error: port not found\n", + __func__, __LINE__); + return -EFAULT; + } + + /* Hardware requires disabling the CQ before unmapping QIDs. 
*/ + if (!port->enabled) { + hqm_ldb_port_cq_enable(hw, port); + port->enabled = true; + + hw->pf.num_enabled_ldb_ports++; + hqm_update_ldb_arb_threshold(hw); + } + + resp->status = 0; + + return 0; +} + +static void hqm_log_disable_port(struct hqm_hw *hw, + u32 domain_id, + u32 port_id, + bool vf_request, + unsigned int vf_id) +{ + HQM_BASE_INFO(hw, "HQM disable port arguments:\n"); + if (vf_request) + HQM_BASE_INFO(hw, "(Request from VF %d)\n", vf_id); + HQM_BASE_INFO(hw, "\tDomain ID: %d\n", + domain_id); + HQM_BASE_INFO(hw, "\tPort ID: %d\n", + port_id); +} + +int hqm_hw_disable_ldb_port(struct hqm_hw *hw, + u32 domain_id, + struct hqm_disable_ldb_port_args *args, + struct hqm_cmd_response *resp, + bool vf_request, + unsigned int vf_id) +{ + struct hqm_ldb_port *port; + struct hqm_domain *domain; + int id; + + hqm_log_disable_port(hw, domain_id, args->port_id, vf_request, vf_id); + + /* Verify that hardware resources are available before attempting to + * satisfy the request. This simplifies the error unwinding code. + */ + if (hqm_verify_disable_ldb_port_args(hw, + domain_id, + args, + resp, + vf_request, + vf_id)) + return -EINVAL; + + domain = hqm_get_domain_from_id(hw, domain_id, vf_request, vf_id); + if (!domain) { + HQM_BASE_ERR(hw, + "[%s():%d] Internal error: domain not found\n", + __func__, __LINE__); + return -EFAULT; + } + + id = args->port_id; + + port = hqm_get_domain_used_ldb_port(id, vf_request, domain); + if (!port) { + HQM_BASE_ERR(hw, + "[%s():%d] Internal error: port not found\n", + __func__, __LINE__); + return -EFAULT; + } + + /* Hardware requires disabling the CQ before unmapping QIDs. */ + if (port->enabled) { + hqm_ldb_port_cq_disable(hw, port); + port->enabled = false; + + hw->pf.num_enabled_ldb_ports--; + hqm_update_ldb_arb_threshold(hw); + } + + resp->status = 0; + + return 0; +} + +int hqm_hw_enable_dir_port(struct hqm_hw *hw, + u32 domain_id, + struct hqm_enable_dir_port_args *args, + struct hqm_cmd_response *resp, + bool vf_request, + unsigned int vf_id) +{ + struct hqm_dir_pq_pair *port; + struct hqm_domain *domain; + int id; + + hqm_log_enable_port(hw, domain_id, args->port_id, vf_request, vf_id); + + /* Verify that hardware resources are available before attempting to + * satisfy the request. This simplifies the error unwinding code. + */ + if (hqm_verify_enable_dir_port_args(hw, + domain_id, + args, + resp, + vf_request, + vf_id)) + return -EINVAL; + + domain = hqm_get_domain_from_id(hw, domain_id, vf_request, vf_id); + if (!domain) { + HQM_BASE_ERR(hw, + "[%s():%d] Internal error: domain not found\n", + __func__, __LINE__); + return -EFAULT; + } + + id = args->port_id; + + port = hqm_get_domain_used_dir_pq(id, vf_request, domain); + if (!port) { + HQM_BASE_ERR(hw, + "[%s():%d] Internal error: port not found\n", + __func__, __LINE__); + return -EFAULT; + } + + /* Hardware requires disabling the CQ before unmapping QIDs. */ + if (!port->enabled) { + hqm_dir_port_cq_enable(hw, port); + port->enabled = true; + } + + resp->status = 0; + + return 0; +} + +int hqm_hw_disable_dir_port(struct hqm_hw *hw, + u32 domain_id, + struct hqm_disable_dir_port_args *args, + struct hqm_cmd_response *resp, + bool vf_request, + unsigned int vf_id) +{ + struct hqm_dir_pq_pair *port; + struct hqm_domain *domain; + int id; + + hqm_log_disable_port(hw, domain_id, args->port_id, vf_request, vf_id); + + /* Verify that hardware resources are available before attempting to + * satisfy the request. This simplifies the error unwinding code. 
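The port enable and disable paths above are deliberately idempotent: the 'enabled' flag guards against double transitions, and only load-balanced ports adjust a device-wide enabled-port count that feeds an arbitration-threshold update. A compact restatement of that guard, with invented names standing in for the driver's, is:

#include <stdbool.h>

struct example_port {
	bool enabled;
};

struct example_dev {
	unsigned int num_enabled_ldb_ports;
};

/* Hypothetical hook standing in for the driver's arbitration update. */
static void example_update_arb_threshold(struct example_dev *dev)
{
	(void)dev;	/* would reprogram arbitration from the port count */
}

static void example_set_ldb_port_enabled(struct example_dev *dev,
					 struct example_port *port,
					 bool enable)
{
	if (port->enabled == enable)
		return;		/* nothing to do; keeps the count accurate */

	port->enabled = enable;

	if (enable)
		dev->num_enabled_ldb_ports++;
	else
		dev->num_enabled_ldb_ports--;

	example_update_arb_threshold(dev);
}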
+ */ + if (hqm_verify_disable_dir_port_args(hw, + domain_id, + args, + resp, + vf_request, + vf_id)) + return -EINVAL; + + domain = hqm_get_domain_from_id(hw, domain_id, vf_request, vf_id); + if (!domain) { + HQM_BASE_ERR(hw, + "[%s():%d] Internal error: domain not found\n", + __func__, __LINE__); + return -EFAULT; + } + + id = args->port_id; + + port = hqm_get_domain_used_dir_pq(id, vf_request, domain); + if (!port) { + HQM_BASE_ERR(hw, + "[%s():%d] Internal error: port not found\n", + __func__, __LINE__); + return -EFAULT; + } + + /* Hardware requires disabling the CQ before unmapping QIDs. */ + if (port->enabled) { + hqm_dir_port_cq_disable(hw, port); + port->enabled = false; + } + + resp->status = 0; + + return 0; +} + +int hqm_notify_vf(struct hqm_hw *hw, + unsigned int vf_id, + enum hqm_mbox_vf_notification_type notification) +{ + struct hqm_mbox_vf_notification_cmd_req req; + int retry_cnt; + + req.hdr.type = HQM_MBOX_VF_CMD_NOTIFICATION; + req.notification = notification; + + hqm_pf_write_vf_mbox_req(hw, vf_id, &req, sizeof(req)); + + hqm_send_async_pf_to_vf_msg(hw, vf_id); + + /* Timeout after 1 second of inactivity */ + retry_cnt = 0; + while (!hqm_pf_to_vf_complete(hw, vf_id)) { + os_msleep(1); + if (++retry_cnt >= 1000) { + HQM_BASE_ERR(hw, + "PF driver timed out waiting for mbox response\n"); + return -1; + } + } + + /* No response data expected for notifications. */ + + return 0; +} + +int hqm_vf_in_use(struct hqm_hw *hw, unsigned int vf_id) +{ + struct hqm_mbox_vf_in_use_cmd_resp resp; + struct hqm_mbox_vf_in_use_cmd_req req; + int retry_cnt; + + req.hdr.type = HQM_MBOX_VF_CMD_IN_USE; + + hqm_pf_write_vf_mbox_req(hw, vf_id, &req, sizeof(req)); + + hqm_send_async_pf_to_vf_msg(hw, vf_id); + + /* Timeout after 1 second of inactivity */ + retry_cnt = 0; + while (!hqm_pf_to_vf_complete(hw, vf_id)) { + os_msleep(1); + if (++retry_cnt >= 1000) { + HQM_BASE_ERR(hw, + "PF driver timed out waiting for mbox response\n"); + return -1; + } + } + + hqm_pf_read_vf_mbox_resp(hw, vf_id, &resp, sizeof(resp)); + + if (resp.hdr.status != HQM_MBOX_ST_SUCCESS) { + HQM_BASE_ERR(hw, + "[%s()]: failed with mailbox error: %s\n", + __func__, + HQM_MBOX_ST_STRING(&resp)); + + return -1; + } + + return resp.in_use; +} + +static int hqm_notify_vf_alarm(struct hqm_hw *hw, + unsigned int vf_id, + u32 domain_id, + u32 alert_id, + u32 aux_alert_data) +{ + struct hqm_mbox_vf_alert_cmd_req req; + int retry_cnt; + + req.hdr.type = HQM_MBOX_VF_CMD_DOMAIN_ALERT; + req.domain_id = domain_id; + req.alert_id = alert_id; + req.aux_alert_data = aux_alert_data; + + hqm_pf_write_vf_mbox_req(hw, vf_id, &req, sizeof(req)); + + hqm_send_async_pf_to_vf_msg(hw, vf_id); + + /* Timeout after 1 second of inactivity */ + retry_cnt = 0; + while (!hqm_pf_to_vf_complete(hw, vf_id)) { + os_msleep(1); + if (++retry_cnt >= 1000) { + HQM_BASE_ERR(hw, + "PF driver timed out waiting for mbox response\n"); + return -1; + } + } + + /* No response data expected for alarm notifications. 
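The PF-to-VF mailbox helpers above share one request pattern: write the request, kick the asynchronous doorbell, then poll for completion once per millisecond and give up after roughly one second. Outside the driver the same shape could be written as below; the completion predicate and the sleep call are placeholders (usleep() stands in for os_msleep()).

#include <stdbool.h>
#include <unistd.h>

/* Poll a completion predicate once per millisecond, giving up after
 * roughly timeout_ms milliseconds. Returns 0 on completion, -1 on timeout.
 */
static int poll_for_completion(bool (*done)(void *ctx), void *ctx,
			       unsigned int timeout_ms)
{
	unsigned int retries = 0;

	while (!done(ctx)) {
		usleep(1000);
		if (++retries >= timeout_ms)
			return -1;
	}

	return 0;
}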
*/ + + return 0; +} + +void hqm_set_msix_mode(struct hqm_hw *hw, int mode) +{ + union hqm_sys_msix_mode r0 = { {0} }; + + r0.field.mode = mode; + + HQM_CSR_WR(hw, HQM_SYS_MSIX_MODE, r0.val); +} + +int hqm_configure_ldb_cq_interrupt(struct hqm_hw *hw, + int port_id, + int vector, + int mode, + unsigned int vf, + unsigned int owner_vf, + u16 threshold) +{ + union hqm_chp_ldb_cq_int_depth_thrsh r0 = { {0} }; + union hqm_chp_ldb_cq_int_enb r1 = { {0} }; + union hqm_sys_ldb_cq_isr r2 = { {0} }; + struct hqm_ldb_port *port; + bool vf_request; + + vf_request = (mode == HQM_CQ_ISR_MODE_MSI); + + port = hqm_get_ldb_port_from_id(hw, port_id, vf_request, vf); + if (!port) { + HQM_BASE_ERR(hw, + "[%s()]: Internal error: failed to enable LDB CQ int\n\tport_id: %u, vf_req: %u, vf: %u\n", + __func__, port_id, vf_request, vf); + return -EINVAL; + } + + /* Trigger the interrupt when threshold or more QEs arrive in the CQ */ + r0.field.depth_threshold = threshold - 1; + + HQM_CSR_WR(hw, + HQM_CHP_LDB_CQ_INT_DEPTH_THRSH(port->id.phys_id), + r0.val); + + r1.field.en_depth = 1; + + HQM_CSR_WR(hw, HQM_CHP_LDB_CQ_INT_ENB(port->id.phys_id), r1.val); + + r2.field.vector = vector; + r2.field.vf = owner_vf; + r2.field.en_code = mode; + + HQM_CSR_WR(hw, HQM_SYS_LDB_CQ_ISR(port->id.phys_id), r2.val); + + return 0; +} + +int hqm_configure_dir_cq_interrupt(struct hqm_hw *hw, + int port_id, + int vector, + int mode, + unsigned int vf, + unsigned int owner_vf, + u16 threshold) +{ + union hqm_chp_dir_cq_int_depth_thrsh r0 = { {0} }; + union hqm_chp_dir_cq_int_enb r1 = { {0} }; + union hqm_sys_dir_cq_isr r2 = { {0} }; + struct hqm_dir_pq_pair *port; + bool vf_request; + + vf_request = (mode == HQM_CQ_ISR_MODE_MSI); + + port = hqm_get_dir_pq_from_id(hw, port_id, vf_request, vf); + if (!port) { + HQM_BASE_ERR(hw, + "[%s()]: Internal error: failed to enable DIR CQ int\n\tport_id: %u, vf_req: %u, vf: %u\n", + __func__, port_id, vf_request, vf); + return -EINVAL; + } + + /* Trigger the interrupt when threshold or more QEs arrive in the CQ */ + r0.field.depth_threshold = threshold - 1; + + HQM_CSR_WR(hw, + HQM_CHP_DIR_CQ_INT_DEPTH_THRSH(port->id.phys_id), + r0.val); + + r1.field.en_depth = 1; + + HQM_CSR_WR(hw, HQM_CHP_DIR_CQ_INT_ENB(port->id.phys_id), r1.val); + + r2.field.vector = vector; + r2.field.vf = owner_vf; + r2.field.en_code = mode; + + HQM_CSR_WR(hw, HQM_SYS_DIR_CQ_ISR(port->id.phys_id), r2.val); + + return 0; +} + +int hqm_cq_depth(struct hqm_hw *hw, + int port_id, + bool is_ldb, + bool vf_request, + unsigned int vf_id) +{ + union hqm_chp_ldb_cq_depth r0; + u32 reg; + + if (vf_request && is_ldb) { + struct hqm_ldb_port *ldb_port; + + ldb_port = hqm_get_ldb_port_from_id(hw, port_id, true, vf_id); + + if (!ldb_port || !ldb_port->configured) + return -EINVAL; + + port_id = ldb_port->id.phys_id; + } else if (vf_request && !is_ldb) { + struct hqm_dir_pq_pair *dir_port; + + dir_port = hqm_get_dir_pq_from_id(hw, port_id, true, vf_id); + + if (!dir_port || !dir_port->port_configured) + return -EINVAL; + + port_id = dir_port->id.phys_id; + } + + if (is_ldb) + reg = HQM_CHP_LDB_CQ_DEPTH(port_id); + else + reg = HQM_CHP_DIR_CQ_DEPTH(port_id); + + r0.val = HQM_CSR_RD(hw, reg); + + return r0.field.depth; +} + +int hqm_arm_cq_interrupt(struct hqm_hw *hw, + int port_id, + bool is_ldb, + bool vf_request, + unsigned int vf_id) +{ + u32 val; + u32 reg; + + if (vf_request && is_ldb) { + struct hqm_ldb_port *ldb_port; + + ldb_port = hqm_get_ldb_port_from_id(hw, port_id, true, vf_id); + + if (!ldb_port || !ldb_port->configured) + return 
-EINVAL; + + port_id = ldb_port->id.phys_id; + } else if (vf_request && !is_ldb) { + struct hqm_dir_pq_pair *dir_port; + + dir_port = hqm_get_dir_pq_from_id(hw, port_id, true, vf_id); + + if (!dir_port || !dir_port->port_configured) + return -EINVAL; + + port_id = dir_port->id.phys_id; + } + + val = 1 << (port_id % 32); + + if (is_ldb && port_id < 32) + reg = HQM_CHP_LDB_CQ_INTR_ARMED0; + else if (is_ldb && port_id < 64) + reg = HQM_CHP_LDB_CQ_INTR_ARMED1; + else if (!is_ldb && port_id < 32) + reg = HQM_CHP_DIR_CQ_INTR_ARMED0; + else if (!is_ldb && port_id < 64) + reg = HQM_CHP_DIR_CQ_INTR_ARMED1; + else if (!is_ldb && port_id < 96) + reg = HQM_CHP_DIR_CQ_INTR_ARMED2; + else + reg = HQM_CHP_DIR_CQ_INTR_ARMED3; + + HQM_CSR_WR(hw, reg, val); + + hqm_flush_csr(hw); + + return 0; +} + +void hqm_read_compressed_cq_intr_status(struct hqm_hw *hw, + u32 *ldb_interrupts, + u32 *dir_interrupts) +{ + /* Read every CQ's interrupt status */ + + ldb_interrupts[0] = HQM_CSR_RD(hw, HQM_SYS_LDB_CQ_31_0_OCC_INT_STS); + ldb_interrupts[1] = HQM_CSR_RD(hw, HQM_SYS_LDB_CQ_63_32_OCC_INT_STS); + + dir_interrupts[0] = HQM_CSR_RD(hw, HQM_SYS_DIR_CQ_31_0_OCC_INT_STS); + dir_interrupts[1] = HQM_CSR_RD(hw, HQM_SYS_DIR_CQ_63_32_OCC_INT_STS); + dir_interrupts[2] = HQM_CSR_RD(hw, HQM_SYS_DIR_CQ_95_64_OCC_INT_STS); + dir_interrupts[3] = HQM_CSR_RD(hw, HQM_SYS_DIR_CQ_127_96_OCC_INT_STS); +} + +static void hqm_ack_msix_interrupt(struct hqm_hw *hw, int vector) +{ + union hqm_sys_msix_ack r0 = { {0} }; + + switch (vector) { + case 0: + r0.field.msix_0_ack = 1; + break; + case 1: + r0.field.msix_1_ack = 1; + break; + case 2: + r0.field.msix_2_ack = 1; + break; + case 3: + r0.field.msix_3_ack = 1; + break; + case 4: + r0.field.msix_4_ack = 1; + break; + case 5: + r0.field.msix_5_ack = 1; + break; + case 6: + r0.field.msix_6_ack = 1; + break; + case 7: + r0.field.msix_7_ack = 1; + break; + case 8: + r0.field.msix_8_ack = 1; + /* + * CSSY-1650 + * workaround h/w bug for lost MSI-X interrupts + * + * The recommended workaround for acknowledging + * vector 8 interrupts is : + * 1: set MSI-X mask + * 2: set MSIX_PASSTHROUGH + * 3: clear MSIX_ACK + * 4: clear MSIX_PASSTHROUGH + * 5: clear MSI-X mask + * + * The MSIX-ACK (step 3) is cleared for all vectors + * below. We handle steps 1 & 2 for vector 8 here. + * + * The bitfields for MSIX_ACK and MSIX_PASSTHRU are + * defined the same, so we just use the MSIX_ACK + * value when writing to PASSTHRU. 
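Arming a CQ interrupt reduces to choosing one of several 32-bit "armed" registers and setting the port's bit within it, as the if/else ladder above shows: load-balanced ports select between two registers and directed ports between four. The selection itself is a divide and a modulo, restated here with the register identities reduced to plain indices.

#include <stdint.h>

/* Bit to write within the selected armed register. */
static uint32_t armed_bit(unsigned int port_id)
{
	return 1u << (port_id % 32);
}

/* Index of the armed register within its bank: 0..1 for load-balanced
 * ports, 0..3 for directed ports, one register per 32 ports.
 */
static unsigned int armed_reg_index(unsigned int port_id)
{
	return port_id / 32;
}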
+ */ + + /* set MSI-X mask and passthrough for vector 8 */ + HQM_FUNC_WR(hw, HQM_MSIX_MEM_VECTOR_CTRL(8), 1); + HQM_CSR_WR(hw, HQM_SYS_MSIX_PASSTHRU, r0.val); + break; + } + + /* clear MSIX_ACK (write one to clear) */ + HQM_CSR_WR(hw, HQM_SYS_MSIX_ACK, r0.val); + + if (vector == 8) { + /* + * finish up steps 4 & 5 of the workaround - + * clear pasthrough and mask + */ + HQM_CSR_WR(hw, HQM_SYS_MSIX_PASSTHRU, 0); + HQM_FUNC_WR(hw, HQM_MSIX_MEM_VECTOR_CTRL(8), 0); + } + + hqm_flush_csr(hw); +} + +void hqm_ack_compressed_cq_intr(struct hqm_hw *hw, + u32 *ldb_interrupts, + u32 *dir_interrupts) +{ + /* Write back the status regs to ack the interrupts */ + if (ldb_interrupts[0]) + HQM_CSR_WR(hw, + HQM_SYS_LDB_CQ_31_0_OCC_INT_STS, + ldb_interrupts[0]); + if (ldb_interrupts[1]) + HQM_CSR_WR(hw, + HQM_SYS_LDB_CQ_63_32_OCC_INT_STS, + ldb_interrupts[1]); + + if (dir_interrupts[0]) + HQM_CSR_WR(hw, + HQM_SYS_DIR_CQ_31_0_OCC_INT_STS, + dir_interrupts[0]); + if (dir_interrupts[1]) + HQM_CSR_WR(hw, + HQM_SYS_DIR_CQ_63_32_OCC_INT_STS, + dir_interrupts[1]); + if (dir_interrupts[2]) + HQM_CSR_WR(hw, + HQM_SYS_DIR_CQ_95_64_OCC_INT_STS, + dir_interrupts[2]); + if (dir_interrupts[3]) + HQM_CSR_WR(hw, + HQM_SYS_DIR_CQ_127_96_OCC_INT_STS, + dir_interrupts[3]); + + hqm_ack_msix_interrupt(hw, HQM_PF_COMPRESSED_MODE_CQ_VECTOR_ID); +} + +u32 hqm_read_vf_intr_status(struct hqm_hw *hw) +{ + return HQM_FUNC_RD(hw, HQM_FUNC_VF_VF_MSI_ISR); +} + +void hqm_ack_vf_intr_status(struct hqm_hw *hw, u32 interrupts) +{ + HQM_FUNC_WR(hw, HQM_FUNC_VF_VF_MSI_ISR, interrupts); +} + +void hqm_ack_vf_msi_intr(struct hqm_hw *hw, u32 interrupts) +{ + HQM_FUNC_WR(hw, HQM_FUNC_VF_VF_MSI_ISR_PEND, interrupts); +} + +void hqm_ack_pf_mbox_int(struct hqm_hw *hw) +{ + union hqm_func_vf_pf2vf_mailbox_isr r0; + + r0.field.pf_isr = 1; + + HQM_FUNC_WR(hw, HQM_FUNC_VF_PF2VF_MAILBOX_ISR, r0.val); +} + +void hqm_enable_alarm_interrupts(struct hqm_hw *hw) +{ + union hqm_sys_ingress_alarm_enbl r0; + + r0.val = HQM_CSR_RD(hw, HQM_SYS_INGRESS_ALARM_ENBL); + + r0.field.illegal_hcw = 1; + r0.field.illegal_pp = 1; + r0.field.disabled_pp = 1; + r0.field.illegal_qid = 1; + r0.field.disabled_qid = 1; + r0.field.illegal_ldb_qid_cfg = 1; + r0.field.illegal_cqid = 1; + + HQM_CSR_WR(hw, HQM_SYS_INGRESS_ALARM_ENBL, r0.val); +} + +static void hqm_log_alarm_syndrome(struct hqm_hw *hw, + const char *str, + union hqm_sys_alarm_hw_synd r0) +{ + HQM_BASE_ERR(hw, "%s:\n", str); + HQM_BASE_ERR(hw, "\tsyndrome: 0x%x\n", r0.field.syndrome); + HQM_BASE_ERR(hw, "\trtype: 0x%x\n", r0.field.rtype); + HQM_BASE_ERR(hw, "\tfrom_dmv: 0x%x\n", r0.field.from_dmv); + HQM_BASE_ERR(hw, "\tis_ldb: 0x%x\n", r0.field.is_ldb); + HQM_BASE_ERR(hw, "\tcls: 0x%x\n", r0.field.cls); + HQM_BASE_ERR(hw, "\taid: 0x%x\n", r0.field.aid); + HQM_BASE_ERR(hw, "\tunit: 0x%x\n", r0.field.unit); + HQM_BASE_ERR(hw, "\tsource: 0x%x\n", r0.field.source); + HQM_BASE_ERR(hw, "\tmore: 0x%x\n", r0.field.more); + HQM_BASE_ERR(hw, "\tvalid: 0x%x\n", r0.field.valid); +} + +static void hqm_log_pf_vf_syndrome(struct hqm_hw *hw, + const char *str, + union hqm_sys_alarm_pf_synd0 r0, + union hqm_sys_alarm_pf_synd1 r1, + union hqm_sys_alarm_pf_synd2 r2) +{ + HQM_BASE_ERR(hw, "%s:\n", str); + HQM_BASE_ERR(hw, "\tsyndrome: 0x%x\n", r0.field.syndrome); + HQM_BASE_ERR(hw, "\trtype: 0x%x\n", r0.field.rtype); + HQM_BASE_ERR(hw, "\tfrom_dmv: 0x%x\n", r0.field.from_dmv); + HQM_BASE_ERR(hw, "\tis_ldb: 0x%x\n", r0.field.is_ldb); + HQM_BASE_ERR(hw, "\tcls: 0x%x\n", r0.field.cls); + HQM_BASE_ERR(hw, "\taid: 0x%x\n", r0.field.aid); + 
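The vector-8 acknowledgment above is order-sensitive: mask the vector and set passthrough before clearing the ack, then undo passthrough and the mask afterwards. The sketch below lays out that five-step ordering with placeholder register-write helpers so the sequence is easy to audit; it is not the driver's register interface.

/* Placeholder register writers; in the driver these are writes to the
 * MSI-X vector control, passthrough, and ack registers.
 */
static void write_vector_ctrl(int vector, unsigned int mask)
{
	(void)vector; (void)mask;
}

static void write_passthru(unsigned int val) { (void)val; }
static void write_ack(unsigned int val) { (void)val; }

/* CSSY-1650 workaround ordering for acknowledging vector 8. */
static void ack_vector8(unsigned int ack_bit)
{
	write_vector_ctrl(8, 1);	/* 1: set the MSI-X mask */
	write_passthru(ack_bit);	/* 2: set passthrough */
	write_ack(ack_bit);		/* 3: clear the ack (write 1 to clear) */
	write_passthru(0);		/* 4: clear passthrough */
	write_vector_ctrl(8, 0);	/* 5: clear the MSI-X mask */
}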
HQM_BASE_ERR(hw, "\tunit: 0x%x\n", r0.field.unit); + HQM_BASE_ERR(hw, "\tsource: 0x%x\n", r0.field.source); + HQM_BASE_ERR(hw, "\tmore: 0x%x\n", r0.field.more); + HQM_BASE_ERR(hw, "\tvalid: 0x%x\n", r0.field.valid); + HQM_BASE_ERR(hw, "\tdsi: 0x%x\n", r1.field.dsi); + HQM_BASE_ERR(hw, "\tqid: 0x%x\n", r1.field.qid); + HQM_BASE_ERR(hw, "\tqtype: 0x%x\n", r1.field.qtype); + HQM_BASE_ERR(hw, "\tqpri: 0x%x\n", r1.field.qpri); + HQM_BASE_ERR(hw, "\tmsg_type: 0x%x\n", r1.field.msg_type); + HQM_BASE_ERR(hw, "\tlock_id: 0x%x\n", r2.field.lock_id); + HQM_BASE_ERR(hw, "\tmeas: 0x%x\n", r2.field.meas); + HQM_BASE_ERR(hw, "\tdebug: 0x%x\n", r2.field.debug); + HQM_BASE_ERR(hw, "\tcq_pop: 0x%x\n", r2.field.cq_pop); + HQM_BASE_ERR(hw, "\tqe_uhl: 0x%x\n", r2.field.qe_uhl); + HQM_BASE_ERR(hw, "\tqe_orsp: 0x%x\n", r2.field.qe_orsp); + HQM_BASE_ERR(hw, "\tqe_valid: 0x%x\n", r2.field.qe_valid); + HQM_BASE_ERR(hw, "\tcq_int_rearm: 0x%x\n", r2.field.cq_int_rearm); + HQM_BASE_ERR(hw, "\tdsi_error: 0x%x\n", r2.field.dsi_error); +} + +static void hqm_clear_syndrome_register(struct hqm_hw *hw, u32 offset) +{ + union hqm_sys_alarm_hw_synd r0 = { {0} }; + + r0.field.valid = 1; + r0.field.more = 1; + + HQM_CSR_WR(hw, offset, r0.val); +} + +void hqm_process_alarm_interrupt(struct hqm_hw *hw) +{ + union hqm_sys_alarm_hw_synd r0; + + r0.val = HQM_CSR_RD(hw, HQM_SYS_ALARM_HW_SYND); + + hqm_log_alarm_syndrome(hw, "HW alarm syndrome", r0); + + hqm_clear_syndrome_register(hw, HQM_SYS_ALARM_HW_SYND); + + hqm_ack_msix_interrupt(hw, HQM_INT_ALARM); +} + +u32 hqm_read_vf_to_pf_int_bitvec(struct hqm_hw *hw) +{ + /* The PF has one VF->PF MBOX ISR register per VF space, but they all + * alias to the same physical register. + */ + return HQM_FUNC_RD(hw, HQM_FUNC_PF_VF2PF_MAILBOX_ISR(0)); +} + +void hqm_ack_vf_mbox_int(struct hqm_hw *hw, u32 bitvec) +{ + /* The PF has one VF->PF MBOX ISR register per VF space, but they all + * alias to the same physical register. + */ + HQM_FUNC_WR(hw, HQM_FUNC_PF_VF2PF_MAILBOX_ISR(0), bitvec); +} + +u32 hqm_read_vf_flr_int_bitvec(struct hqm_hw *hw) +{ + /* The PF has one VF->PF FLR ISR register per VF space, but they all + * alias to the same physical register. + */ + return HQM_FUNC_RD(hw, HQM_FUNC_PF_VF2PF_FLR_ISR(0)); +} + +void hqm_set_vf_reset_in_progress(struct hqm_hw *hw, int vf) +{ + u32 bitvec = HQM_FUNC_RD(hw, HQM_FUNC_PF_VF_RESET_IN_PROGRESS(0)); + + bitvec |= (1 << vf); + + HQM_FUNC_WR(hw, HQM_FUNC_PF_VF_RESET_IN_PROGRESS(0), bitvec); +} + +void hqm_clr_vf_reset_in_progress(struct hqm_hw *hw, int vf) +{ + u32 bitvec = HQM_FUNC_RD(hw, HQM_FUNC_PF_VF_RESET_IN_PROGRESS(0)); + + bitvec &= ~(1 << vf); + + HQM_FUNC_WR(hw, HQM_FUNC_PF_VF_RESET_IN_PROGRESS(0), bitvec); +} + +void hqm_ack_vf_flr_int(struct hqm_hw *hw, u32 bitvec, bool a_stepping) +{ + union hqm_sys_func_vf_bar_dsbl r0 = { {0} }; + u32 clear; + int i; + + /* Re-enable access to the VF BAR */ + r0.field.func_vf_bar_dis = 0; + for (i = 0; i < HQM_MAX_NUM_VFS; i++) { + if (!(bitvec & (1 << i))) + continue; + + HQM_CSR_WR(hw, HQM_SYS_FUNC_VF_BAR_DSBL(i), r0.val); + } + + /* Notify the VF driver that the reset has completed. This register is + * RW in A-stepping devices, WOCLR otherwise. 
+ */ + if (a_stepping) { + clear = HQM_FUNC_RD(hw, HQM_FUNC_PF_VF_RESET_IN_PROGRESS(0)); + clear &= ~bitvec; + } else { + clear = bitvec; + } + + HQM_FUNC_WR(hw, HQM_FUNC_PF_VF_RESET_IN_PROGRESS(0), clear); + + /* Mark the FLR ISR as complete */ + HQM_FUNC_WR(hw, HQM_FUNC_PF_VF2PF_FLR_ISR(0), bitvec); +} + +void hqm_ack_vf_to_pf_int(struct hqm_hw *hw, + u32 mbox_bitvec, + u32 flr_bitvec) +{ + int i; + + hqm_ack_msix_interrupt(hw, HQM_INT_VF_TO_PF_MBOX); + + for (i = 0; i < HQM_MAX_NUM_VFS; i++) { + union hqm_func_pf_vf2pf_isr_pend r0 = { {0} }; + + if (!((mbox_bitvec & (1 << i)) || (flr_bitvec & (1 << i)))) + continue; + + /* Unset the VF's ISR pending bit */ + r0.field.isr_pend = 1; + HQM_FUNC_WR(hw, HQM_FUNC_PF_VF2PF_ISR_PEND(i), r0.val); + } +} + +static void hqm_process_ingress_error(struct hqm_hw *hw, + union hqm_sys_alarm_pf_synd0 r0, + bool vf_error, + unsigned int vf_id) +{ + struct hqm_domain *domain; + u32 alert_id; + u8 port_id; + bool is_ldb; + + port_id = r0.field.syndrome & 0x7F; + if (r0.field.source == HQM_ALARM_HW_SOURCE_SYS) + is_ldb = r0.field.is_ldb; + else + is_ldb = (r0.field.syndrome & 0x80) != 0; + + /* Get the domain ID and, if it's a VF domain, the virtual port ID */ + if (is_ldb) { + struct hqm_ldb_port *port; + + port = hqm_get_ldb_port_from_id(hw, port_id, vf_error, vf_id); + + if (!port) { + HQM_BASE_ERR(hw, + "[%s()]: Internal error: unable to find LDB port\n\tport: %u, vf_error: %u, vf_id: %u\n", + __func__, port_id, vf_error, vf_id); + return; + } + + domain = &hw->domains[port->domain_id.phys_id]; + } else { + struct hqm_dir_pq_pair *port; + + port = hqm_get_dir_pq_from_id(hw, port_id, vf_error, vf_id); + + if (!port) { + HQM_BASE_ERR(hw, + "[%s()]: Internal error: unable to find DIR port\n\tport: %u, vf_error: %u, vf_id: %u\n", + __func__, port_id, vf_error, vf_id); + return; + } + + domain = &hw->domains[port->domain_id.phys_id]; + } + + if (r0.field.unit == HQM_ALARM_HW_UNIT_CHP && + r0.field.aid == HQM_ALARM_HW_CHP_AID_OUT_OF_CREDITS) + alert_id = HQM_DOMAIN_ALERT_PP_OUT_OF_CREDITS; + else if (r0.field.unit == HQM_ALARM_HW_UNIT_CHP && + r0.field.aid == HQM_ALARM_HW_CHP_AID_ILLEGAL_ENQ) + alert_id = HQM_DOMAIN_ALERT_PP_ILLEGAL_ENQ; + else if (r0.field.unit == HQM_ALARM_HW_UNIT_LSP && + r0.field.aid == HQM_ALARM_HW_LSP_AID_EXCESS_TOKEN_POPS) + alert_id = HQM_DOMAIN_ALERT_PP_EXCESS_TOKEN_POPS; + else if (r0.field.source == HQM_ALARM_HW_SOURCE_SYS && + r0.field.aid == HQM_ALARM_SYS_AID_ILLEGAL_HCW) + alert_id = HQM_DOMAIN_ALERT_ILLEGAL_HCW; + else if (r0.field.source == HQM_ALARM_HW_SOURCE_SYS && + r0.field.aid == HQM_ALARM_SYS_AID_ILLEGAL_QID) + alert_id = HQM_DOMAIN_ALERT_ILLEGAL_QID; + else if (r0.field.source == HQM_ALARM_HW_SOURCE_SYS && + r0.field.aid == HQM_ALARM_SYS_AID_DISABLED_QID) + alert_id = HQM_DOMAIN_ALERT_DISABLED_QID; + else + return; + + if (vf_error) + hqm_notify_vf_alarm(hw, + vf_id, + domain->id.virt_id, + alert_id, + (is_ldb << 8) | port_id); + else + os_notify_user_space(hw, + domain->id.phys_id, + alert_id, + (is_ldb << 8) | port_id); +} + +void hqm_process_ingress_error_interrupt(struct hqm_hw *hw) +{ + union hqm_sys_alarm_pf_synd0 r0; + union hqm_sys_alarm_pf_synd1 r1; + union hqm_sys_alarm_pf_synd2 r2; + int i; + + r0.val = HQM_CSR_RD(hw, HQM_SYS_ALARM_PF_SYND0); + + if (r0.field.valid) { + r1.val = HQM_CSR_RD(hw, HQM_SYS_ALARM_PF_SYND1); + r2.val = HQM_CSR_RD(hw, HQM_SYS_ALARM_PF_SYND2); + + hqm_log_pf_vf_syndrome(hw, + "PF Ingress error alarm", + r0, r1, r2); + + hqm_clear_syndrome_register(hw, HQM_SYS_ALARM_PF_SYND0); + + 
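Ingress-error handling decodes the syndrome word before routing an alert: the low seven bits carry the port ID, and whether the port is load-balanced comes either from a dedicated field (when the reporting source is the SYS unit) or from bit 7 of the syndrome. A stand-alone decode of just that part could look like this, with the field layout simplified to a raw 32-bit word:

#include <stdbool.h>
#include <stdint.h>

struct ingress_error_decode {
	uint8_t port_id;
	bool is_ldb;
};

/* 'source_is_sys' mirrors the check against the SYS reporting unit above:
 * when the source is SYS a separate is_ldb flag is trusted, otherwise bit 7
 * of the syndrome is used.
 */
static struct ingress_error_decode
decode_ingress_error(uint32_t syndrome, bool source_is_sys, bool sys_is_ldb)
{
	struct ingress_error_decode d;

	d.port_id = syndrome & 0x7F;
	d.is_ldb = source_is_sys ? sys_is_ldb : ((syndrome & 0x80) != 0);

	return d;
}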
hqm_process_ingress_error(hw, r0, false, 0); + } + + for (i = 0; i < HQM_MAX_NUM_VFS; i++) { + r0.val = HQM_CSR_RD(hw, HQM_SYS_ALARM_VF_SYND0(i)); + + if (!r0.field.valid) + continue; + + r1.val = HQM_CSR_RD(hw, HQM_SYS_ALARM_VF_SYND1(i)); + r2.val = HQM_CSR_RD(hw, HQM_SYS_ALARM_VF_SYND2(i)); + + hqm_log_pf_vf_syndrome(hw, + "VF Ingress error alarm", + r0, r1, r2); + + hqm_clear_syndrome_register(hw, + HQM_SYS_ALARM_VF_SYND0(i)); + + hqm_process_ingress_error(hw, r0, true, i); + } + + hqm_ack_msix_interrupt(hw, HQM_INT_INGRESS_ERROR); +} + +int hqm_get_group_sequence_numbers(struct hqm_hw *hw, unsigned int group_id) +{ + if (group_id >= HQM_MAX_NUM_SEQUENCE_NUMBER_GROUPS) + return -EINVAL; + + return hw->rsrcs.sn_groups[group_id].sequence_numbers_per_queue; +} + +int hqm_get_group_sequence_number_occupancy(struct hqm_hw *hw, + unsigned int group_id) +{ + if (group_id >= HQM_MAX_NUM_SEQUENCE_NUMBER_GROUPS) + return -EINVAL; + + return hqm_sn_group_used_slots(&hw->rsrcs.sn_groups[group_id]); +} + +static void hqm_log_set_group_sequence_numbers(struct hqm_hw *hw, + unsigned int group_id, + unsigned long val) +{ + HQM_BASE_INFO(hw, "HQM set group sequence numbers:\n"); + HQM_BASE_INFO(hw, "\tGroup ID: %u\n", group_id); + HQM_BASE_INFO(hw, "\tValue: %lu\n", val); +} + +int hqm_set_group_sequence_numbers(struct hqm_hw *hw, + unsigned int group_id, + unsigned long val) +{ + u32 valid_allocations[6] = {32, 64, 128, 256, 512, 1024}; + union hqm_ro_pipe_grp_sn_mode r0 = { {0} }; + struct hqm_sn_group *group; + int mode; + + if (group_id >= HQM_MAX_NUM_SEQUENCE_NUMBER_GROUPS) + return -EINVAL; + + group = &hw->rsrcs.sn_groups[group_id]; + + /* Once the first load-balanced queue using an SN group is configured, + * the group cannot be changed. + */ + if (group->slot_use_bitmap != 0) + return -EPERM; + + for (mode = 0; mode < HQM_MAX_NUM_SEQUENCE_NUMBER_MODES; mode++) + if (val == valid_allocations[mode]) + break; + + if (mode == HQM_MAX_NUM_SEQUENCE_NUMBER_MODES) + return -EINVAL; + + group->mode = mode; + group->sequence_numbers_per_queue = val; + + r0.field.sn_mode_0 = hw->rsrcs.sn_groups[0].mode; + r0.field.sn_mode_1 = hw->rsrcs.sn_groups[1].mode; + r0.field.sn_mode_2 = hw->rsrcs.sn_groups[2].mode; + r0.field.sn_mode_3 = hw->rsrcs.sn_groups[3].mode; + + HQM_CSR_WR(hw, HQM_RO_PIPE_GRP_SN_MODE, r0.val); + + hqm_log_set_group_sequence_numbers(hw, group_id, val); + + return 0; +} + +void hqm_disable_dp_vasr_feature(struct hqm_hw *hw) +{ + union hqm_dp_dir_csr_ctrl r0; + + r0.val = HQM_CSR_RD(hw, HQM_DP_DIR_CSR_CTRL); + + r0.field.cfg_vasr_dis = 1; + + HQM_CSR_WR(hw, HQM_DP_DIR_CSR_CTRL, r0.val); +} + +void hqm_enable_excess_tokens_alarm(struct hqm_hw *hw) +{ + union hqm_chp_cfg_chp_csr_ctrl r0; + + r0.val = HQM_CSR_RD(hw, HQM_CHP_CFG_CHP_CSR_CTRL); + + r0.val |= 1 << HQM_CHP_CFG_EXCESS_TOKENS_SHIFT; + + HQM_CSR_WR(hw, HQM_CHP_CFG_CHP_CSR_CTRL, r0.val); +} + +static int hqm_reset_hw_resource(struct hqm_hw *hw, int type, int id) +{ + union hqm_cfg_mstr_diag_reset_sts r0 = { {0} }; + union hqm_cfg_mstr_bcast_reset_vf_start r1 = { {0} }; + int i; + + r1.field.vf_reset_start = 1; + + r1.field.vf_reset_type = type; + r1.field.vf_reset_id = id; + + HQM_CSR_WR(hw, HQM_CFG_MSTR_BCAST_RESET_VF_START, r1.val); + + /* Wait for hardware to complete. This is a finite time operation, + * but wait set a loop bound just in case. 
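Setting a sequence-number group only accepts the six power-of-two allocations listed above, and the accepted value maps directly to a mode index that is then programmed for all four groups at once. The lookup itself is trivial and can be restated on its own:

/* Map a requested sequence-number allocation to its mode index, or -1 if
 * the value is not one of the supported allocations (32..1024).
 */
static int sn_allocation_to_mode(unsigned long val)
{
	static const unsigned long valid_allocations[] = {
		32, 64, 128, 256, 512, 1024
	};
	unsigned int num = sizeof(valid_allocations) /
			   sizeof(valid_allocations[0]);
	unsigned int i;

	for (i = 0; i < num; i++)
		if (val == valid_allocations[i])
			return (int)i;

	return -1;
}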
+ */ + for (i = 0; i < 1024 * 1024; i++) { + r0.val = HQM_CSR_RD(hw, HQM_CFG_MSTR_DIAG_RESET_STS); + + if (r0.field.chp_vf_reset_done && + r0.field.rop_vf_reset_done && + r0.field.lsp_vf_reset_done && + r0.field.nalb_vf_reset_done && + r0.field.ap_vf_reset_done && + r0.field.dp_vf_reset_done && + r0.field.qed_vf_reset_done && + r0.field.dqed_vf_reset_done && + r0.field.aqed_vf_reset_done) + return 0; + + os_udelay(1); + } + + return -ETIMEDOUT; +} + +static int hqm_domain_reset_hw_resources(struct hqm_hw *hw, + struct hqm_domain *domain) +{ + struct hqm_list_entry *iter __attribute__((unused)); + struct hqm_dir_pq_pair *dir_port; + struct hqm_ldb_queue *ldb_queue; + struct hqm_ldb_port *ldb_port; + struct hqm_credit_pool *pool; + int ret; + + HQM_DOM_LIST_FOR(domain->used_ldb_credit_pools, pool, iter) { + ret = hqm_reset_hw_resource(hw, + VF_RST_TYPE_POOL_LDB, + pool->id.phys_id); + if (ret) + return ret; + } + + HQM_DOM_LIST_FOR(domain->used_dir_credit_pools, pool, iter) { + ret = hqm_reset_hw_resource(hw, + VF_RST_TYPE_POOL_DIR, + pool->id.phys_id); + if (ret) + return ret; + } + + HQM_DOM_LIST_FOR(domain->used_ldb_queues, ldb_queue, iter) { + ret = hqm_reset_hw_resource(hw, + VF_RST_TYPE_QID_LDB, + ldb_queue->id.phys_id); + if (ret) + return ret; + } + + HQM_DOM_LIST_FOR(domain->used_dir_pq_pairs, dir_port, iter) { + ret = hqm_reset_hw_resource(hw, + VF_RST_TYPE_QID_DIR, + dir_port->id.phys_id); + if (ret) + return ret; + } + + HQM_DOM_LIST_FOR(domain->used_ldb_ports, ldb_port, iter) { + ret = hqm_reset_hw_resource(hw, + VF_RST_TYPE_CQ_LDB, + ldb_port->id.phys_id); + if (ret) + return ret; + } + + HQM_DOM_LIST_FOR(domain->used_dir_pq_pairs, dir_port, iter) { + ret = hqm_reset_hw_resource(hw, + VF_RST_TYPE_CQ_DIR, + dir_port->id.phys_id); + if (ret) + return ret; + } + + return 0; +} + +static u32 hqm_ldb_cq_inflight_count(struct hqm_hw *hw, + struct hqm_ldb_port *port) +{ + union hqm_lsp_cq_ldb_infl_cnt r0; + + r0.val = HQM_CSR_RD(hw, HQM_LSP_CQ_LDB_INFL_CNT(port->id.phys_id)); + + return r0.field.count; +} + +static u32 hqm_ldb_cq_token_count(struct hqm_hw *hw, + struct hqm_ldb_port *port) +{ + union hqm_lsp_cq_ldb_tkn_cnt r0; + + r0.val = HQM_CSR_RD(hw, HQM_LSP_CQ_LDB_TKN_CNT(port->id.phys_id)); + + return r0.field.token_count; +} + +static int hqm_drain_ldb_cq(struct hqm_hw *hw, struct hqm_ldb_port *port) +{ + u32 infl_cnt, tkn_cnt; + unsigned int i; + + infl_cnt = hqm_ldb_cq_inflight_count(hw, port); + + /* Account for the initial token count, which is used in order to + * provide a CQ with depth less than 8. 
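The per-resource reset above is expected to finish quickly, so completion is detected with a bounded busy-wait: re-read the status register, return as soon as every done bit is set, and give up with -ETIMEDOUT after a generous iteration cap. Reduced to its skeleton, with a predicate callback standing in for the status read and usleep(1) standing in for os_udelay(1), the loop is:

#include <errno.h>
#include <stdbool.h>
#include <unistd.h>

/* Busy-wait for a hardware condition with a hard upper bound on the number
 * of polls.
 */
static int wait_for_done(bool (*done)(void *ctx), void *ctx,
			 unsigned long max_polls)
{
	unsigned long i;

	for (i = 0; i < max_polls; i++) {
		if (done(ctx))
			return 0;
		usleep(1);
	}

	return -ETIMEDOUT;
}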
+ */ + tkn_cnt = hqm_ldb_cq_token_count(hw, port) - port->init_tkn_cnt; + + if (infl_cnt || tkn_cnt) { + struct hqm_hcw hcw_mem[8], *hcw; + void __iomem *pp_addr; + + pp_addr = os_map_producer_port(hw, port->id.phys_id, true); + + /* Point hcw to a 64B-aligned location */ + hcw = (struct hqm_hcw *)((uintptr_t)&hcw_mem[4] & ~0x3F); + + /* Program the first HCW for a completion and token return and + * the other HCWs as NOOPS + */ + + memset(hcw, 0, 4 * sizeof(*hcw)); + hcw->qe_comp = (infl_cnt > 0); + hcw->cq_token = (tkn_cnt > 0); + hcw->lock_id = tkn_cnt - 1; + + /* Return tokens in the first HCW */ + os_enqueue_four_hcws(hw, hcw, pp_addr); + + hcw->cq_token = 0; + + /* Issue remaining completions (if any) */ + for (i = 1; i < infl_cnt; i++) + os_enqueue_four_hcws(hw, hcw, pp_addr); + + os_fence_hcw(hw, pp_addr); + + os_unmap_producer_port(hw, pp_addr); + } + + return 0; +} + +static int hqm_domain_wait_for_ldb_cqs_to_empty(struct hqm_hw *hw, + struct hqm_domain *domain) +{ + struct hqm_list_entry *iter __attribute__((unused)); + struct hqm_ldb_port *port; + + HQM_DOM_LIST_FOR(domain->used_ldb_ports, port, iter) { + int i; + + for (i = 0; i < HQM_MAX_CQ_COMP_CHECK_LOOPS; i++) { + if (hqm_ldb_cq_inflight_count(hw, port) == 0) + break; + } + + if (i == HQM_MAX_CQ_COMP_CHECK_LOOPS) { + HQM_BASE_ERR(hw, + "[%s()] Internal error: failed to flush load-balanced port %d's completions.\n", + __func__, port->id.phys_id); + return -EFAULT; + } + } + + return 0; +} + +static int hqm_domain_reset_software_state(struct hqm_hw *hw, + struct hqm_domain *domain) +{ + struct hqm_ldb_queue *tmp_ldb_queue __attribute__((unused)); + struct hqm_dir_pq_pair *tmp_dir_port __attribute__((unused)); + struct hqm_ldb_port *tmp_ldb_port __attribute__((unused)); + struct hqm_credit_pool *tmp_pool __attribute__((unused)); + struct hqm_list_entry *iter1 __attribute__((unused)); + struct hqm_list_entry *iter2 __attribute__((unused)); + struct hqm_ldb_queue *ldb_queue; + struct hqm_dir_pq_pair *dir_port; + struct hqm_ldb_port *ldb_port; + struct hqm_credit_pool *pool; + + struct hqm_function_resources *rsrcs; + struct hqm_list_head *list; + int ret; + + rsrcs = domain->parent_func; + + /* Move the domain's ldb queues to the function's avail list */ + list = &domain->used_ldb_queues; + HQM_DOM_LIST_FOR_SAFE(*list, ldb_queue, tmp_ldb_queue, iter1, iter2) { + if (ldb_queue->sn_cfg_valid) { + struct hqm_sn_group *grp; + + grp = &hw->rsrcs.sn_groups[ldb_queue->sn_group]; + + hqm_sn_group_free_slot(grp, ldb_queue->sn_slot); + ldb_queue->sn_cfg_valid = false; + } + + ldb_queue->owned = false; + ldb_queue->num_mappings = 0; + ldb_queue->num_pending_additions = 0; + + hqm_list_del(&domain->used_ldb_queues, &ldb_queue->domain_list); + hqm_list_add(&rsrcs->avail_ldb_queues, &ldb_queue->func_list); + rsrcs->num_avail_ldb_queues++; + } + + list = &domain->avail_ldb_queues; + HQM_DOM_LIST_FOR_SAFE(*list, ldb_queue, tmp_ldb_queue, iter1, iter2) { + ldb_queue->owned = false; + + hqm_list_del(&domain->avail_ldb_queues, + &ldb_queue->domain_list); + hqm_list_add(&rsrcs->avail_ldb_queues, + &ldb_queue->func_list); + rsrcs->num_avail_ldb_queues++; + } + + /* Move the domain's ldb ports to the function's avail list */ + list = &domain->used_ldb_ports; + HQM_DOM_LIST_FOR_SAFE(*list, ldb_port, tmp_ldb_port, iter1, iter2) { + int i; + + ldb_port->owned = false; + ldb_port->configured = false; + ldb_port->num_pending_removals = 0; + ldb_port->num_mappings = 0; + for (i = 0; i < HQM_MAX_NUM_QIDS_PER_LDB_CQ; i++) + ldb_port->qid_map[i].state = 
HQM_QUEUE_UNMAPPED; + + hqm_list_del(&domain->used_ldb_ports, &ldb_port->domain_list); + hqm_list_add(&rsrcs->avail_ldb_ports, &ldb_port->func_list); + rsrcs->num_avail_ldb_ports++; + } + + list = &domain->avail_ldb_ports; + HQM_DOM_LIST_FOR_SAFE(*list, ldb_port, tmp_ldb_port, iter1, iter2) { + ldb_port->owned = false; + + hqm_list_del(&domain->avail_ldb_ports, &ldb_port->domain_list); + hqm_list_add(&rsrcs->avail_ldb_ports, &ldb_port->func_list); + rsrcs->num_avail_ldb_ports++; + } + + /* Move the domain's dir ports to the function's avail list */ + list = &domain->used_dir_pq_pairs; + HQM_DOM_LIST_FOR_SAFE(*list, dir_port, tmp_dir_port, iter1, iter2) { + dir_port->owned = false; + dir_port->port_configured = false; + + hqm_list_del(&domain->used_dir_pq_pairs, + &dir_port->domain_list); + + hqm_list_add(&rsrcs->avail_dir_pq_pairs, + &dir_port->func_list); + rsrcs->num_avail_dir_pq_pairs++; + } + + list = &domain->avail_dir_pq_pairs; + HQM_DOM_LIST_FOR_SAFE(*list, dir_port, tmp_dir_port, iter1, iter2) { + dir_port->owned = false; + + hqm_list_del(&domain->avail_dir_pq_pairs, + &dir_port->domain_list); + + hqm_list_add(&rsrcs->avail_dir_pq_pairs, + &dir_port->func_list); + rsrcs->num_avail_dir_pq_pairs++; + } + + /* Return hist list entries to the function */ + ret = hqm_bitmap_set_range(rsrcs->avail_hist_list_entries, + domain->hist_list_entry_base, + domain->total_hist_list_entries); + if (ret) { + HQM_BASE_ERR(hw, + "[%s()] Internal error: domain hist list base doesn't match the function's bitmap.\n", + __func__); + return -EFAULT; + } + + domain->total_hist_list_entries = 0; + domain->avail_hist_list_entries = 0; + domain->hist_list_entry_base = 0; + domain->hist_list_entry_offset = 0; + + /* Return QED entries to the function */ + ret = hqm_bitmap_set_range(rsrcs->avail_qed_freelist_entries, + domain->qed_freelist.base, + (domain->qed_freelist.bound - + domain->qed_freelist.base)); + if (ret) { + HQM_BASE_ERR(hw, + "[%s()] Internal error: domain QED base doesn't match the function's bitmap.\n", + __func__); + return -EFAULT; + } + + domain->qed_freelist.base = 0; + domain->qed_freelist.bound = 0; + domain->qed_freelist.offset = 0; + + /* Return DQED entries back to the function */ + ret = hqm_bitmap_set_range(rsrcs->avail_dqed_freelist_entries, + domain->dqed_freelist.base, + (domain->dqed_freelist.bound - + domain->dqed_freelist.base)); + if (ret) { + HQM_BASE_ERR(hw, + "[%s()] Internal error: domain DQED base doesn't match the function's bitmap.\n", + __func__); + return -EFAULT; + } + + domain->dqed_freelist.base = 0; + domain->dqed_freelist.bound = 0; + domain->dqed_freelist.offset = 0; + + /* Return AQED entries back to the function */ + ret = hqm_bitmap_set_range(rsrcs->avail_aqed_freelist_entries, + domain->aqed_freelist.base, + (domain->aqed_freelist.bound - + domain->aqed_freelist.base)); + if (ret) { + HQM_BASE_ERR(hw, + "[%s()] Internal error: domain AQED base doesn't match the function's bitmap.\n", + __func__); + return -EFAULT; + } + + domain->aqed_freelist.base = 0; + domain->aqed_freelist.bound = 0; + domain->aqed_freelist.offset = 0; + + /* Return ldb credit pools back to the function's avail list */ + list = &domain->used_ldb_credit_pools; + HQM_DOM_LIST_FOR_SAFE(*list, pool, tmp_pool, iter1, iter2) { + pool->owned = false; + pool->configured = false; + + hqm_list_del(&domain->used_ldb_credit_pools, + &pool->domain_list); + hqm_list_add(&rsrcs->avail_ldb_credit_pools, + &pool->func_list); + rsrcs->num_avail_ldb_credit_pools++; + } + + list = 
&domain->avail_ldb_credit_pools; + HQM_DOM_LIST_FOR_SAFE(*list, pool, tmp_pool, iter1, iter2) { + pool->owned = false; + + hqm_list_del(&domain->avail_ldb_credit_pools, + &pool->domain_list); + hqm_list_add(&rsrcs->avail_ldb_credit_pools, + &pool->func_list); + rsrcs->num_avail_ldb_credit_pools++; + } + + /* Move dir credit pools back to the function */ + list = &domain->used_dir_credit_pools; + HQM_DOM_LIST_FOR_SAFE(*list, pool, tmp_pool, iter1, iter2) { + pool->owned = false; + pool->configured = false; + + hqm_list_del(&domain->used_dir_credit_pools, + &pool->domain_list); + hqm_list_add(&rsrcs->avail_dir_credit_pools, + &pool->func_list); + rsrcs->num_avail_dir_credit_pools++; + } + + list = &domain->avail_dir_credit_pools; + HQM_DOM_LIST_FOR_SAFE(*list, pool, tmp_pool, iter1, iter2) { + pool->owned = false; + + hqm_list_del(&domain->avail_dir_credit_pools, + &pool->domain_list); + hqm_list_add(&rsrcs->avail_dir_credit_pools, + &pool->func_list); + rsrcs->num_avail_dir_credit_pools++; + } + + domain->num_pending_removals = 0; + domain->num_pending_additions = 0; + domain->configured = false; + domain->started = false; + + /* Move the domain out of the used_domains list and back to the + * function's avail_domains list. + */ + hqm_list_del(&rsrcs->used_domains, &domain->func_list); + hqm_list_add(&rsrcs->avail_domains, &domain->func_list); + rsrcs->num_avail_domains++; + + return 0; +} + +void hqm_resource_reset(struct hqm_hw *hw) +{ + struct hqm_domain *domain, *next __attribute__((unused)); + struct hqm_list_entry *iter1 __attribute__((unused)); + struct hqm_list_entry *iter2 __attribute__((unused)); + int i; + + for (i = 0; i < HQM_MAX_NUM_VFS; i++) { + HQM_FUNC_LIST_FOR_SAFE(hw->vf[i].used_domains, domain, + next, iter1, iter2) + hqm_domain_reset_software_state(hw, domain); + } + + HQM_FUNC_LIST_FOR_SAFE(hw->pf.used_domains, domain, next, iter1, iter2) + hqm_domain_reset_software_state(hw, domain); +} + +static u32 hqm_dir_queue_depth(struct hqm_hw *hw, + struct hqm_dir_pq_pair *queue) +{ + union hqm_lsp_qid_dir_enqueue_cnt r0; + + r0.val = HQM_CSR_RD(hw, HQM_LSP_QID_DIR_ENQUEUE_CNT(queue->id.phys_id)); + + return r0.field.count; +} + +static bool hqm_dir_queue_is_empty(struct hqm_hw *hw, + struct hqm_dir_pq_pair *queue) +{ + return hqm_dir_queue_depth(hw, queue) == 0; +} + +int hqm_hw_get_dir_queue_depth(struct hqm_hw *hw, + u32 domain_id, + struct hqm_get_dir_queue_depth_args *args, + struct hqm_cmd_response *resp, + bool vf_request, + unsigned int vf_id) +{ + struct hqm_dir_pq_pair *queue; + struct hqm_domain *domain; + int id; + + id = domain_id; + + domain = hqm_get_domain_from_id(hw, id, vf_request, vf_id); + if (!domain) { + resp->status = HQM_ST_INVALID_DOMAIN_ID; + return -EINVAL; + } + + id = args->queue_id; + + queue = hqm_get_domain_used_dir_pq(id, vf_request, domain); + if (!queue) { + resp->status = HQM_ST_INVALID_QID; + return -EINVAL; + } + + resp->id = hqm_dir_queue_depth(hw, queue); + + return 0; +} + +static void +hqm_log_pending_port_unmaps_args(struct hqm_hw *hw, + struct hqm_pending_port_unmaps_args *args, + bool vf_request, + unsigned int vf_id) +{ + HQM_BASE_INFO(hw, "HQM pending port unmaps arguments:\n"); + if (vf_request) + HQM_BASE_INFO(hw, "(Request from VF %d)\n", vf_id); + HQM_BASE_INFO(hw, "\tPort ID: %d\n", args->port_id); +} + +int hqm_hw_pending_port_unmaps(struct hqm_hw *hw, + u32 domain_id, + struct hqm_pending_port_unmaps_args *args, + struct hqm_cmd_response *resp, + bool vf_request, + unsigned int vf_id) +{ + struct hqm_ldb_port *port; + + 
hqm_log_pending_port_unmaps_args(hw, args, vf_request, vf_id); + + port = hqm_get_ldb_port_from_id(hw, args->port_id, vf_request, vf_id); + if (!port) { + resp->status = HQM_ST_INVALID_PORT_ID; + return -EINVAL; + } + + resp->id = port->num_pending_removals; + + return 0; +} + +/* Returns whether the queue is empty, including its inflight and replay + * counts. + */ +static bool hqm_ldb_queue_is_empty(struct hqm_hw *hw, + struct hqm_ldb_queue *queue) +{ + union hqm_lsp_qid_ldb_replay_cnt r0; + union hqm_lsp_qid_aqed_active_cnt r1; + union hqm_lsp_qid_atq_enqueue_cnt r2; + union hqm_lsp_qid_ldb_enqueue_cnt r3; + union hqm_lsp_qid_ldb_infl_cnt r4; + + r0.val = HQM_CSR_RD(hw, + HQM_LSP_QID_LDB_REPLAY_CNT(queue->id.phys_id)); + if (r0.val) + return false; + + r1.val = HQM_CSR_RD(hw, + HQM_LSP_QID_AQED_ACTIVE_CNT(queue->id.phys_id)); + if (r1.val) + return false; + + r2.val = HQM_CSR_RD(hw, + HQM_LSP_QID_ATQ_ENQUEUE_CNT(queue->id.phys_id)); + if (r2.val) + return false; + + r3.val = HQM_CSR_RD(hw, + HQM_LSP_QID_LDB_ENQUEUE_CNT(queue->id.phys_id)); + if (r3.val) + return false; + + r4.val = HQM_CSR_RD(hw, + HQM_LSP_QID_LDB_INFL_CNT(queue->id.phys_id)); + if (r4.val) + return false; + + return true; +} + +int hqm_hw_get_ldb_queue_depth(struct hqm_hw *hw, + u32 domain_id, + struct hqm_get_ldb_queue_depth_args *args, + struct hqm_cmd_response *resp, + bool vf_req, + unsigned int vf_id) +{ + union hqm_lsp_qid_aqed_active_cnt r0; + union hqm_lsp_qid_atq_enqueue_cnt r1; + union hqm_lsp_qid_ldb_enqueue_cnt r2; + struct hqm_ldb_queue *queue; + struct hqm_domain *domain; + + domain = hqm_get_domain_from_id(hw, domain_id, vf_req, vf_id); + if (!domain) { + resp->status = HQM_ST_INVALID_DOMAIN_ID; + return -EINVAL; + } + + queue = hqm_get_domain_ldb_queue(args->queue_id, vf_req, domain); + if (!queue) { + resp->status = HQM_ST_INVALID_QID; + return -EINVAL; + } + + r0.val = HQM_CSR_RD(hw, + HQM_LSP_QID_AQED_ACTIVE_CNT(queue->id.phys_id)); + + r1.val = HQM_CSR_RD(hw, + HQM_LSP_QID_ATQ_ENQUEUE_CNT(queue->id.phys_id)); + + r2.val = HQM_CSR_RD(hw, + HQM_LSP_QID_LDB_ENQUEUE_CNT(queue->id.phys_id)); + + resp->id = r0.val + r1.val + r2.val; + + return 0; +} + +static u32 hqm_dir_cq_token_count(struct hqm_hw *hw, + struct hqm_dir_pq_pair *port) +{ + union hqm_lsp_cq_dir_tkn_cnt r0; + + r0.val = HQM_CSR_RD(hw, HQM_LSP_CQ_DIR_TKN_CNT(port->id.phys_id)); + + return r0.field.count; +} + +static int hqm_domain_verify_reset_success(struct hqm_hw *hw, + struct hqm_domain *domain) +{ + struct hqm_list_entry *iter __attribute__((unused)); + struct hqm_dir_pq_pair *dir_port; + struct hqm_ldb_port *ldb_port; + struct hqm_credit_pool *pool; + struct hqm_ldb_queue *queue; + + /* Confirm that all credits are returned to the domain's credit pools */ + HQM_DOM_LIST_FOR(domain->used_dir_credit_pools, pool, iter) { + union hqm_chp_dqed_fl_pop_ptr r0; + union hqm_chp_dqed_fl_push_ptr r1; + + r0.val = HQM_CSR_RD(hw, + HQM_CHP_DQED_FL_POP_PTR(pool->id.phys_id)); + + r1.val = HQM_CSR_RD(hw, + HQM_CHP_DQED_FL_PUSH_PTR(pool->id.phys_id)); + + if (r0.field.pop_ptr != r1.field.push_ptr || + r0.field.generation == r1.field.generation) { + HQM_BASE_ERR(hw, + "[%s()] Internal error: failed to refill directed pool %d's credits.\n", + __func__, pool->id.phys_id); + return -EFAULT; + } + } + + /* Confirm that all the domain's queue's inflight counts and AQED + * active counts are 0. 
+ */ + HQM_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter) { + if (!hqm_ldb_queue_is_empty(hw, queue)) { + HQM_BASE_ERR(hw, + "[%s()] Internal error: failed to empty ldb queue %d\n", + __func__, queue->id.phys_id); + return -EFAULT; + } + } + + /* Confirm that all the domain's CQs inflight and token counts are 0. */ + HQM_DOM_LIST_FOR(domain->used_ldb_ports, ldb_port, iter) { + if (hqm_ldb_cq_inflight_count(hw, ldb_port) || + hqm_ldb_cq_token_count(hw, ldb_port)) { + HQM_BASE_ERR(hw, + "[%s()] Internal error: failed to empty ldb port %d\n", + __func__, ldb_port->id.phys_id); + return -EFAULT; + } + } + + HQM_DOM_LIST_FOR(domain->used_dir_pq_pairs, dir_port, iter) { + if (!hqm_dir_queue_is_empty(hw, dir_port)) { + HQM_BASE_ERR(hw, + "[%s()] Internal error: failed to empty dir queue %d\n", + __func__, dir_port->id.phys_id); + return -EFAULT; + } + + if (hqm_dir_cq_token_count(hw, dir_port)) { + HQM_BASE_ERR(hw, + "[%s()] Internal error: failed to empty dir port %d\n", + __func__, dir_port->id.phys_id); + return -EFAULT; + } + } + + return 0; +} + +static void __hqm_domain_reset_ldb_port_registers(struct hqm_hw *hw, + struct hqm_ldb_port *port) +{ + union hqm_chp_ldb_pp_state_reset r0 = { {0} }; + + HQM_CSR_WR(hw, + HQM_CHP_LDB_PP_CRD_REQ_STATE(port->id.phys_id), + HQM_CHP_LDB_PP_CRD_REQ_STATE_RST); + + /* Reset the port's load-balanced and directed credit state */ + r0.field.dir_type = 0; + r0.field.reset_pp_state = 1; + + HQM_CSR_WR(hw, + HQM_CHP_LDB_PP_STATE_RESET(port->id.phys_id), + r0.val); + + r0.field.dir_type = 1; + r0.field.reset_pp_state = 1; + + HQM_CSR_WR(hw, + HQM_CHP_LDB_PP_STATE_RESET(port->id.phys_id), + r0.val); + + HQM_CSR_WR(hw, + HQM_CHP_LDB_PP_DIR_PUSH_PTR(port->id.phys_id), + HQM_CHP_LDB_PP_DIR_PUSH_PTR_RST); + + HQM_CSR_WR(hw, + HQM_CHP_LDB_PP_LDB_PUSH_PTR(port->id.phys_id), + HQM_CHP_LDB_PP_LDB_PUSH_PTR_RST); + + HQM_CSR_WR(hw, + HQM_CHP_LDB_PP_LDB_MIN_CRD_QNT(port->id.phys_id), + HQM_CHP_LDB_PP_LDB_MIN_CRD_QNT_RST); + + HQM_CSR_WR(hw, + HQM_CHP_LDB_PP_LDB_CRD_LWM(port->id.phys_id), + HQM_CHP_LDB_PP_LDB_CRD_LWM_RST); + + HQM_CSR_WR(hw, + HQM_CHP_LDB_PP_LDB_CRD_HWM(port->id.phys_id), + HQM_CHP_LDB_PP_LDB_CRD_HWM_RST); + + HQM_CSR_WR(hw, + HQM_CHP_LDB_LDB_PP2POOL(port->id.phys_id), + HQM_CHP_LDB_LDB_PP2POOL_RST); + + HQM_CSR_WR(hw, + HQM_CHP_LDB_PP_DIR_MIN_CRD_QNT(port->id.phys_id), + HQM_CHP_LDB_PP_DIR_MIN_CRD_QNT_RST); + + HQM_CSR_WR(hw, + HQM_CHP_LDB_PP_DIR_CRD_LWM(port->id.phys_id), + HQM_CHP_LDB_PP_DIR_CRD_LWM_RST); + + HQM_CSR_WR(hw, + HQM_CHP_LDB_PP_DIR_CRD_HWM(port->id.phys_id), + HQM_CHP_LDB_PP_DIR_CRD_HWM_RST); + + HQM_CSR_WR(hw, + HQM_CHP_LDB_DIR_PP2POOL(port->id.phys_id), + HQM_CHP_LDB_DIR_PP2POOL_RST); + + HQM_CSR_WR(hw, + HQM_SYS_LDB_PP2LDBPOOL(port->id.phys_id), + HQM_SYS_LDB_PP2LDBPOOL_RST); + + HQM_CSR_WR(hw, + HQM_SYS_LDB_PP2DIRPOOL(port->id.phys_id), + HQM_SYS_LDB_PP2DIRPOOL_RST); + + HQM_CSR_WR(hw, + HQM_CHP_HIST_LIST_LIM(port->id.phys_id), + HQM_CHP_HIST_LIST_LIM_RST); + + HQM_CSR_WR(hw, + HQM_CHP_HIST_LIST_BASE(port->id.phys_id), + HQM_CHP_HIST_LIST_BASE_RST); + + HQM_CSR_WR(hw, + HQM_CHP_HIST_LIST_POP_PTR(port->id.phys_id), + HQM_CHP_HIST_LIST_POP_PTR_RST); + + HQM_CSR_WR(hw, + HQM_CHP_HIST_LIST_PUSH_PTR(port->id.phys_id), + HQM_CHP_HIST_LIST_PUSH_PTR_RST); + + HQM_CSR_WR(hw, + HQM_CHP_LDB_CQ_WPTR(port->id.phys_id), + HQM_CHP_LDB_CQ_WPTR_RST); + + HQM_CSR_WR(hw, + HQM_CHP_LDB_CQ_INT_DEPTH_THRSH(port->id.phys_id), + HQM_CHP_LDB_CQ_INT_DEPTH_THRSH_RST); + + HQM_CSR_WR(hw, + HQM_CHP_LDB_CQ_TMR_THRESHOLD(port->id.phys_id), + 
HQM_CHP_LDB_CQ_TMR_THRESHOLD_RST); + + HQM_CSR_WR(hw, + HQM_CHP_LDB_CQ_INT_ENB(port->id.phys_id), + HQM_CHP_LDB_CQ_INT_ENB_RST); + + HQM_CSR_WR(hw, + HQM_LSP_CQ_LDB_INFL_LIM(port->id.phys_id), + HQM_LSP_CQ_LDB_INFL_LIM_RST); + + HQM_CSR_WR(hw, + HQM_LSP_CQ2PRIOV(port->id.phys_id), + HQM_LSP_CQ2PRIOV_RST); + + HQM_CSR_WR(hw, + HQM_LSP_CQ_LDB_TOT_SCH_CNT_CTRL(port->id.phys_id), + HQM_LSP_CQ_LDB_TOT_SCH_CNT_CTRL_RST); + + HQM_CSR_WR(hw, + HQM_LSP_CQ_LDB_TKN_DEPTH_SEL(port->id.phys_id), + HQM_LSP_CQ_LDB_TKN_DEPTH_SEL_RST); + + HQM_CSR_WR(hw, + HQM_CHP_LDB_CQ_TKN_DEPTH_SEL(port->id.phys_id), + HQM_CHP_LDB_CQ_TKN_DEPTH_SEL_RST); + + HQM_CSR_WR(hw, + HQM_LSP_CQ_LDB_DSBL(port->id.phys_id), + HQM_LSP_CQ_LDB_DSBL_RST); + + HQM_CSR_WR(hw, + HQM_SYS_LDB_CQ2VF_PF(port->id.phys_id), + HQM_SYS_LDB_CQ2VF_PF_RST); + + HQM_CSR_WR(hw, + HQM_SYS_LDB_PP2VF_PF(port->id.phys_id), + HQM_SYS_LDB_PP2VF_PF_RST); + + HQM_CSR_WR(hw, + HQM_SYS_LDB_CQ_ADDR_L(port->id.phys_id), + HQM_SYS_LDB_CQ_ADDR_L_RST); + + HQM_CSR_WR(hw, + HQM_SYS_LDB_CQ_ADDR_U(port->id.phys_id), + HQM_SYS_LDB_CQ_ADDR_U_RST); + + HQM_CSR_WR(hw, + HQM_SYS_LDB_PP_ADDR_L(port->id.phys_id), + HQM_SYS_LDB_PP_ADDR_L_RST); + + HQM_CSR_WR(hw, + HQM_SYS_LDB_PP_ADDR_U(port->id.phys_id), + HQM_SYS_LDB_PP_ADDR_U_RST); + + HQM_CSR_WR(hw, + HQM_SYS_LDB_PP_V(port->id.phys_id), + HQM_SYS_LDB_PP_V_RST); + + HQM_CSR_WR(hw, + HQM_SYS_LDB_PP2VAS(port->id.phys_id), + HQM_SYS_LDB_PP2VAS_RST); + + HQM_CSR_WR(hw, + HQM_SYS_LDB_CQ_ISR(port->id.phys_id), + HQM_SYS_LDB_CQ_ISR_RST); + + HQM_CSR_WR(hw, + HQM_SYS_WBUF_LDB_FLAGS(port->id.phys_id), + HQM_SYS_WBUF_LDB_FLAGS_RST); +} + +static void hqm_domain_reset_ldb_port_registers(struct hqm_hw *hw, + struct hqm_domain *domain) +{ + struct hqm_list_entry *iter __attribute__((unused)); + struct hqm_ldb_port *port; + + HQM_DOM_LIST_FOR(domain->used_ldb_ports, port, iter) + __hqm_domain_reset_ldb_port_registers(hw, port); +} + +static void __hqm_domain_reset_dir_port_registers(struct hqm_hw *hw, + struct hqm_dir_pq_pair *port) +{ + union hqm_chp_dir_pp_state_reset r0 = { {0} }; + + HQM_CSR_WR(hw, + HQM_CHP_DIR_PP_CRD_REQ_STATE(port->id.phys_id), + HQM_CHP_DIR_PP_CRD_REQ_STATE_RST); + + /* Reset the port's load-balanced and directed credit state */ + r0.field.dir_type = 0; + r0.field.reset_pp_state = 1; + + HQM_CSR_WR(hw, + HQM_CHP_DIR_PP_STATE_RESET(port->id.phys_id), + r0.val); + + r0.field.dir_type = 1; + r0.field.reset_pp_state = 1; + + HQM_CSR_WR(hw, + HQM_CHP_DIR_PP_STATE_RESET(port->id.phys_id), + r0.val); + + HQM_CSR_WR(hw, + HQM_CHP_DIR_PP_DIR_PUSH_PTR(port->id.phys_id), + HQM_CHP_DIR_PP_DIR_PUSH_PTR_RST); + + HQM_CSR_WR(hw, + HQM_CHP_DIR_PP_LDB_PUSH_PTR(port->id.phys_id), + HQM_CHP_DIR_PP_LDB_PUSH_PTR_RST); + + HQM_CSR_WR(hw, + HQM_CHP_DIR_PP_LDB_MIN_CRD_QNT(port->id.phys_id), + HQM_CHP_DIR_PP_LDB_MIN_CRD_QNT_RST); + + HQM_CSR_WR(hw, + HQM_CHP_DIR_PP_LDB_CRD_LWM(port->id.phys_id), + HQM_CHP_DIR_PP_LDB_CRD_LWM_RST); + + HQM_CSR_WR(hw, + HQM_CHP_DIR_PP_LDB_CRD_HWM(port->id.phys_id), + HQM_CHP_DIR_PP_LDB_CRD_HWM_RST); + + HQM_CSR_WR(hw, + HQM_CHP_DIR_LDB_PP2POOL(port->id.phys_id), + HQM_CHP_DIR_LDB_PP2POOL_RST); + + HQM_CSR_WR(hw, + HQM_CHP_DIR_PP_DIR_MIN_CRD_QNT(port->id.phys_id), + HQM_CHP_DIR_PP_DIR_MIN_CRD_QNT_RST); + + HQM_CSR_WR(hw, + HQM_CHP_DIR_PP_DIR_CRD_LWM(port->id.phys_id), + HQM_CHP_DIR_PP_DIR_CRD_LWM_RST); + + HQM_CSR_WR(hw, + HQM_CHP_DIR_PP_DIR_CRD_HWM(port->id.phys_id), + HQM_CHP_DIR_PP_DIR_CRD_HWM_RST); + + HQM_CSR_WR(hw, + HQM_CHP_DIR_DIR_PP2POOL(port->id.phys_id), + HQM_CHP_DIR_DIR_PP2POOL_RST); + + 
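+ /* Reset the port's remaining CSRs (pool mappings, CQ write pointer and
+  * token-depth state, interrupt thresholds/enables, and the CQ/PP address
+  * and ownership registers) to their default values.
+  */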
HQM_CSR_WR(hw, + HQM_SYS_DIR_PP2LDBPOOL(port->id.phys_id), + HQM_SYS_DIR_PP2LDBPOOL_RST); + + HQM_CSR_WR(hw, + HQM_SYS_DIR_PP2DIRPOOL(port->id.phys_id), + HQM_SYS_DIR_PP2DIRPOOL_RST); + + HQM_CSR_WR(hw, + HQM_CHP_DIR_CQ_WPTR(port->id.phys_id), + HQM_CHP_DIR_CQ_WPTR_RST); + + HQM_CSR_WR(hw, + HQM_LSP_CQ_DIR_TKN_DEPTH_SEL_DSI(port->id.phys_id), + HQM_LSP_CQ_DIR_TKN_DEPTH_SEL_DSI_RST); + + HQM_CSR_WR(hw, + HQM_CHP_DIR_CQ_TKN_DEPTH_SEL(port->id.phys_id), + HQM_CHP_DIR_CQ_TKN_DEPTH_SEL_RST); + + HQM_CSR_WR(hw, + HQM_LSP_CQ_DIR_DSBL(port->id.phys_id), + HQM_LSP_CQ_DIR_DSBL_RST); + + HQM_CSR_WR(hw, + HQM_CHP_DIR_CQ_WPTR(port->id.phys_id), + HQM_CHP_DIR_CQ_WPTR_RST); + + HQM_CSR_WR(hw, + HQM_CHP_DIR_CQ_INT_DEPTH_THRSH(port->id.phys_id), + HQM_CHP_DIR_CQ_INT_DEPTH_THRSH_RST); + + HQM_CSR_WR(hw, + HQM_CHP_DIR_CQ_TMR_THRESHOLD(port->id.phys_id), + HQM_CHP_DIR_CQ_TMR_THRESHOLD_RST); + + HQM_CSR_WR(hw, + HQM_CHP_DIR_CQ_INT_ENB(port->id.phys_id), + HQM_CHP_DIR_CQ_INT_ENB_RST); + + HQM_CSR_WR(hw, + HQM_SYS_DIR_CQ2VF_PF(port->id.phys_id), + HQM_SYS_DIR_CQ2VF_PF_RST); + + HQM_CSR_WR(hw, + HQM_SYS_DIR_PP2VF_PF(port->id.phys_id), + HQM_SYS_DIR_PP2VF_PF_RST); + + HQM_CSR_WR(hw, + HQM_SYS_DIR_CQ_ADDR_L(port->id.phys_id), + HQM_SYS_DIR_CQ_ADDR_L_RST); + + HQM_CSR_WR(hw, + HQM_SYS_DIR_CQ_ADDR_U(port->id.phys_id), + HQM_SYS_DIR_CQ_ADDR_U_RST); + + HQM_CSR_WR(hw, + HQM_SYS_DIR_PP_ADDR_L(port->id.phys_id), + HQM_SYS_DIR_PP_ADDR_L_RST); + + HQM_CSR_WR(hw, + HQM_SYS_DIR_PP_ADDR_U(port->id.phys_id), + HQM_SYS_DIR_PP_ADDR_U_RST); + + HQM_CSR_WR(hw, + HQM_SYS_DIR_PP_V(port->id.phys_id), + HQM_SYS_DIR_PP_V_RST); + + HQM_CSR_WR(hw, + HQM_SYS_DIR_PP2VAS(port->id.phys_id), + HQM_SYS_DIR_PP2VAS_RST); + + HQM_CSR_WR(hw, + HQM_SYS_DIR_CQ_ISR(port->id.phys_id), + HQM_SYS_DIR_CQ_ISR_RST); + + HQM_CSR_WR(hw, + HQM_SYS_WBUF_DIR_FLAGS(port->id.phys_id), + HQM_SYS_WBUF_DIR_FLAGS_RST); +} + +static void hqm_domain_reset_dir_port_registers(struct hqm_hw *hw, + struct hqm_domain *domain) +{ + struct hqm_list_entry *iter __attribute__((unused)); + struct hqm_dir_pq_pair *port; + + HQM_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter) + __hqm_domain_reset_dir_port_registers(hw, port); +} + +static void hqm_domain_reset_ldb_queue_registers(struct hqm_hw *hw, + struct hqm_domain *domain) +{ + struct hqm_list_entry *iter __attribute__((unused)); + struct hqm_ldb_queue *queue; + + HQM_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter) { + HQM_CSR_WR(hw, + HQM_AQED_PIPE_FL_LIM(queue->id.phys_id), + HQM_AQED_PIPE_FL_LIM_RST); + + HQM_CSR_WR(hw, + HQM_AQED_PIPE_FL_BASE(queue->id.phys_id), + HQM_AQED_PIPE_FL_BASE_RST); + + HQM_CSR_WR(hw, + HQM_AQED_PIPE_FL_POP_PTR(queue->id.phys_id), + HQM_AQED_PIPE_FL_POP_PTR_RST); + + HQM_CSR_WR(hw, + HQM_AQED_PIPE_FL_PUSH_PTR(queue->id.phys_id), + HQM_AQED_PIPE_FL_PUSH_PTR_RST); + + HQM_CSR_WR(hw, + HQM_AQED_PIPE_QID_FID_LIM(queue->id.phys_id), + HQM_AQED_PIPE_QID_FID_LIM_RST); + + HQM_CSR_WR(hw, + HQM_LSP_QID_AQED_ACTIVE_LIM(queue->id.phys_id), + HQM_LSP_QID_AQED_ACTIVE_LIM_RST); + + HQM_CSR_WR(hw, + HQM_LSP_QID_LDB_INFL_LIM(queue->id.phys_id), + HQM_LSP_QID_LDB_INFL_LIM_RST); + + HQM_CSR_WR(hw, + HQM_SYS_LDB_QID_V(queue->id.phys_id), + HQM_SYS_LDB_QID_V_RST); + + HQM_CSR_WR(hw, + HQM_SYS_LDB_QID_V(queue->id.phys_id), + HQM_SYS_LDB_QID_V_RST); + + HQM_CSR_WR(hw, + HQM_CHP_ORD_QID_SN(queue->id.phys_id), + HQM_CHP_ORD_QID_SN_RST); + + HQM_CSR_WR(hw, + HQM_CHP_ORD_QID_SN_MAP(queue->id.phys_id), + HQM_CHP_ORD_QID_SN_MAP_RST); + + HQM_CSR_WR(hw, + HQM_RO_PIPE_QID2GRPSLT(queue->id.phys_id), + 
HQM_RO_PIPE_QID2GRPSLT_RST); + } +} + +static void hqm_domain_reset_dir_queue_registers(struct hqm_hw *hw, + struct hqm_domain *domain) +{ + struct hqm_list_entry *iter __attribute__((unused)); + struct hqm_dir_pq_pair *queue; + + HQM_DOM_LIST_FOR(domain->used_dir_pq_pairs, queue, iter) { + HQM_CSR_WR(hw, + HQM_SYS_DIR_QID_V(queue->id.phys_id), + HQM_SYS_DIR_QID_V_RST); + } +} + +static void hqm_domain_reset_ldb_pool_registers(struct hqm_hw *hw, + struct hqm_domain *domain) +{ + struct hqm_list_entry *iter __attribute__((unused)); + struct hqm_credit_pool *pool; + + HQM_DOM_LIST_FOR(domain->used_ldb_credit_pools, pool, iter) { + HQM_CSR_WR(hw, + HQM_CHP_LDB_POOL_CRD_LIM(pool->id.phys_id), + HQM_CHP_LDB_POOL_CRD_LIM_RST); + + HQM_CSR_WR(hw, + HQM_CHP_LDB_POOL_CRD_CNT(pool->id.phys_id), + HQM_CHP_LDB_POOL_CRD_CNT_RST); + + HQM_CSR_WR(hw, + HQM_CHP_QED_FL_BASE(pool->id.phys_id), + HQM_CHP_QED_FL_BASE_RST); + + HQM_CSR_WR(hw, + HQM_CHP_QED_FL_LIM(pool->id.phys_id), + HQM_CHP_QED_FL_LIM_RST); + + HQM_CSR_WR(hw, + HQM_CHP_QED_FL_PUSH_PTR(pool->id.phys_id), + HQM_CHP_QED_FL_PUSH_PTR_RST); + + HQM_CSR_WR(hw, + HQM_CHP_QED_FL_POP_PTR(pool->id.phys_id), + HQM_CHP_QED_FL_POP_PTR_RST); + } +} + +static void hqm_domain_reset_dir_pool_registers(struct hqm_hw *hw, + struct hqm_domain *domain) +{ + struct hqm_list_entry *iter __attribute__((unused)); + struct hqm_credit_pool *pool; + + HQM_DOM_LIST_FOR(domain->used_dir_credit_pools, pool, iter) { + HQM_CSR_WR(hw, + HQM_CHP_DIR_POOL_CRD_LIM(pool->id.phys_id), + HQM_CHP_DIR_POOL_CRD_LIM_RST); + + HQM_CSR_WR(hw, + HQM_CHP_DIR_POOL_CRD_CNT(pool->id.phys_id), + HQM_CHP_DIR_POOL_CRD_CNT_RST); + + HQM_CSR_WR(hw, + HQM_CHP_DQED_FL_BASE(pool->id.phys_id), + HQM_CHP_DQED_FL_BASE_RST); + + HQM_CSR_WR(hw, + HQM_CHP_DQED_FL_LIM(pool->id.phys_id), + HQM_CHP_DQED_FL_LIM_RST); + + HQM_CSR_WR(hw, + HQM_CHP_DQED_FL_PUSH_PTR(pool->id.phys_id), + HQM_CHP_DQED_FL_PUSH_PTR_RST); + + HQM_CSR_WR(hw, + HQM_CHP_DQED_FL_POP_PTR(pool->id.phys_id), + HQM_CHP_DQED_FL_POP_PTR_RST); + } +} + +static void hqm_domain_reset_registers(struct hqm_hw *hw, + struct hqm_domain *domain) +{ + hqm_domain_reset_ldb_port_registers(hw, domain); + + hqm_domain_reset_dir_port_registers(hw, domain); + + hqm_domain_reset_ldb_queue_registers(hw, domain); + + hqm_domain_reset_dir_queue_registers(hw, domain); + + hqm_domain_reset_ldb_pool_registers(hw, domain); + + hqm_domain_reset_dir_pool_registers(hw, domain); +} + +static int hqm_domain_drain_ldb_cqs(struct hqm_hw *hw, + struct hqm_domain *domain, + bool toggle_port) +{ + struct hqm_list_entry *iter __attribute__((unused)); + struct hqm_ldb_port *port; + int ret; + + /* If the domain hasn't been started, there's no traffic to drain */ + if (!domain->started) + return 0; + + HQM_DOM_LIST_FOR(domain->used_ldb_ports, port, iter) { + if (toggle_port) + hqm_ldb_port_cq_disable(hw, port); + + ret = hqm_drain_ldb_cq(hw, port); + if (ret < 0) + return ret; + + if (toggle_port) + hqm_ldb_port_cq_enable(hw, port); + } + + return 0; +} + +static bool hqm_domain_mapped_queues_empty(struct hqm_hw *hw, + struct hqm_domain *domain) +{ + struct hqm_list_entry *iter __attribute__((unused)); + struct hqm_ldb_queue *queue; + + HQM_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter) { + if (queue->num_mappings == 0) + continue; + + if (!hqm_ldb_queue_is_empty(hw, queue)) + return false; + } + + return true; +} + +static int hqm_domain_drain_mapped_queues(struct hqm_hw *hw, + struct hqm_domain *domain) +{ + int i, ret; + + /* If the domain hasn't been started, there's no 
traffic to drain */ + if (!domain->started) + return 0; + + if (domain->num_pending_removals > 0) { + HQM_BASE_ERR(hw, + "[%s()] Internal error: failed to unmap domain queues\n", + __func__); + return -EFAULT; + } + + for (i = 0; i < HQM_MAX_QID_EMPTY_CHECK_LOOPS; i++) { + ret = hqm_domain_drain_ldb_cqs(hw, domain, true); + if (ret < 0) + return ret; + + if (hqm_domain_mapped_queues_empty(hw, domain)) + break; + } + + if (i == HQM_MAX_QID_EMPTY_CHECK_LOOPS) { + HQM_BASE_ERR(hw, + "[%s()] Internal error: failed to empty queues\n", + __func__); + return -EFAULT; + } + + /* Drain the CQs one more time. For the queues to go empty, they would + * have scheduled one or more QEs. + */ + ret = hqm_domain_drain_ldb_cqs(hw, domain, true); + if (ret < 0) + return ret; + + return 0; +} + +static int hqm_domain_drain_unmapped_queue(struct hqm_hw *hw, + struct hqm_domain *domain, + struct hqm_ldb_queue *queue) +{ + struct hqm_ldb_port *port; + int ret; + + /* If a domain has LDB queues, it must have LDB ports */ + if (hqm_list_empty(&domain->used_ldb_ports)) { + HQM_BASE_ERR(hw, + "[%s()] Internal error: No configured LDB ports\n", + __func__); + return -EFAULT; + } + + port = HQM_DOM_LIST_HEAD(domain->used_ldb_ports, typeof(*port)); + + /* If necessary, free up a QID slot in this CQ */ + if (port->num_mappings == HQM_MAX_NUM_QIDS_PER_LDB_CQ) { + struct hqm_ldb_queue *mapped_queue; + + mapped_queue = &hw->rsrcs.ldb_queues[port->qid_map[0].qid]; + + ret = hqm_ldb_port_unmap_qid(hw, port, mapped_queue); + if (ret) + return ret; + } + + ret = hqm_ldb_port_map_qid_dynamic(hw, port, queue, 0); + if (ret) + return ret; + + return hqm_domain_drain_mapped_queues(hw, domain); +} + +static int hqm_domain_drain_unmapped_queues(struct hqm_hw *hw, + struct hqm_domain *domain) +{ + struct hqm_list_entry *iter __attribute__((unused)); + struct hqm_ldb_queue *queue; + int ret; + + /* If the domain hasn't been started, there's no traffic to drain */ + if (!domain->started) + return 0; + + HQM_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter) { + if (queue->num_mappings != 0 || + hqm_ldb_queue_is_empty(hw, queue)) + continue; + + ret = hqm_domain_drain_unmapped_queue(hw, domain, queue); + if (ret) + return ret; + } + + return 0; +} + +static int hqm_drain_dir_cq(struct hqm_hw *hw, struct hqm_dir_pq_pair *port) +{ + unsigned int port_id = port->id.phys_id; + u32 cnt; + + /* Return any outstanding tokens */ + cnt = hqm_dir_cq_token_count(hw, port); + + if (cnt != 0) { + struct hqm_hcw hcw_mem[8], *hcw; + void __iomem *pp_addr; + + pp_addr = os_map_producer_port(hw, port_id, true); + + /* Point hcw to a 64B-aligned location */ + hcw = (struct hqm_hcw *)((uintptr_t)&hcw_mem[4] & ~0x3F); + + /* Program the first HCW for a batch token return and + * the rest as NOOPS + */ + memset(hcw, 0, 4 * sizeof(*hcw)); + hcw->cq_token = 1; + hcw->lock_id = cnt - 1; + + os_enqueue_four_hcws(hw, hcw, pp_addr); + + os_fence_hcw(hw, pp_addr); + + os_unmap_producer_port(hw, pp_addr); + } + + return 0; +} + +static int hqm_domain_drain_dir_cqs(struct hqm_hw *hw, + struct hqm_domain *domain, + bool toggle_port) +{ + struct hqm_list_entry *iter __attribute__((unused)); + struct hqm_dir_pq_pair *port; + int ret; + + HQM_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter) { + /* Can't drain a port if it's not configured, and there's + * nothing to drain if its queue is unconfigured. 
+ */ + if (!port->port_configured || !port->queue_configured) + continue; + + if (toggle_port) + hqm_dir_port_cq_disable(hw, port); + + ret = hqm_drain_dir_cq(hw, port); + if (ret < 0) + return ret; + + if (toggle_port) + hqm_dir_port_cq_enable(hw, port); + } + + return 0; +} + +static bool hqm_domain_dir_queues_empty(struct hqm_hw *hw, + struct hqm_domain *domain) +{ + struct hqm_list_entry *iter __attribute__((unused)); + struct hqm_dir_pq_pair *queue; + + HQM_DOM_LIST_FOR(domain->used_dir_pq_pairs, queue, iter) { + if (!hqm_dir_queue_is_empty(hw, queue)) + return false; + } + + return true; +} + +static int hqm_domain_drain_dir_queues(struct hqm_hw *hw, + struct hqm_domain *domain) +{ + int i, ret; + + /* If the domain hasn't been started, there's no traffic to drain */ + if (!domain->started) + return 0; + + for (i = 0; i < HQM_MAX_QID_EMPTY_CHECK_LOOPS; i++) { + ret = hqm_domain_drain_dir_cqs(hw, domain, true); + if (ret < 0) + return ret; + + if (hqm_domain_dir_queues_empty(hw, domain)) + break; + } + + if (i == HQM_MAX_QID_EMPTY_CHECK_LOOPS) { + HQM_BASE_ERR(hw, + "[%s()] Internal error: failed to empty queues\n", + __func__); + return -EFAULT; + } + + /* Drain the CQs one more time. For the queues to go empty, they would + * have scheduled one or more QEs. + */ + ret = hqm_domain_drain_dir_cqs(hw, domain, true); + if (ret < 0) + return ret; + + return 0; +} + +static void hqm_domain_disable_dir_producer_ports(struct hqm_hw *hw, + struct hqm_domain *domain) +{ + struct hqm_list_entry *iter __attribute__((unused)); + struct hqm_dir_pq_pair *port; + union hqm_sys_dir_pp_v r1; + + r1.field.pp_v = 0; + + HQM_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter) + HQM_CSR_WR(hw, + HQM_SYS_DIR_PP_V(port->id.phys_id), + r1.val); +} + +static void hqm_domain_disable_ldb_producer_ports(struct hqm_hw *hw, + struct hqm_domain *domain) +{ + struct hqm_list_entry *iter __attribute__((unused)); + union hqm_sys_ldb_pp_v r1; + struct hqm_ldb_port *port; + + r1.field.pp_v = 0; + + HQM_DOM_LIST_FOR(domain->used_ldb_ports, port, iter) { + HQM_CSR_WR(hw, + HQM_SYS_LDB_PP_V(port->id.phys_id), + r1.val); + + hw->pf.num_enabled_ldb_ports--; + } +} + +static void hqm_domain_disable_dir_vpps(struct hqm_hw *hw, + struct hqm_domain *domain, + unsigned int vf_id) +{ + struct hqm_list_entry *iter __attribute__((unused)); + union hqm_sys_vf_dir_vpp_v r1; + struct hqm_dir_pq_pair *port; + + r1.field.vpp_v = 0; + + HQM_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter) { + unsigned int offs; + + offs = vf_id * HQM_MAX_NUM_DIR_PORTS + port->id.virt_id; + + HQM_CSR_WR(hw, HQM_SYS_VF_DIR_VPP_V(offs), r1.val); + } +} + +static void hqm_domain_disable_ldb_vpps(struct hqm_hw *hw, + struct hqm_domain *domain, + unsigned int vf_id) +{ + struct hqm_list_entry *iter __attribute__((unused)); + union hqm_sys_vf_ldb_vpp_v r1; + struct hqm_ldb_port *port; + + r1.field.vpp_v = 0; + + HQM_DOM_LIST_FOR(domain->used_ldb_ports, port, iter) { + unsigned int offs; + + offs = vf_id * HQM_MAX_NUM_LDB_PORTS + port->id.virt_id; + + HQM_CSR_WR(hw, HQM_SYS_VF_LDB_VPP_V(offs), r1.val); + } +} + +static void hqm_domain_disable_dir_pools(struct hqm_hw *hw, + struct hqm_domain *domain) +{ + struct hqm_list_entry *iter __attribute__((unused)); + union hqm_sys_dir_pool_enbld r0 = { {0} }; + struct hqm_credit_pool *pool; + + HQM_DOM_LIST_FOR(domain->used_dir_credit_pools, pool, iter) + HQM_CSR_WR(hw, + HQM_SYS_DIR_POOL_ENBLD(pool->id.phys_id), + r0.val); +} + +static void hqm_domain_disable_ldb_pools(struct hqm_hw *hw, + struct hqm_domain *domain) 
+{ + struct hqm_list_entry *iter __attribute__((unused)); + union hqm_sys_ldb_pool_enbld r0 = { {0} }; + struct hqm_credit_pool *pool; + + HQM_DOM_LIST_FOR(domain->used_ldb_credit_pools, pool, iter) + HQM_CSR_WR(hw, + HQM_SYS_LDB_POOL_ENBLD(pool->id.phys_id), + r0.val); +} + +static void hqm_domain_disable_ldb_seq_checks(struct hqm_hw *hw, + struct hqm_domain *domain) +{ + struct hqm_list_entry *iter __attribute__((unused)); + union hqm_chp_sn_chk_enbl r1; + struct hqm_ldb_port *port; + + r1.field.en = 0; + + HQM_DOM_LIST_FOR(domain->used_ldb_ports, port, iter) + HQM_CSR_WR(hw, + HQM_CHP_SN_CHK_ENBL(port->id.phys_id), + r1.val); +} + +static void hqm_domain_disable_ldb_port_crd_updates(struct hqm_hw *hw, + struct hqm_domain *domain) +{ + struct hqm_list_entry *iter __attribute__((unused)); + union hqm_chp_ldb_pp_crd_req_state r0; + struct hqm_ldb_port *port; + + r0.field.no_pp_credit_update = 1; + + HQM_DOM_LIST_FOR(domain->used_ldb_ports, port, iter) + HQM_CSR_WR(hw, + HQM_CHP_LDB_PP_CRD_REQ_STATE(port->id.phys_id), + r0.val); +} + +static void hqm_domain_disable_ldb_port_interrupts(struct hqm_hw *hw, + struct hqm_domain *domain) +{ + struct hqm_list_entry *iter __attribute__((unused)); + union hqm_chp_ldb_cq_int_enb r0 = { {0} }; + union hqm_chp_ldb_cq_wd_enb r1 = { {0} }; + struct hqm_ldb_port *port; + + r0.field.en_tim = 0; + r0.field.en_depth = 0; + + r1.field.wd_enable = 0; + + HQM_DOM_LIST_FOR(domain->used_ldb_ports, port, iter) { + HQM_CSR_WR(hw, + HQM_CHP_LDB_CQ_INT_ENB(port->id.phys_id), + r0.val); + + HQM_CSR_WR(hw, + HQM_CHP_LDB_CQ_WD_ENB(port->id.phys_id), + r1.val); + } +} + +static void hqm_domain_disable_dir_port_interrupts(struct hqm_hw *hw, + struct hqm_domain *domain) +{ + struct hqm_list_entry *iter __attribute__((unused)); + union hqm_chp_dir_cq_int_enb r0 = { {0} }; + union hqm_chp_dir_cq_wd_enb r1 = { {0} }; + struct hqm_dir_pq_pair *port; + + r0.field.en_tim = 0; + r0.field.en_depth = 0; + + r1.field.wd_enable = 0; + + HQM_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter) { + HQM_CSR_WR(hw, + HQM_CHP_DIR_CQ_INT_ENB(port->id.phys_id), + r0.val); + + HQM_CSR_WR(hw, + HQM_CHP_DIR_CQ_WD_ENB(port->id.phys_id), + r1.val); + } +} + +static void hqm_domain_disable_dir_port_crd_updates(struct hqm_hw *hw, + struct hqm_domain *domain) +{ + struct hqm_list_entry *iter __attribute__((unused)); + union hqm_chp_dir_pp_crd_req_state r0; + struct hqm_dir_pq_pair *port; + + r0.field.no_pp_credit_update = 1; + + HQM_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter) + HQM_CSR_WR(hw, + HQM_CHP_DIR_PP_CRD_REQ_STATE(port->id.phys_id), + r0.val); +} + +static void hqm_domain_disable_ldb_queue_write_perms(struct hqm_hw *hw, + struct hqm_domain *domain) +{ + int domain_offset = domain->id.phys_id * HQM_MAX_NUM_LDB_QUEUES; + struct hqm_list_entry *iter __attribute__((unused)); + union hqm_sys_ldb_vasqid_v r0; + struct hqm_ldb_queue *queue; + + r0.field.vasqid_v = 0; + + HQM_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter) { + int idx = domain_offset + queue->id.phys_id; + + HQM_CSR_WR(hw, HQM_SYS_LDB_VASQID_V(idx), r0.val); + } +} + +static void hqm_domain_disable_dir_queue_write_perms(struct hqm_hw *hw, + struct hqm_domain *domain) +{ + int domain_offset = domain->id.phys_id * HQM_MAX_NUM_DIR_PORTS; + struct hqm_list_entry *iter __attribute__((unused)); + union hqm_sys_dir_vasqid_v r0; + struct hqm_dir_pq_pair *port; + + r0.field.vasqid_v = 0; + + HQM_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter) { + int idx = domain_offset + port->id.phys_id; + + HQM_CSR_WR(hw, 
HQM_SYS_DIR_VASQID_V(idx), r0.val); + } +} + +static void hqm_domain_disable_dir_cqs(struct hqm_hw *hw, + struct hqm_domain *domain) +{ + struct hqm_list_entry *iter __attribute__((unused)); + struct hqm_dir_pq_pair *port; + + HQM_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter) { + port->enabled = false; + + hqm_dir_port_cq_disable(hw, port); + } +} + +static void hqm_domain_disable_ldb_cqs(struct hqm_hw *hw, + struct hqm_domain *domain) +{ + struct hqm_list_entry *iter __attribute__((unused)); + struct hqm_ldb_port *port; + + HQM_DOM_LIST_FOR(domain->used_ldb_ports, port, iter) { + port->enabled = false; + + hqm_ldb_port_cq_disable(hw, port); + } +} + +static void hqm_domain_enable_ldb_cqs(struct hqm_hw *hw, + struct hqm_domain *domain) +{ + struct hqm_list_entry *iter __attribute__((unused)); + struct hqm_ldb_port *port; + + HQM_DOM_LIST_FOR(domain->used_ldb_ports, port, iter) { + port->enabled = true; + + hqm_ldb_port_cq_enable(hw, port); + } +} + +static int hqm_domain_wait_for_ldb_pool_refill(struct hqm_hw *hw, + struct hqm_domain *domain) +{ + struct hqm_list_entry *iter __attribute__((unused)); + struct hqm_credit_pool *pool; + + /* Confirm that all credits are returned to the domain's credit pools */ + HQM_DOM_LIST_FOR(domain->used_ldb_credit_pools, pool, iter) { + union hqm_chp_qed_fl_push_ptr r0; + union hqm_chp_qed_fl_pop_ptr r1; + unsigned long pop_offs, push_offs; + int i; + + push_offs = HQM_CHP_QED_FL_PUSH_PTR(pool->id.phys_id); + pop_offs = HQM_CHP_QED_FL_POP_PTR(pool->id.phys_id); + + for (i = 0; i < HQM_MAX_QID_EMPTY_CHECK_LOOPS; i++) { + r0.val = HQM_CSR_RD(hw, push_offs); + + r1.val = HQM_CSR_RD(hw, pop_offs); + + /* Break early if the freelist is replenished */ + if (r1.field.pop_ptr == r0.field.push_ptr && + r1.field.generation != r0.field.generation) { + break; + } + } + + /* Error if the freelist is not full */ + if (r1.field.pop_ptr != r0.field.push_ptr || + r1.field.generation == r0.field.generation) { + return -EFAULT; + } + } + + return 0; +} + +static int hqm_domain_wait_for_dir_pool_refill(struct hqm_hw *hw, + struct hqm_domain *domain) +{ + struct hqm_list_entry *iter __attribute__((unused)); + struct hqm_credit_pool *pool; + + /* Confirm that all credits are returned to the domain's credit pools */ + HQM_DOM_LIST_FOR(domain->used_dir_credit_pools, pool, iter) { + union hqm_chp_dqed_fl_push_ptr r0; + union hqm_chp_dqed_fl_pop_ptr r1; + unsigned long pop_offs, push_offs; + int i; + + push_offs = HQM_CHP_DQED_FL_PUSH_PTR(pool->id.phys_id); + pop_offs = HQM_CHP_DQED_FL_POP_PTR(pool->id.phys_id); + + for (i = 0; i < HQM_MAX_QID_EMPTY_CHECK_LOOPS; i++) { + r0.val = HQM_CSR_RD(hw, push_offs); + + r1.val = HQM_CSR_RD(hw, pop_offs); + + /* Break early if the freelist is replenished */ + if (r1.field.pop_ptr == r0.field.push_ptr && + r1.field.generation != r0.field.generation) { + break; + } + } + + /* Error if the freelist is not full */ + if (r1.field.pop_ptr != r0.field.push_ptr || + r1.field.generation == r0.field.generation) { + return -EFAULT; + } + } + + return 0; +} + +static void hqm_log_reset_domain(struct hqm_hw *hw, + u32 domain_id, + bool vf_request, + unsigned int vf_id) +{ + HQM_BASE_INFO(hw, "HQM reset domain:\n"); + if (vf_request) + HQM_BASE_INFO(hw, "(Request from VF %d)\n", vf_id); + HQM_BASE_INFO(hw, "\tDomain ID: %d\n", domain_id); +} + +static int __hqm_reset_domain(struct hqm_hw *hw, + u32 domain_id, + bool vf_request, + unsigned int vf_id) +{ + struct hqm_domain *domain; + int ret; + + hqm_log_reset_domain(hw, domain_id, vf_request, 
vf_id); + + domain = hqm_get_domain_from_id(hw, domain_id, vf_request, vf_id); + + if (!domain || !domain->configured) + return -EINVAL; + + if (vf_request) { + hqm_domain_disable_dir_vpps(hw, domain, vf_id); + + hqm_domain_disable_ldb_vpps(hw, domain, vf_id); + } + + /* For each queue owned by this domain, disable its write permissions to + * cause any traffic sent to it to be dropped. Well-behaved software + * should not be sending QEs at this point. + */ + hqm_domain_disable_dir_queue_write_perms(hw, domain); + + hqm_domain_disable_ldb_queue_write_perms(hw, domain); + + /* Disable credit updates and turn off completion tracking on all the + * domain's PPs. + */ + hqm_domain_disable_dir_port_crd_updates(hw, domain); + + hqm_domain_disable_ldb_port_crd_updates(hw, domain); + + hqm_domain_disable_dir_port_interrupts(hw, domain); + + hqm_domain_disable_ldb_port_interrupts(hw, domain); + + hqm_domain_disable_ldb_seq_checks(hw, domain); + + /* Disable the LDB CQs and drain them in order to complete the map and + * unmap procedures, which require zero CQ inflights and zero QID + * inflights respectively. + */ + hqm_domain_disable_ldb_cqs(hw, domain); + + ret = hqm_domain_drain_ldb_cqs(hw, domain, false); + if (ret < 0) + return ret; + + ret = hqm_domain_wait_for_ldb_cqs_to_empty(hw, domain); + if (ret < 0) + return ret; + + ret = hqm_domain_finish_unmap_qid_procedures(hw, domain); + if (ret < 0) + return ret; + + ret = hqm_domain_finish_map_qid_procedures(hw, domain); + if (ret < 0) + return ret; + + /* Re-enable the CQs in order to drain the mapped queues. */ + hqm_domain_enable_ldb_cqs(hw, domain); + + ret = hqm_domain_drain_mapped_queues(hw, domain); + if (ret < 0) + return ret; + + ret = hqm_domain_drain_unmapped_queues(hw, domain); + if (ret < 0) + return ret; + + ret = hqm_domain_wait_for_ldb_pool_refill(hw, domain); + if (ret) { + HQM_BASE_ERR(hw, + "[%s()] Internal error: LDB credits failed to refill\n", + __func__); + return ret; + } + + /* Done draining LDB QEs, so disable the CQs. */ + hqm_domain_disable_ldb_cqs(hw, domain); + + /* Directed queues are reset in hqm_domain_reset_hw_resources(), but + * that process doesn't decrement the directed queue size counters used + * by SMON for its average DQED depth measurement. So, we manually drain + * the directed queues here. + */ + hqm_domain_drain_dir_queues(hw, domain); + + ret = hqm_domain_wait_for_dir_pool_refill(hw, domain); + if (ret) { + HQM_BASE_ERR(hw, + "[%s()] Internal error: DIR credits failed to refill\n", + __func__); + return ret; + } + + /* Done draining DIR QEs, so disable the CQs. */ + hqm_domain_disable_dir_cqs(hw, domain); + + hqm_domain_disable_dir_producer_ports(hw, domain); + + hqm_domain_disable_ldb_producer_ports(hw, domain); + + hqm_domain_disable_dir_pools(hw, domain); + + hqm_domain_disable_ldb_pools(hw, domain); + + /* Reset the QID, credit pool, and CQ hardware. + * + * Note: HQM 1.0 A0 h/w does not disarm CQ interrupts during VAS reset. + * A spurious interrupt can occur on subsequent use of a reset CQ. + */ + ret = hqm_domain_reset_hw_resources(hw, domain); + if (ret) + return ret; + + ret = hqm_domain_verify_reset_success(hw, domain); + if (ret) + return ret; + + hqm_domain_reset_registers(hw, domain); + + /* Hardware reset complete. Reset the domain's software state */ + ret = hqm_domain_reset_software_state(hw, domain); + if (ret) + return ret; + + return 0; +} + +/** + * hqm_reset_domain() - Reset an HQM scheduling domain and its associated + * hardware resources. 
+ * @hw: Contains the current state of the HQM hardware. + * @domain_id: domain ID. + * @vf_request: indicates whether this request came from a VF. + * @vf_id: If vf_request is true, this contains the VF's ID. + * + * Note: User software *must* stop sending to this domain's producer ports + * before invoking this function, otherwise undefined behavior will result. + * + * Return: returns < 0 on error, 0 otherwise. + */ +int hqm_reset_domain(struct hqm_hw *hw, + u32 domain_id, + bool vf_request, + unsigned int vf_id) +{ + return __hqm_reset_domain(hw, domain_id, vf_request, vf_id); +} + +int hqm_reset_vf(struct hqm_hw *hw, unsigned int vf_id) +{ + struct hqm_domain *domain, *next __attribute__((unused)); + struct hqm_list_entry *it1 __attribute__((unused)); + struct hqm_list_entry *it2 __attribute__((unused)); + struct hqm_function_resources *rsrcs; + + if (vf_id >= HQM_MAX_NUM_VFS) { + HQM_BASE_ERR(hw, "[%s()] Internal error: invalid VF ID %d\n", + __func__, vf_id); + return -EFAULT; + } + + rsrcs = &hw->vf[vf_id]; + + HQM_FUNC_LIST_FOR_SAFE(rsrcs->used_domains, domain, next, it1, it2) { + int ret = __hqm_reset_domain(hw, + domain->id.virt_id, + true, + vf_id); + if (ret) + return ret; + } + + return 0; +} + +int hqm_ldb_port_owned_by_domain(struct hqm_hw *hw, + u32 domain_id, + u32 port_id, + bool vf_request, + unsigned int vf_id) +{ + struct hqm_ldb_port *port; + struct hqm_domain *domain; + + if (vf_request && vf_id >= HQM_MAX_NUM_VFS) + return -1; + + domain = hqm_get_domain_from_id(hw, domain_id, vf_request, vf_id); + + if (!domain || !domain->configured) + return -EINVAL; + + port = hqm_get_domain_ldb_port(port_id, vf_request, domain); + + if (!port) + return -EINVAL; + + return port->domain_id.phys_id == domain->id.phys_id; +} + +int hqm_dir_port_owned_by_domain(struct hqm_hw *hw, + u32 domain_id, + u32 port_id, + bool vf_request, + unsigned int vf_id) +{ + struct hqm_dir_pq_pair *port; + struct hqm_domain *domain; + + if (vf_request && vf_id >= HQM_MAX_NUM_VFS) + return -1; + + domain = hqm_get_domain_from_id(hw, domain_id, vf_request, vf_id); + + if (!domain || !domain->configured) + return -EINVAL; + + port = hqm_get_domain_dir_pq(port_id, vf_request, domain); + + if (!port) + return -EINVAL; + + return port->domain_id.phys_id == domain->id.phys_id; +} + +static inline bool hqm_ldb_port_owned_by_vf(struct hqm_hw *hw, + u32 vf_id, + u32 port_id) +{ + return (hw->rsrcs.ldb_ports[port_id].id.vf_owned && + hw->rsrcs.ldb_ports[port_id].id.vf_id == vf_id); +} + +static inline bool hqm_dir_port_owned_by_vf(struct hqm_hw *hw, + u32 vf_id, + u32 port_id) +{ + return (hw->rsrcs.dir_pq_pairs[port_id].id.vf_owned && + hw->rsrcs.dir_pq_pairs[port_id].id.vf_id == vf_id); +} + +int hqm_hw_get_num_resources(struct hqm_hw *hw, + struct hqm_get_num_resources_args *arg, + bool vf_request, + unsigned int vf_id) +{ + struct hqm_function_resources *rsrcs; + struct hqm_bitmap *map; + + if (vf_request && vf_id >= HQM_MAX_NUM_VFS) + return -1; + + if (vf_request) + rsrcs = &hw->vf[vf_id]; + else + rsrcs = &hw->pf; + + arg->num_sched_domains = rsrcs->num_avail_domains; + + arg->num_ldb_queues = rsrcs->num_avail_ldb_queues; + + arg->num_ldb_ports = rsrcs->num_avail_ldb_ports; + + arg->num_dir_ports = rsrcs->num_avail_dir_pq_pairs; + + map = rsrcs->avail_aqed_freelist_entries; + + arg->num_atomic_inflights = hqm_bitmap_count(map); + + arg->max_contiguous_atomic_inflights = + hqm_bitmap_longest_set_range(map); + + map = rsrcs->avail_hist_list_entries; + + arg->num_hist_list_entries = hqm_bitmap_count(map); + + arg->max_contiguous_hist_list_entries = + hqm_bitmap_longest_set_range(map); + + map 
= rsrcs->avail_qed_freelist_entries; + + arg->num_ldb_credits = hqm_bitmap_count(map); + + arg->max_contiguous_ldb_credits = hqm_bitmap_longest_set_range(map); + + map = rsrcs->avail_dqed_freelist_entries; + + arg->num_dir_credits = hqm_bitmap_count(map); + + arg->max_contiguous_dir_credits = hqm_bitmap_longest_set_range(map); + + arg->num_ldb_credit_pools = rsrcs->num_avail_ldb_credit_pools; + + arg->num_dir_credit_pools = rsrcs->num_avail_dir_credit_pools; + + return 0; +} + +int hqm_hw_get_num_used_resources(struct hqm_hw *hw, + struct hqm_get_num_resources_args *arg, + bool vf_request, + unsigned int vf_id) +{ + struct hqm_list_entry *iter1 __attribute__((unused)); + struct hqm_list_entry *iter2 __attribute__((unused)); + struct hqm_function_resources *rsrcs; + struct hqm_domain *domain; + + if (vf_request && vf_id >= HQM_MAX_NUM_VFS) + return -1; + + rsrcs = (vf_request) ? &hw->vf[vf_id] : &hw->pf; + + memset(arg, 0, sizeof(struct hqm_get_num_resources_args)); + + HQM_FUNC_LIST_FOR(rsrcs->used_domains, domain, iter1) { + struct hqm_dir_pq_pair *dir_port; + struct hqm_ldb_port *ldb_port; + struct hqm_credit_pool *pool; + struct hqm_ldb_queue *queue; + + arg->num_sched_domains++; + + arg->num_atomic_inflights += + domain->aqed_freelist.bound - + domain->aqed_freelist.base; + + HQM_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter2) + arg->num_ldb_queues++; + HQM_DOM_LIST_FOR(domain->avail_ldb_queues, queue, iter2) + arg->num_ldb_queues++; + + HQM_DOM_LIST_FOR(domain->used_ldb_ports, ldb_port, iter2) + arg->num_ldb_ports++; + HQM_DOM_LIST_FOR(domain->avail_ldb_ports, ldb_port, iter2) + arg->num_ldb_ports++; + + HQM_DOM_LIST_FOR(domain->used_dir_pq_pairs, dir_port, iter2) + arg->num_dir_ports++; + HQM_DOM_LIST_FOR(domain->avail_dir_pq_pairs, dir_port, iter2) + arg->num_dir_ports++; + + arg->num_ldb_credits += + domain->qed_freelist.bound - + domain->qed_freelist.base; + + HQM_DOM_LIST_FOR(domain->avail_ldb_credit_pools, pool, iter2) + arg->num_ldb_credit_pools++; + HQM_DOM_LIST_FOR(domain->used_ldb_credit_pools, pool, iter2) { + arg->num_ldb_credit_pools++; + arg->num_ldb_credits += pool->total_credits; + } + + arg->num_dir_credits += + domain->dqed_freelist.bound - + domain->dqed_freelist.base; + + HQM_DOM_LIST_FOR(domain->avail_dir_credit_pools, pool, iter2) + arg->num_dir_credit_pools++; + HQM_DOM_LIST_FOR(domain->used_dir_credit_pools, pool, iter2) { + arg->num_dir_credit_pools++; + arg->num_dir_credits += pool->total_credits; + } + + arg->num_hist_list_entries += domain->total_hist_list_entries; + } + + return 0; +} + +static u64 hqm_atomic_read_perf_counter(struct hqm_hw *hw, + u32 low_offset, + u32 high_offset) +{ + u32 low, high, cmp; + + high = HQM_CSR_RD(hw, high_offset); + low = HQM_CSR_RD(hw, low_offset); + cmp = HQM_CSR_RD(hw, high_offset); + + /* Handle the wrap case */ + if (high != cmp) { + high = cmp; + low = HQM_CSR_RD(hw, low_offset); + } + + return ((((u64)high) << 32) | low); +} + +void hqm_read_sched_counts(struct hqm_hw *hw, + struct hqm_sched_counts *data, + bool vf_request, + unsigned int vf_id) +{ + u32 lo, hi; + int i, id; + + memset(data, 0, sizeof(*data)); + + lo = HQM_LSP_LDB_SCH_CNT_L; + hi = HQM_LSP_LDB_SCH_CNT_H; + + data->ldb_sched_count = hqm_atomic_read_perf_counter(hw, lo, hi); + + lo = HQM_LSP_DIR_SCH_CNT_L; + hi = HQM_LSP_DIR_SCH_CNT_H; + + data->dir_sched_count = hqm_atomic_read_perf_counter(hw, lo, hi); + + for (i = 0; i < HQM_MAX_NUM_LDB_PORTS; i++) { + if (vf_request && !hqm_ldb_port_owned_by_vf(hw, vf_id, i)) + continue; + + /* If this is a VF 
request, translate the port ID */ + if (vf_request) + id = hw->rsrcs.ldb_ports[i].id.virt_id; + else + id = i; + + lo = HQM_LSP_CQ_LDB_TOT_SCH_CNTL(i); + hi = HQM_LSP_CQ_LDB_TOT_SCH_CNTH(i); + + data->ldb_cq_sched_count[id] = + hqm_atomic_read_perf_counter(hw, lo, hi); + } + + for (i = 0; i < HQM_MAX_NUM_DIR_PORTS; i++) { + if (vf_request && !hqm_dir_port_owned_by_vf(hw, vf_id, i)) + continue; + + /* If this is a VF request, translate the port ID */ + if (vf_request) + id = hw->rsrcs.dir_pq_pairs[i].id.virt_id; + else + id = i; + + lo = HQM_LSP_CQ_DIR_TOT_SCH_CNTL(i); + hi = HQM_LSP_CQ_DIR_TOT_SCH_CNTH(i); + + data->dir_cq_sched_count[id] = + hqm_atomic_read_perf_counter(hw, lo, hi); + } +} + +static void hqm_collect_pm_grp_0(struct hqm_hw *hw, + union hqm_perf_metric_group_data *data) +{ + u32 cnt[2]; + + hqm_smon_read_sys_perf_counter(hw, SMON_MEASURE_CNT, cnt); + + data->group_0.hqm_iosf_to_sys_enq_count = cnt[0]; + data->group_0.hqm_sys_to_iosf_deq_count = cnt[1]; + + hqm_smon_read_chp_perf_counter(hw, SMON_MEASURE_CNT, cnt); + + data->group_0.hqm_sys_to_hqm_enq_count = cnt[0]; + data->group_0.hqm_hqm_to_sys_deq_count = cnt[1]; +} + +static void hqm_collect_pm_grp_1(struct hqm_hw *hw, + union hqm_perf_metric_group_data *data) +{ + u32 counter[2]; + + hqm_smon_read_chp_perf_counter(hw, + SMON_MEASURE_CNT, + counter); + + data->group_1.hqm_push_ptr_update_count = counter[0]; +} + +static void hqm_collect_chp_hcw_avg(struct hqm_hw *hw, + union hqm_perf_metric_group_data *data) +{ + u32 counter[2]; + + hqm_smon_read_chp_perf_counter(hw, + SMON_MEASURE_AVG, + counter); + + ((u32 *)data)[0] = counter[0]; +} + +static void hqm_collect_chp_hcw_count(struct hqm_hw *hw, + union hqm_perf_metric_group_data *data) +{ + u32 counter[2]; + + hqm_smon_read_chp_perf_counter(hw, + SMON_MEASURE_CNT, + counter); + + ((u32 *)data)[0] = counter[0]; + ((u32 *)data)[1] = counter[1]; +} + +void hqm_init_perf_metric_measurement(struct hqm_hw *hw, + u32 id, + u32 duration_us) +{ + union hqm_chp_ctrl_diag_02 r0; + u32 clks = duration_us * (HQM_HZ / 1000000); + enum hqm_chp_smon_component chp_component[3] = { + SMON_COMP_HIST_LIST, + SMON_COMP_QED, + SMON_COMP_DQED}; + enum hqm_chp_smon_hcw_type hcw_types[][2] = { + {SMON_HCW_NOOP, SMON_HCW_BAT_T}, + {SMON_HCW_COMP, SMON_HCW_COMP_T}, + {SMON_HCW_ENQ, SMON_HCW_ENQ_T}, + {SMON_HCW_RENQ, SMON_HCW_RENQ_T}, + {SMON_HCW_REL, SMON_HCW_REL}, + {SMON_HCW_FRAG, SMON_HCW_FRAG_T} + }; + + /* Control word must be initialized before using the CHP perf SMON */ + r0.field.control = 0xF; + + HQM_CSR_WR(hw, HQM_CHP_CTRL_DIAG_02, r0.val); + + if (id == 0) { + hqm_smon_setup_sys_iosf_measurements(hw, clks); + + hqm_smon_setup_chp_ing_egr_measurements(hw, clks); + } else if (id == 1) { + hqm_smon_setup_chp_pp_count_measurement(hw, clks); + } else if (id >= 2 && id <= 4) { + hqm_smon_setup_chp_avg_measurement(hw, + chp_component[id - 2], + clks); + } else if (id >= 5) { + hqm_smon_setup_chp_hcw_measurements(hw, + hcw_types[id - 5][0], + hcw_types[id - 5][1], + clks); + } + + hqm_flush_csr(hw); +} + +void hqm_collect_perf_metric_data(struct hqm_hw *hw, + u32 id, + union hqm_perf_metric_group_data *data) +{ + union hqm_chp_ctrl_diag_02 r0; + + if (id == 0) + hqm_collect_pm_grp_0(hw, data); + else if (id == 1) + hqm_collect_pm_grp_1(hw, data); + else if (id >= 2 && id <= 4) + hqm_collect_chp_hcw_avg(hw, data); + else if (id >= 5) + hqm_collect_chp_hcw_count(hw, data); + + /* CHP SMON is no longer in use, so reset the control word */ + r0.field.control = 0; + + HQM_CSR_WR(hw, 
HQM_CHP_CTRL_DIAG_02, r0.val); + + hqm_flush_csr(hw); +} + +void hqm_send_async_pf_to_vf_msg(struct hqm_hw *hw, unsigned int vf_id) +{ + union hqm_func_pf_pf2vf_mailbox_isr r0 = { {0} }; + + r0.field.isr = 1 << vf_id; + + HQM_FUNC_WR(hw, HQM_FUNC_PF_PF2VF_MAILBOX_ISR(0), r0.val); +} + +bool hqm_pf_to_vf_complete(struct hqm_hw *hw, unsigned int vf_id) +{ + union hqm_func_pf_pf2vf_mailbox_isr r0; + + r0.val = HQM_FUNC_RD(hw, HQM_FUNC_PF_PF2VF_MAILBOX_ISR(vf_id)); + + return (r0.val & (1 << vf_id)) == 0; +} + +void hqm_send_async_vf_to_pf_msg(struct hqm_hw *hw) +{ + union hqm_func_vf_vf2pf_mailbox_isr r0 = { {0} }; + + r0.field.isr = 1; + HQM_FUNC_WR(hw, HQM_FUNC_VF_VF2PF_MAILBOX_ISR, r0.val); +} + +bool hqm_vf_to_pf_complete(struct hqm_hw *hw) +{ + union hqm_func_vf_vf2pf_mailbox_isr r0; + + r0.val = HQM_FUNC_RD(hw, HQM_FUNC_VF_VF2PF_MAILBOX_ISR); + + return (r0.field.isr == 0); +} + +bool hqm_vf_flr_complete(struct hqm_hw *hw) +{ + union hqm_func_vf_vf_reset_in_progress r0; + + r0.val = HQM_FUNC_RD(hw, HQM_FUNC_VF_VF_RESET_IN_PROGRESS); + + return (r0.field.reset_in_progress == 0); +} + +int hqm_pf_read_vf_mbox_req(struct hqm_hw *hw, + unsigned int vf_id, + void *data, + int len) +{ + u32 buf[HQM_VF2PF_REQ_BYTES / 4]; + int num_words; + int i; + + if (len > HQM_VF2PF_REQ_BYTES) { + HQM_BASE_ERR(hw, "[%s()] len (%d) > VF->PF mailbox req size\n", + __func__, len); + return -EINVAL; + } + + if (len == 0) { + HQM_BASE_ERR(hw, "[%s()] invalid len (0)\n", __func__); + return -EINVAL; + } + + /* Round up len to the nearest 4B boundary, since the mailbox registers + * are 32b wide. + */ + num_words = len / 4; + if (len % 4 != 0) + num_words++; + + for (i = 0; i < num_words; i++) { + u32 idx = i + HQM_VF2PF_REQ_BASE_WORD; + + buf[i] = HQM_FUNC_RD(hw, HQM_FUNC_PF_VF2PF_MAILBOX(vf_id, idx)); + } + + memcpy(data, buf, len); + + return 0; +} + +int hqm_pf_read_vf_mbox_resp(struct hqm_hw *hw, + unsigned int vf_id, + void *data, + int len) +{ + u32 buf[HQM_VF2PF_RESP_BYTES / 4]; + int num_words; + int i; + + if (len > HQM_VF2PF_RESP_BYTES) { + HQM_BASE_ERR(hw, "[%s()] len (%d) > VF->PF mailbox resp size\n", + __func__, len); + return -EINVAL; + } + + /* Round up len to the nearest 4B boundary, since the mailbox registers + * are 32b wide. + */ + num_words = len / 4; + if (len % 4 != 0) + num_words++; + + for (i = 0; i < num_words; i++) { + u32 idx = i + HQM_VF2PF_RESP_BASE_WORD; + + buf[i] = HQM_FUNC_RD(hw, HQM_FUNC_PF_VF2PF_MAILBOX(vf_id, idx)); + } + + memcpy(data, buf, len); + + return 0; +} + +int hqm_pf_write_vf_mbox_resp(struct hqm_hw *hw, + unsigned int vf_id, + void *data, + int len) +{ + u32 buf[HQM_PF2VF_RESP_BYTES / 4]; + int num_words; + int i; + + if (len > HQM_PF2VF_RESP_BYTES) { + HQM_BASE_ERR(hw, "[%s()] len (%d) > PF->VF mailbox resp size\n", + __func__, len); + return -EINVAL; + } + + memcpy(buf, data, len); + + /* Round up len to the nearest 4B boundary, since the mailbox registers + * are 32b wide. 
+ */ + num_words = len / 4; + if (len % 4 != 0) + num_words++; + + for (i = 0; i < num_words; i++) { + u32 idx = i + HQM_PF2VF_RESP_BASE_WORD; + + HQM_FUNC_WR(hw, HQM_FUNC_PF_PF2VF_MAILBOX(vf_id, idx), buf[i]); + } + + return 0; +} + +int hqm_pf_write_vf_mbox_req(struct hqm_hw *hw, + unsigned int vf_id, + void *data, + int len) +{ + u32 buf[HQM_PF2VF_REQ_BYTES / 4]; + int num_words; + int i; + + if (len > HQM_PF2VF_REQ_BYTES) { + HQM_BASE_ERR(hw, "[%s()] len (%d) > PF->VF mailbox req size\n", + __func__, len); + return -EINVAL; + } + + memcpy(buf, data, len); + + /* Round up len to the nearest 4B boundary, since the mailbox registers + * are 32b wide. + */ + num_words = len / 4; + if (len % 4 != 0) + num_words++; + + for (i = 0; i < num_words; i++) { + u32 idx = i + HQM_PF2VF_REQ_BASE_WORD; + + HQM_FUNC_WR(hw, HQM_FUNC_PF_PF2VF_MAILBOX(vf_id, idx), buf[i]); + } + + return 0; +} + +int hqm_vf_read_pf_mbox_resp(struct hqm_hw *hw, void *data, int len) +{ + u32 buf[HQM_PF2VF_RESP_BYTES / 4]; + int num_words; + int i; + + if (len > HQM_PF2VF_RESP_BYTES) { + HQM_BASE_ERR(hw, "[%s()] len (%d) > PF->VF mailbox resp size\n", + __func__, len); + return -EINVAL; + } + + if (len == 0) { + HQM_BASE_ERR(hw, "[%s()] invalid len (0)\n", __func__); + return -EINVAL; + } + + /* Round up len to the nearest 4B boundary, since the mailbox registers + * are 32b wide. + */ + num_words = len / 4; + if (len % 4 != 0) + num_words++; + + for (i = 0; i < num_words; i++) { + u32 idx = i + HQM_PF2VF_RESP_BASE_WORD; + + buf[i] = HQM_FUNC_RD(hw, HQM_FUNC_VF_PF2VF_MAILBOX(idx)); + } + + memcpy(data, buf, len); + + return 0; +} + +int hqm_vf_read_pf_mbox_req(struct hqm_hw *hw, void *data, int len) +{ + u32 buf[HQM_PF2VF_REQ_BYTES / 4]; + int num_words; + int i; + + if (len > HQM_PF2VF_REQ_BYTES) { + HQM_BASE_ERR(hw, "[%s()] len (%d) > PF->VF mailbox req size\n", + __func__, len); + return -EINVAL; + } + + /* Round up len to the nearest 4B boundary, since the mailbox registers + * are 32b wide. + */ + num_words = len / 4; + if ((len % 4) != 0) + num_words++; + + for (i = 0; i < num_words; i++) { + u32 idx = i + HQM_PF2VF_REQ_BASE_WORD; + + buf[i] = HQM_FUNC_RD(hw, HQM_FUNC_VF_PF2VF_MAILBOX(idx)); + } + + memcpy(data, buf, len); + + return 0; +} + +int hqm_vf_write_pf_mbox_req(struct hqm_hw *hw, void *data, int len) +{ + u32 buf[HQM_VF2PF_REQ_BYTES / 4]; + int num_words; + int i; + + if (len > HQM_VF2PF_REQ_BYTES) { + HQM_BASE_ERR(hw, "[%s()] len (%d) > VF->PF mailbox req size\n", + __func__, len); + return -EINVAL; + } + + memcpy(buf, data, len); + + /* Round up len to the nearest 4B boundary, since the mailbox registers + * are 32b wide. + */ + num_words = len / 4; + if (len % 4 != 0) + num_words++; + + for (i = 0; i < num_words; i++) { + u32 idx = i + HQM_VF2PF_REQ_BASE_WORD; + + HQM_FUNC_WR(hw, HQM_FUNC_VF_VF2PF_MAILBOX(idx), buf[i]); + } + + return 0; +} + +int hqm_vf_write_pf_mbox_resp(struct hqm_hw *hw, void *data, int len) +{ + u32 buf[HQM_VF2PF_RESP_BYTES / 4]; + int num_words; + int i; + + if (len > HQM_VF2PF_RESP_BYTES) { + HQM_BASE_ERR(hw, "[%s()] len (%d) > VF->PF mailbox resp size\n", + __func__, len); + return -EINVAL; + } + + memcpy(buf, data, len); + + /* Round up len to the nearest 4B boundary, since the mailbox registers + * are 32b wide. 
+ */ + num_words = len / 4; + if (len % 4 != 0) + num_words++; + + for (i = 0; i < num_words; i++) { + u32 idx = i + HQM_VF2PF_RESP_BASE_WORD; + + HQM_FUNC_WR(hw, HQM_FUNC_VF_VF2PF_MAILBOX(idx), buf[i]); + } + + return 0; +} + +bool hqm_vf_is_locked(struct hqm_hw *hw, unsigned int vf_id) +{ + return hw->vf[vf_id].locked; +} + +static void hqm_vf_set_rsrc_virt_ids(struct hqm_function_resources *rsrcs, + unsigned int vf_id) +{ + struct hqm_list_entry *iter __attribute__((unused)); + struct hqm_dir_pq_pair *dir_port; + struct hqm_ldb_queue *ldb_queue; + struct hqm_ldb_port *ldb_port; + struct hqm_credit_pool *pool; + struct hqm_domain *domain; + int i; + + i = 0; + HQM_FUNC_LIST_FOR(rsrcs->avail_domains, domain, iter) { + domain->id.virt_id = i; + domain->id.vf_owned = true; + domain->id.vf_id = vf_id; + i++; + } + + i = 0; + HQM_FUNC_LIST_FOR(rsrcs->avail_ldb_queues, ldb_queue, iter) { + ldb_queue->id.virt_id = i; + ldb_queue->id.vf_owned = true; + ldb_queue->id.vf_id = vf_id; + i++; + } + + i = 0; + HQM_FUNC_LIST_FOR(rsrcs->avail_ldb_ports, ldb_port, iter) { + ldb_port->id.virt_id = i; + ldb_port->id.vf_owned = true; + ldb_port->id.vf_id = vf_id; + i++; + } + + i = 0; + HQM_FUNC_LIST_FOR(rsrcs->avail_dir_pq_pairs, dir_port, iter) { + dir_port->id.virt_id = i; + dir_port->id.vf_owned = true; + dir_port->id.vf_id = vf_id; + i++; + } + + i = 0; + HQM_FUNC_LIST_FOR(rsrcs->avail_ldb_credit_pools, pool, iter) { + pool->id.virt_id = i; + pool->id.vf_owned = true; + pool->id.vf_id = vf_id; + i++; + } + + i = 0; + HQM_FUNC_LIST_FOR(rsrcs->avail_dir_credit_pools, pool, iter) { + pool->id.virt_id = i; + pool->id.vf_owned = true; + pool->id.vf_id = vf_id; + i++; + } +} + +void hqm_lock_vf(struct hqm_hw *hw, unsigned int vf_id) +{ + struct hqm_function_resources *rsrcs = &hw->vf[vf_id]; + + rsrcs->locked = true; + + hqm_vf_set_rsrc_virt_ids(rsrcs, vf_id); +} + +void hqm_unlock_vf(struct hqm_hw *hw, unsigned int vf_id) +{ + hw->vf[vf_id].locked = false; +} + +int hqm_reset_vf_resources(struct hqm_hw *hw, unsigned int vf_id) +{ + if (vf_id >= HQM_MAX_NUM_VFS) + return -EINVAL; + + /* If the VF is locked, its resource assignment can't be changed */ + if (hqm_vf_is_locked(hw, vf_id)) + return -EPERM; + + hqm_update_vf_sched_domains(hw, vf_id, 0); + hqm_update_vf_ldb_queues(hw, vf_id, 0); + hqm_update_vf_ldb_ports(hw, vf_id, 0); + hqm_update_vf_dir_ports(hw, vf_id, 0); + hqm_update_vf_ldb_credit_pools(hw, vf_id, 0); + hqm_update_vf_dir_credit_pools(hw, vf_id, 0); + hqm_update_vf_ldb_credits(hw, vf_id, 0); + hqm_update_vf_dir_credits(hw, vf_id, 0); + hqm_update_vf_hist_list_entries(hw, vf_id, 0); + hqm_update_vf_atomic_inflights(hw, vf_id, 0); + + return 0; +} + +void hqm_hw_enable_sparse_ldb_cq_mode(struct hqm_hw *hw) +{ + union hqm_sys_cq_mode r0; + + r0.val = HQM_CSR_RD(hw, HQM_SYS_CQ_MODE); + + r0.field.ldb_cq64 = 1; + + HQM_CSR_WR(hw, HQM_SYS_CQ_MODE, r0.val); +} + +void hqm_hw_enable_sparse_dir_cq_mode(struct hqm_hw *hw) +{ + union hqm_sys_cq_mode r0; + + r0.val = HQM_CSR_RD(hw, HQM_SYS_CQ_MODE); + + r0.field.dir_cq64 = 1; + + HQM_CSR_WR(hw, HQM_SYS_CQ_MODE, r0.val); +} + +void hqm_hw_disable_sparse_ldb_cq_mode(struct hqm_hw *hw) +{ + union hqm_sys_cq_mode r0; + + r0.val = HQM_CSR_RD(hw, HQM_SYS_CQ_MODE); + + r0.field.ldb_cq64 = 0; + + HQM_CSR_WR(hw, HQM_SYS_CQ_MODE, r0.val); +} + +void hqm_hw_disable_sparse_dir_cq_mode(struct hqm_hw *hw) +{ + union hqm_sys_cq_mode r0; + + r0.val = HQM_CSR_RD(hw, HQM_SYS_CQ_MODE); + + r0.field.dir_cq64 = 0; + + HQM_CSR_WR(hw, HQM_SYS_CQ_MODE, r0.val); +} diff --git 
a/drivers/misc/hqm/hqm_resource.h b/drivers/misc/hqm/hqm_resource.h new file mode 100644 index 00000000000000..3e36606300d89d --- /dev/null +++ b/drivers/misc/hqm/hqm_resource.h @@ -0,0 +1,1654 @@ +/* SPDX-License-Identifier: GPL-2.0-only + * Copyright(c) 2016-2019 Intel Corporation + */ + +#ifndef __HQM_BASE_HQM_API_H +#define __HQM_BASE_HQM_API_H + +#include "hqm_hw_types.h" +#include "hqm_osdep_types.h" +#include + +/** + * hqm_resource_init() - initialize the device + * @hw: pointer to struct hqm_hw. + * + * This function initializes the device's software state (pointed to by the hw + * argument) and programs global scheduling QoS registers. This function should + * be called during driver initialization. + * + * The hqm_hw struct must be unique per HQM device and persist until the device + * is reset. + * + * Return: + * Returns 0 upon success, -1 otherwise. + */ +int hqm_resource_init(struct hqm_hw *hw); + +/** + * hqm_resource_free() - free device state memory + * @hw: hqm_hw handle for a particular device. + * + * This function frees software state pointed to by hqm_hw. This function + * should be called when resetting the device or unloading the driver. + */ +void hqm_resource_free(struct hqm_hw *hw); + +/** + * hqm_resource_reset() - reset in-use resources to their initial state + * @hw: hqm_hw handle for a particular device. + * + * This function resets in-use resources, and makes them available for use. + * All resources go back to their owning function, whether a PF or a VF. + */ +void hqm_resource_reset(struct hqm_hw *hw); + +/** + * hqm_hw_create_sched_domain() - create a scheduling domain + * @hw: hqm_hw handle for a particular device. + * @args: scheduling domain creation arguments. + * @resp: response structure. + * @vf_request: indicates whether this request came from a VF. + * @vf_id: If vf_request is true, this contains the VF's ID. + * + * This function creates a scheduling domain containing the resources specified + * in args. The individual resources (queues, ports, credit pools) can be + * configured after creating a scheduling domain. + * + * Return: + * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is + * assigned a detailed error code from enum hqm_error. If successful, resp->id + * contains the domain ID. + * + * Note: resp->id contains a virtual ID if vf_request is true. + * + * Errors: + * EINVAL - A requested resource is unavailable, or the requested domain name + * is already in use. + * EFAULT - Internal error (resp->status not set). + */ +int hqm_hw_create_sched_domain(struct hqm_hw *hw, + struct hqm_create_sched_domain_args *args, + struct hqm_cmd_response *resp, + bool vf_request, + unsigned int vf_id); + +/** + * hqm_hw_create_ldb_pool() - create a load-balanced credit pool + * @hw: hqm_hw handle for a particular device. + * @domain_id: domain ID. + * @args: credit pool creation arguments. + * @resp: response structure. + * @vf_request: indicates whether this request came from a VF. + * @vf_id: If vf_request is true, this contains the VF's ID. + * + * This function creates a load-balanced credit pool containing the number of + * requested credits. + * + * Return: + * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is + * assigned a detailed error code from enum hqm_error. If successful, resp->id + * contains the pool ID. + * + * Note: resp->id contains a virtual ID if vf_request is true. 
+ * + * Errors: + * EINVAL - A requested resource is unavailable, the domain is not configured, + * or the domain has already been started. + * EFAULT - Internal error (resp->status not set). + */ +int hqm_hw_create_ldb_pool(struct hqm_hw *hw, + u32 domain_id, + struct hqm_create_ldb_pool_args *args, + struct hqm_cmd_response *resp, + bool vf_request, + unsigned int vf_id); + +/** + * hqm_hw_create_dir_pool() - create a directed credit pool + * @hw: hqm_hw handle for a particular device. + * @domain_id: domain ID. + * @args: credit pool creation arguments. + * @resp: response structure. + * @vf_request: indicates whether this request came from a VF. + * @vf_id: If vf_request is true, this contains the VF's ID. + * + * This function creates a directed credit pool containing the number of + * requested credits. + * + * Return: + * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is + * assigned a detailed error code from enum hqm_error. If successful, resp->id + * contains the pool ID. + * + * Note: resp->id contains a virtual ID if vf_request is true. + * + * Errors: + * EINVAL - A requested resource is unavailable, the domain is not configured, + * or the domain has already been started. + * EFAULT - Internal error (resp->status not set). + */ +int hqm_hw_create_dir_pool(struct hqm_hw *hw, + u32 domain_id, + struct hqm_create_dir_pool_args *args, + struct hqm_cmd_response *resp, + bool vf_request, + unsigned int vf_id); + +/** + * hqm_hw_create_ldb_queue() - create a load-balanced queue + * @hw: hqm_hw handle for a particular device. + * @domain_id: domain ID. + * @args: queue creation arguments. + * @resp: response structure. + * @vf_request: indicates whether this request came from a VF. + * @vf_id: If vf_request is true, this contains the VF's ID. + * + * This function creates a load-balanced queue. + * + * Return: + * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is + * assigned a detailed error code from enum hqm_error. If successful, resp->id + * contains the queue ID. + * + * Note: resp->id contains a virtual ID if vf_request is true. + * + * Errors: + * EINVAL - A requested resource is unavailable, the domain is not configured, + * the domain has already been started, or the requested queue name is + * already in use. + * EFAULT - Internal error (resp->status not set). + */ +int hqm_hw_create_ldb_queue(struct hqm_hw *hw, + u32 domain_id, + struct hqm_create_ldb_queue_args *args, + struct hqm_cmd_response *resp, + bool vf_request, + unsigned int vf_id); + +/** + * hqm_hw_create_dir_queue() - create a directed queue + * @hw: hqm_hw handle for a particular device. + * @domain_id: domain ID. + * @args: queue creation arguments. + * @resp: response structure. + * @vf_request: indicates whether this request came from a VF. + * @vf_id: If vf_request is true, this contains the VF's ID. + * + * This function creates a directed queue. + * + * Return: + * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is + * assigned a detailed error code from enum hqm_error. If successful, resp->id + * contains the queue ID. + * + * Note: resp->id contains a virtual ID if vf_request is true. + * + * Errors: + * EINVAL - A requested resource is unavailable, the domain is not configured, + * or the domain has already been started. + * EFAULT - Internal error (resp->status not set). 
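+ *
+ * An illustrative PF-originated call (argument values are examples only, not
+ * taken from this patch) might look like:
+ *
+ *   hqm_hw_create_dir_queue(hw, domain_id, &args, &resp, false, 0);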
+ */ +int hqm_hw_create_dir_queue(struct hqm_hw *hw, + u32 domain_id, + struct hqm_create_dir_queue_args *args, + struct hqm_cmd_response *resp, + bool vf_request, + unsigned int vf_id); + +/** + * hqm_hw_create_dir_port() - create a directed port + * @hw: hqm_hw handle for a particular device. + * @domain_id: domain ID. + * @args: port creation arguments. + * @pop_count_dma_base: base address of the pop count memory. This can be + * a PA or an IOVA. + * @cq_dma_base: base address of the CQ memory. This can be a PA or an IOVA. + * @resp: response structure. + * @vf_request: indicates whether this request came from a VF. + * @vf_id: If vf_request is true, this contains the VF's ID. + * + * This function creates a directed port. + * + * Return: + * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is + * assigned a detailed error code from enum hqm_error. If successful, resp->id + * contains the port ID. + * + * Note: resp->id contains a virtual ID if vf_request is true. + * + * Errors: + * EINVAL - A requested resource is unavailable, a credit setting is invalid, a + * pool ID is invalid, a pointer address is not properly aligned, the + * domain is not configured, or the domain has already been started. + * EFAULT - Internal error (resp->status not set). + */ +int hqm_hw_create_dir_port(struct hqm_hw *hw, + u32 domain_id, + struct hqm_create_dir_port_args *args, + uintptr_t pop_count_dma_base, + uintptr_t cq_dma_base, + struct hqm_cmd_response *resp, + bool vf_request, + unsigned int vf_id); + +/** + * hqm_hw_create_ldb_port() - create a load-balanced port + * @hw: hqm_hw handle for a particular device. + * @domain_id: domain ID. + * @args: port creation arguments. + * @pop_count_dma_base: base address of the pop count memory. This can be + * a PA or an IOVA. + * @cq_dma_base: base address of the CQ memory. This can be a PA or an IOVA. + * @resp: response structure. + * @vf_request: indicates whether this request came from a VF. + * @vf_id: If vf_request is true, this contains the VF's ID. + * + * This function creates a load-balanced port. + * + * Return: + * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is + * assigned a detailed error code from enum hqm_error. If successful, resp->id + * contains the port ID. + * + * Note: resp->id contains a virtual ID if vf_request is true. + * + * Errors: + * EINVAL - A requested resource is unavailable, a credit setting is invalid, a + * pool ID is invalid, a pointer address is not properly aligned, the + * domain is not configured, or the domain has already been started. + * EFAULT - Internal error (resp->status not set). + */ +int hqm_hw_create_ldb_port(struct hqm_hw *hw, + u32 domain_id, + struct hqm_create_ldb_port_args *args, + uintptr_t pop_count_dma_base, + uintptr_t cq_dma_base, + struct hqm_cmd_response *resp, + bool vf_request, + unsigned int vf_id); + +/** + * hqm_hw_start_domain() - start a scheduling domain + * @hw: hqm_hw handle for a particular device. + * @domain_id: domain ID. + * @args: start domain arguments. + * @resp: response structure. + * @vf_request: indicates whether this request came from a VF. + * @vf_id: If vf_request is true, this contains the VF's ID. + * + * This function starts a scheduling domain, which allows applications to send + * traffic through it. Once a domain is started, its resources can no longer be + * configured (besides QID remapping and port enable/disable). + * + * Return: + * Returns 0 upon success, < 0 otherwise. 
If an error occurs, resp->status is + * assigned a detailed error code from enum hqm_error. + * + * Errors: + * EINVAL - the domain is not configured, or the domain is already started. + */ +int hqm_hw_start_domain(struct hqm_hw *hw, + u32 domain_id, + struct hqm_start_domain_args *args, + struct hqm_cmd_response *resp, + bool vf_request, + unsigned int vf_id); + +/** + * hqm_hw_map_qid() - map a load-balanced queue to a load-balanced port + * @hw: hqm_hw handle for a particular device. + * @domain_id: domain ID. + * @args: map QID arguments. + * @resp: response structure. + * @vf_request: indicates whether this request came from a VF. + * @vf_id: If vf_request is true, this contains the VF's ID. + * + * This function configures the HQM to schedule QEs from the specified queue to + * the specified port. Each load-balanced port can be mapped to up to 8 queues; + * each load-balanced queue can potentially map to all the load-balanced ports. + * + * A successful return does not necessarily mean the mapping was configured. If + * this function is unable to immediately map the queue to the port, it will + * add the requested operation to a per-port list of pending map/unmap + * operations, and (if it's not already running) launch a kernel thread that + * periodically attempts to process all pending operations. In a sense, this is + * an asynchronous function. + * + * This asynchronicity creates two views of the state of hardware: the actual + * hardware state and the requested state (as if every request completed + * immediately). If there are any pending map/unmap operations, the requested + * state will differ from the actual state. All validation is performed with + * respect to the pending state; for instance, if there are 8 pending map + * operations for port X, a request for a 9th will fail because a load-balanced + * port can only map up to 8 queues. + * + * Return: + * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is + * assigned a detailed error code from enum hqm_error. + * + * Errors: + * EINVAL - A requested resource is unavailable, invalid port or queue ID, or + * the domain is not configured. + * EFAULT - Internal error (resp->status not set). + */ +int hqm_hw_map_qid(struct hqm_hw *hw, + u32 domain_id, + struct hqm_map_qid_args *args, + struct hqm_cmd_response *resp, + bool vf_request, + unsigned int vf_id); + +/** + * hqm_hw_unmap_qid() - Unmap a load-balanced queue from a load-balanced port + * @hw: hqm_hw handle for a particular device. + * @domain_id: domain ID. + * @args: unmap QID arguments. + * @resp: response structure. + * @vf_request: indicates whether this request came from a VF. + * @vf_id: If vf_request is true, this contains the VF's ID. + * + * This function configures the HQM to stop scheduling QEs from the specified + * queue to the specified port. + * + * A successful return does not necessarily mean the mapping was removed. If + * this function is unable to immediately unmap the queue from the port, it + * will add the requested operation to a per-port list of pending map/unmap + * operations, and (if it's not already running) launch a kernel thread that + * periodically attempts to process all pending operations. See + * hqm_hw_map_qid() for more details. + * + * Return: + * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is + * assigned a detailed error code from enum hqm_error. + * + * Errors: + * EINVAL - A requested resource is unavailable, invalid port or queue ID, or + * the domain is not configured. 
+ * EFAULT - Internal error (resp->status not set). + */ +int hqm_hw_unmap_qid(struct hqm_hw *hw, + u32 domain_id, + struct hqm_unmap_qid_args *args, + struct hqm_cmd_response *resp, + bool vf_request, + unsigned int vf_id); + +/** + * hqm_finish_unmap_qid_procedures() - finish any pending unmap procedures + * @hw: hqm_hw handle for a particular device. + * + * This function attempts to finish any outstanding unmap procedures. + * This function should be called by the kernel thread responsible for + * finishing map/unmap procedures. + * + * Return: + * Returns the number of procedures that weren't completed. + */ +unsigned int hqm_finish_unmap_qid_procedures(struct hqm_hw *hw); + +/** + * hqm_finish_map_qid_procedures() - finish any pending map procedures + * @hw: hqm_hw handle for a particular device. + * + * This function attempts to finish any outstanding map procedures. + * This function should be called by the kernel thread responsible for + * finishing map/unmap procedures. + * + * Return: + * Returns the number of procedures that weren't completed. + */ +unsigned int hqm_finish_map_qid_procedures(struct hqm_hw *hw); + +/** + * hqm_hw_enable_ldb_port() - enable a load-balanced port for scheduling + * @hw: hqm_hw handle for a particular device. + * @domain_id: domain ID. + * @args: port enable arguments. + * @resp: response structure. + * @vf_request: indicates whether this request came from a VF. + * @vf_id: If vf_request is true, this contains the VF's ID. + * + * This function configures the HQM to schedule QEs to a load-balanced port. + * Ports are enabled by default. + * + * Return: + * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is + * assigned a detailed error code from enum hqm_error. + * + * Errors: + * EINVAL - The port ID is invalid or the domain is not configured. + * EFAULT - Internal error (resp->status not set). + */ +int hqm_hw_enable_ldb_port(struct hqm_hw *hw, + u32 domain_id, + struct hqm_enable_ldb_port_args *args, + struct hqm_cmd_response *resp, + bool vf_request, + unsigned int vf_id); + +/** + * hqm_hw_disable_ldb_port() - disable a load-balanced port for scheduling + * @hw: hqm_hw handle for a particular device. + * @domain_id: domain ID. + * @args: port disable arguments. + * @resp: response structure. + * @vf_request: indicates whether this request came from a VF. + * @vf_id: If vf_request is true, this contains the VF's ID. + * + * This function configures the HQM to stop scheduling QEs to a load-balanced + * port. Ports are enabled by default. + * + * Return: + * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is + * assigned a detailed error code from enum hqm_error. + * + * Errors: + * EINVAL - The port ID is invalid or the domain is not configured. + * EFAULT - Internal error (resp->status not set). + */ +int hqm_hw_disable_ldb_port(struct hqm_hw *hw, + u32 domain_id, + struct hqm_disable_ldb_port_args *args, + struct hqm_cmd_response *resp, + bool vf_request, + unsigned int vf_id); + +/** + * hqm_hw_enable_dir_port() - enable a directed port for scheduling + * @hw: hqm_hw handle for a particular device. + * @domain_id: domain ID. + * @args: port enable arguments. + * @resp: response structure. + * @vf_request: indicates whether this request came from a VF. + * @vf_id: If vf_request is true, this contains the VF's ID. + * + * This function configures the HQM to schedule QEs to a directed port. + * Ports are enabled by default. + * + * Return: + * Returns 0 upon success, < 0 otherwise. 
If an error occurs, resp->status is
+ * assigned a detailed error code from enum hqm_error.
+ *
+ * Errors:
+ * EINVAL - The port ID is invalid or the domain is not configured.
+ * EFAULT - Internal error (resp->status not set).
+ */
+int hqm_hw_enable_dir_port(struct hqm_hw *hw,
+			   u32 domain_id,
+			   struct hqm_enable_dir_port_args *args,
+			   struct hqm_cmd_response *resp,
+			   bool vf_request,
+			   unsigned int vf_id);
+
+/**
+ * hqm_hw_disable_dir_port() - disable a directed port for scheduling
+ * @hw: hqm_hw handle for a particular device.
+ * @domain_id: domain ID.
+ * @args: port disable arguments.
+ * @resp: response structure.
+ * @vf_request: indicates whether this request came from a VF.
+ * @vf_id: If vf_request is true, this contains the VF's ID.
+ *
+ * This function configures the HQM to stop scheduling QEs to a directed port.
+ * Ports are enabled by default.
+ *
+ * Return:
+ * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
+ * assigned a detailed error code from enum hqm_error.
+ *
+ * Errors:
+ * EINVAL - The port ID is invalid or the domain is not configured.
+ * EFAULT - Internal error (resp->status not set).
+ */
+int hqm_hw_disable_dir_port(struct hqm_hw *hw,
+			    u32 domain_id,
+			    struct hqm_disable_dir_port_args *args,
+			    struct hqm_cmd_response *resp,
+			    bool vf_request,
+			    unsigned int vf_id);
+
+/**
+ * hqm_configure_ldb_cq_interrupt() - configure load-balanced CQ for interrupts
+ * @hw: hqm_hw handle for a particular device.
+ * @port_id: load-balanced port ID.
+ * @vector: interrupt vector ID. Should be 0 for MSI or compressed MSI-X mode,
+ *	    else a value up to 64.
+ * @mode: interrupt type (HQM_CQ_ISR_MODE_MSI or HQM_CQ_ISR_MODE_MSIX)
+ * @vf: If the port is VF-owned, the VF's ID. This is used for translating the
+ *	virtual port ID to a physical port ID. Ignored if mode is not MSI.
+ * @owner_vf: the VF to route the interrupt to. Ignored if mode is not MSI.
+ * @threshold: the minimum CQ depth at which the interrupt can fire. Must be
+ *	greater than 0.
+ *
+ * This function configures the HQM registers for a load-balanced CQ's
+ * interrupts. This doesn't enable the CQ's interrupt; that can be done with
+ * hqm_arm_cq_interrupt() or through an interrupt arm QE.
+ *
+ * Return:
+ * Returns 0 upon success, < 0 otherwise.
+ *
+ * Errors:
+ * EINVAL - The port ID is invalid.
+ */
+int hqm_configure_ldb_cq_interrupt(struct hqm_hw *hw,
+				   int port_id,
+				   int vector,
+				   int mode,
+				   unsigned int vf,
+				   unsigned int owner_vf,
+				   u16 threshold);
+
+/**
+ * hqm_configure_dir_cq_interrupt() - configure directed CQ for interrupts
+ * @hw: hqm_hw handle for a particular device.
+ * @port_id: directed port ID.
+ * @vector: interrupt vector ID. Should be 0 for MSI or compressed MSI-X mode,
+ *	    else a value up to 64.
+ * @mode: interrupt type (HQM_CQ_ISR_MODE_MSI or HQM_CQ_ISR_MODE_MSIX)
+ * @vf: If the port is VF-owned, the VF's ID. This is used for translating the
+ *	virtual port ID to a physical port ID. Ignored if mode is not MSI.
+ * @owner_vf: the VF to route the interrupt to. Ignored if mode is not MSI.
+ * @threshold: the minimum CQ depth at which the interrupt can fire. Must be
+ *	greater than 0.
+ *
+ * This function configures the HQM registers for a directed CQ's interrupts.
+ * This doesn't enable the CQ's interrupt; that can be done with
+ * hqm_arm_cq_interrupt() or through an interrupt arm QE.
+ *
+ * Return:
+ * Returns 0 upon success, < 0 otherwise.
+ *
+ * Errors:
+ * EINVAL - The port ID is invalid.
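+ *
+ * An illustrative call (values are examples only, not taken from this patch):
+ * configuring a directed CQ for compressed MSI-X with a threshold of 1 might
+ * look like:
+ *
+ *   hqm_configure_dir_cq_interrupt(hw, port_id, 0, HQM_CQ_ISR_MODE_MSIX,
+ *				    0, 0, 1);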
+ */
+int hqm_configure_dir_cq_interrupt(struct hqm_hw *hw,
+				   int port_id,
+				   int vector,
+				   int mode,
+				   unsigned int vf,
+				   unsigned int owner_vf,
+				   u16 threshold);
+
+/**
+ * hqm_enable_alarm_interrupts() - enable certain hardware alarm interrupts
+ * @hw: hqm_hw handle for a particular device.
+ *
+ * This function configures the ingress error alarm. (Other alarms are enabled
+ * by default.)
+ */
+void hqm_enable_alarm_interrupts(struct hqm_hw *hw);
+
+/**
+ * hqm_set_msix_mode() - configure the hardware's MSI-X mode
+ * @hw: hqm_hw handle for a particular device.
+ * @mode: MSI-X mode (HQM_MSIX_MODE_PACKED or HQM_MSIX_MODE_COMPRESSED)
+ *
+ * This function configures the hardware to use either packed or compressed
+ * mode. This function should not be called if using MSI interrupts.
+ */
+void hqm_set_msix_mode(struct hqm_hw *hw, int mode);
+
+/**
+ * hqm_cq_depth() - query a CQ's depth
+ * @hw: hqm_hw handle for a particular device.
+ * @port_id: port ID
+ * @is_ldb: true for load-balanced port, false for a directed port
+ * @vf_request: indicates whether this request came from a VF.
+ * @vf_id: If vf_request is true, this contains the VF's ID.
+ *
+ * This function returns the CQ's current depth. It can be used in CQ interrupt
+ * code to check if the caller needs to block (depth == 0), or as a thread
+ * wake-up condition (depth != 0).
+ *
+ * The function does no parameter validation; that is the caller's
+ * responsibility.
+ *
+ * Return: returns the CQ's depth (>= 0), or an error otherwise.
+ *
+ * EINVAL - Invalid port ID.
+ */
+int hqm_cq_depth(struct hqm_hw *hw,
+		 int port_id,
+		 bool is_ldb,
+		 bool vf_request,
+		 unsigned int vf_id);
+
+/**
+ * hqm_arm_cq_interrupt() - arm a CQ's interrupt
+ * @hw: hqm_hw handle for a particular device.
+ * @port_id: port ID
+ * @is_ldb: true for load-balanced port, false for a directed port
+ * @vf_request: indicates whether this request came from a VF.
+ * @vf_id: If vf_request is true, this contains the VF's ID.
+ *
+ * This function arms the CQ's interrupt. The CQ must be configured prior to
+ * calling this function.
+ *
+ * The function does no parameter validation; that is the caller's
+ * responsibility.
+ *
+ * Return: returns 0 upon success, <0 otherwise.
+ *
+ * EINVAL - Invalid port ID.
+ */
+int hqm_arm_cq_interrupt(struct hqm_hw *hw,
+			 int port_id,
+			 bool is_ldb,
+			 bool vf_request,
+			 unsigned int vf_id);
+
+/**
+ * hqm_read_compressed_cq_intr_status() - read compressed CQ interrupt status
+ * @hw: hqm_hw handle for a particular device.
+ * @ldb_interrupts: 2-entry array of u32 bitmaps
+ * @dir_interrupts: 4-entry array of u32 bitmaps
+ *
+ * This function can be called from a compressed CQ interrupt handler to
+ * determine which CQ interrupts have fired. The caller should take appropriate
+ * action (such as waking threads blocked on a CQ's interrupt), then ack the
+ * interrupts with hqm_ack_compressed_cq_intr().
+ */
+void hqm_read_compressed_cq_intr_status(struct hqm_hw *hw,
+					u32 *ldb_interrupts,
+					u32 *dir_interrupts);
+
+/**
+ * hqm_ack_compressed_cq_intr() - ack compressed CQ interrupts
+ * @hw: hqm_hw handle for a particular device.
+ * @ldb_interrupts: 2-entry array of u32 bitmaps
+ * @dir_interrupts: 4-entry array of u32 bitmaps
+ *
+ * This function ACKs compressed CQ interrupts. Its arguments should be the
+ * same ones passed to hqm_read_compressed_cq_intr_status().
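+ *
+ * A minimal compressed-mode ISR sketch (illustrative only):
+ *
+ *   u32 ldb[2], dir[4];
+ *
+ *   hqm_read_compressed_cq_intr_status(hw, ldb, dir);
+ *   (wake any threads blocked on the indicated CQs)
+ *   hqm_ack_compressed_cq_intr(hw, ldb, dir);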
+ */
+void hqm_ack_compressed_cq_intr(struct hqm_hw *hw,
+				u32 *ldb_interrupts,
+				u32 *dir_interrupts);
+
+/**
+ * hqm_read_vf_intr_status() - read the VF interrupt status register
+ * @hw: hqm_hw handle for a particular device.
+ *
+ * This function can be called from a VF's interrupt handler to determine
+ * which interrupts have fired. The first 31 bits correspond to CQ interrupt
+ * vectors, and the final bit is for the PF->VF mailbox interrupt vector.
+ *
+ * Return:
+ * Returns a bit vector indicating which interrupt vectors are active.
+ */
+u32 hqm_read_vf_intr_status(struct hqm_hw *hw);
+
+/**
+ * hqm_ack_vf_intr_status() - ack VF interrupts
+ * @hw: hqm_hw handle for a particular device.
+ * @interrupts: 32-bit bitmap
+ *
+ * This function ACKs a VF's interrupts. Its interrupts argument should be the
+ * value returned by hqm_read_vf_intr_status().
+ */
+void hqm_ack_vf_intr_status(struct hqm_hw *hw, u32 interrupts);
+
+/**
+ * hqm_ack_vf_msi_intr() - ack VF MSI interrupt
+ * @hw: hqm_hw handle for a particular device.
+ * @interrupts: 32-bit bitmap
+ *
+ * This function clears the VF's MSI interrupt pending register. Its interrupts
+ * argument should contain the MSI vectors to ACK. For example, if MSI MME
+ * is in mode 0, then only bit 0 should ever be set.
+ */
+void hqm_ack_vf_msi_intr(struct hqm_hw *hw, u32 interrupts);
+
+/**
+ * hqm_ack_pf_mbox_int() - ack PF->VF mailbox interrupt
+ * @hw: hqm_hw handle for a particular device.
+ *
+ * When done processing the PF mailbox request, this function unsets
+ * the PF's mailbox ISR register.
+ */
+void hqm_ack_pf_mbox_int(struct hqm_hw *hw);
+
+/**
+ * hqm_process_alarm_interrupt() - process an alarm interrupt
+ * @hw: hqm_hw handle for a particular device.
+ *
+ * This function reads the alarm syndrome, logs it, and acks the interrupt.
+ * This function should be called from the alarm interrupt handler when
+ * interrupt vector HQM_INT_ALARM fires.
+ */
+void hqm_process_alarm_interrupt(struct hqm_hw *hw);
+
+/**
+ * hqm_read_vf_to_pf_int_bitvec() - return a bit vector of all requesting VFs
+ * @hw: hqm_hw handle for a particular device.
+ *
+ * When the VF->PF ISR fires, this function can be called to determine which
+ * VF(s) are requesting service. This bitvector must be passed to
+ * hqm_ack_vf_to_pf_int() when processing is complete for all requesting VFs.
+ *
+ * Return:
+ * Returns a bit vector indicating which VFs (0-15) have requested service.
+ */
+u32 hqm_read_vf_to_pf_int_bitvec(struct hqm_hw *hw);
+
+/**
+ * hqm_ack_vf_mbox_int() - ack processed VF->PF mailbox interrupt
+ * @hw: hqm_hw handle for a particular device.
+ * @bitvec: bit vector returned by hqm_read_vf_to_pf_int_bitvec()
+ *
+ * When done processing all VF mailbox requests, this function unsets the VF's
+ * mailbox ISR register.
+ */
+void hqm_ack_vf_mbox_int(struct hqm_hw *hw, u32 bitvec);
+
+/**
+ * hqm_read_vf_flr_int_bitvec() - return a bit vector of all VFs requesting FLR
+ * @hw: hqm_hw handle for a particular device.
+ *
+ * When the VF FLR ISR fires, this function can be called to determine which
+ * VF(s) are requesting FLRs. This bitvector must be passed to
+ * hqm_ack_vf_flr_int() when processing is complete for all requesting VFs.
+ *
+ * Return:
+ * Returns a bit vector indicating which VFs (0-15) have requested FLRs.
+ */
+u32 hqm_read_vf_flr_int_bitvec(struct hqm_hw *hw);
+
+/**
+ * hqm_ack_vf_flr_int() - ack processed VF FLR interrupt(s)
+ * @hw: hqm_hw handle for a particular device.
+ * @bitvec: bit vector returned by hqm_read_vf_flr_int_bitvec() + * @a_stepping: device is A-stepping + * + * When done processing all VF FLR requests, this function unsets the VF's FLR + * ISR register. + * + * Note: The caller must ensure hqm_set_vf_reset_in_progress(), + * hqm_clr_vf_reset_in_progress(), and hqm_ack_vf_flr_int() are not executed in + * parallel, because the reset-in-progress register does not support atomic + * updates. + */ +void hqm_ack_vf_flr_int(struct hqm_hw *hw, u32 bitvec, bool a_stepping); + +/** + * hqm_ack_vf_to_pf_int() - ack processed VF mbox and FLR interrupt(s) + * @hw: hqm_hw handle for a particular device. + * @mbox_bitvec: bit vector returned by hqm_read_vf_to_pf_int_bitvec() + * @flr_bitvec: bit vector returned by hqm_read_vf_flr_int_bitvec() + * + * When done processing all VF requests, this function communicates to the + * hardware that processing is complete. When this function completes, hardware + * can immediately generate another VF mbox or FLR interrupt. + */ +void hqm_ack_vf_to_pf_int(struct hqm_hw *hw, + u32 mbox_bitvec, + u32 flr_bitvec); + +/** + * hqm_process_ingress_error_interrupt() - process ingress error interrupts + * @hw: hqm_hw handle for a particular device. + * + * This function reads the alarm syndrome, logs it, notifies user-space, and + * acks the interrupt. This function should be called from the alarm interrupt + * handler when interrupt vector HQM_INT_INGRESS_ERROR fires. + */ +void hqm_process_ingress_error_interrupt(struct hqm_hw *hw); + +/** + * hqm_get_group_sequence_numbers() - return a group's number of SNs per queue + * @hw: hqm_hw handle for a particular device. + * @group_id: sequence number group ID. + * + * This function returns the configured number of sequence numbers per queue + * for the specified group. + * + * Return: + * Returns -EINVAL if group_id is invalid, else the group's SNs per queue. + */ +int hqm_get_group_sequence_numbers(struct hqm_hw *hw, unsigned int group_id); + +/** + * hqm_get_group_sequence_number_occupancy() - return a group's in-use slots + * @hw: hqm_hw handle for a particular device. + * @group_id: sequence number group ID. + * + * This function returns the group's number of in-use slots (i.e. load-balanced + * queues using the specified group). + * + * Return: + * Returns -EINVAL if group_id is invalid, else the group's occupancy. + */ +int hqm_get_group_sequence_number_occupancy(struct hqm_hw *hw, + unsigned int group_id); + +/** + * hqm_set_group_sequence_numbers() - assign a group's number of SNs per queue + * @hw: hqm_hw handle for a particular device. + * @group_id: sequence number group ID. + * @val: requested amount of sequence numbers per queue. + * + * This function configures the group's number of sequence numbers per queue. + * val can be a power-of-two between 32 and 1024, inclusive. This setting can + * be configured until the first ordered load-balanced queue is configured, at + * which point the configuration is locked. + * + * Return: + * Returns 0 upon success; -EINVAL if group_id or val is invalid, -EPERM if an + * ordered queue is configured. + */ +int hqm_set_group_sequence_numbers(struct hqm_hw *hw, + unsigned int group_id, + unsigned long val); + +/** + * hqm_reset_domain() - reset a scheduling domain + * @hw: hqm_hw handle for a particular device. + * @domain_id: domain ID. + * @vf_request: indicates whether this request came from a VF. + * @vf_id: If vf_request is true, this contains the VF's ID. 
+ *
+ * This function resets and frees an HQM scheduling domain and its associated
+ * resources.
+ *
+ * Pre-condition: the driver must ensure software has stopped sending QEs
+ * through this domain's producer ports before invoking this function, or
+ * undefined behavior will result.
+ *
+ * Return:
+ * Returns 0 upon success, -1 otherwise.
+ *
+ * EINVAL - Invalid domain ID, or the domain is not configured.
+ * EFAULT - Internal error. (Possibly caused if the pre-condition is not met.)
+ * ETIMEDOUT - Hardware component didn't reset in the expected time.
+ */
+int hqm_reset_domain(struct hqm_hw *hw,
+		     u32 domain_id,
+		     bool vf_request,
+		     unsigned int vf_id);
+
+/**
+ * hqm_ldb_port_owned_by_domain() - query whether a port is owned by a domain
+ * @hw: hqm_hw handle for a particular device.
+ * @domain_id: domain ID.
+ * @port_id: load-balanced port ID.
+ * @vf_request: indicates whether this request came from a VF.
+ * @vf_id: If vf_request is true, this contains the VF's ID.
+ *
+ * This function returns whether a load-balanced port is owned by a specified
+ * domain.
+ *
+ * Return:
+ * Returns 0 if false, 1 if true, <0 otherwise.
+ *
+ * EINVAL - Invalid domain or port ID, or the domain is not configured.
+ */
+int hqm_ldb_port_owned_by_domain(struct hqm_hw *hw,
+				 u32 domain_id,
+				 u32 port_id,
+				 bool vf_request,
+				 unsigned int vf_id);
+
+/**
+ * hqm_dir_port_owned_by_domain() - query whether a port is owned by a domain
+ * @hw: hqm_hw handle for a particular device.
+ * @domain_id: domain ID.
+ * @port_id: directed port ID.
+ * @vf_request: indicates whether this request came from a VF.
+ * @vf_id: If vf_request is true, this contains the VF's ID.
+ *
+ * This function returns whether a directed port is owned by a specified
+ * domain.
+ *
+ * Return:
+ * Returns 0 if false, 1 if true, <0 otherwise.
+ *
+ * EINVAL - Invalid domain or port ID, or the domain is not configured.
+ */
+int hqm_dir_port_owned_by_domain(struct hqm_hw *hw,
+				 u32 domain_id,
+				 u32 port_id,
+				 bool vf_request,
+				 unsigned int vf_id);
+
+/**
+ * hqm_hw_get_num_resources() - query the PCI function's available resources
+ * @hw: hqm_hw handle for a particular device.
+ * @arg: pointer to resource counts.
+ * @vf_request: indicates whether this request came from a VF.
+ * @vf_id: If vf_request is true, this contains the VF's ID.
+ *
+ * This function returns the number of available resources for the PF or for a
+ * VF.
+ *
+ * Return:
+ * Returns 0 upon success, -1 if vf_request is true and vf_id is invalid.
+ */
+int hqm_hw_get_num_resources(struct hqm_hw *hw,
+			     struct hqm_get_num_resources_args *arg,
+			     bool vf_request,
+			     unsigned int vf_id);
+
+/**
+ * hqm_hw_get_num_used_resources() - query the PCI function's used resources
+ * @hw: hqm_hw handle for a particular device.
+ * @arg: pointer to resource counts.
+ * @vf_request: indicates whether this request came from a VF.
+ * @vf_id: If vf_request is true, this contains the VF's ID.
+ *
+ * This function returns the number of resources in use by the PF or a VF. It
+ * fills in the fields that arg points to, except the following:
+ * - max_contiguous_atomic_inflights
+ * - max_contiguous_hist_list_entries
+ * - max_contiguous_ldb_credits
+ * - max_contiguous_dir_credits
+ *
+ * Return:
+ * Returns 0 upon success, -1 if vf_request is true and vf_id is invalid.
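+ *
+ * (The four max_contiguous_* fields above are presumably only meaningful when
+ * querying available resources, which is why they are skipped here.)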
+ */ +int hqm_hw_get_num_used_resources(struct hqm_hw *hw, + struct hqm_get_num_resources_args *arg, + bool vf_request, + unsigned int vf_id); + +/* For free-running counters, we record the initial data at setup time */ +struct hqm_perf_metric_pre_data { + u64 ldb_sched_count; + u64 dir_sched_count; + u64 ldb_cq_sched_count[HQM_MAX_NUM_LDB_PORTS]; + u64 dir_cq_sched_count[HQM_MAX_NUM_DIR_PORTS]; +}; + +/** + * hqm_init_perf_metric_measurement() - initialize perf metric h/w counters + * @hw: hqm_hw handle for a particular device. + * @id: perf metric group ID. + * @duration_us: measurement duration (microseconds). + * + * This function starts the performance measurement hardware for the requested + * performance metric group. + * + * Note: Only one metric group can be measured at a time. Calling this function + * successively without calling hqm_collect_perf_metric_data() will halt the + * first measurement. + */ +void hqm_init_perf_metric_measurement(struct hqm_hw *hw, + u32 id, + u32 duration_us); + +/** + * hqm_collect_perf_metric_data() - collect perf metric h/w counter results + * @hw: hqm_hw handle for a particular device. + * @id: perf metric group ID. + * @data: measurement results (output argument). + * + * This function collects SMON-measured performance measurement data. + */ +void hqm_collect_perf_metric_data(struct hqm_hw *hw, + u32 id, + union hqm_perf_metric_group_data *data); + +/** + * hqm_read_sched_counts() - read the current HQM scheduling counter values + * @hw: hqm_hw handle for a particular device. + * @data: current scheduling counter values (output argument). + * @vf_request: indicates whether this request came from a VF. + * @vf_id: If vf_request is true, this contains the VF's ID. + * + * This function returns the current values in the HQM scheduling counters. + * These counters increase monotonically until the device is reset. + */ +void hqm_read_sched_counts(struct hqm_hw *hw, + struct hqm_sched_counts *data, + bool vf_request, + unsigned int vf_id); + +/** + * hqm_send_async_vf_to_pf_msg() - (VF only) send a mailbox message to the PF + * @hw: hqm_hw handle for a particular device. + * + * This function sends a VF->PF mailbox message. It is asynchronous, so it + * returns once the message is sent but potentially before the PF has processed + * the message. The caller must call hqm_vf_to_pf_complete() to determine when + * the PF has finished processing the request. + */ +void hqm_send_async_vf_to_pf_msg(struct hqm_hw *hw); + +/** + * hqm_vf_to_pf_complete() - check the status of an asynchronous mailbox request + * @hw: hqm_hw handle for a particular device. + * + * This function returns a boolean indicating whether the PF has finished + * processing a VF->PF mailbox request. It should only be called after sending + * an asynchronous request with hqm_send_async_vf_to_pf_msg(). + */ +bool hqm_vf_to_pf_complete(struct hqm_hw *hw); + +/** + * hqm_vf_flr_complete() - check the status of a VF FLR + * @hw: hqm_hw handle for a particular device. + * + * This function returns a boolean indicating whether the PF has finished + * executing the VF FLR. It should only be called after setting the VF's FLR + * bit. + */ +bool hqm_vf_flr_complete(struct hqm_hw *hw); + +/** + * hqm_set_vf_reset_in_progress() - set a VF's reset in progress bit + * @hw: hqm_hw handle for a particular device. + * @vf_id: VF ID. 
+ *
+ * Note: The caller must ensure hqm_set_vf_reset_in_progress(),
+ * hqm_clr_vf_reset_in_progress(), and hqm_ack_vf_flr_int() are not executed in
+ * parallel, because the reset-in-progress register does not support atomic
+ * updates.
+ */
+void hqm_set_vf_reset_in_progress(struct hqm_hw *hw, int vf_id);
+
+/**
+ * hqm_clr_vf_reset_in_progress() - clear a VF's reset in progress bit
+ * @hw: hqm_hw handle for a particular device.
+ * @vf_id: VF ID.
+ *
+ * Note: The caller must ensure hqm_set_vf_reset_in_progress(),
+ * hqm_clr_vf_reset_in_progress(), and hqm_ack_vf_flr_int() are not executed in
+ * parallel, because the reset-in-progress register does not support atomic
+ * updates.
+ */
+void hqm_clr_vf_reset_in_progress(struct hqm_hw *hw, int vf_id);
+
+/**
+ * hqm_send_async_pf_to_vf_msg() - (PF only) send a mailbox message to the VF
+ * @hw: hqm_hw handle for a particular device.
+ * @vf_id: VF ID.
+ *
+ * This function sends a PF->VF mailbox message. It is asynchronous, so it
+ * returns once the message is sent but potentially before the VF has processed
+ * the message. The caller must call hqm_pf_to_vf_complete() to determine when
+ * the VF has finished processing the request.
+ */
+void hqm_send_async_pf_to_vf_msg(struct hqm_hw *hw, unsigned int vf_id);
+
+/**
+ * hqm_pf_to_vf_complete() - check the status of an asynchronous mailbox request
+ * @hw: hqm_hw handle for a particular device.
+ * @vf_id: VF ID.
+ *
+ * This function returns a boolean indicating whether the VF has finished
+ * processing a PF->VF mailbox request. It should only be called after sending
+ * an asynchronous request with hqm_send_async_pf_to_vf_msg().
+ */
+bool hqm_pf_to_vf_complete(struct hqm_hw *hw, unsigned int vf_id);
+
+/**
+ * hqm_pf_read_vf_mbox_req() - (PF only) read a VF->PF mailbox request
+ * @hw: hqm_hw handle for a particular device.
+ * @vf_id: VF ID.
+ * @data: pointer to message data.
+ * @len: size, in bytes, of the data array.
+ *
+ * This function copies one of the PF's VF->PF mailboxes into the array pointed
+ * to by data.
+ *
+ * Return:
+ * Returns 0 upon success, <0 otherwise.
+ *
+ * EINVAL - len > HQM_VF2PF_REQ_BYTES.
+ */
+int hqm_pf_read_vf_mbox_req(struct hqm_hw *hw,
+			    unsigned int vf_id,
+			    void *data,
+			    int len);
+
+/**
+ * hqm_pf_read_vf_mbox_resp() - (PF only) read a VF->PF mailbox response
+ * @hw: hqm_hw handle for a particular device.
+ * @vf_id: VF ID.
+ * @data: pointer to message data.
+ * @len: size, in bytes, of the data array.
+ *
+ * This function copies one of the PF's VF->PF mailboxes into the array pointed
+ * to by data.
+ *
+ * Return:
+ * Returns 0 upon success, <0 otherwise.
+ *
+ * EINVAL - len > HQM_VF2PF_RESP_BYTES.
+ */
+int hqm_pf_read_vf_mbox_resp(struct hqm_hw *hw,
+			     unsigned int vf_id,
+			     void *data,
+			     int len);
+
+/**
+ * hqm_pf_write_vf_mbox_resp() - (PF only) write a PF->VF mailbox response
+ * @hw: hqm_hw handle for a particular device.
+ * @vf_id: VF ID.
+ * @data: pointer to message data.
+ * @len: size, in bytes, of the data array.
+ *
+ * This function copies the user-provided message data into one of the PF's
+ * PF->VF mailboxes.
+ *
+ * Return:
+ * Returns 0 upon success, <0 otherwise.
+ *
+ * EINVAL - len > HQM_PF2VF_RESP_BYTES.
+ */
+int hqm_pf_write_vf_mbox_resp(struct hqm_hw *hw,
+			      unsigned int vf_id,
+			      void *data,
+			      int len);
+
+/**
+ * hqm_pf_write_vf_mbox_req() - (PF only) write a PF->VF mailbox request
+ * @hw: hqm_hw handle for a particular device.
+ * @vf_id: VF ID.
+ * @data: pointer to message data.
+ * @len: size, in bytes, of the data array.
+ *
+ * This function copies the user-provided message data into one of the PF's
+ * PF->VF mailboxes.
+ *
+ * Return:
+ * Returns 0 upon success, <0 otherwise.
+ *
+ * EINVAL - len > HQM_PF2VF_REQ_BYTES.
+ */
+int hqm_pf_write_vf_mbox_req(struct hqm_hw *hw,
+			     unsigned int vf_id,
+			     void *data,
+			     int len);
+
+/**
+ * hqm_vf_read_pf_mbox_resp() - (VF only) read a PF->VF mailbox response
+ * @hw: hqm_hw handle for a particular device.
+ * @data: pointer to message data.
+ * @len: size, in bytes, of the data array.
+ *
+ * This function copies the VF's PF->VF mailbox into the array pointed to by
+ * data.
+ *
+ * Return:
+ * Returns 0 upon success, <0 otherwise.
+ *
+ * EINVAL - len > HQM_PF2VF_RESP_BYTES, or len is 0.
+ */
+int hqm_vf_read_pf_mbox_resp(struct hqm_hw *hw, void *data, int len);
+
+/**
+ * hqm_vf_read_pf_mbox_req() - (VF only) read a PF->VF mailbox request
+ * @hw: hqm_hw handle for a particular device.
+ * @data: pointer to message data.
+ * @len: size, in bytes, of the data array.
+ *
+ * This function copies the VF's PF->VF mailbox into the array pointed to by
+ * data.
+ *
+ * Return:
+ * Returns 0 upon success, <0 otherwise.
+ *
+ * EINVAL - len > HQM_PF2VF_REQ_BYTES.
+ */
+int hqm_vf_read_pf_mbox_req(struct hqm_hw *hw, void *data, int len);
+
+/**
+ * hqm_vf_write_pf_mbox_req() - (VF only) write a VF->PF mailbox request
+ * @hw: hqm_hw handle for a particular device.
+ * @data: pointer to message data.
+ * @len: size, in bytes, of the data array.
+ *
+ * This function copies the user-provided message data into the VF's VF->PF
+ * mailbox.
+ *
+ * Return:
+ * Returns 0 upon success, <0 otherwise.
+ *
+ * EINVAL - len > HQM_VF2PF_REQ_BYTES.
+ */
+int hqm_vf_write_pf_mbox_req(struct hqm_hw *hw, void *data, int len);
+
+/**
+ * hqm_vf_write_pf_mbox_resp() - (VF only) write a VF->PF mailbox response
+ * @hw: hqm_hw handle for a particular device.
+ * @data: pointer to message data.
+ * @len: size, in bytes, of the data array.
+ *
+ * This function copies the user-provided message data into the VF's VF->PF
+ * mailbox.
+ *
+ * Return:
+ * Returns 0 upon success, <0 otherwise.
+ *
+ * EINVAL - len > HQM_VF2PF_RESP_BYTES.
+ */
+int hqm_vf_write_pf_mbox_resp(struct hqm_hw *hw, void *data, int len);
+
+/**
+ * hqm_reset_vf() - reset the hardware owned by a VF
+ * @hw: hqm_hw handle for a particular device.
+ * @vf_id: VF ID
+ *
+ * This function resets the hardware owned by a VF (if any), by resetting the
+ * VF's domains one by one.
+ */
+int hqm_reset_vf(struct hqm_hw *hw, unsigned int vf_id);
+
+/**
+ * hqm_vf_is_locked() - check whether the VF's resources are locked
+ * @hw: hqm_hw handle for a particular device.
+ * @vf_id: VF ID
+ *
+ * This function returns whether or not the VF's resource assignments are
+ * locked. If locked, no resources can be added to or subtracted from the
+ * group.
+ */
+bool hqm_vf_is_locked(struct hqm_hw *hw, unsigned int vf_id);
+
+/**
+ * hqm_lock_vf() - lock the VF's resources
+ * @hw: hqm_hw handle for a particular device.
+ * @vf_id: VF ID
+ *
+ * This function sets a flag indicating that the VF is using its resources.
+ * When a VF is locked, its resource assignment cannot be changed.
+ */
+void hqm_lock_vf(struct hqm_hw *hw, unsigned int vf_id);
+
+/**
+ * hqm_unlock_vf() - unlock the VF's resources
+ * @hw: hqm_hw handle for a particular device.
+ * @vf_id: VF ID
+ *
+ * This function unlocks the VF's resource assignment, allowing it to be
+ * modified.
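+ *
+ * A presumed PF-side sequence (illustrative, not prescribed by this header):
+ * assign resources with the hqm_update_vf_*() functions below, call
+ * hqm_lock_vf() while the VF is using them, and call hqm_unlock_vf() (and
+ * optionally hqm_reset_vf_resources()) once the VF is no longer using the
+ * device.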
+ */ +void hqm_unlock_vf(struct hqm_hw *hw, unsigned int vf_id); + +/** + * hqm_update_vf_sched_domains() - update the domains assigned to a VF + * @hw: hqm_hw handle for a particular device. + * @vf_id: VF ID + * @num: number of scheduling domains to assign to this VF + * + * This function assigns num scheduling domains to the specified VF. If the VF + * already has domains assigned, this existing assignment is adjusted + * accordingly. + * + * Return: + * Returns 0 upon success, <0 otherwise. + * + * Errors: + * EINVAL - vf_id is invalid, or the requested number of resources are + * unavailable. + * EPERM - The VF's resource assignment is locked and cannot be changed. + */ +int hqm_update_vf_sched_domains(struct hqm_hw *hw, + u32 vf_id, + u32 num); + +/** + * hqm_update_vf_ldb_queues() - update the LDB queues assigned to a VF + * @hw: hqm_hw handle for a particular device. + * @vf_id: VF ID + * @num: number of LDB queues to assign to this VF + * + * This function assigns num LDB queues to the specified VF. If the VF already + * has LDB queues assigned, this existing assignment is adjusted + * accordingly. + * + * Return: + * Returns 0 upon success, <0 otherwise. + * + * Errors: + * EINVAL - vf_id is invalid, or the requested number of resources are + * unavailable. + * EPERM - The VF's resource assignment is locked and cannot be changed. + */ +int hqm_update_vf_ldb_queues(struct hqm_hw *hw, u32 vf_id, u32 num); + +/** + * hqm_update_vf_ldb_ports() - update the LDB ports assigned to a VF + * @hw: hqm_hw handle for a particular device. + * @vf_id: VF ID + * @num: number of LDB ports to assign to this VF + * + * This function assigns num LDB ports to the specified VF. If the VF already + * has LDB ports assigned, this existing assignment is adjusted accordingly. + * + * Return: + * Returns 0 upon success, <0 otherwise. + * + * Errors: + * EINVAL - vf_id is invalid, or the requested number of resources are + * unavailable. + * EPERM - The VF's resource assignment is locked and cannot be changed. + */ +int hqm_update_vf_ldb_ports(struct hqm_hw *hw, u32 vf_id, u32 num); + +/** + * hqm_update_vf_dir_ports() - update the DIR ports assigned to a VF + * @hw: hqm_hw handle for a particular device. + * @vf_id: VF ID + * @num: number of DIR ports to assign to this VF + * + * This function assigns num DIR ports to the specified VF. If the VF already + * has DIR ports assigned, this existing assignment is adjusted accordingly. + * + * Return: + * Returns 0 upon success, <0 otherwise. + * + * Errors: + * EINVAL - vf_id is invalid, or the requested number of resources are + * unavailable. + * EPERM - The VF's resource assignment is locked and cannot be changed. + */ +int hqm_update_vf_dir_ports(struct hqm_hw *hw, u32 vf_id, u32 num); + +/** + * hqm_update_vf_ldb_credit_pools() - update the VF's assigned LDB pools + * @hw: hqm_hw handle for a particular device. + * @vf_id: VF ID + * @num: number of LDB credit pools to assign to this VF + * + * This function assigns num LDB credit pools to the specified VF. If the VF + * already has LDB credit pools assigned, this existing assignment is adjusted + * accordingly. + * + * Return: + * Returns 0 upon success, <0 otherwise. + * + * Errors: + * EINVAL - vf_id is invalid, or the requested number of resources are + * unavailable. + * EPERM - The VF's resource assignment is locked and cannot be changed. 
+ */ +int hqm_update_vf_ldb_credit_pools(struct hqm_hw *hw, + u32 vf_id, + u32 num); + +/** + * hqm_update_vf_dir_credit_pools() - update the VF's assigned DIR pools + * @hw: hqm_hw handle for a particular device. + * @vf_id: VF ID + * @num: number of DIR credit pools to assign to this VF + * + * This function assigns num DIR credit pools to the specified VF. If the VF + * already has DIR credit pools assigned, this existing assignment is adjusted + * accordingly. + * + * Return: + * Returns 0 upon success, <0 otherwise. + * + * Errors: + * EINVAL - vf_id is invalid, or the requested number of resources are + * unavailable. + * EPERM - The VF's resource assignment is locked and cannot be changed. + */ +int hqm_update_vf_dir_credit_pools(struct hqm_hw *hw, + u32 vf_id, + u32 num); + +/** + * hqm_update_vf_ldb_credits() - update the VF's assigned LDB credits + * @hw: hqm_hw handle for a particular device. + * @vf_id: VF ID + * @num: number of LDB credits to assign to this VF + * + * This function assigns num LDB credits to the specified VF. If the VF already + * has LDB credits assigned, this existing assignment is adjusted accordingly. + * VF's are assigned a contiguous chunk of credits, so this function may fail + * if a sufficiently large contiguous chunk is not available. + * + * Return: + * Returns 0 upon success, <0 otherwise. + * + * Errors: + * EINVAL - vf_id is invalid, or the requested number of resources are + * unavailable. + * EPERM - The VF's resource assignment is locked and cannot be changed. + */ +int hqm_update_vf_ldb_credits(struct hqm_hw *hw, u32 vf_id, u32 num); + +/** + * hqm_update_vf_dir_credits() - update the VF's assigned DIR credits + * @hw: hqm_hw handle for a particular device. + * @vf_id: VF ID + * @num: number of DIR credits to assign to this VF + * + * This function assigns num DIR credits to the specified VF. If the VF already + * has DIR credits assigned, this existing assignment is adjusted accordingly. + * VF's are assigned a contiguous chunk of credits, so this function may fail + * if a sufficiently large contiguous chunk is not available. + * + * Return: + * Returns 0 upon success, <0 otherwise. + * + * Errors: + * EINVAL - vf_id is invalid, or the requested number of resources are + * unavailable. + * EPERM - The VF's resource assignment is locked and cannot be changed. + */ +int hqm_update_vf_dir_credits(struct hqm_hw *hw, u32 vf_id, u32 num); + +/** + * hqm_update_vf_hist_list_entries() - update the VF's assigned HL entries + * @hw: hqm_hw handle for a particular device. + * @vf_id: VF ID + * @num: number of history list entries to assign to this VF + * + * This function assigns num history list entries to the specified VF. If the + * VF already has history list entries assigned, this existing assignment is + * adjusted accordingly. VF's are assigned a contiguous chunk of entries, so + * this function may fail if a sufficiently large contiguous chunk is not + * available. + * + * Return: + * Returns 0 upon success, <0 otherwise. + * + * Errors: + * EINVAL - vf_id is invalid, or the requested number of resources are + * unavailable. + * EPERM - The VF's resource assignment is locked and cannot be changed. + */ +int hqm_update_vf_hist_list_entries(struct hqm_hw *hw, + u32 vf_id, + u32 num); + +/** + * hqm_update_vf_atomic_inflights() - update the VF's atomic inflights + * @hw: hqm_hw handle for a particular device. 
+ * @vf_id: VF ID + * @num: number of atomic inflights to assign to this VF + * + * This function assigns num atomic inflights to the specified VF. If the VF + * already has atomic inflights assigned, this existing assignment is adjusted + * accordingly. VF's are assigned a contiguous chunk of entries, so this + * function may fail if a sufficiently large contiguous chunk is not available. + * + * Return: + * Returns 0 upon success, <0 otherwise. + * + * Errors: + * EINVAL - vf_id is invalid, or the requested number of resources are + * unavailable. + * EPERM - The VF's resource assignment is locked and cannot be changed. + */ +int hqm_update_vf_atomic_inflights(struct hqm_hw *hw, + u32 vf_id, + u32 num); + +/** + * hqm_reset_vf_resources() - reassign the VF's resources to the PF + * @hw: hqm_hw handle for a particular device. + * @vf_id: VF ID + * + * This function takes any resources currently assigned to the VF and reassigns + * them to the PF. + * + * Return: + * Returns 0 upon success, <0 otherwise. + * + * Errors: + * EINVAL - vf_id is invalid + * EPERM - The VF's resource assignment is locked and cannot be changed. + */ +int hqm_reset_vf_resources(struct hqm_hw *hw, unsigned int vf_id); + +/** + * hqm_notify_vf() - send a notification to a VF + * @hw: hqm_hw handle for a particular device. + * @vf_id: VF ID + * @notification: notification + * + * This function sends a notification (as defined in hqm_mbox.h) to a VF. + * + * Return: + * Returns 0 upon success, -1 if the VF doesn't ACK the PF->VF interrupt. + */ +int hqm_notify_vf(struct hqm_hw *hw, + unsigned int vf_id, + u32 notification); + +/** + * hqm_vf_in_use() - query whether a VF is in use + * @hw: hqm_hw handle for a particular device. + * @vf_id: VF ID + * + * This function sends a mailbox request to the VF to query whether the VF is in + * use. + * + * Return: + * Returns 0 for false, 1 for true, and -1 if the mailbox request times out or + * an internal error occurs. + */ +int hqm_vf_in_use(struct hqm_hw *hw, unsigned int vf_id); + +/** + * hqm_disable_dp_vasr_feature() - disable directed pipe VAS reset hardware + * @hw: hqm_hw handle for a particular device. + * + * This function disables certain hardware in the directed pipe, + * necessary to workaround an HQM 1.0 VAS reset issue. + */ +void hqm_disable_dp_vasr_feature(struct hqm_hw *hw); + +/** + * hqm_enable_excess_tokens_alarm() - enable interrupts for the excess token + * pop alarm + * @hw: hqm_hw handle for a particular device. + * + * This function enables the PF ingress error alarm interrupt to fire when an + * excess token pop occurs. + */ +void hqm_enable_excess_tokens_alarm(struct hqm_hw *hw); + +/** + * hqm_hw_get_ldb_queue_depth() - returns the depth of a load-balanced queue + * @hw: hqm_hw handle for a particular device. + * @domain_id: domain ID. + * @args: queue depth args + * @vf_request: indicates whether this request came from a VF. + * @vf_id: If vf_request is true, this contains the VF's ID. + * + * This function returns the depth of a load-balanced queue. + * + * Return: + * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is + * assigned a detailed error code from enum hqm_error. If successful, resp->id + * contains the depth. + * + * Errors: + * EINVAL - Invalid domain ID or queue ID. 
+ */ +int hqm_hw_get_ldb_queue_depth(struct hqm_hw *hw, + u32 domain_id, + struct hqm_get_ldb_queue_depth_args *args, + struct hqm_cmd_response *resp, + bool vf_request, + unsigned int vf_id); + +/** + * hqm_hw_get_dir_queue_depth() - returns the depth of a directed queue + * @hw: hqm_hw handle for a particular device. + * @domain_id: domain ID. + * @args: queue depth args + * @vf_request: indicates whether this request came from a VF. + * @vf_id: If vf_request is true, this contains the VF's ID. + * + * This function returns the depth of a directed queue. + * + * Return: + * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is + * assigned a detailed error code from enum hqm_error. If successful, resp->id + * contains the depth. + * + * Errors: + * EINVAL - Invalid domain ID or queue ID. + */ +int hqm_hw_get_dir_queue_depth(struct hqm_hw *hw, + u32 domain_id, + struct hqm_get_dir_queue_depth_args *args, + struct hqm_cmd_response *resp, + bool vf_request, + unsigned int vf_id); + +/** + * hqm_hw_pending_port_unmaps() - returns the number of unmap operations in + * progress for a load-balanced port. + * @hw: hqm_hw handle for a particular device. + * @domain_id: domain ID. + * @args: number of unmaps in progress args + * @vf_request: indicates whether this request came from a VF. + * @vf_id: If vf_request is true, this contains the VF's ID. + * + * Return: + * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is + * assigned a detailed error code from enum hqm_error. If successful, resp->id + * contains the number of unmaps in progress. + * + * Errors: + * EINVAL - Invalid port ID. + */ +int hqm_hw_pending_port_unmaps(struct hqm_hw *hw, + u32 domain_id, + struct hqm_pending_port_unmaps_args *args, + struct hqm_cmd_response *resp, + bool vf_request, + unsigned int vf_id); + +/** + * hqm_hw_enable_sparse_ldb_cq_mode() - enable sparse mode for load-balanced + * ports. + * @hw: hqm_hw handle for a particular device. + * + * This function must be called prior to configuring scheduling domains. + */ +void hqm_hw_enable_sparse_ldb_cq_mode(struct hqm_hw *hw); + +/** + * hqm_hw_enable_sparse_dir_cq_mode() - enable sparse mode for directed ports + * @hw: hqm_hw handle for a particular device. + * + * This function must be called prior to configuring scheduling domains. + */ +void hqm_hw_enable_sparse_dir_cq_mode(struct hqm_hw *hw); + +/** + * hqm_hw_disable_sparse_ldb_cq_mode() - disable sparse mode for load-balanced + * ports. + * @hw: hqm_hw handle for a particular device. + * + * This function must be called prior to configuring scheduling domains. + */ +void hqm_hw_disable_sparse_ldb_cq_mode(struct hqm_hw *hw); + +/** + * hqm_hw_disable_sparse_dir_cq_mode() - disable sparse mode for directed ports + * @hw: hqm_hw handle for a particular device. + * + * This function must be called prior to configuring scheduling domains. 
+ */ +void hqm_hw_disable_sparse_dir_cq_mode(struct hqm_hw *hw); + +#endif /* __HQM_BASE_HQM_API_H */ diff --git a/drivers/misc/hqm/hqm_smon.c b/drivers/misc/hqm/hqm_smon.c new file mode 100644 index 00000000000000..dd4fc40b060f1a --- /dev/null +++ b/drivers/misc/hqm/hqm_smon.c @@ -0,0 +1,282 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright(c) 2016-2019 Intel Corporation */ + +#include "hqm_hw_types.h" +#include "hqm_osdep.h" +#include "hqm_osdep_types.h" +#include "hqm_smon.h" +#include "hqm_regs.h" + +struct hqm_smon_cfg { + u8 mode[2]; + u8 cmp[2]; + u32 mask[2]; + u32 cnt[2]; + u32 timer; + u32 max_timer; + u8 fn[2]; + u8 fn_cmp[2]; + u8 smon_mode; + u8 stop_counter_mode; + u8 stop_timer_mode; +}; + +#define DEFAULT_SMON_CFG \ + {{0, 0}, {0, 0}, {0xFFFFFFFF, 0xFFFFFFFF}, \ + {0, 0}, 0, 0, {0, 0}, {0, 0}, 0, 0, 0} + +static void hqm_smon_setup_sys_perf_smon(struct hqm_hw *hw, + struct hqm_smon_cfg *cfg) +{ + union hqm_sys_smon_cfg0 r0 = { {0} }; + union hqm_sys_smon_cfg1 r1 = { {0} }; + union hqm_sys_smon_compare0 r2 = { {0} }; + union hqm_sys_smon_compare1 r3 = { {0} }; + union hqm_sys_smon_comp_mask0 r4 = { {0} }; + union hqm_sys_smon_comp_mask1 r5 = { {0} }; + union hqm_sys_smon_activitycntr0 r6 = { {0} }; + union hqm_sys_smon_activitycntr1 r7 = { {0} }; + union hqm_sys_smon_tmr r8 = { {0} }; + union hqm_sys_smon_max_tmr r9 = { {0} }; + + /* Disable the SMON before configuring it */ + HQM_CSR_WR(hw, HQM_SYS_SMON_CFG0, r0.val); + + r1.field.mode0 = cfg->mode[0]; + r1.field.mode1 = cfg->mode[1]; + + HQM_CSR_WR(hw, HQM_SYS_SMON_CFG1, r1.val); + + r2.field.compare0 = cfg->cmp[0]; + + HQM_CSR_WR(hw, HQM_SYS_SMON_COMPARE0, r2.val); + + r3.field.compare1 = cfg->cmp[1]; + + HQM_CSR_WR(hw, HQM_SYS_SMON_COMPARE1, r3.val); + + r4.field.comp_mask0 = cfg->mask[0]; + + HQM_CSR_WR(hw, HQM_SYS_SMON_COMP_MASK0, r4.val); + + r5.field.comp_mask1 = cfg->mask[1]; + + HQM_CSR_WR(hw, HQM_SYS_SMON_COMP_MASK1, r5.val); + + r6.field.counter0 = cfg->cnt[0]; + + HQM_CSR_WR(hw, HQM_SYS_SMON_ACTIVITYCNTR0, r6.val); + + r7.field.counter1 = cfg->cnt[1]; + + HQM_CSR_WR(hw, HQM_SYS_SMON_ACTIVITYCNTR1, r7.val); + + r8.field.timer_val = cfg->timer; + + HQM_CSR_WR(hw, HQM_SYS_SMON_TMR, r8.val); + + r9.field.maxvalue = cfg->max_timer; + + HQM_CSR_WR(hw, HQM_SYS_SMON_MAX_TMR, r9.val); + + r0.field.smon_enable = 1; + r0.field.smon0_function = cfg->fn[0]; + r0.field.smon0_function_compare = cfg->fn_cmp[0]; + r0.field.smon1_function = cfg->fn[1]; + r0.field.smon1_function_compare = cfg->fn_cmp[1]; + r0.field.smon_mode = cfg->smon_mode; + r0.field.stopcounterovfl = cfg->stop_counter_mode; + r0.field.stoptimerovfl = cfg->stop_timer_mode; + + /* Enable the SMON */ + HQM_CSR_WR(hw, HQM_SYS_SMON_CFG0, r0.val); +} + +static void hqm_smon_setup_chp_perf_smon(struct hqm_hw *hw, + struct hqm_smon_cfg *cfg) +{ + union hqm_chp_smon_cfg0 r0 = { {0} }; + union hqm_chp_smon_cfg1 r1 = { {0} }; + union hqm_chp_smon_compare0 r2 = { {0} }; + union hqm_chp_smon_compare1 r3 = { {0} }; + union hqm_chp_smon_cntr0 r4 = { {0} }; + union hqm_chp_smon_cntr1 r5 = { {0} }; + union hqm_chp_smon_tmr r6 = { {0} }; + union hqm_chp_smon_max_tmr r7 = { {0} }; + + /* Disable the SMON before configuring it */ + HQM_CSR_WR(hw, HQM_CHP_SMON_CFG0, r0.val); + + r1.field.mode0 = cfg->mode[0]; + r1.field.mode1 = cfg->mode[1]; + + HQM_CSR_WR(hw, HQM_CHP_SMON_CFG1, r1.val); + + r2.field.compare0 = cfg->cmp[0]; + + HQM_CSR_WR(hw, HQM_CHP_SMON_COMPARE0, r2.val); + + r3.field.compare1 = cfg->cmp[1]; + + HQM_CSR_WR(hw, HQM_CHP_SMON_COMPARE1, r3.val); + + 
r4.field.counter0 = cfg->cnt[0]; + + HQM_CSR_WR(hw, HQM_CHP_SMON_CNTR0, r4.val); + + r5.field.counter1 = cfg->cnt[1]; + + HQM_CSR_WR(hw, HQM_CHP_SMON_CNTR1, r5.val); + + r6.field.timer = cfg->timer; + + HQM_CSR_WR(hw, HQM_CHP_SMON_TMR, r6.val); + + r7.field.maxvalue = cfg->max_timer; + + HQM_CSR_WR(hw, HQM_CHP_SMON_MAX_TMR, r7.val); + + r0.field.smon_enable = 1; + r0.field.smon0_function = cfg->fn[0]; + r0.field.smon0_function_compare = cfg->fn_cmp[0]; + r0.field.smon1_function = cfg->fn[1]; + r0.field.smon1_function_compare = cfg->fn_cmp[1]; + r0.field.smon_mode = cfg->smon_mode; + r0.field.stopcounterovfl = cfg->stop_counter_mode; + r0.field.stoptimerovfl = cfg->stop_timer_mode; + + /* Enable the SMON */ + HQM_CSR_WR(hw, HQM_CHP_SMON_CFG0, r0.val); +} + +void hqm_smon_setup_sys_iosf_measurements(struct hqm_hw *hw, + u32 max) +{ + struct hqm_smon_cfg sys_pm_cfg = DEFAULT_SMON_CFG; + + sys_pm_cfg.mode[1] = 1; + sys_pm_cfg.max_timer = max; + sys_pm_cfg.stop_timer_mode = 1; + sys_pm_cfg.stop_counter_mode = 1; + + hqm_smon_setup_sys_perf_smon(hw, &sys_pm_cfg); +} + +void hqm_smon_setup_chp_ing_egr_measurements(struct hqm_hw *hw, + u32 max) +{ + struct hqm_smon_cfg chp_pm_cfg = DEFAULT_SMON_CFG; + + chp_pm_cfg.mode[1] = 1; + chp_pm_cfg.max_timer = max; + chp_pm_cfg.stop_timer_mode = 1; + chp_pm_cfg.stop_counter_mode = 1; + + hqm_smon_setup_chp_perf_smon(hw, &chp_pm_cfg); +} + +void hqm_smon_setup_chp_pp_count_measurement(struct hqm_hw *hw, + u32 max) +{ + struct hqm_smon_cfg chp_pm_cfg = DEFAULT_SMON_CFG; + + chp_pm_cfg.mode[0] = 2; + chp_pm_cfg.mode[1] = 2; + chp_pm_cfg.max_timer = max; + chp_pm_cfg.stop_timer_mode = 1; + chp_pm_cfg.stop_counter_mode = 1; + + hqm_smon_setup_chp_perf_smon(hw, &chp_pm_cfg); +} + +void hqm_smon_setup_chp_avg_measurement(struct hqm_hw *hw, + enum hqm_chp_smon_component type, + u32 max) +{ + struct hqm_smon_cfg chp_pm_cfg = DEFAULT_SMON_CFG; + + chp_pm_cfg.mode[0] = type; + chp_pm_cfg.mode[1] = type; + chp_pm_cfg.max_timer = max; + chp_pm_cfg.stop_timer_mode = 1; + chp_pm_cfg.stop_counter_mode = 1; + chp_pm_cfg.smon_mode = 3; + + hqm_smon_setup_chp_perf_smon(hw, &chp_pm_cfg); +} + +void hqm_smon_setup_chp_hcw_measurements(struct hqm_hw *hw, + enum hqm_chp_smon_hcw_type type_0, + enum hqm_chp_smon_hcw_type type_1, + u32 max) +{ + struct hqm_smon_cfg chp_pm_cfg = DEFAULT_SMON_CFG; + + chp_pm_cfg.mode[0] = 0; + chp_pm_cfg.mode[1] = 0; + chp_pm_cfg.cmp[0] = type_0; + chp_pm_cfg.cmp[1] = type_1; + chp_pm_cfg.fn_cmp[0] = 1; + chp_pm_cfg.fn_cmp[1] = 1; + chp_pm_cfg.max_timer = max; + chp_pm_cfg.stop_timer_mode = 1; + chp_pm_cfg.stop_counter_mode = 1; + + hqm_smon_setup_chp_perf_smon(hw, &chp_pm_cfg); +} + +void hqm_smon_read_sys_perf_counter(struct hqm_hw *hw, + enum hqm_smon_meas_type type, + u32 counter[2]) +{ + union hqm_sys_smon_cfg0 r0 = { {0} }; + union hqm_sys_smon_activitycntr0 r1; + union hqm_sys_smon_activitycntr1 r2; + + /* Disable the SMON */ + HQM_CSR_WR(hw, HQM_SYS_SMON_CFG0, r0.val); + + r1.val = HQM_CSR_RD(hw, HQM_SYS_SMON_ACTIVITYCNTR0); + r2.val = HQM_CSR_RD(hw, HQM_SYS_SMON_ACTIVITYCNTR1); + + if (type == SMON_MEASURE_CNT) { + counter[0] = r1.val; + counter[1] = r2.val; + } else if (type == SMON_MEASURE_AVG) { + counter[0] = (r1.val) ? 
(r2.val / r1.val) : 0;
+	}
+}
+
+void hqm_smon_read_chp_perf_counter(struct hqm_hw *hw,
+				    enum hqm_smon_meas_type type,
+				    u32 counter[2])
+{
+	union hqm_chp_smon_cfg0 r0 = { {0} };
+	union hqm_chp_smon_cntr0 r1;
+	union hqm_chp_smon_cntr1 r2;
+	u64 counter0_overflow = 0;
+	u64 counter1_overflow = 0;
+	u64 count64[2];
+
+	r0.val = HQM_CSR_RD(hw, HQM_CHP_SMON_CFG0);
+
+	if (r0.field.statcounter0ovfl)
+		counter0_overflow = 0x100000000;
+	if (r0.field.statcounter1ovfl)
+		counter1_overflow = 0x100000000;
+
+	/* Disable the SMON */
+	HQM_CSR_WR(hw, HQM_CHP_SMON_CFG0, r0.val);
+
+	r1.val = HQM_CSR_RD(hw, HQM_CHP_SMON_CNTR0);
+	r2.val = HQM_CSR_RD(hw, HQM_CHP_SMON_CNTR1);
+
+	if (type == SMON_MEASURE_CNT) {
+		counter[0] = r1.val;
+		counter[1] = r2.val;
+	} else if (type == SMON_MEASURE_AVG) {
+		count64[0] = ((u64)r1.val) + counter0_overflow;
+		count64[1] = ((u64)r2.val) + counter1_overflow;
+		counter[0] = (count64[0]) ? (count64[1] / count64[0]) : 0;
+	}
+}
diff --git a/drivers/misc/hqm/hqm_smon.h b/drivers/misc/hqm/hqm_smon.h
new file mode 100644
index 00000000000000..339f6f1fa8eba5
--- /dev/null
+++ b/drivers/misc/hqm/hqm_smon.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ * Copyright(c) 2016-2019 Intel Corporation
+ */
+
+#ifndef __HQM_SMON_H
+#define __HQM_SMON_H
+
+#include "hqm_hw_types.h"
+#include "hqm_osdep_types.h"
+
+enum hqm_chp_smon_component {
+	/* IDs 0-2 are unused */
+	SMON_COMP_DQED = 3,
+	SMON_COMP_QED,
+	SMON_COMP_HIST_LIST,
+};
+
+enum hqm_chp_smon_hcw_type {
+	SMON_HCW_NOOP,
+	SMON_HCW_BAT_T,
+	SMON_HCW_COMP,
+	SMON_HCW_COMP_T,
+	SMON_HCW_REL,
+	/* IDs 5-7 are unused */
+	SMON_HCW_ENQ = 8,
+	SMON_HCW_ENQ_T,
+	SMON_HCW_RENQ,
+	SMON_HCW_RENQ_T,
+	SMON_HCW_FRAG,
+	SMON_HCW_FRAG_T,
+};
+
+void hqm_smon_setup_sys_iosf_measurements(struct hqm_hw *hw, u32 max);
+
+void hqm_smon_setup_chp_ing_egr_measurements(struct hqm_hw *hw, u32 max);
+
+void hqm_smon_setup_chp_pp_count_measurement(struct hqm_hw *hw, u32 max);
+
+void hqm_smon_setup_chp_avg_measurement(struct hqm_hw *hw,
+					enum hqm_chp_smon_component type,
+					u32 max);
+
+void hqm_smon_setup_chp_hcw_measurements(struct hqm_hw *hw,
+					 enum hqm_chp_smon_hcw_type type_0,
+					 enum hqm_chp_smon_hcw_type type_1,
+					 u32 max);
+
+enum hqm_smon_meas_type {
+	SMON_MEASURE_CNT,
+	SMON_MEASURE_AVG
+};
+
+void hqm_smon_read_sys_perf_counter(struct hqm_hw *hw,
+				    enum hqm_smon_meas_type type,
+				    u32 counter[2]);
+
+void hqm_smon_read_chp_perf_counter(struct hqm_hw *hw,
+				    enum hqm_smon_meas_type type,
+				    u32 counter[2]);
+
+#endif /* __HQM_SMON_H */
diff --git a/include/uapi/linux/hqm_user.h b/include/uapi/linux/hqm_user.h
new file mode 100644
index 00000000000000..a8f658a7dc8623
--- /dev/null
+++ b/include/uapi/linux/hqm_user.h
@@ -0,0 +1,1232 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
+ * Copyright(c) 2016-2018 Intel Corporation
+ */
+
+#ifndef __HQM_USER_H
+#define __HQM_USER_H
+
+#define HQM_MAX_NAME_LEN 64
+
+#include <linux/types.h>
+
+enum hqm_error {
+	HQM_ST_SUCCESS = 0,
+	HQM_ST_NAME_EXISTS,
+	HQM_ST_DOMAIN_UNAVAILABLE,
+	HQM_ST_LDB_PORTS_UNAVAILABLE,
+	HQM_ST_DIR_PORTS_UNAVAILABLE,
+	HQM_ST_LDB_QUEUES_UNAVAILABLE,
+	HQM_ST_LDB_CREDITS_UNAVAILABLE,
+	HQM_ST_DIR_CREDITS_UNAVAILABLE,
+	HQM_ST_LDB_CREDIT_POOLS_UNAVAILABLE,
+	HQM_ST_DIR_CREDIT_POOLS_UNAVAILABLE,
+	HQM_ST_SEQUENCE_NUMBERS_UNAVAILABLE,
+	HQM_ST_INVALID_DOMAIN_ID,
+	HQM_ST_INVALID_QID_INFLIGHT_ALLOCATION,
+	HQM_ST_ATOMIC_INFLIGHTS_UNAVAILABLE,
+	HQM_ST_HIST_LIST_ENTRIES_UNAVAILABLE,
+	HQM_ST_INVALID_LDB_CREDIT_POOL_ID,
+	HQM_ST_INVALID_DIR_CREDIT_POOL_ID,
+
HQM_ST_INVALID_POP_COUNT_VIRT_ADDR, + HQM_ST_INVALID_LDB_QUEUE_ID, + HQM_ST_INVALID_CQ_DEPTH, + HQM_ST_INVALID_CQ_VIRT_ADDR, + HQM_ST_INVALID_PORT_ID, + HQM_ST_INVALID_QID, + HQM_ST_INVALID_PRIORITY, + HQM_ST_NO_QID_SLOTS_AVAILABLE, + HQM_ST_QED_FREELIST_ENTRIES_UNAVAILABLE, + HQM_ST_DQED_FREELIST_ENTRIES_UNAVAILABLE, + HQM_ST_INVALID_DIR_QUEUE_ID, + HQM_ST_DIR_QUEUES_UNAVAILABLE, + HQM_ST_INVALID_LDB_CREDIT_LOW_WATERMARK, + HQM_ST_INVALID_LDB_CREDIT_QUANTUM, + HQM_ST_INVALID_DIR_CREDIT_LOW_WATERMARK, + HQM_ST_INVALID_DIR_CREDIT_QUANTUM, + HQM_ST_DOMAIN_NOT_CONFIGURED, + HQM_ST_PID_ALREADY_ATTACHED, + HQM_ST_PID_NOT_ATTACHED, + HQM_ST_INTERNAL_ERROR, + HQM_ST_DOMAIN_IN_USE, + HQM_ST_IOMMU_MAPPING_ERROR, + HQM_ST_FAIL_TO_PIN_MEMORY_PAGE, + HQM_ST_UNABLE_TO_PIN_POPCOUNT_PAGES, + HQM_ST_UNABLE_TO_PIN_CQ_PAGES, + HQM_ST_DISCONTIGUOUS_CQ_MEMORY, + HQM_ST_DISCONTIGUOUS_POP_COUNT_MEMORY, + HQM_ST_DOMAIN_STARTED, + HQM_ST_LARGE_POOL_NOT_SPECIFIED, + HQM_ST_SMALL_POOL_NOT_SPECIFIED, + HQM_ST_NEITHER_POOL_SPECIFIED, + HQM_ST_DOMAIN_NOT_STARTED, + HQM_ST_INVALID_MEASUREMENT_DURATION, + HQM_ST_INVALID_PERF_METRIC_GROUP_ID, + HQM_ST_LDB_PORT_REQUIRED_FOR_LDB_QUEUES, + HQM_ST_DOMAIN_RESET_FAILED, + HQM_ST_MBOX_ERROR, + HQM_ST_INVALID_HIST_LIST_DEPTH, + HQM_ST_NO_MEMORY, +}; + +static const char hqm_error_strings[][128] = { + "HQM_ST_SUCCESS", + "HQM_ST_NAME_EXISTS", + "HQM_ST_DOMAIN_UNAVAILABLE", + "HQM_ST_LDB_PORTS_UNAVAILABLE", + "HQM_ST_DIR_PORTS_UNAVAILABLE", + "HQM_ST_LDB_QUEUES_UNAVAILABLE", + "HQM_ST_LDB_CREDITS_UNAVAILABLE", + "HQM_ST_DIR_CREDITS_UNAVAILABLE", + "HQM_ST_LDB_CREDIT_POOLS_UNAVAILABLE", + "HQM_ST_DIR_CREDIT_POOLS_UNAVAILABLE", + "HQM_ST_SEQUENCE_NUMBERS_UNAVAILABLE", + "HQM_ST_INVALID_DOMAIN_ID", + "HQM_ST_INVALID_QID_INFLIGHT_ALLOCATION", + "HQM_ST_ATOMIC_INFLIGHTS_UNAVAILABLE", + "HQM_ST_HIST_LIST_ENTRIES_UNAVAILABLE", + "HQM_ST_INVALID_LDB_CREDIT_POOL_ID", + "HQM_ST_INVALID_DIR_CREDIT_POOL_ID", + "HQM_ST_INVALID_POP_COUNT_VIRT_ADDR", + "HQM_ST_INVALID_LDB_QUEUE_ID", + "HQM_ST_INVALID_CQ_DEPTH", + "HQM_ST_INVALID_CQ_VIRT_ADDR", + "HQM_ST_INVALID_PORT_ID", + "HQM_ST_INVALID_QID", + "HQM_ST_INVALID_PRIORITY", + "HQM_ST_NO_QID_SLOTS_AVAILABLE", + "HQM_ST_QED_FREELIST_ENTRIES_UNAVAILABLE", + "HQM_ST_DQED_FREELIST_ENTRIES_UNAVAILABLE", + "HQM_ST_INVALID_DIR_QUEUE_ID", + "HQM_ST_DIR_QUEUES_UNAVAILABLE", + "HQM_ST_INVALID_LDB_CREDIT_LOW_WATERMARK", + "HQM_ST_INVALID_LDB_CREDIT_QUANTUM", + "HQM_ST_INVALID_DIR_CREDIT_LOW_WATERMARK", + "HQM_ST_INVALID_DIR_CREDIT_QUANTUM", + "HQM_ST_DOMAIN_NOT_CONFIGURED", + "HQM_ST_PID_ALREADY_ATTACHED", + "HQM_ST_PID_NOT_ATTACHED", + "HQM_ST_INTERNAL_ERROR", + "HQM_ST_DOMAIN_IN_USE", + "HQM_ST_IOMMU_MAPPING_ERROR", + "HQM_ST_FAIL_TO_PIN_MEMORY_PAGE", + "HQM_ST_UNABLE_TO_PIN_POPCOUNT_PAGES", + "HQM_ST_UNABLE_TO_PIN_CQ_PAGES", + "HQM_ST_DISCONTIGUOUS_CQ_MEMORY", + "HQM_ST_DISCONTIGUOUS_POP_COUNT_MEMORY", + "HQM_ST_DOMAIN_STARTED", + "HQM_ST_LARGE_POOL_NOT_SPECIFIED", + "HQM_ST_SMALL_POOL_NOT_SPECIFIED", + "HQM_ST_NEITHER_POOL_SPECIFIED", + "HQM_ST_DOMAIN_NOT_STARTED", + "HQM_ST_INVALID_MEASUREMENT_DURATION", + "HQM_ST_INVALID_PERF_METRIC_GROUP_ID", + "HQM_ST_LDB_PORT_REQUIRED_FOR_LDB_QUEUES", + "HQM_ST_DOMAIN_RESET_FAILED", + "HQM_ST_MBOX_ERROR", + "HQM_ST_INVALID_HIST_LIST_DEPTH", + "HQM_ST_NO_MEMORY", +}; + +struct hqm_cmd_response { + __u32 status; /* Interpret using enum hqm_error */ + __u32 id; +}; + +/******************************/ +/* 'hqm' device file commands */ +/******************************/ + +#define HQM_DEVICE_VERSION(x) (((x) >> 8) & 0xFF) 
+#define HQM_DEVICE_REVISION(x) ((x) & 0xFF)
+
+enum hqm_revisions {
+	HQM_REV_A0 = 0,
+	HQM_REV_A1 = 1,
+	HQM_REV_A2 = 2,
+	HQM_REV_A3 = 3,
+	HQM_REV_B0 = 4,
+};
+
+/*
+ * HQM_CMD_GET_DEVICE_VERSION: Query the HQM device version.
+ *
+ * This ioctl interface is the same in all driver versions and is always
+ * the first ioctl.
+ *
+ * Output parameters:
+ * - response: pointer to a struct hqm_cmd_response.
+ *	response.status: Detailed error code. In certain cases, such as if the
+ *	    response pointer is invalid, the driver won't set status.
+ *	response.id[7:0]: Device revision.
+ *	response.id[15:8]: Device version.
+ */
+
+struct hqm_get_device_version_args {
+	/* Output parameters */
+	__u64 response;
+};
+
+#define HQM_VERSION_MAJOR_NUMBER 10
+#define HQM_VERSION_MINOR_NUMBER 7
+#define HQM_VERSION_REVISION_NUMBER 0
+#define HQM_VERSION (HQM_VERSION_MAJOR_NUMBER << 24 | \
+		     HQM_VERSION_MINOR_NUMBER << 16 | \
+		     HQM_VERSION_REVISION_NUMBER)
+
+#define HQM_VERSION_GET_MAJOR_NUMBER(x) (((x) >> 24) & 0xFF)
+#define HQM_VERSION_GET_MINOR_NUMBER(x) (((x) >> 16) & 0xFF)
+#define HQM_VERSION_GET_REVISION_NUMBER(x) ((x) & 0xFFFF)
+
+static inline __u8 hqm_version_incompatible(__u32 version)
+{
+	__u8 inc;
+
+	inc = HQM_VERSION_GET_MAJOR_NUMBER(version) != HQM_VERSION_MAJOR_NUMBER;
+	inc |= (int)HQM_VERSION_GET_MINOR_NUMBER(version) <
+		HQM_VERSION_MINOR_NUMBER;
+
+	return inc;
+}
+
+/*
+ * HQM_CMD_GET_DRIVER_VERSION: Query the HQM driver version. The major number
+ *	is changed when there is an ABI-breaking change, the minor number is
+ *	changed if the API is changed in a backwards-compatible way, and the
+ *	revision number is changed for fixes that don't affect the API.
+ *
+ *	The two are incompatible if the kernel driver's API major number
+ *	differs from the header's HQM_VERSION_MAJOR_NUMBER, or if the major
+ *	numbers match but the kernel driver's minor number is less than the
+ *	header file's. The hqm_version_incompatible() helper should be used
+ *	to check for compatibility.
+ *
+ *	This ioctl interface is the same in all driver versions. Applications
+ *	should check the driver version before performing any other ioctl
+ *	operations.
+ *
+ * Output parameters:
+ * - response: pointer to a struct hqm_cmd_response.
+ *	response.status: Detailed error code. In certain cases, such as if the
+ *	    response pointer is invalid, the driver won't set status.
+ *	response.id: Driver API version. Use the HQM_VERSION_GET_MAJOR_NUMBER,
+ *	    HQM_VERSION_GET_MINOR_NUMBER, and
+ *	    HQM_VERSION_GET_REVISION_NUMBER macros to interpret the field.
+ */
+
+struct hqm_get_driver_version_args {
+	/* Output parameters */
+	__u64 response;
+};
+
+/*
+ * HQM_CMD_CREATE_SCHED_DOMAIN: Create an HQM scheduling domain and reserve the
+ *	resources (queues, ports, etc.) that it contains.
+ *
+ * Input parameters:
+ * - num_ldb_queues: Number of load-balanced queues.
+ * - num_ldb_ports: Number of load-balanced ports.
+ * - num_dir_ports: Number of directed ports. A directed port has one directed
+ *	queue, so no num_dir_queues argument is necessary.
+ * - num_atomic_inflights: This specifies the amount of temporary atomic QE
+ *	storage for the domain. This storage is divided among the domain's
+ *	load-balanced queues that are configured for atomic scheduling.
+ * - num_hist_list_entries: Amount of history list storage. This is divided
+ *	among the domain's CQs.
+ * - num_ldb_credits: Amount of load-balanced QE storage (QED). QEs occupy this
+ *	space until they are scheduled to a load-balanced CQ. One credit
+ *	represents the storage for one QE.
+ * - num_dir_credits: Amount of directed QE storage (DQED). QEs occupy this
+ *	space until they are scheduled to a directed CQ. One credit represents
+ *	the storage for one QE.
+ * - num_ldb_credit_pools: Number of pools into which the load-balanced credits
+ *	are placed.
+ * - num_dir_credit_pools: Number of pools into which the directed credits are
+ *	placed.
+ * - padding0: Reserved for future use.
+ *
+ * Output parameters:
+ * - response: pointer to a struct hqm_cmd_response.
+ *	response.status: Detailed error code. In certain cases, such as if the
+ *	    response pointer is invalid, the driver won't set status.
+ *	response.id: domain ID.
+ */
+struct hqm_create_sched_domain_args {
+	/* Output parameters */
+	__u64 response;
+	/* Input parameters */
+	__u32 num_ldb_queues;
+	__u32 num_ldb_ports;
+	__u32 num_dir_ports;
+	__u32 num_atomic_inflights;
+	__u32 num_hist_list_entries;
+	__u32 num_ldb_credits;
+	__u32 num_dir_credits;
+	__u32 num_ldb_credit_pools;
+	__u32 num_dir_credit_pools;
+};
+
+/*
+ * HQM_CMD_GET_NUM_RESOURCES: Return the number of available resources
+ *	(queues, ports, etc.) that this device owns.
+ *
+ * Output parameters:
+ * - num_sched_domains: Number of available scheduling domains.
+ * - num_ldb_queues: Number of available load-balanced queues.
+ * - num_ldb_ports: Number of available load-balanced ports.
+ * - num_dir_ports: Number of available directed ports. There is one directed
+ *	queue for every directed port.
+ * - num_atomic_inflights: Amount of available temporary atomic QE storage.
+ * - max_contiguous_atomic_inflights: When a domain is created, the temporary
+ *	atomic QE storage is allocated in a contiguous chunk. This return value
+ *	is the longest available contiguous range of atomic QE storage.
+ * - num_hist_list_entries: Amount of history list storage.
+ * - max_contiguous_hist_list_entries: History list storage is allocated in
+ *	a contiguous chunk, and this return value is the longest available
+ *	contiguous range of history list entries.
+ * - num_ldb_credits: Amount of available load-balanced QE storage.
+ * - max_contiguous_ldb_credits: QED storage is allocated in a contiguous
+ *	chunk, and this return value is the longest available contiguous range
+ *	of load-balanced credit storage.
+ * - num_dir_credits: Amount of available directed QE storage.
+ * - max_contiguous_dir_credits: DQED storage is allocated in a contiguous
+ *	chunk, and this return value is the longest available contiguous range
+ *	of directed credit storage.
+ * - num_ldb_credit_pools: Number of available load-balanced credit pools.
+ * - num_dir_credit_pools: Number of available directed credit pools.
+ * - padding0: Reserved for future use.
+ */
+struct hqm_get_num_resources_args {
+	/* Output parameters */
+	__u32 num_sched_domains;
+	__u32 num_ldb_queues;
+	__u32 num_ldb_ports;
+	__u32 num_dir_ports;
+	__u32 num_atomic_inflights;
+	__u32 max_contiguous_atomic_inflights;
+	__u32 num_hist_list_entries;
+	__u32 max_contiguous_hist_list_entries;
+	__u32 num_ldb_credits;
+	__u32 max_contiguous_ldb_credits;
+	__u32 num_dir_credits;
+	__u32 max_contiguous_dir_credits;
+	__u32 num_ldb_credit_pools;
+	__u32 num_dir_credit_pools;
+	__u32 padding0;
+};
+
+/*
+ * HQM_CMD_SAMPLE_PERF_COUNTERS: Gather a set of HQM performance data by
+ *	enabling performance counters for a user-specified measurement duration.
+ * This ioctl is blocking; the calling thread sleeps in the kernel driver + * for the duration of the measurement, then writes the data to user + * memory before returning. + * + * Certain metrics cannot be measured simultaneously, so multiple + * invocations of this command are necessary to gather all metrics. + * Metrics that can be collected simultaneously are grouped together in + * struct hqm_perf_metric_group_X. + * + * The driver allows only one active measurement at a time. If a thread + * calls this command while a measurement is ongoing, the thread will + * block until the original measurement completes. + * + * This ioctl is not supported for VF devices. + * + * Input parameters: + * - measurement_duration_us: Duration, in microseconds, of the + * measurement period. The duration must be between 1us and 60s, + * inclusive. + * - perf_metric_group_id: ID of the metric group to measure. + * - perf_metric_group_data: Pointer to union hqm_perf_metric_group_data + * structure. The driver will interpret the union according to + * perf_metric_group_ID. + * + * Output parameters: + * - response: pointer to a struct hqm_cmd_response. + * response.status: Detailed error code. In certain cases, such as if the + * response pointer is invalid, the driver won't set status. + */ +struct hqm_perf_metric_group_0 { + __u32 hqm_iosf_to_sys_enq_count; + __u32 hqm_sys_to_iosf_deq_count; + __u32 hqm_sys_to_hqm_enq_count; + __u32 hqm_hqm_to_sys_deq_count; +}; + +struct hqm_perf_metric_group_1 { + __u32 hqm_push_ptr_update_count; +}; + +struct hqm_perf_metric_group_2 { + __u32 hqm_avg_hist_list_depth; +}; + +struct hqm_perf_metric_group_3 { + __u32 hqm_avg_qed_depth; +}; + +struct hqm_perf_metric_group_4 { + __u32 hqm_avg_dqed_depth; +}; + +struct hqm_perf_metric_group_5 { + __u32 hqm_noop_hcw_count; + __u32 hqm_bat_t_hcw_count; +}; + +struct hqm_perf_metric_group_6 { + __u32 hqm_comp_hcw_count; + __u32 hqm_comp_t_hcw_count; +}; + +struct hqm_perf_metric_group_7 { + __u32 hqm_enq_hcw_count; + __u32 hqm_enq_t_hcw_count; +}; + +struct hqm_perf_metric_group_8 { + __u32 hqm_renq_hcw_count; + __u32 hqm_renq_t_hcw_count; +}; + +struct hqm_perf_metric_group_9 { + __u32 hqm_rel_hcw_count; +}; + +struct hqm_perf_metric_group_10 { + __u32 hqm_frag_hcw_count; + __u32 hqm_frag_t_hcw_count; +}; + +union hqm_perf_metric_group_data { + struct hqm_perf_metric_group_0 group_0; + struct hqm_perf_metric_group_1 group_1; + struct hqm_perf_metric_group_2 group_2; + struct hqm_perf_metric_group_3 group_3; + struct hqm_perf_metric_group_4 group_4; + struct hqm_perf_metric_group_5 group_5; + struct hqm_perf_metric_group_6 group_6; + struct hqm_perf_metric_group_7 group_7; + struct hqm_perf_metric_group_8 group_8; + struct hqm_perf_metric_group_9 group_9; + struct hqm_perf_metric_group_10 group_10; +}; + +struct hqm_sample_perf_counters_args { + /* Output parameters */ + __u64 elapsed_time_us; + __u64 response; + /* Input parameters */ + __u32 measurement_duration_us; + __u32 perf_metric_group_id; + __u64 perf_metric_group_data; +}; + +/* + * HQM_CMD_MEASURE_SCHED_COUNTS: Measure the HQM scheduling activity for a + * user-specified measurement duration. This ioctl is blocking; the + * calling thread sleeps in the kernel driver for the duration of the + * measurement, then writes the result to user memory before returning. + * + * Unlike the HQM_CMD_SAMPLE_PERF_COUNTERS ioctl, multiple threads can + * measure scheduling counts simultaneously. 
+ * + * Note: VF devices can only measure the scheduling counts of their CQs; + * all other counts will be set to 0. + * + * Input parameters: + * - measurement_duration_us: Duration, in microseconds, of the + * measurement period. The duration must be between 1us and 60s, + * inclusive. + * - padding0: Reserved for future use. + * - sched_count_data: Pointer to a struct hqm_sched_count data structure. + * + * Output parameters: + * - response: pointer to a struct hqm_cmd_response. + * response.status: Detailed error code. In certain cases, such as if the + * response pointer is invalid, the driver won't set status. + */ +struct hqm_sched_counts { + __u64 ldb_sched_count; + __u64 dir_sched_count; + __u64 ldb_cq_sched_count[64]; + __u64 dir_cq_sched_count[128]; +}; + +struct hqm_measure_sched_count_args { + /* Output parameters */ + __u64 elapsed_time_us; + __u64 response; + /* Input parameters */ + __u32 measurement_duration_us; + __u32 padding0; + __u64 sched_count_data; +}; + +/* + * HQM_CMD_SET_SN_ALLOCATION: Configure a sequence number group + * + * Input parameters: + * - group: Sequence number group ID. + * - num: Number of sequence numbers per queue. + * + * Output parameters: + * - response: pointer to a struct hqm_cmd_response. + * response.status: Detailed error code. In certain cases, such as if the + * response pointer is invalid, the driver won't set status. + */ +struct hqm_set_sn_allocation_args { + /* Output parameters */ + __u64 response; + /* Input parameters */ + __u32 group; + __u32 num; +}; + +/* + * HQM_CMD_GET_SN_ALLOCATION: Get a sequence number group's configuration + * + * Input parameters: + * - group: Sequence number group ID. + * - padding0: Reserved for future use. + * + * Output parameters: + * - response: pointer to a struct hqm_cmd_response. + * response.status: Detailed error code. In certain cases, such as if the + * response pointer is invalid, the driver won't set status. + * response.id: Specified group's number of sequence numbers per queue. + */ +struct hqm_get_sn_allocation_args { + /* Output parameters */ + __u64 response; + /* Input parameters */ + __u32 group; + __u32 padding0; +}; + +enum hqm_cq_poll_modes { + HQM_CQ_POLL_MODE_STD, + HQM_CQ_POLL_MODE_SPARSE, + + /* NUM_HQM_CQ_POLL_MODE must be last */ + NUM_HQM_CQ_POLL_MODE, +}; + +/* + * HQM_CMD_QUERY_CQ_POLL_MODE: Query the CQ poll mode the kernel driver is using + * + * Output parameters: + * - response: pointer to a struct hqm_cmd_response. + * response.status: Detailed error code. In certain cases, such as if the + * response pointer is invalid, the driver won't set status. + * response.id: CQ poll mode (see enum hqm_cq_poll_modes). + */ +struct hqm_query_cq_poll_mode_args { + /* Output parameters */ + __u64 response; +}; + +/* + * HQM_CMD_GET_SN_OCCUPANCY: Get a sequence number group's occupancy + * + * Each sequence number group has one or more slots, depending on its + * configuration. I.e.: + * - If configured for 1024 sequence numbers per queue, the group has 1 slot + * - If configured for 512 sequence numbers per queue, the group has 2 slots + * ... + * - If configured for 32 sequence numbers per queue, the group has 32 slots + * + * This ioctl returns the group's number of in-use slots. If its occupancy is + * 0, the group's sequence number allocation can be reconfigured. + * + * Input parameters: + * - group: Sequence number group ID. + * - padding0: Reserved for future use. + * + * Output parameters: + * - response: pointer to a struct hqm_cmd_response. 
+ * response.status: Detailed error code. In certain cases, such as if the + * response pointer is invalid, the driver won't set status. + * response.id: Specified group's number of used slots. + */ +struct hqm_get_sn_occupancy_args { + /* Output parameters */ + __u64 response; + /* Input parameters */ + __u32 group; + __u32 padding0; +}; + +enum hqm_user_interface_commands { + HQM_CMD_GET_DEVICE_VERSION, + HQM_CMD_CREATE_SCHED_DOMAIN, + HQM_CMD_GET_NUM_RESOURCES, + HQM_CMD_GET_DRIVER_VERSION, + HQM_CMD_SAMPLE_PERF_COUNTERS, + HQM_CMD_SET_SN_ALLOCATION, + HQM_CMD_GET_SN_ALLOCATION, + HQM_CMD_MEASURE_SCHED_COUNTS, + HQM_CMD_QUERY_CQ_POLL_MODE, + HQM_CMD_GET_SN_OCCUPANCY, + + /* NUM_HQM_CMD must be last */ + NUM_HQM_CMD, +}; + +/*******************************/ +/* 'domain' device file alerts */ +/*******************************/ + +/* Scheduling domain device files can be read to receive domain-specific + * notifications, for alerts such as hardware errors. + * + * Each alert is encoded in a 16B message. The first 8B contains the alert ID, + * and the second 8B is optional and contains additional information. + * Applications should cast read data to a struct hqm_domain_alert, and + * interpret the struct's alert_id according to hqm_domain_alert_id. The read + * length must be 16B, or the function will return -EINVAL. + * + * Reads are destructive, and in the case of multiple file descriptors for the + * same domain device file, an alert will be read by only one of the file + * descriptors. + * + * The driver stores alerts in a fixed-size alert ring until they are read. If + * the alert ring fills completely, subsequent alerts will be dropped. It is + * recommended that HQM applications dedicate a thread to perform blocking + * reads on the device file. + */ +enum hqm_domain_alert_id { + /* A destination domain queue that this domain connected to has + * unregistered, and can no longer be sent to. The aux alert data + * contains the queue ID. + */ + HQM_DOMAIN_ALERT_REMOTE_QUEUE_UNREGISTER, + /* A producer port in this domain attempted to send a QE without a + * credit. aux_alert_data[7:0] contains the port ID, and + * aux_alert_data[15:8] contains a flag indicating whether the port is + * load-balanced (1) or directed (0). + */ + HQM_DOMAIN_ALERT_PP_OUT_OF_CREDITS, + /* Software issued an illegal enqueue for a port in this domain. An + * illegal enqueue could be: + * - Illegal (excess) completion + * - Illegal fragment + * - Illegal enqueue command + * aux_alert_data[7:0] contains the port ID, and aux_alert_data[15:8] + * contains a flag indicating whether the port is load-balanced (1) or + * directed (0). + */ + HQM_DOMAIN_ALERT_PP_ILLEGAL_ENQ, + /* Software issued excess CQ token pops for a port in this domain. + * aux_alert_data[7:0] contains the port ID, and aux_alert_data[15:8] + * contains a flag indicating whether the port is load-balanced (1) or + * directed (0). + */ + HQM_DOMAIN_ALERT_PP_EXCESS_TOKEN_POPS, + /* A enqueue contained either an invalid command encoding or a REL, + * REL_T, RLS, FWD, FWD_T, FRAG, or FRAG_T from a directed port. + * + * aux_alert_data[7:0] contains the port ID, and aux_alert_data[15:8] + * contains a flag indicating whether the port is load-balanced (1) or + * directed (0). + */ + HQM_DOMAIN_ALERT_ILLEGAL_HCW, + /* The QID must be valid and less than 128. + * + * aux_alert_data[7:0] contains the port ID, and aux_alert_data[15:8] + * contains a flag indicating whether the port is load-balanced (1) or + * directed (0). 
+ */ + HQM_DOMAIN_ALERT_ILLEGAL_QID, + /* An enqueue went to a disabled QID. + * + * aux_alert_data[7:0] contains the port ID, and aux_alert_data[15:8] + * contains a flag indicating whether the port is load-balanced (1) or + * directed (0). + */ + HQM_DOMAIN_ALERT_DISABLED_QID, + /* The device containing this domain was reset. All applications using + * the device need to exit for the driver to complete the reset + * procedure. + * + * aux_alert_data doesn't contain any information for this alert. + */ + HQM_DOMAIN_ALERT_DEVICE_RESET, + /* User-space has enqueued an alert. + * + * aux_alert_data contains user-provided data. + */ + HQM_DOMAIN_ALERT_USER, + + /* Number of HQM domain alerts */ + NUM_HQM_DOMAIN_ALERTS +}; + +static const char hqm_domain_alert_strings[][128] = { + "HQM_DOMAIN_ALERT_REMOTE_QUEUE_UNREGISTER", + "HQM_DOMAIN_ALERT_PP_OUT_OF_CREDITS", + "HQM_DOMAIN_ALERT_PP_ILLEGAL_ENQ", + "HQM_DOMAIN_ALERT_PP_EXCESS_TOKEN_POPS", + "HQM_DOMAIN_ALERT_ILLEGAL_HCW", + "HQM_DOMAIN_ALERT_ILLEGAL_QID", + "HQM_DOMAIN_ALERT_DISABLED_QID", + "HQM_DOMAIN_ALERT_DEVICE_RESET", + "HQM_DOMAIN_ALERT_USER", +}; + +struct hqm_domain_alert { + __u64 alert_id; + __u64 aux_alert_data; +}; + +/*********************************/ +/* 'domain' device file commands */ +/*********************************/ + +/* + * HQM_DOMAIN_CMD_CREATE_LDB_POOL: Configure a load-balanced credit pool. + * Input parameters: + * - num_ldb_credits: Number of load-balanced credits (QED space) for this + * pool. + * - padding0: Reserved for future use. + * + * Output parameters: + * - response: pointer to a struct hqm_cmd_response. + * response.status: Detailed error code. In certain cases, such as if the + * response pointer is invalid, the driver won't set status. + * response.id: pool ID. + */ +struct hqm_create_ldb_pool_args { + /* Output parameters */ + __u64 response; + /* Input parameters */ + __u32 num_ldb_credits; + __u32 padding0; +}; + +/* + * HQM_DOMAIN_CMD_CREATE_DIR_POOL: Configure a directed credit pool. + * Input parameters: + * - num_dir_credits: Number of directed credits (DQED space) for this pool. + * - padding0: Reserved for future use. + * + * Output parameters: + * - response: pointer to a struct hqm_cmd_response. + * response.status: Detailed error code. In certain cases, such as if the + * response pointer is invalid, the driver won't set status. + * response.id: Pool ID. + */ +struct hqm_create_dir_pool_args { + /* Output parameters */ + __u64 response; + /* Input parameters */ + __u32 num_dir_credits; + __u32 padding0; +}; + +/* + * HQM_DOMAIN_CMD_CREATE_LDB_QUEUE: Configure a load-balanced queue. + * Input parameters: + * - num_atomic_inflights: This specifies the amount of temporary atomic QE + * storage for this queue. If zero, the queue will not support atomic + * scheduling. + * - num_sequence_numbers: This specifies the number of sequence numbers used + * by this queue. If zero, the queue will not support ordered scheduling. + * If non-zero, the queue will not support unordered scheduling. + * - num_qid_inflights: The maximum number of QEs that can be inflight + * (scheduled to a CQ but not completed) at any time. If + * num_sequence_numbers is non-zero, num_qid_inflights must be set equal + * to num_sequence_numbers. + * - padding0: Reserved for future use. + * + * Output parameters: + * - response: pointer to a struct hqm_cmd_response. + * response.status: Detailed error code. In certain cases, such as if the + * response pointer is invalid, the driver won't set status. 
+ * response.id: Queue ID. + */ +struct hqm_create_ldb_queue_args { + /* Output parameters */ + __u64 response; + /* Input parameters */ + __u32 num_sequence_numbers; + __u32 num_qid_inflights; + __u32 num_atomic_inflights; + __u32 padding0; +}; + +/* + * HQM_DOMAIN_CMD_CREATE_DIR_QUEUE: Configure a directed queue. + * Input parameters: + * - port_id: Port ID. If the corresponding directed port is already created, + * specify its ID here. Else this argument must be 0xFFFFFFFF to indicate + * that the queue is being created before the port. + * - padding0: Reserved for future use. + * + * Output parameters: + * - response: pointer to a struct hqm_cmd_response. + * response.status: Detailed error code. In certain cases, such as if the + * response pointer is invalid, the driver won't set status. + * response.id: Queue ID. + */ +struct hqm_create_dir_queue_args { + /* Output parameters */ + __u64 response; + /* Input parameters */ + __s32 port_id; + __u32 padding0; +}; + +/* + * HQM_DOMAIN_CMD_CREATE_LDB_PORT: Configure a load-balanced port. + * Input parameters: + * - ldb_credit_pool_id: Load-balanced credit pool this port will belong to. + * - dir_credit_pool_id: Directed credit pool this port will belong to. + * - ldb_credit_high_watermark: Number of load-balanced credits from the pool + * that this port will own. + * + * If this port's scheduling domain doesn't have any load-balanced queues, + * this argument is ignored and the port is given no load-balanced + * credits. + * - dir_credit_high_watermark: Number of directed credits from the pool that + * this port will own. + * + * If this port's scheduling domain doesn't have any directed queues, + * this argument is ignored and the port is given no directed credits. + * - ldb_credit_low_watermark: Load-balanced credit low watermark. When the + * port's credits reach this watermark, they become eligible to be + * refilled by the HQM as credits until the high watermark + * (num_ldb_credits) is reached. + * + * If this port's scheduling domain doesn't have any load-balanced queues, + * this argument is ignored and the port is given no load-balanced + * credits. + * - dir_credit_low_watermark: Directed credit low watermark. When the port's + * credits reach this watermark, they become eligible to be refilled by + * the HQM as credits until the high watermark (num_dir_credits) is + * reached. + * + * If this port's scheduling domain doesn't have any directed queues, + * this argument is ignored and the port is given no directed credits. + * - ldb_credit_quantum: Number of load-balanced credits for the HQM to refill + * per refill operation. + * + * If this port's scheduling domain doesn't have any load-balanced queues, + * this argument is ignored and the port is given no load-balanced + * credits. + * - dir_credit_quantum: Number of directed credits for the HQM to refill per + * refill operation. + * + * If this port's scheduling domain doesn't have any directed queues, + * this argument is ignored and the port is given no directed credits. + * - padding0: Reserved for future use. + * - cq_depth: Depth of the port's CQ. Must be a power-of-two between 8 and + * 1024, inclusive. + * - cq_depth_threshold: CQ depth interrupt threshold. A value of N means that + * the CQ interrupt won't fire until there are N or more outstanding CQ + * tokens. + * - cq_history_list_size: Number of history list entries. This must be greater + * than or equal to cq_depth. + * - padding1: Reserved for future use. + * - padding2: Reserved for future use. 
+ * + * Output parameters: + * - response: pointer to a struct hqm_cmd_response. + * response.status: Detailed error code. In certain cases, such as if the + * response pointer is invalid, the driver won't set status. + * response.id: port ID. + */ +struct hqm_create_ldb_port_args { + /* Output parameters */ + __u64 response; + /* Input parameters */ + __u32 ldb_credit_pool_id; + __u32 dir_credit_pool_id; + __u16 ldb_credit_high_watermark; + __u16 ldb_credit_low_watermark; + __u16 ldb_credit_quantum; + __u16 dir_credit_high_watermark; + __u16 dir_credit_low_watermark; + __u16 dir_credit_quantum; + __u16 padding0; + __u16 cq_depth; + __u16 cq_depth_threshold; + __u16 cq_history_list_size; + __u32 padding1; +}; + +/* + * HQM_DOMAIN_CMD_CREATE_DIR_PORT: Configure a directed port. + * Input parameters: + * - ldb_credit_pool_id: Load-balanced credit pool this port will belong to. + * - dir_credit_pool_id: Directed credit pool this port will belong to. + * - ldb_credit_high_watermark: Number of load-balanced credits from the pool + * that this port will own. + * + * If this port's scheduling domain doesn't have any load-balanced queues, + * this argument is ignored and the port is given no load-balanced + * credits. + * - dir_credit_high_watermark: Number of directed credits from the pool that + * this port will own. + * - ldb_credit_low_watermark: Load-balanced credit low watermark. When the + * port's credits reach this watermark, they become eligible to be + * refilled by the HQM as credits until the high watermark + * (num_ldb_credits) is reached. + * + * If this port's scheduling domain doesn't have any load-balanced queues, + * this argument is ignored and the port is given no load-balanced + * credits. + * - dir_credit_low_watermark: Directed credit low watermark. When the port's + * credits reach this watermark, they become eligible to be refilled by + * the HQM as credits until the high watermark (num_dir_credits) is + * reached. + * - ldb_credit_quantum: Number of load-balanced credits for the HQM to refill + * per refill operation. + * + * If this port's scheduling domain doesn't have any load-balanced queues, + * this argument is ignored and the port is given no load-balanced + * credits. + * - dir_credit_quantum: Number of directed credits for the HQM to refill per + * refill operation. + * - cq_depth: Depth of the port's CQ. Must be a power-of-two between 8 and + * 1024, inclusive. + * - cq_depth_threshold: CQ depth interrupt threshold. A value of N means that + * the CQ interrupt won't fire until there are N or more outstanding CQ + * tokens. + * - qid: Queue ID. If the corresponding directed queue is already created, + * specify its ID here. Else this argument must be 0xFFFFFFFF to indicate + * that the port is being created before the queue. + * - padding1: Reserved for future use. + * + * Output parameters: + * - response: pointer to a struct hqm_cmd_response. + * response.status: Detailed error code. In certain cases, such as if the + * response pointer is invalid, the driver won't set status. + * response.id: Port ID. 
+ */ +struct hqm_create_dir_port_args { + /* Output parameters */ + __u64 response; + /* Input parameters */ + __u32 ldb_credit_pool_id; + __u32 dir_credit_pool_id; + __u16 ldb_credit_high_watermark; + __u16 ldb_credit_low_watermark; + __u16 ldb_credit_quantum; + __u16 dir_credit_high_watermark; + __u16 dir_credit_low_watermark; + __u16 dir_credit_quantum; + __u16 cq_depth; + __u16 cq_depth_threshold; + __s32 queue_id; + __u32 padding1; +}; + +/* + * HQM_DOMAIN_CMD_START_DOMAIN: Mark the end of the domain configuration. This + * must be called before passing QEs into the device, and no configuration + * ioctls can be issued once the domain has started. Sending QEs into the + * device before calling this ioctl will result in undefined behavior. + * Input parameters: + * - (None) + * + * Output parameters: + * - response: pointer to a struct hqm_cmd_response. + * response.status: Detailed error code. In certain cases, such as if the + * response pointer is invalid, the driver won't set status. + */ +struct hqm_start_domain_args { + /* Output parameters */ + __u64 response; + /* Input parameters */ +}; + +/* + * HQM_DOMAIN_CMD_MAP_QID: Map a load-balanced queue to a load-balanced port. + * Input parameters: + * - port_id: Load-balanced port ID. + * - qid: Load-balanced queue ID. + * - priority: Queue->port service priority. + * - padding0: Reserved for future use. + * + * Output parameters: + * - response: pointer to a struct hqm_cmd_response. + * response.status: Detailed error code. In certain cases, such as if the + * response pointer is invalid, the driver won't set status. + */ +struct hqm_map_qid_args { + /* Output parameters */ + __u64 response; + /* Input parameters */ + __u32 port_id; + __u32 qid; + __u32 priority; + __u32 padding0; +}; + +/* + * HQM_DOMAIN_CMD_UNMAP_QID: Unmap a load-balanced queue to a load-balanced + * port. + * Input parameters: + * - port_id: Load-balanced port ID. + * - qid: Load-balanced queue ID. + * + * Output parameters: + * - response: pointer to a struct hqm_cmd_response. + * response.status: Detailed error code. In certain cases, such as if the + * response pointer is invalid, the driver won't set status. + */ +struct hqm_unmap_qid_args { + /* Output parameters */ + __u64 response; + /* Input parameters */ + __u32 port_id; + __u32 qid; +}; + +/* + * HQM_DOMAIN_CMD_ENABLE_LDB_PORT: Enable scheduling to a load-balanced port. + * Input parameters: + * - port_id: Load-balanced port ID. + * - padding0: Reserved for future use. + * + * Output parameters: + * - response: pointer to a struct hqm_cmd_response. + * response.status: Detailed error code. In certain cases, such as if the + * response pointer is invalid, the driver won't set status. + */ +struct hqm_enable_ldb_port_args { + /* Output parameters */ + __u64 response; + /* Input parameters */ + __u32 port_id; + __u32 padding0; +}; + +/* + * HQM_DOMAIN_CMD_ENABLE_DIR_PORT: Enable scheduling to a directed port. + * Input parameters: + * - port_id: Directed port ID. + * - padding0: Reserved for future use. + * + * Output parameters: + * - response: pointer to a struct hqm_cmd_response. + * response.status: Detailed error code. In certain cases, such as if the + * response pointer is invalid, the driver won't set status. + */ +struct hqm_enable_dir_port_args { + /* Output parameters */ + __u64 response; + /* Input parameters */ + __u32 port_id; +}; + +/* + * HQM_DOMAIN_CMD_DISABLE_LDB_PORT: Disable scheduling to a load-balanced port. + * Input parameters: + * - port_id: Load-balanced port ID. 
+ * - padding0: Reserved for future use. + * + * Output parameters: + * - response: pointer to a struct hqm_cmd_response. + * response.status: Detailed error code. In certain cases, such as if the + * response pointer is invalid, the driver won't set status. + */ +struct hqm_disable_ldb_port_args { + /* Output parameters */ + __u64 response; + /* Input parameters */ + __u32 port_id; + __u32 padding0; +}; + +/* + * HQM_DOMAIN_CMD_DISABLE_DIR_PORT: Disable scheduling to a directed port. + * Input parameters: + * - port_id: Directed port ID. + * - padding0: Reserved for future use. + * + * Output parameters: + * - response: pointer to a struct hqm_cmd_response. + * response.status: Detailed error code. In certain cases, such as if the + * response pointer is invalid, the driver won't set status. + */ +struct hqm_disable_dir_port_args { + /* Output parameters */ + __u64 response; + /* Input parameters */ + __u32 port_id; + __u32 padding0; +}; + +/* + * HQM_DOMAIN_CMD_BLOCK_ON_CQ_INTERRUPT: Block on a CQ interrupt until a QE + * arrives for the specified port. If a QE is already present, the ioctl + * will immediately return. + * + * Note: Only one thread can block on a CQ's interrupt at a time. Doing + * otherwise can result in hung threads. + * + * Input parameters: + * - port_id: Port ID. + * - is_ldb: True if the port is load-balanced, false otherwise. + * - arm: Tell the driver to arm the interrupt. + * - cq_gen: Current CQ generation bit. + * - padding0: Reserved for future use. + * - cq_va: VA of the CQ entry where the next QE will be placed. + * + * Output parameters: + * - response: pointer to a struct hqm_cmd_response. + * response.status: Detailed error code. In certain cases, such as if the + * response pointer is invalid, the driver won't set status. + */ +struct hqm_block_on_cq_interrupt_args { + /* Output parameters */ + __u64 response; + /* Input parameters */ + __u32 port_id; + __u8 is_ldb; + __u8 arm; + __u8 cq_gen; + __u8 padding0; + __u64 cq_va; +}; + +/* + * HQM_DOMAIN_CMD_ENQUEUE_DOMAIN_ALERT: Enqueue a domain alert that will be + * read by one reader thread. + * + * Input parameters: + * - aux_alert_data: user-defined auxiliary data. + * + * Output parameters: + * - response: pointer to a struct hqm_cmd_response. + * response.status: Detailed error code. In certain cases, such as if the + * response pointer is invalid, the driver won't set status. + */ +struct hqm_enqueue_domain_alert_args { + /* Output parameters */ + __u64 response; + /* Input parameters */ + __u64 aux_alert_data; +}; + +/* + * HQM_DOMAIN_CMD_GET_LDB_QUEUE_DEPTH: Get a load-balanced queue's depth. + * Input parameters: + * - queue_id: The load-balanced queue ID. + * - padding0: Reserved for future use. + * + * Output parameters: + * - response: pointer to a struct hqm_cmd_response. + * response.status: Detailed error code. In certain cases, such as if the + * response pointer is invalid, the driver won't set status. + * response.id: queue depth. + */ +struct hqm_get_ldb_queue_depth_args { + /* Output parameters */ + __u64 response; + /* Input parameters */ + __u32 queue_id; + __u32 padding0; +}; + +/* + * HQM_DOMAIN_CMD_GET_DIR_QUEUE_DEPTH: Get a directed queue's depth. + * Input parameters: + * - queue_id: The directed queue ID. + * - padding0: Reserved for future use. + * + * Output parameters: + * - response: pointer to a struct hqm_cmd_response. + * response.status: Detailed error code. In certain cases, such as if the + * response pointer is invalid, the driver won't set status. 
+ * response.id: queue depth. + */ +struct hqm_get_dir_queue_depth_args { + /* Output parameters */ + __u64 response; + /* Input parameters */ + __u32 queue_id; + __u32 padding0; +}; + +/* + * HQM_DOMAIN_CMD_PENDING_PORT_UNMAPS: Get number of queue unmap operations in + * progress for a load-balanced port. + * + * Note: This is a snapshot; the number of unmap operations in progress + * is subject to change at any time. + * + * Input parameters: + * - port_id: Load-balanced port ID. + * + * Output parameters: + * - response: pointer to a struct hqm_cmd_response. + * response.status: Detailed error code. In certain cases, such as if the + * response pointer is invalid, the driver won't set status. + * response.id: number of unmaps in progress. + */ +struct hqm_pending_port_unmaps_args { + /* Output parameters */ + __u64 response; + /* Input parameters */ + __u32 port_id; + __u32 padding0; +}; + +enum hqm_domain_user_interface_commands { + HQM_DOMAIN_CMD_CREATE_LDB_POOL, + HQM_DOMAIN_CMD_CREATE_DIR_POOL, + HQM_DOMAIN_CMD_CREATE_LDB_QUEUE, + HQM_DOMAIN_CMD_CREATE_DIR_QUEUE, + HQM_DOMAIN_CMD_CREATE_LDB_PORT, + HQM_DOMAIN_CMD_CREATE_DIR_PORT, + HQM_DOMAIN_CMD_START_DOMAIN, + HQM_DOMAIN_CMD_MAP_QID, + HQM_DOMAIN_CMD_UNMAP_QID, + HQM_DOMAIN_CMD_ENABLE_LDB_PORT, + HQM_DOMAIN_CMD_ENABLE_DIR_PORT, + HQM_DOMAIN_CMD_DISABLE_LDB_PORT, + HQM_DOMAIN_CMD_DISABLE_DIR_PORT, + HQM_DOMAIN_CMD_BLOCK_ON_CQ_INTERRUPT, + HQM_DOMAIN_CMD_ENQUEUE_DOMAIN_ALERT, + HQM_DOMAIN_CMD_GET_LDB_QUEUE_DEPTH, + HQM_DOMAIN_CMD_GET_DIR_QUEUE_DEPTH, + HQM_DOMAIN_CMD_PENDING_PORT_UNMAPS, + + /* NUM_HQM_DOMAIN_CMD must be last */ + NUM_HQM_DOMAIN_CMD, +}; + +/* + * Base addresses for memory mapping the consumer queue (CQ) and popcount (PC) + * memory space, and producer port (PP) MMIO space. The CQ, PC, and PP + * addresses are per-port. Every address is page-separated (e.g. LDB PP 0 is at + * 0x2100000 and LDB PP 1 is at 0x2101000). + */ +#define HQM_LDB_CQ_BASE 0x3000000 +#define HQM_LDB_CQ_MAX_SIZE 65536 +#define HQM_LDB_CQ_OFFS(id) (HQM_LDB_CQ_BASE + (id) * HQM_LDB_CQ_MAX_SIZE) + +#define HQM_DIR_CQ_BASE 0x3800000 +#define HQM_DIR_CQ_MAX_SIZE 65536 +#define HQM_DIR_CQ_OFFS(id) (HQM_DIR_CQ_BASE + (id) * HQM_DIR_CQ_MAX_SIZE) + +#define HQM_LDB_PC_BASE 0x2300000 +#define HQM_LDB_PC_MAX_SIZE 4096 +#define HQM_LDB_PC_OFFS(id) (HQM_LDB_PC_BASE + (id) * HQM_LDB_PC_MAX_SIZE) + +#define HQM_DIR_PC_BASE 0x2200000 +#define HQM_DIR_PC_MAX_SIZE 4096 +#define HQM_DIR_PC_OFFS(id) (HQM_DIR_PC_BASE + (id) * HQM_DIR_PC_MAX_SIZE) + +#define HQM_LDB_PP_BASE 0x2100000 +#define HQM_LDB_PP_MAX_SIZE 4096 +#define HQM_LDB_PP_OFFS(id) (HQM_LDB_PP_BASE + (id) * HQM_LDB_PP_MAX_SIZE) + +#define HQM_DIR_PP_BASE 0x2000000 +#define HQM_DIR_PP_MAX_SIZE 4096 +#define HQM_DIR_PP_OFFS(id) (HQM_DIR_PP_BASE + (id) * HQM_DIR_PP_MAX_SIZE) + +#endif /* __HQM_USER_H */
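
The following is a minimal user-space sketch of the domain alert interface documented in hqm_user.h above, for illustration only; it is not part of the patch. It assumes the header is installed as <linux/hqm_user.h>, and the device node path "/dev/hqm0/domain0" is a placeholder for whatever scheduling domain node the driver actually creates. It exercises only the 16-byte destructive read and the alert_id / aux_alert_data decoding described in the header, and issues no ioctls.

/* Illustrative alert-reader sketch (not part of the patch).
 * Assumptions: <linux/hqm_user.h> is the installed header location and
 * "/dev/hqm0/domain0" stands in for the real domain device node.
 */
#include <stdio.h>
#include <unistd.h>
#include <fcntl.h>
#include <linux/hqm_user.h>

static int hqm_read_domain_alerts(const char *path)
{
	struct hqm_domain_alert alert;
	int fd = open(path, O_RDONLY);

	if (fd < 0)
		return -1;

	for (;;) {
		/* Alert reads must be exactly 16B; the read blocks until an
		 * alert arrives, and reads are destructive (only one reader
		 * sees each alert).
		 */
		if (read(fd, &alert, sizeof(alert)) != sizeof(alert))
			break;

		if (alert.alert_id < NUM_HQM_DOMAIN_ALERTS)
			printf("%s: aux data 0x%llx\n",
			       hqm_domain_alert_strings[alert.alert_id],
			       (unsigned long long)alert.aux_alert_data);

		/* A device reset means the application must exit so the
		 * driver can complete the reset procedure.
		 */
		if (alert.alert_id == HQM_DOMAIN_ALERT_DEVICE_RESET)
			break;
	}

	close(fd);
	return 0;
}

int main(void)
{
	return hqm_read_domain_alerts("/dev/hqm0/domain0");
}

As the header notes, applications would typically dedicate a thread to these blocking reads, and should first verify compatibility via HQM_CMD_GET_DRIVER_VERSION and hqm_version_incompatible() before issuing any other commands.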