From bae6ae34a21cbbff985b96b24f11834d3753f462 Mon Sep 17 00:00:00 2001
From: Bert Karwatzki
Date: Tue, 20 Jun 2023 00:54:25 +0200
Subject: [PATCH 44/65] TODO: net: ipa: Add support for IPA v2.x commands and
 table init

IPA v2.x commands are different from those of later IPA revisions, mostly
because IPA v2.x is 32-bit. There are also other minor differences in some
of the command structs.

The tables, again, differ only because IPA v2.x is 32-bit.

Signed-off-by: Sireesh Kodali
Signed-off-by: Vladimir Lypak
Signed-off-by: Alejandro Tafalla
[Yassine: Don't use IPA_IS_64BIT and IPA_TABLE_ENTRY_SIZE macros,
 calculate entry_size instead]
Signed-off-by: Yassine Oudjana

TODO: probably bring back the IPA_TABLE_ENTRY_SIZE macro or implement it
as an inline function instead. Replaced IPA_ZERO_RULE_SIZE with entry_size
in ipa_table_{init,exit}.

Signed-off-by: Bert Karwatzki
---
 drivers/net/ipa/ipa.h       |   2 +-
 drivers/net/ipa/ipa_cmd.c   | 141 ++++++++++++++++++++++++++----------
 drivers/net/ipa/ipa_table.c |  90 +++++++++++++++--------
 drivers/net/ipa/ipa_table.h |   2 +-
 4 files changed, 161 insertions(+), 74 deletions(-)

diff --git a/drivers/net/ipa/ipa.h b/drivers/net/ipa/ipa.h
index 22ca55f70b16..fd783785cc63 100644
--- a/drivers/net/ipa/ipa.h
+++ b/drivers/net/ipa/ipa.h
@@ -87,7 +87,7 @@ struct ipa {
 	struct ipa_power *power;
 
 	dma_addr_t table_addr;
-	__le64 *table_virt;
+	void *table_virt;
 	u32 route_count;
 	u32 modem_route_count;
 	u32 filter_count;
diff --git a/drivers/net/ipa/ipa_cmd.c b/drivers/net/ipa/ipa_cmd.c
index 9b8da8e8fb41..40cf6f6b1927 100644
--- a/drivers/net/ipa/ipa_cmd.c
+++ b/drivers/net/ipa/ipa_cmd.c
@@ -25,10 +25,10 @@
  * An immediate command is generally used to request the IPA do something
  * other than data transfer to another endpoint.
  *
- * Immediate commands are represented by GSI transactions just like other
- * transfer requests, and use a single GSI TRE. Each immediate command
- * has a well-defined format, having a payload of a known length. This
- * allows the transfer element's length field to be used to hold an
+ * Immediate commands on IPA v3+ are represented by GSI transactions just
+ * like other transfer requests, and use a single GSI TRE. Each immediate
+ * command has a well-defined format, having a payload of a known length.
+ * This allows the transfer element's length field to be used to hold an
  * immediate command's opcode. The payload for a command resides in AP
  * memory and is described by a single scatterlist entry in its transaction.
  * Commands do not require a transaction completion callback, and are
@@ -44,10 +44,16 @@ enum pipeline_clear_options {
 
 /* IPA_CMD_IP_V{4,6}_{FILTER,ROUTING}_INIT */
 
-struct ipa_cmd_hw_ip_fltrt_init {
-	__le64 hash_rules_addr;
-	__le64 flags;
-	__le64 nhash_rules_addr;
+union ipa_cmd_hw_ip_fltrt_init {
+	struct {
+		__le32 nhash_rules_addr;
+		__le32 flags;
+	} v2;
+	struct {
+		__le64 hash_rules_addr;
+		__le64 flags;
+		__le64 nhash_rules_addr;
+	} v3;
 };
 
 /* Field masks for ipa_cmd_hw_ip_fltrt_init structure fields */
@@ -55,13 +61,23 @@ struct ipa_cmd_hw_ip_fltrt_init {
 #define IP_FLTRT_FLAGS_HASH_ADDR_FMASK		GENMASK_ULL(27, 12)
 #define IP_FLTRT_FLAGS_NHASH_SIZE_FMASK		GENMASK_ULL(39, 28)
 #define IP_FLTRT_FLAGS_NHASH_ADDR_FMASK		GENMASK_ULL(55, 40)
+#define IP_V2_IPV4_FLTRT_FLAGS_SIZE_FMASK	GENMASK_ULL(11, 0)
+#define IP_V2_IPV4_FLTRT_FLAGS_ADDR_FMASK	GENMASK_ULL(27, 12)
+#define IP_V2_IPV6_FLTRT_FLAGS_SIZE_FMASK	GENMASK_ULL(15, 0)
+#define IP_V2_IPV6_FLTRT_FLAGS_ADDR_FMASK	GENMASK_ULL(31, 16)
 
 /* IPA_CMD_HDR_INIT_LOCAL */
 
-struct ipa_cmd_hw_hdr_init_local {
-	__le64 hdr_table_addr;
-	__le32 flags;
-	__le32 reserved;
+union ipa_cmd_hw_hdr_init_local {
+	struct {
+		__le32 hdr_table_addr;
+		__le32 flags;
+	} v2;
+	struct {
+		__le64 hdr_table_addr;
+		__le32 flags;
+		__le32 reserved;
+	} v3;
 };
 
 /* Field masks for ipa_cmd_hw_hdr_init_local structure fields */
@@ -108,14 +124,37 @@ struct ipa_cmd_ip_packet_init {
 #define DMA_SHARED_MEM_OPCODE_SKIP_CLEAR_FMASK		GENMASK(8, 8)
 #define DMA_SHARED_MEM_OPCODE_CLEAR_OPTION_FMASK	GENMASK(10, 9)
 
-struct ipa_cmd_hw_dma_mem_mem {
-	__le16 clear_after_read; /* 0 or DMA_SHARED_MEM_CLEAR_AFTER_READ */
-	__le16 size;
-	__le16 local_addr;
-	__le16 flags;
-	__le64 system_addr;
+union ipa_cmd_hw_dma_mem_mem {
+	struct {
+		__le16 reserved;
+		__le16 size;
+		__le32 system_addr;
+		__le16 local_addr;
+		__le16 flags; /* the least significant 14 bits are reserved */
+		__le32 padding;
+	} v2;
+	struct {
+		__le16 clear_after_read; /* 0 or DMA_SHARED_MEM_CLEAR_AFTER_READ */
+		__le16 size;
+		__le16 local_addr;
+		__le16 flags;
+		__le64 system_addr;
+	} v3;
 };
+
+#define CMD_FIELD(_version, _payload, _field)				\
+	*(((_version) > IPA_VERSION_2_6L) ?				\
+		  &(_payload->v3._field) :				\
+		  &(_payload->v2._field))
+
+#define SET_DMA_FIELD(_ver, _payload, _field, _value)			\
+	do {								\
+		if ((_ver) >= IPA_VERSION_3_0)				\
+			(_payload)->v3._field = cpu_to_le64(_value);	\
+		else							\
+			(_payload)->v2._field = cpu_to_le32(_value);	\
+	} while (0)
 
 /* Flag allowing atomic clear of target region after reading data (v4.0+)*/
 #define DMA_SHARED_MEM_CLEAR_AFTER_READ		GENMASK(15, 15)
 
@@ -131,15 +170,16 @@ struct ipa_cmd_ip_packet_tag_status {
 	__le64 tag;
 };
 
-#define IP_PACKET_TAG_STATUS_TAG_FMASK		GENMASK_ULL(63, 16)
+#define IPA_V2_IP_PACKET_TAG_STATUS_TAG_FMASK	GENMASK_ULL(63, 32)
+#define IPA_V3_IP_PACKET_TAG_STATUS_TAG_FMASK	GENMASK_ULL(63, 16)
 
 /* Immediate command payload */
 union ipa_cmd_payload {
-	struct ipa_cmd_hw_ip_fltrt_init table_init;
-	struct ipa_cmd_hw_hdr_init_local hdr_init_local;
+	union ipa_cmd_hw_ip_fltrt_init table_init;
+	union ipa_cmd_hw_hdr_init_local hdr_init_local;
 	struct ipa_cmd_register_write register_write;
 	struct ipa_cmd_ip_packet_init ip_packet_init;
-	struct ipa_cmd_hw_dma_mem_mem dma_shared_mem;
+	union ipa_cmd_hw_dma_mem_mem dma_shared_mem;
 	struct ipa_cmd_ip_packet_tag_status ip_packet_tag_status;
 };
 
@@ -363,15 +403,26 @@ void ipa_cmd_table_init_add(struct ipa_dma_trans *trans,
 			    dma_addr_t hash_addr)
 {
 	struct ipa *ipa = container_of(trans->ipa_dma, struct ipa, ipa_dma);
-	struct ipa_cmd_hw_ip_fltrt_init *payload;
+	union ipa_cmd_hw_ip_fltrt_init *payload;
+	enum ipa_version version = ipa->version;
 	union ipa_cmd_payload *cmd_payload;
 	dma_addr_t payload_addr;
 	u64 val;
 
 	/* Record the non-hash table offset and size */
 	offset += ipa->mem_offset;
-	val = u64_encode_bits(offset, IP_FLTRT_FLAGS_NHASH_ADDR_FMASK);
-	val |= u64_encode_bits(size, IP_FLTRT_FLAGS_NHASH_SIZE_FMASK);
+
+	if (version >= IPA_VERSION_3_0) {
+		val = u64_encode_bits(offset, IP_FLTRT_FLAGS_NHASH_ADDR_FMASK);
+		val |= u64_encode_bits(size, IP_FLTRT_FLAGS_NHASH_SIZE_FMASK);
+	} else if (opcode == IPA_CMD_IP_V4_FILTER_INIT ||
+		   opcode == IPA_CMD_IP_V4_ROUTING_INIT) {
+		val = u64_encode_bits(offset, IP_V2_IPV4_FLTRT_FLAGS_ADDR_FMASK);
+		val |= u64_encode_bits(size, IP_V2_IPV4_FLTRT_FLAGS_SIZE_FMASK);
+	} else { /* IPA <= v2.6L IPv6 */
+		val = u64_encode_bits(offset, IP_V2_IPV6_FLTRT_FLAGS_ADDR_FMASK);
+		val |= u64_encode_bits(size, IP_V2_IPV6_FLTRT_FLAGS_SIZE_FMASK);
+	}
 
 	/* The hash table offset and address are zero if its size is 0 */
 	if (hash_size) {
@@ -387,10 +438,10 @@ void ipa_cmd_table_init_add(struct ipa_dma_trans *trans,
 	payload = &cmd_payload->table_init;
 
 	/* Fill in all offsets and sizes and the non-hash table address */
-	if (hash_size)
-		payload->hash_rules_addr = cpu_to_le64(hash_addr);
-	payload->flags = cpu_to_le64(val);
-	payload->nhash_rules_addr = cpu_to_le64(addr);
+	if (hash_size && version >= IPA_VERSION_3_0)
+		payload->v3.hash_rules_addr = cpu_to_le64(hash_addr);
+	SET_DMA_FIELD(version, payload, flags, val);
+	SET_DMA_FIELD(version, payload, nhash_rules_addr, addr);
 
 	ipa_dma_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
 			      opcode);
@@ -402,7 +453,7 @@ void ipa_cmd_hdr_init_local_add(struct ipa_dma_trans *trans, u32 offset, u16 siz
 {
 	struct ipa *ipa = container_of(trans->ipa_dma, struct ipa, ipa_dma);
 	enum ipa_cmd_opcode opcode = IPA_CMD_HDR_INIT_LOCAL;
-	struct ipa_cmd_hw_hdr_init_local *payload;
+	union ipa_cmd_hw_hdr_init_local *payload;
 	union ipa_cmd_payload *cmd_payload;
 	dma_addr_t payload_addr;
 	u32 flags;
@@ -417,10 +468,10 @@ void ipa_cmd_hdr_init_local_add(struct ipa_dma_trans *trans, u32 offset, u16 siz
 	cmd_payload = ipa_cmd_payload_alloc(ipa, &payload_addr);
 	payload = &cmd_payload->hdr_init_local;
 
-	payload->hdr_table_addr = cpu_to_le64(addr);
+	SET_DMA_FIELD(ipa->version, payload, hdr_table_addr, addr);
 	flags = u32_encode_bits(size, HDR_INIT_LOCAL_FLAGS_TABLE_SIZE_FMASK);
 	flags |= u32_encode_bits(offset, HDR_INIT_LOCAL_FLAGS_HDR_ADDR_FMASK);
-	payload->flags = cpu_to_le32(flags);
+	CMD_FIELD(ipa->version, payload, flags) = cpu_to_le32(flags);
 
 	ipa_dma_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
 			      opcode);
@@ -466,8 +517,11 @@ void ipa_cmd_register_write_add(struct ipa_dma_trans *trans, u32 offset, u32 val
 
 	} else {
 		flags = 0;	/* SKIP_CLEAR flag is always 0 */
-		options = u16_encode_bits(clear_option,
-					  REGISTER_WRITE_CLEAR_OPTIONS_FMASK);
+		if (ipa->version > IPA_VERSION_2_6L)
+			options = u16_encode_bits(clear_option,
+						  REGISTER_WRITE_CLEAR_OPTIONS_FMASK);
+		else
+			options = 0;
 	}
 
 	cmd_payload = ipa_cmd_payload_alloc(ipa, &payload_addr);
@@ -513,7 +567,8 @@ void ipa_cmd_dma_shared_mem_add(struct ipa_dma_trans *trans, u32 offset, u16 siz
 {
 	struct ipa *ipa = container_of(trans->ipa_dma, struct ipa, ipa_dma);
 	enum ipa_cmd_opcode opcode = IPA_CMD_DMA_SHARED_MEM;
-	struct ipa_cmd_hw_dma_mem_mem *payload;
+	enum ipa_version version = ipa->version;
+	union ipa_cmd_hw_dma_mem_mem *payload;
 	union ipa_cmd_payload *cmd_payload;
 	dma_addr_t payload_addr;
 	u16 flags;
@@ -531,8 +586,8 @@ void ipa_cmd_dma_shared_mem_add(struct ipa_dma_trans *trans, u32 offset, u16 siz
 	/* payload->clear_after_read was reserved prior to IPA v4.0. It's
 	 * never needed for current code, so it's 0 regardless of version.
 	 */
-	payload->size = cpu_to_le16(size);
-	payload->local_addr = cpu_to_le16(offset);
+	CMD_FIELD(version, payload, size) = cpu_to_le16(size);
+	CMD_FIELD(version, payload, local_addr) = cpu_to_le16(offset);
 	/* payload->flags:
 	 *   direction: 0 = write to IPA, 1 read from IPA
 	 * Starting at v4.0 these are reserved; either way, all zero:
@@ -542,8 +597,8 @@ void ipa_cmd_dma_shared_mem_add(struct ipa_dma_trans *trans, u32 offset, u16 siz
 	 * since both values are 0 we won't bother OR'ing them in.
 	 */
 	flags = toward_ipa ? 0 : DMA_SHARED_MEM_FLAGS_DIRECTION_FMASK;
-	payload->flags = cpu_to_le16(flags);
-	payload->system_addr = cpu_to_le64(addr);
+	CMD_FIELD(version, payload, flags) = cpu_to_le16(flags);
+	SET_DMA_FIELD(version, payload, system_addr, addr);
 
 	ipa_dma_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
 			      opcode);
@@ -556,11 +611,17 @@ static void ipa_cmd_ip_tag_status_add(struct ipa_dma_trans *trans)
 	struct ipa_cmd_ip_packet_tag_status *payload;
 	union ipa_cmd_payload *cmd_payload;
 	dma_addr_t payload_addr;
+	u64 tag_mask;
+
+	if (trans->ipa_dma->version <= IPA_VERSION_2_6L)
+		tag_mask = IPA_V2_IP_PACKET_TAG_STATUS_TAG_FMASK;
+	else
+		tag_mask = IPA_V3_IP_PACKET_TAG_STATUS_TAG_FMASK;
 
 	cmd_payload = ipa_cmd_payload_alloc(ipa, &payload_addr);
 	payload = &cmd_payload->ip_packet_tag_status;
 
-	payload->tag = le64_encode_bits(0, IP_PACKET_TAG_STATUS_TAG_FMASK);
+	payload->tag = le64_encode_bits(0, tag_mask);
 
 	ipa_dma_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
 			      opcode);
diff --git a/drivers/net/ipa/ipa_table.c b/drivers/net/ipa/ipa_table.c
index 2b2cf7462833..61d7b812357e 100644
--- a/drivers/net/ipa/ipa_table.c
+++ b/drivers/net/ipa/ipa_table.c
@@ -8,6 +8,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -119,12 +120,6 @@
  * ----------------------
  */
 
-/* Filter or route rules consist of a set of 32-bit values followed by a
- * 32-bit all-zero rule list terminator. The "zero rule" is simply an
- * all-zero rule followed by the list terminator.
- */
-#define IPA_ZERO_RULE_SIZE	(2 * sizeof(__le32))
-
 /* Check things that can be validated at build time. */
 static void ipa_table_validate_build(void)
 {
@@ -136,12 +131,6 @@ static void ipa_table_validate_build(void)
 	 * initialize tables.
 	 */
 	BUILD_BUG_ON(sizeof(dma_addr_t) > sizeof(__le64));
-
-	/* A "zero rule" is used to represent no filtering or no routing.
-	 * It is a 64-bit block of zeroed memory. Code in ipa_table_init()
-	 * assumes that it can be written using a pointer to __le64.
-	 */
-	BUILD_BUG_ON(IPA_ZERO_RULE_SIZE != sizeof(__le64));
 }
 
 static const struct ipa_mem *
@@ -204,6 +193,8 @@ static void ipa_table_reset_add(struct ipa_dma_trans *trans, bool filter,
 {
 	struct ipa *ipa = container_of(trans->ipa_dma, struct ipa, ipa_dma);
 	const struct ipa_mem *mem;
+	const size_t entry_size = ipa->version > IPA_VERSION_2_6L ?
+			sizeof(__le64) : sizeof(__le32);
 	dma_addr_t addr;
 	u32 offset;
 	u16 size;
@@ -216,8 +207,8 @@ static void ipa_table_reset_add(struct ipa_dma_trans *trans, bool filter,
 	if (filter)
 		first++;	/* skip over bitmap */
 
-	offset = mem->offset + first * sizeof(__le64);
-	size = count * sizeof(__le64);
+	offset = mem->offset + first * entry_size;
+	size = count * entry_size;
 	addr = ipa_table_addr(ipa, false, count);
 
 	ipa_cmd_dma_shared_mem_add(trans, offset, size, addr, true);
@@ -383,9 +374,11 @@ int ipa_table_hash_flush(struct ipa *ipa)
 static void ipa_table_init_add(struct ipa_dma_trans *trans, bool filter, bool ipv6)
 {
 	struct ipa *ipa = container_of(trans->ipa_dma, struct ipa, ipa_dma);
+	const struct ipa_mem *mem;
 	const struct ipa_mem *hash_mem;
 	enum ipa_cmd_opcode opcode;
-	const struct ipa_mem *mem;
+	const size_t entry_size = ipa->version > IPA_VERSION_2_6L ?
+			sizeof(__le64) : sizeof(__le32);
 	dma_addr_t hash_addr;
 	dma_addr_t addr;
 	u32 hash_offset;
@@ -414,20 +407,28 @@ static void ipa_table_init_add(struct ipa_dma_trans *trans, bool filter, bool ip
 		 * table is either the same as the non-hashed one, or zero.
 		 */
 		count = 1 + hweight64(ipa->filtered);
-		hash_count = hash_mem && hash_mem->size ? count : 0;
+		if (hash_mem)
+			hash_count = hash_mem->size ? count : 0;
 	} else {
 		/* The size of a route table region determines the number
 		 * of entries it has.
 		 */
-		count = mem->size / sizeof(__le64);
-		hash_count = hash_mem ? hash_mem->size / sizeof(__le64) : 0;
+		count = mem->size / entry_size;
+		if (hash_mem)
+			hash_count = hash_mem->size / entry_size;
 	}
 
-	size = count * sizeof(__le64);
-	hash_size = hash_count * sizeof(__le64);
+	size = count * entry_size;
+	if (hash_mem)
+		hash_size = hash_count * entry_size;
+	else
+		hash_size = 0;
 	addr = ipa_table_addr(ipa, filter, count);
-	hash_addr = ipa_table_addr(ipa, filter, hash_count);
+	if (hash_mem)
+		hash_addr = ipa_table_addr(ipa, filter, hash_count);
 
+	/* If hash_size is zero, ipa_cmd_table_init_add() ignores
+	 * hash_offset and hash_addr */
 	ipa_cmd_table_init_add(trans, opcode, size, mem->offset, addr,
 			       hash_size, hash_offset, hash_addr);
 	if (!filter)
@@ -678,7 +679,22 @@ bool ipa_table_mem_valid(struct ipa *ipa, bool filter)
 	return true;
 }
 
-/* Initialize a coherent DMA allocation containing initialized filter and
+static inline void *ipa_table_write(enum ipa_version version,
+				    void *virt, u64 value)
+{
+	if (version > IPA_VERSION_2_6L) {
+		__le64 *ptr = virt;
+		*ptr = cpu_to_le64(value);
+		return virt + sizeof(__le64);
+	} else {
+		__le32 *ptr = virt;
+		*ptr = cpu_to_le32(value);
+		return virt + sizeof(__le32);
+	}
+}
+
+/*
+ * Initialize a coherent DMA allocation containing initialized filter and
  * route table data. This is used when initializing or resetting the IPA
  * filter or route table.
  *
@@ -711,10 +727,13 @@ bool ipa_table_mem_valid(struct ipa *ipa, bool filter)
  */
 int ipa_table_init(struct ipa *ipa)
 {
+	enum ipa_version version = ipa->version;
 	struct device *dev = &ipa->pdev->dev;
+	u64 filter_map;
+	const size_t entry_size = ipa->version > IPA_VERSION_2_6L ?
+			sizeof(__le64) : sizeof(__le32);
 	dma_addr_t addr;
-	__le64 le_addr;
-	__le64 *virt;
+	void *virt;
 	size_t size;
 	u32 count;
 
@@ -722,13 +741,14 @@ int ipa_table_init(struct ipa *ipa)
 
 	count = max_t(u32, ipa->filter_count, ipa->route_count);
 
+	/* TODO: See if this comment is correct for v2.* */
 	/* The IPA hardware requires route and filter table rules to be
 	 * aligned on a 128-byte boundary. We put the "zero rule" at the
 	 * base of the table area allocated here. The DMA address returned
 	 * by dma_alloc_coherent() is guaranteed to be a power-of-2 number
 	 * of pages, which satisfies the rule alignment requirement.
 	 */
-	size = IPA_ZERO_RULE_SIZE + (1 + count) * sizeof(__le64);
+	size = entry_size + (1 + count) * entry_size;
 	virt = dma_alloc_coherent(dev, size, &addr, GFP_KERNEL);
 	if (!virt)
 		return -ENOMEM;
@@ -737,7 +757,7 @@ int ipa_table_init(struct ipa *ipa)
 	ipa->table_addr = addr;
 
 	/* First slot is the zero rule */
-	*virt++ = 0;
+	virt = ipa_table_write(version, virt, 0);
 
 	/* Next is the filter table bitmap. The "soft" bitmap value might
 	 * need to be converted to the hardware representation by shifting
@@ -745,15 +765,18 @@ int ipa_table_init(struct ipa *ipa)
 	 * filtering, which is possible but not used. IPA v5.0+ eliminated
 	 * that option, so there's no shifting required.
 	 */
-	if (ipa->version < IPA_VERSION_5_0)
-		*virt++ = cpu_to_le64(ipa->filtered << 1);
+	if (version <= IPA_VERSION_2_6L)
+		filter_map = (ipa->filtered << 1) | 1;
+	else if (ipa->version < IPA_VERSION_5_0)
+		filter_map = ipa->filtered << 1;
 	else
-		*virt++ = cpu_to_le64(ipa->filtered);
+		filter_map = ipa->filtered;
+
+	virt = ipa_table_write(version, virt, filter_map);
 
 	/* All the rest contain the DMA address of the zero rule */
-	le_addr = cpu_to_le64(addr);
 	while (count--)
-		*virt++ = le_addr;
+		virt = ipa_table_write(version, virt, addr);
 
 	return 0;
 }
@@ -762,9 +785,12 @@ void ipa_table_exit(struct ipa *ipa)
 {
 	u32 count = max_t(u32, 1 + ipa->filter_count, ipa->route_count);
 	struct device *dev = &ipa->pdev->dev;
+	const size_t entry_size = ipa->version > IPA_VERSION_2_6L ?
+			sizeof(__le64) : sizeof(__le32);
+
 	size_t size;
 
-	size = IPA_ZERO_RULE_SIZE + (1 + count) * sizeof(__le64);
+	size = entry_size + (1 + count) * entry_size;
 
 	dma_free_coherent(dev, size, ipa->table_virt, ipa->table_addr);
 	ipa->table_addr = 0;
diff --git a/drivers/net/ipa/ipa_table.h b/drivers/net/ipa/ipa_table.h
index 7cc951904bb4..3ed943457b61 100644
--- a/drivers/net/ipa/ipa_table.h
+++ b/drivers/net/ipa/ipa_table.h
@@ -25,7 +25,7 @@ bool ipa_filtered_valid(struct ipa *ipa, u64 filtered);
  */
 static inline bool ipa_table_hash_support(struct ipa *ipa)
 {
-	return ipa->version != IPA_VERSION_4_2;
+	return ipa->version != IPA_VERSION_4_2 && ipa->version > IPA_VERSION_2_6L;
 }
 
 /**
-- 
2.39.2
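
For reference, below is a stand-alone sketch of the table layout that
ipa_table_init() builds after this change: one "zero rule" slot, one
filter-bitmap slot, and count slots that each hold the DMA address of the
zero rule, with 4-byte slots on IPA v2.x and 8-byte slots on IPA v3.0+.
It is illustrative only; the enum values, example bitmap/address/count and
the plain host-endian stores are assumptions, while the kernel code above
uses the real enum ipa_version and cpu_to_le32()/cpu_to_le64().

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Simplified stand-ins for the driver's enum ipa_version values */
enum ipa_version { IPA_VERSION_2_6L, IPA_VERSION_3_0, IPA_VERSION_5_0 };

/* Mirrors ipa_table_write(): store one table slot, return advanced cursor */
static void *table_write(enum ipa_version version, void *virt, uint64_t value)
{
	if (version > IPA_VERSION_2_6L) {
		uint64_t v64 = value;

		memcpy(virt, &v64, sizeof(v64));
		return (char *)virt + sizeof(v64);
	} else {
		uint32_t v32 = (uint32_t)value;

		memcpy(virt, &v32, sizeof(v32));
		return (char *)virt + sizeof(v32);
	}
}

int main(void)
{
	enum ipa_version version = IPA_VERSION_2_6L;
	size_t entry_size = version > IPA_VERSION_2_6L ?
			sizeof(uint64_t) : sizeof(uint32_t);
	uint64_t filtered = 0x3e;		/* example filter bitmap */
	uint64_t zero_rule_addr = 0x1000;	/* stands in for the DMA address */
	uint32_t count = 8;		/* max of filter_count and route_count */
	/* zero rule slot + (bitmap + count) slots, as in ipa_table_init() */
	size_t size = entry_size + (1 + count) * entry_size;
	void *table = calloc(1, size);
	void *virt = table;

	virt = table_write(version, virt, 0);	/* first slot: the zero rule */

	/* filter bitmap slot: v2.x also sets bit 0, pre-v5.0 shifts left by one */
	if (version <= IPA_VERSION_2_6L)
		virt = table_write(version, virt, (filtered << 1) | 1);
	else if (version < IPA_VERSION_5_0)
		virt = table_write(version, virt, filtered << 1);
	else
		virt = table_write(version, virt, filtered);

	/* every remaining slot points at the zero rule */
	while (count--)
		virt = table_write(version, virt, zero_rule_addr);

	printf("table size: %zu bytes (%zu-byte slots)\n", size, entry_size);
	free(table);
	return 0;
}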