@@ -52,7 +52,8 @@
  *
  * Items in the non-cached region are allocated from the start of the partition
  * while items in the cached region are allocated from the end. The free area
- * is hence the region between the cached and non-cached offsets.
+ * is hence the region between the cached and non-cached offsets. The header of
+ * cached items comes after the data.
  *
  *
  * To synchronize allocations in the shared memory heaps a remote spinlock must
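Note, not part of the patch: a sketch of the partition geometry the updated comment refers to. The offset names match fields of struct smem_partition_header used elsewhere in this file; the diagram itself is only illustrative.

	/*
	 *   +------+---------------------+----------+---------------------+
	 *   | phdr | uncached items -->  |   free   |  <-- cached items   |
	 *   +------+---------------------+----------+---------------------+
	 *          ^offset_free_uncached grows up   ^offset_free_cached shrinks down
	 *
	 * An uncached item is stored as [header][padding][data]; a cached item
	 * is stored as [data][header], which is why the cached list is walked
	 * backwards from the end of the partition.
	 */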
@@ -140,6 +141,7 @@ struct smem_header {
  * @flags: flags for the partition (currently unused)
  * @host0: first processor/host with access to this partition
  * @host1: second processor/host with access to this partition
+ * @cacheline: alignment for "cached" entries
  * @reserved: reserved entries for later use
  */
 struct smem_ptable_entry {
@@ -148,7 +150,8 @@ struct smem_ptable_entry {
 	__le32 flags;
 	__le16 host0;
 	__le16 host1;
-	__le32 reserved[8];
+	__le32 cacheline;
+	__le32 reserved[7];
 };
 
 /**
@@ -230,6 +233,7 @@ struct smem_region {
  * @hwlock: reference to a hwspinlock
  * @partitions: list of pointers to partitions affecting the current
  *		processor/host
+ * @cacheline: list of cacheline sizes for each host
  * @num_regions: number of @regions
  * @regions: list of the memory regions defining the shared memory
  */
@@ -239,6 +243,7 @@ struct qcom_smem {
 	struct hwspinlock *hwlock;
 
 	struct smem_partition_header *partitions[SMEM_HOST_COUNT];
+	size_t cacheline[SMEM_HOST_COUNT];
 
 	unsigned num_regions;
 	struct smem_region regions[0];
@@ -252,6 +257,14 @@ phdr_to_last_uncached_entry(struct smem_partition_header *phdr)
 	return p + le32_to_cpu(phdr->offset_free_uncached);
 }
 
+static void *phdr_to_first_cached_entry(struct smem_partition_header *phdr,
+					size_t cacheline)
+{
+	void *p = phdr;
+
+	return p + le32_to_cpu(phdr->size) - ALIGN(sizeof(*phdr), cacheline);
+}
+
 static void *phdr_to_last_cached_entry(struct smem_partition_header *phdr)
 {
 	void *p = phdr;
@@ -276,13 +289,28 @@ uncached_entry_next(struct smem_private_entry *e)
 		le32_to_cpu(e->size);
 }
 
+static struct smem_private_entry *
+cached_entry_next(struct smem_private_entry *e, size_t cacheline)
+{
+	void *p = e;
+
+	return p - le32_to_cpu(e->size) - ALIGN(sizeof(*e), cacheline);
+}
+
 static void *uncached_entry_to_item(struct smem_private_entry *e)
 {
 	void *p = e;
 
 	return p + sizeof(*e) + le16_to_cpu(e->padding_hdr);
 }
 
+static void *cached_entry_to_item(struct smem_private_entry *e)
+{
+	void *p = e;
+
+	return p - le32_to_cpu(e->size);
+}
+
 /* Pointer to the one and only smem handle */
 static struct qcom_smem *__smem;
 
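Note, not part of the patch: a minimal sketch of how the new accessors compose when walking the cached region, assuming phdr and cacheline have already been looked up for the host as qcom_smem_get_private() does below. Variable names are illustrative.

	struct smem_private_entry *e;
	void *item;

	e = phdr_to_first_cached_entry(phdr, cacheline); /* header of newest entry */
	item = cached_entry_to_item(e);                  /* its data precedes it   */
	e = cached_entry_next(e, cacheline);             /* step down towards      */
	                                                 /* offset_free_cached     */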
@@ -458,18 +486,17 @@ static void *qcom_smem_get_private(struct qcom_smem *smem,
 {
 	struct smem_partition_header *phdr;
 	struct smem_private_entry *e, *end;
+	size_t cacheline;
 
 	phdr = smem->partitions[host];
+	cacheline = smem->cacheline[host];
+
 	e = phdr_to_first_uncached_entry(phdr);
 	end = phdr_to_last_uncached_entry(phdr);
 
 	while (e < end) {
-		if (e->canary != SMEM_PRIVATE_CANARY) {
-			dev_err(smem->dev,
-				"Found invalid canary in host %d partition\n",
-				host);
-			return ERR_PTR(-EINVAL);
-		}
+		if (e->canary != SMEM_PRIVATE_CANARY)
+			goto invalid_canary;
 
 		if (le16_to_cpu(e->item) == item) {
 			if (size != NULL)
@@ -482,7 +509,32 @@ static void *qcom_smem_get_private(struct qcom_smem *smem,
 		e = uncached_entry_next(e);
 	}
 
+	/* Item was not found in the uncached list, search the cached list */
+
+	e = phdr_to_first_cached_entry(phdr, cacheline);
+	end = phdr_to_last_cached_entry(phdr);
+
+	while (e > end) {
+		if (e->canary != SMEM_PRIVATE_CANARY)
+			goto invalid_canary;
+
+		if (le16_to_cpu(e->item) == item) {
+			if (size != NULL)
+				*size = le32_to_cpu(e->size) -
+					le16_to_cpu(e->padding_data);
+
+			return cached_entry_to_item(e);
+		}
+
+		e = cached_entry_next(e, cacheline);
+	}
+
 	return ERR_PTR(-ENOENT);
+
+invalid_canary:
+	dev_err(smem->dev, "Found invalid canary in host %d partition\n", host);
+
+	return ERR_PTR(-EINVAL);
 }
 
 /**
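Note, not part of the patch: from a client's point of view nothing changes, since the driver's public qcom_smem_get() accessor goes through this lookup and now finds items in either region. A hypothetical caller, with the host and item id as placeholders:

	size_t size;
	void *ptr;

	ptr = qcom_smem_get(remote_host, SMEM_EXAMPLE_ITEM, &size); /* placeholder ids */
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);
	/* ptr addresses the item's data regardless of whether it was
	 * allocated in the uncached or the cached part of the partition. */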
@@ -659,6 +711,7 @@ static int qcom_smem_enumerate_partitions(struct qcom_smem *smem,
 		}
 
 		smem->partitions[remote_host] = header;
+		smem->cacheline[remote_host] = le32_to_cpu(entry->cacheline);
 	}
 
 	return 0;