  * When it reaches max 255, the allocation cycle starts afresh by flushing
  * the entire TLB and wrapping ASID back to zero.
  *
- * For book-keeping, Linux uses a couple of data-structures:
- * -mm_struct has an @asid field to keep a note of task's ASID (needed at the
- *  time of, say, switch_mm( ))
- * -An array of mm structs @asid_mm_map[] for the asid->mm reverse mapping:
- *  given an ASID, find the associated mm struct.
- *
- * The round-robin allocation algorithm allows for ASID stealing.
- * If asid tracker is at "x-1", a new req will allocate "x", even if "x" was
- * already assigned to another (switched-out) task. Obviously the prev owner
- * is marked with an invalid ASID to make it request for a new ASID when it
- * gets scheduled next time. However its TLB entries (with ASID "x") could
- * exist, which must be cleared before the same ASID is used by the new owner.
- * Flushing them would be a plausible but costly solution. Instead we enforce
- * an allocation policy quirk, which ensures that a stolen ASID won't have any
- * TLB entries associated, alleviating the need to flush.
- * The quirk essentially is not allowing an ASID allocated in the prev cycle
- * to be used past a roll-over into the next cycle.
- * When this happens (i.e. task ASID > asid tracker), the task needs to refresh
- * its ASID, aligning it to the current value of the tracker. If the task
- * doesn't get scheduled past a roll-over, and hence its ASID is not yet
- * realigned with the tracker, such an ASID is anyway safely reusable because
- * it is guaranteed that TLB entries with that ASID won't exist.
+ * A new allocation cycle, post rollover, could potentially reassign an ASID
+ * to a different task. Thus the rule is to refresh the ASID in a new cycle.
+ * The 32 bit @asid_cache (and mm->asid) have the 8 bit MMU PID in the lower
+ * bits, while the remaining 24 bits serve as a cycle/generation indicator;
+ * natural 32 bit unsigned math automagically increments the generation when
+ * the lower 8 bits roll over.
  */

-#define FIRST_ASID	0
-#define MAX_ASID	255	/* 8 bit PID field in PID Aux reg */
-#define NO_ASID		(MAX_ASID + 1)	/* ASID Not alloc to mmu ctxt */
-#define NUM_ASID	((MAX_ASID - FIRST_ASID) + 1)
+#define MM_CTXT_ASID_MASK	0x000000ff	/* MMU PID reg :8 bit PID */
+#define MM_CTXT_CYCLE_MASK	(~MM_CTXT_ASID_MASK)
+
+#define MM_CTXT_FIRST_CYCLE	(MM_CTXT_ASID_MASK + 1)
+#define MM_CTXT_NO_ASID		0UL

-/* ASID to mm struct mapping */
-extern struct mm_struct *asid_mm_map[NUM_ASID + 1];
+#define hw_pid(mm)		(mm->context.asid & MM_CTXT_ASID_MASK)

-extern int asid_cache;
+extern unsigned int asid_cache;

 /*
- * Assign a new ASID to task. If the task already has an ASID, it is
- * relinquished.
+ * Get a new ASID if task doesn't have a valid one (unalloc or from prev cycle)
+ * Also set the MMU PID register to existing/updated ASID
  */
 static inline void get_new_mmu_context(struct mm_struct *mm)
 {
-	struct mm_struct *prev_owner;
 	unsigned long flags;

 	local_irq_save(flags);

 	/*
-	 * Relinquish the currently owned ASID (if any).
-	 * Doing unconditionally saves a cmp-n-branch; for already unused
-	 * ASID slot, the value was/remains NULL
+	 * Move to new ASID if it was not from current alloc-cycle/generation.
+	 * This is done by ensuring that the generation bits in both mm->ASID
+	 * and cpu's ASID counter are exactly the same.
+	 *
+	 * Note: Callers needing a new ASID unconditionally, independent of
+	 *	 generation, e.g. local_flush_tlb_mm() for forking parent,
+	 *	 first need to destroy the context, setting it to invalid
+	 *	 value.
 	 */
-	asid_mm_map[mm->context.asid] = (struct mm_struct *)NULL;
+	if (!((mm->context.asid ^ asid_cache) & MM_CTXT_CYCLE_MASK))
+		goto set_hw;
+
+	/* move to new ASID and handle rollover */
+	if (unlikely(!(++asid_cache & MM_CTXT_ASID_MASK))) {

-	/* move to new ASID */
-	if (++asid_cache > MAX_ASID) {	/* ASID roll-over */
-		asid_cache = FIRST_ASID;
 		flush_tlb_all();
-	}

-	/*
-	 * Is next ASID already owned by some-one else (we are stealing it).
-	 * If so, let the orig owner be aware of this, so when it runs, it
-	 * asks for a brand new ASID. This would only happen for a long-lived
-	 * task with ASID from prev allocation cycle (before ASID roll-over).
-	 *
-	 * This might look wrong - if we are re-using some other task's ASID,
-	 * won't we use its stale TLB entries too? Actually switch_mm( ) takes
-	 * care of such a case: it ensures that a task with an ASID from prev
-	 * alloc cycle, when scheduled, will refresh its ASID: see switch_mm( )
-	 * below. The stealing scenario described here will only happen if that
-	 * task didn't get a chance to refresh its ASID - implying stale entries
-	 * won't exist.
-	 */
-	prev_owner = asid_mm_map[asid_cache];
-	if (prev_owner)
-		prev_owner->context.asid = NO_ASID;
+		/*
+		 * Above check is for rollover of 8 bit ASID in 32 bit container.
+		 * If the container itself wrapped around, set it to a non zero
+		 * "generation" to distinguish from no context
+		 */
+		if (!asid_cache)
+			asid_cache = MM_CTXT_FIRST_CYCLE;
+	}

 	/* Assign new ASID to tsk */
-	asid_mm_map[asid_cache] = mm;
 	mm->context.asid = asid_cache;

-#ifdef CONFIG_ARC_TLB_DBG
-	pr_info("ARC_TLB_DBG: NewMM=0x%x OldMM=0x%x task_struct=0x%x Task: %s,"
-		" pid:%u, assigned asid:%lu\n",
-		(unsigned int)mm, (unsigned int)prev_owner,
-		(unsigned int)(mm->context.tsk), (mm->context.tsk)->comm,
-		(mm->context.tsk)->pid, mm->context.asid);
-#endif
-
-	write_aux_reg(ARC_REG_PID, asid_cache | MMU_ENABLE);
+set_hw:
+	write_aux_reg(ARC_REG_PID, hw_pid(mm) | MMU_ENABLE);

 	local_irq_restore(flags);
 }
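
The cycle/generation arithmetic in get_new_mmu_context() can be exercised in isolation. The sketch below is a minimal userspace model of that arithmetic, not kernel code: fake_mm, asid_is_stale() and alloc_asid() are made-up names, and the real function additionally runs with IRQs disabled, calls flush_tlb_all() on rollover and programs ARC_REG_PID with MMU_ENABLE.

#include <assert.h>
#include <stdio.h>

#define MM_CTXT_ASID_MASK	0x000000ffU	/* 8 bit hardware PID */
#define MM_CTXT_CYCLE_MASK	(~MM_CTXT_ASID_MASK)	/* upper 24 bits: generation */
#define MM_CTXT_FIRST_CYCLE	(MM_CTXT_ASID_MASK + 1)
#define MM_CTXT_NO_ASID		0U

static unsigned int asid_cache = MM_CTXT_FIRST_CYCLE;

struct fake_mm { unsigned int asid; };

/* Stale if the generation bits differ from the allocator's current cycle */
static int asid_is_stale(const struct fake_mm *mm)
{
	return ((mm->asid ^ asid_cache) & MM_CTXT_CYCLE_MASK) != 0;
}

static void alloc_asid(struct fake_mm *mm)
{
	if (!asid_is_stale(mm))
		return;				/* still valid in this cycle */

	if (!(++asid_cache & MM_CTXT_ASID_MASK)) {
		/* 8 bit PID rolled over: the kernel flushes the TLB here,
		 * so the new cycle starts with no stale entries */
		if (!asid_cache)		/* full 32 bit wraparound */
			asid_cache = MM_CTXT_FIRST_CYCLE;
	}
	mm->asid = asid_cache;
}

int main(void)
{
	struct fake_mm a = { MM_CTXT_NO_ASID };
	struct fake_mm b = { MM_CTXT_NO_ASID };

	alloc_asid(&a);				/* first use: gen 0x100, pid 0x01 */
	printf("a: gen %#x pid %#x\n",
	       a.asid & MM_CTXT_CYCLE_MASK, a.asid & MM_CTXT_ASID_MASK);

	asid_cache |= MM_CTXT_ASID_MASK;	/* pretend this cycle is nearly used up */
	alloc_asid(&a);				/* same generation: nothing to do */

	alloc_asid(&b);				/* forces rollover into cycle 0x200 */
	assert(asid_is_stale(&a));		/* a must refresh before running again */
	alloc_asid(&a);
	printf("a: gen %#x pid %#x\n",
	       a.asid & MM_CTXT_CYCLE_MASK, a.asid & MM_CTXT_ASID_MASK);
	return 0;
}

Note how task "a" keeps hardware PID 0x01 across the rollover, but its generation bits no longer match asid_cache, so it is forced onto a fresh ASID in the new cycle instead of silently reusing one that another task may now own.
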
@@ -134,10 +104,7 @@ static inline void get_new_mmu_context(struct mm_struct *mm)
 static inline int
 init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 {
-	mm->context.asid = NO_ASID;
-#ifdef CONFIG_ARC_TLB_DBG
-	mm->context.tsk = tsk;
-#endif
+	mm->context.asid = MM_CTXT_NO_ASID;
 	return 0;
 }

@@ -152,40 +119,21 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 	write_aux_reg(ARC_REG_SCRATCH_DATA0, next->pgd);
 #endif

-	/*
-	 * Get a new ASID if task doesn't have a valid one. Possible when
-	 * -task never had an ASID (fresh after fork)
-	 * -its ASID was stolen - past an ASID roll-over.
-	 * -There's a third obscure scenario (if this task is running for the
-	 *  first time after an ASID rollover), where despite having a valid
-	 *  ASID, we force a get for new ASID - see comments at top.
-	 *
-	 * Both the non-alloc scenario and first-use-after-rollover can be
-	 * detected using the single condition below: NO_ASID = 256
-	 * while asid_cache is always a valid ASID value (0-255).
-	 */
-	if (next->context.asid > asid_cache) {
-		get_new_mmu_context(next);
-	} else {
-		/*
-		 * XXX: This will never happen given the chks above
-		 * BUG_ON(next->context.asid > MAX_ASID);
-		 */
-		write_aux_reg(ARC_REG_PID, next->context.asid | MMU_ENABLE);
-	}
-
+	get_new_mmu_context(next);
 }

+/*
+ * Called at the time of execve() to get a new ASID
+ * Note the subtlety here: get_new_mmu_context() behaves differently here
+ * vs. in switch_mm(). Here it always returns a new ASID, because mm has
+ * an unallocated "initial" value, while in the latter it moves to a new
+ * ASID only if the current one is unallocated or from a previous cycle.
+ */
+#define activate_mm(prev, next)		switch_mm(prev, next, NULL)
+
 static inline void destroy_context(struct mm_struct *mm)
 {
-	unsigned long flags;
-
-	local_irq_save(flags);
-
-	asid_mm_map[mm->context.asid] = NULL;
-	mm->context.asid = NO_ASID;
-
-	local_irq_restore(flags);
+	mm->context.asid = MM_CTXT_NO_ASID;
 }

 /* it seemed that deactivate_mm( ) is a reasonable place to do book-keeping
@@ -197,17 +145,6 @@ static inline void destroy_context(struct mm_struct *mm)
  */
 #define deactivate_mm(tsk, mm)		do { } while (0)

-static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
-{
-#ifndef CONFIG_SMP
-	write_aux_reg(ARC_REG_SCRATCH_DATA0, next->pgd);
-#endif
-
-	/* Unconditionally get a new ASID */
-	get_new_mmu_context(next);
-
-}
-
 #define enter_lazy_tlb(mm, tsk)

 #endif	/* __ASM_ARC_MMU_CONTEXT_H */
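
A note on the MM_CTXT_NO_ASID choice: init_new_context() and destroy_context() both park mm->context.asid at 0, and the rollover path deliberately skips 0 by restarting asid_cache at MM_CTXT_FIRST_CYCLE. The generation bits of a live asid_cache are therefore never all zero, so an mm with no ASID always fails the cycle comparison and gets a fresh allocation; callers that need a new ASID unconditionally (the local_flush_tlb_mm() case in the comment above) only have to destroy the context first. Below is a hypothetical standalone check of that invariant, using the patch's mask values; the sample asid_cache values are arbitrary illustrations, not taken from the patch.

#include <assert.h>

#define MM_CTXT_ASID_MASK	0x000000ffU
#define MM_CTXT_CYCLE_MASK	(~MM_CTXT_ASID_MASK)
#define MM_CTXT_FIRST_CYCLE	(MM_CTXT_ASID_MASK + 1)
#define MM_CTXT_NO_ASID		0U

int main(void)
{
	/* Representative asid_cache values: boot value, end of the first
	 * cycle, an arbitrary later generation, and the last value before
	 * 32 bit wraparound (after which the allocator resets the counter
	 * to MM_CTXT_FIRST_CYCLE rather than letting it reach 0).
	 */
	unsigned int samples[] = {
		MM_CTXT_FIRST_CYCLE,
		MM_CTXT_FIRST_CYCLE | MM_CTXT_ASID_MASK,
		0x00042017,
		0xffffffffU,
	};
	unsigned int i;

	/* An mm holding MM_CTXT_NO_ASID never matches the current cycle */
	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		assert((MM_CTXT_NO_ASID ^ samples[i]) & MM_CTXT_CYCLE_MASK);

	return 0;
}

This same invariant is what lets activate_mm() simply alias switch_mm(): the freshly created mm carries MM_CTXT_NO_ASID, so get_new_mmu_context() is guaranteed to hand it a new ASID on first use.
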