@@ -95,28 +95,15 @@ static long mm_iommu_do_alloc(struct mm_struct *mm, unsigned long ua,
                unsigned long entries, unsigned long dev_hpa,
                struct mm_iommu_table_group_mem_t **pmem)
 {
-       struct mm_iommu_table_group_mem_t *mem;
-       long i, ret, locked_entries = 0;
+       struct mm_iommu_table_group_mem_t *mem, *mem2;
+       long i, ret, locked_entries = 0, pinned = 0;
        unsigned int pageshift;
-
-       mutex_lock(&mem_list_mutex);
-
-       list_for_each_entry_rcu(mem, &mm->context.iommu_group_mem_list,
-                       next) {
-               /* Overlap? */
-               if ((mem->ua < (ua + (entries << PAGE_SHIFT))) &&
-                               (ua < (mem->ua +
-                                      (mem->entries << PAGE_SHIFT)))) {
-                       ret = -EINVAL;
-                       goto unlock_exit;
-               }
-
-       }
+       unsigned long entry, chunk;

        if (dev_hpa == MM_IOMMU_TABLE_INVALID_HPA) {
                ret = mm_iommu_adjust_locked_vm(mm, entries, true);
                if (ret)
-                       goto unlock_exit;
+                       return ret;

                locked_entries = entries;
        }
@@ -148,17 +135,27 @@ static long mm_iommu_do_alloc(struct mm_struct *mm, unsigned long ua,
        }

        down_read(&mm->mmap_sem);
-       ret = get_user_pages_longterm(ua, entries, FOLL_WRITE, mem->hpages, NULL);
+       chunk = (1UL << (PAGE_SHIFT + MAX_ORDER - 1)) /
+                       sizeof(struct vm_area_struct *);
+       chunk = min(chunk, entries);
+       for (entry = 0; entry < entries; entry += chunk) {
+               unsigned long n = min(entries - entry, chunk);
+
+               ret = get_user_pages_longterm(ua + (entry << PAGE_SHIFT), n,
+                               FOLL_WRITE, mem->hpages + entry, NULL);
+               if (ret == n) {
+                       pinned += n;
+                       continue;
+               }
+               if (ret > 0)
+                       pinned += ret;
+               break;
+       }
        up_read(&mm->mmap_sem);
-       if (ret != entries) {
-               /* free the reference taken */
-               for (i = 0; i < ret; i++)
-                       put_page(mem->hpages[i]);
-
-               vfree(mem->hpas);
-               kfree(mem);
-               ret = -EFAULT;
-               goto unlock_exit;
+       if (pinned != entries) {
+               if (!ret)
+                       ret = -EFAULT;
+               goto free_exit;
        }

        pageshift = PAGE_SHIFT;
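Note on the chunk size above: it caps each get_user_pages_longterm() call so that the vm_area_struct pointer array the helper may allocate internally fits in a single MAX_ORDER allocation, i.e. (1UL << (PAGE_SHIFT + MAX_ORDER - 1)) bytes divided by the size of one pointer. With 4K pages, MAX_ORDER = 11 and 8-byte pointers, for example, that works out to (1 << 22) / 8 = 524288 entries per call. Below is a minimal userspace sketch of the same partial-success bookkeeping; pin_range() and the constants are hypothetical stand-ins for get_user_pages_longterm() and the kernel limits, not part of the patch.

#include <stdio.h>

#define TOTAL_PAGES 10000UL
#define CHUNK        4096UL   /* stand-in for the MAX_ORDER-derived cap */

/* Hypothetical stand-in for get_user_pages_longterm(): pins up to n
 * pages starting at 'first' and returns how many it actually pinned
 * (or a negative errno). */
static long pin_range(unsigned long first, unsigned long n)
{
        (void)first;
        return (long)n;       /* pretend every page pinned successfully */
}

int main(void)
{
        unsigned long entry, pinned = 0;
        long ret = 0;

        for (entry = 0; entry < TOTAL_PAGES; entry += CHUNK) {
                unsigned long n = TOTAL_PAGES - entry < CHUNK ?
                                TOTAL_PAGES - entry : CHUNK;

                ret = pin_range(entry, n);
                if (ret == (long)n) {   /* whole chunk pinned, keep going */
                        pinned += n;
                        continue;
                }
                if (ret > 0)            /* a short pin still holds refs */
                        pinned += ret;
                break;                  /* stop on short or failed pin */
        }

        printf("pinned %lu of %lu pages (last ret=%ld)\n",
               pinned, TOTAL_PAGES, ret);
        return pinned == TOTAL_PAGES ? 0 : 1;
}

The detail the loop must preserve, and the sketch mimics, is that a short positive return still holds page references, so they are added to pinned and later dropped one by one on the free_exit path.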
@@ -183,21 +180,43 @@ static long mm_iommu_do_alloc(struct mm_struct *mm, unsigned long ua,
        }

 good_exit:
-       ret = 0;
        atomic64_set(&mem->mapped, 1);
        mem->used = 1;
        mem->ua = ua;
        mem->entries = entries;
-       *pmem = mem;

-       list_add_rcu(&mem->next, &mm->context.iommu_group_mem_list);
+       mutex_lock(&mem_list_mutex);

-unlock_exit:
-       if (locked_entries && ret)
-               mm_iommu_adjust_locked_vm(mm, locked_entries, false);
+       list_for_each_entry_rcu(mem2, &mm->context.iommu_group_mem_list, next) {
+               /* Overlap? */
+               if ((mem2->ua < (ua + (entries << PAGE_SHIFT))) &&
+                               (ua < (mem2->ua +
+                                      (mem2->entries << PAGE_SHIFT)))) {
+                       ret = -EINVAL;
+                       mutex_unlock(&mem_list_mutex);
+                       goto free_exit;
+               }
+       }
+
+       list_add_rcu(&mem->next, &mm->context.iommu_group_mem_list);

        mutex_unlock(&mem_list_mutex);

+       *pmem = mem;
+
+       return 0;
+
+free_exit:
+       /* free the reference taken */
+       for (i = 0; i < pinned; i++)
+               put_page(mem->hpages[i]);
+
+       vfree(mem->hpas);
+       kfree(mem);
+
+unlock_exit:
+       mm_iommu_adjust_locked_vm(mm, locked_entries, false);
+
        return ret;
 }

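The relocated check is the standard half-open interval overlap test, two ranges [a, a+la) and [b, b+lb) intersect iff a < b+lb and b < a+la, and it now runs under mem_list_mutex only after the pages are pinned, so the mutex is no longer held across get_user_pages_longterm(). A self-contained sketch of the predicate (the helper name is made up for illustration):

#include <assert.h>

/* Half-open byte ranges [ua1, ua1 + size1) and [ua2, ua2 + size2)
 * overlap iff each one starts before the other ends -- the same test
 * the patch applies with size = entries << PAGE_SHIFT. */
static int ranges_overlap(unsigned long ua1, unsigned long size1,
                          unsigned long ua2, unsigned long size2)
{
        return (ua1 < ua2 + size2) && (ua2 < ua1 + size1);
}

int main(void)
{
        assert(ranges_overlap(0x1000, 0x1000, 0x1800, 0x1000));  /* partial */
        assert(!ranges_overlap(0x1000, 0x1000, 0x2000, 0x1000)); /* adjacent */
        return 0;
}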
@@ -266,7 +285,7 @@ static void mm_iommu_release(struct mm_iommu_table_group_mem_t *mem)
 long mm_iommu_put(struct mm_struct *mm, struct mm_iommu_table_group_mem_t *mem)
 {
        long ret = 0;
-       unsigned long entries, dev_hpa;
+       unsigned long unlock_entries = 0;

        mutex_lock(&mem_list_mutex);

@@ -287,17 +306,17 @@ long mm_iommu_put(struct mm_struct *mm, struct mm_iommu_table_group_mem_t *mem)
                goto unlock_exit;
        }

+       if (mem->dev_hpa == MM_IOMMU_TABLE_INVALID_HPA)
+               unlock_entries = mem->entries;
+
        /* @mapped became 0 so now mappings are disabled, release the region */
-       entries = mem->entries;
-       dev_hpa = mem->dev_hpa;
        mm_iommu_release(mem);

-       if (dev_hpa == MM_IOMMU_TABLE_INVALID_HPA)
-               mm_iommu_adjust_locked_vm(mm, entries, false);
-
 unlock_exit:
        mutex_unlock(&mem_list_mutex);

+       mm_iommu_adjust_locked_vm(mm, unlock_entries, false);
+
        return ret;
 }
 EXPORT_SYMBOL_GPL(mm_iommu_put);
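In both mm_iommu_do_alloc() and mm_iommu_put() the locked_vm adjustment now runs after mem_list_mutex is dropped; since mm_iommu_adjust_locked_vm() takes mmap_sem internally, the rework keeps those two locks from nesting. A rough userspace model of the resulting mm_iommu_put() shape, using pthread mutexes as stand-ins (all names here are illustrative, not kernel API):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER; /* ~mem_list_mutex */
static pthread_mutex_t mm_lock   = PTHREAD_MUTEX_INITIALIZER; /* ~mmap_sem */
static long locked_vm;

/* Mirrors the reworked mm_iommu_put(): decide how much to unaccount
 * while the region is still valid under list_lock, but only touch the
 * mm-wide counter after dropping it, so mm_lock is never acquired
 * while list_lock is held. */
static void put_region(long entries)
{
        long unlock_entries;

        pthread_mutex_lock(&list_lock);
        unlock_entries = entries;       /* snapshot before release */
        /* ... remove the region from the list here ... */
        pthread_mutex_unlock(&list_lock);

        pthread_mutex_lock(&mm_lock);
        locked_vm -= unlock_entries;
        pthread_mutex_unlock(&mm_lock);
}

int main(void)
{
        locked_vm = 16;
        put_region(16);
        printf("locked_vm = %ld\n", locked_vm);
        return 0;
}

This also explains the new unlock_entries local: the snapshot is taken while mem is still alive, because by the time the counter is adjusted, mm_iommu_release() may already have freed the region.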