2424#include <asm/page.h>
2525#include <asm/desc.h>
2626#include <asm/cpufeature.h>
27+ #include <asm/vdso/vsyscall.h>
2728#include <clocksource/hyperv_timer.h>
2829
2930struct vdso_data * arch_get_vdso_data (void * vvar_page )
@@ -175,19 +176,7 @@ static vm_fault_t vvar_fault(const struct vm_special_mapping *sm,
175176 }
176177
177178 return vmf_insert_pfn (vma , vmf -> address , pfn );
178- } else if (sym_offset == image -> sym_pvclock_page ) {
179- struct pvclock_vsyscall_time_info * pvti =
180- pvclock_get_pvti_cpu0_va ();
181- if (pvti && vclock_was_used (VDSO_CLOCKMODE_PVCLOCK )) {
182- return vmf_insert_pfn_prot (vma , vmf -> address ,
183- __pa (pvti ) >> PAGE_SHIFT ,
184- pgprot_decrypted (vma -> vm_page_prot ));
185- }
186- } else if (sym_offset == image -> sym_hvclock_page ) {
187- pfn = hv_get_tsc_pfn ();
188179
189- if (pfn && vclock_was_used (VDSO_CLOCKMODE_HVCLOCK ))
190- return vmf_insert_pfn (vma , vmf -> address , pfn );
191180 } else if (sym_offset == image -> sym_timens_page ) {
192181 struct page * timens_page = find_timens_vvar_page (vma );
193182
@@ -201,6 +190,33 @@ static vm_fault_t vvar_fault(const struct vm_special_mapping *sm,
201190 return VM_FAULT_SIGBUS ;
202191}
203192
193+ static vm_fault_t vvar_vclock_fault (const struct vm_special_mapping * sm ,
194+ struct vm_area_struct * vma , struct vm_fault * vmf )
195+ {
196+ switch (vmf -> pgoff ) {
197+ #ifdef CONFIG_PARAVIRT_CLOCK
198+ case VDSO_PAGE_PVCLOCK_OFFSET :
199+ struct pvclock_vsyscall_time_info * pvti =
200+ pvclock_get_pvti_cpu0_va ();
201+ if (pvti && vclock_was_used (VDSO_CLOCKMODE_PVCLOCK ))
202+ return vmf_insert_pfn_prot (vma , vmf -> address ,
203+ __pa (pvti ) >> PAGE_SHIFT ,
204+ pgprot_decrypted (vma -> vm_page_prot ));
205+ break ;
206+ #endif /* CONFIG_PARAVIRT_CLOCK */
207+ #ifdef CONFIG_HYPERV_TIMER
208+ case VDSO_PAGE_HVCLOCK_OFFSET :
209+ unsigned long pfn = hv_get_tsc_pfn ();
210+
211+ if (pfn && vclock_was_used (VDSO_CLOCKMODE_HVCLOCK ))
212+ return vmf_insert_pfn (vma , vmf -> address , pfn );
213+ break ;
214+ #endif /* CONFIG_HYPERV_TIMER */
215+ }
216+
217+ return VM_FAULT_SIGBUS ;
218+ }
219+
204220static const struct vm_special_mapping vdso_mapping = {
205221 .name = "[vdso]" ,
206222 .fault = vdso_fault ,
@@ -210,6 +226,10 @@ static const struct vm_special_mapping vvar_mapping = {
210226 .name = "[vvar]" ,
211227 .fault = vvar_fault ,
212228};
/*
 * Special mapping backing the virtual clock (pvclock/Hyper-V TSC) pages;
 * faults on it are resolved by vvar_vclock_fault().
 */
static const struct vm_special_mapping vvar_vclock_mapping = {
	.name = "[vvar_vclock]",
	.fault = vvar_vclock_fault,
};
213233
214234/*
215235 * Add vdso and vvar mappings to current process.
@@ -252,19 +272,34 @@ static int map_vdso(const struct vdso_image *image, unsigned long addr)
252272
253273 vma = _install_special_mapping (mm ,
254274 addr ,
255- - image -> sym_vvar_start ,
275+ ( __VVAR_PAGES - VDSO_NR_VCLOCK_PAGES ) * PAGE_SIZE ,
256276 VM_READ |VM_MAYREAD |VM_IO |VM_DONTDUMP |
257277 VM_PFNMAP ,
258278 & vvar_mapping );
259279
260280 if (IS_ERR (vma )) {
261281 ret = PTR_ERR (vma );
262282 do_munmap (mm , text_start , image -> size , NULL );
263- } else {
264- current -> mm -> context .vdso = (void __user * )text_start ;
265- current -> mm -> context .vdso_image = image ;
283+ goto up_fail ;
266284 }
267285
286+ vma = _install_special_mapping (mm ,
287+ addr + (__VVAR_PAGES - VDSO_NR_VCLOCK_PAGES ) * PAGE_SIZE ,
288+ VDSO_NR_VCLOCK_PAGES * PAGE_SIZE ,
289+ VM_READ |VM_MAYREAD |VM_IO |VM_DONTDUMP |
290+ VM_PFNMAP ,
291+ & vvar_vclock_mapping );
292+
293+ if (IS_ERR (vma )) {
294+ ret = PTR_ERR (vma );
295+ do_munmap (mm , text_start , image -> size , NULL );
296+ do_munmap (mm , addr , image -> size , NULL );
297+ goto up_fail ;
298+ }
299+
300+ current -> mm -> context .vdso = (void __user * )text_start ;
301+ current -> mm -> context .vdso_image = image ;
302+
268303up_fail :
269304 mmap_write_unlock (mm );
270305 return ret ;
@@ -286,7 +321,8 @@ int map_vdso_once(const struct vdso_image *image, unsigned long addr)
286321 */
287322 for_each_vma (vmi , vma ) {
288323 if (vma_is_special_mapping (vma , & vdso_mapping ) ||
289- vma_is_special_mapping (vma , & vvar_mapping )) {
324+ vma_is_special_mapping (vma , & vvar_mapping ) ||
325+ vma_is_special_mapping (vma , & vvar_vclock_mapping )) {
290326 mmap_write_unlock (mm );
291327 return - EEXIST ;
292328 }