
Enable ib_dev.mmap function

Removed the #ifdef __linux__ guard from this function.
Added stub functions for contiguous pages to avoid compilation
errors.

Submitted by:	Orit Moskovich (oritm mellanox.com)
Approved by:	re
commit 7a625ace1a94bcf74742c5b6be12d8f9a8f57b2e (parent: 8ecd1f0)
authored by @splbio
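For context on why registering ib_dev.mmap unconditionally matters: the uverbs character device forwards userspace mmap() calls to this per-driver hook. A rough sketch of that dispatch, assuming the usual OFED uverbs structures (this is an approximation by the editor, not code from this commit):

static int ib_uverbs_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct ib_uverbs_file *file = filp->private_data;

	/* No ucontext yet means the device cannot service mmap(). */
	if (!file->ucontext)
		return -ENODEV;
	/* Dispatch to the driver hook, e.g. mlx4_ib_mmap below. */
	return file->device->ib_dev->mmap(file->ucontext, vma);
}

With the hook previously guarded by #ifdef __linux__, this dispatch had nothing to call on FreeBSD.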
sys/ofed/drivers/infiniband/core/umem.c
@@ -530,3 +530,46 @@ int ib_umem_page_count(struct ib_umem *umem)
return n;
}
EXPORT_SYMBOL(ib_umem_page_count);
+
+/**********************************************/
+/*
+ * Stub functions for contiguous pages -
+ * We currently do not support this feature
+ */
+/**********************************************/
+
+/**
+ * ib_cmem_release_contiguous_pages - release memory allocated by
+ * ib_cmem_alloc_contiguous_pages.
+ * @cmem: cmem struct to release
+ */
+void ib_cmem_release_contiguous_pages(struct ib_cmem *cmem)
+{
+}
+EXPORT_SYMBOL(ib_cmem_release_contiguous_pages);
+
+/**
+ * ib_cmem_alloc_contiguous_pages - allocate contiguous pages
+ * @context: userspace context to allocate memory for
+ * @total_size: total required size for that allocation.
+ * @page_size_order: order of one contiguous page.
+ */
+struct ib_cmem *ib_cmem_alloc_contiguous_pages(struct ib_ucontext *context,
+ unsigned long total_size,
+ unsigned long page_size_order)
+{
+ return NULL;
+}
+EXPORT_SYMBOL(ib_cmem_alloc_contiguous_pages);
+
+/**
+ * ib_cmem_map_contiguous_pages_to_vma - map contiguous pages into VMA
+ * @ib_cmem: cmem structure returned by ib_cmem_alloc_contiguous_pages
+ * @vma: VMA to inject pages into.
+ */
+int ib_cmem_map_contiguous_pages_to_vma(struct ib_cmem *ib_cmem,
+ struct vm_area_struct *vma)
+{
+ return 0;
+}
+EXPORT_SYMBOL(ib_cmem_map_contiguous_pages_to_vma);
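Since the stubs report the feature as unsupported (the allocator always returns NULL), any consumer has to handle that result explicitly. A minimal caller sketch; map_contig_sketch is a made-up name for illustration, not part of the commit:

static int map_contig_sketch(struct ib_ucontext *context,
    struct vm_area_struct *vma,
    unsigned long size, unsigned long order)
{
	struct ib_cmem *cmem;

	cmem = ib_cmem_alloc_contiguous_pages(context, size, order);
	if (cmem == NULL)
		return -ENOMEM;	/* stub path: contiguous pages unsupported */
	return ib_cmem_map_contiguous_pages_to_vma(cmem, vma);
}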
sys/ofed/drivers/infiniband/hw/mlx4/main.c
@@ -726,6 +726,7 @@ static unsigned long mlx4_ib_get_unmapped_area(struct file *file,
addr = ALIGN(vma->vm_end, 1 << page_size_order);
}
}
+#endif
static int mlx4_ib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
@@ -780,7 +781,6 @@ static int mlx4_ib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
return 0;
}
-#endif
static struct ib_pd *mlx4_ib_alloc_pd(struct ib_device *ibdev,
struct ib_ucontext *context,
@@ -1984,8 +1984,8 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
ibdev->ib_dev.modify_port = mlx4_ib_modify_port;
ibdev->ib_dev.alloc_ucontext = mlx4_ib_alloc_ucontext;
ibdev->ib_dev.dealloc_ucontext = mlx4_ib_dealloc_ucontext;
-#ifdef __linux__
ibdev->ib_dev.mmap = mlx4_ib_mmap;
+#ifdef __linux__
ibdev->ib_dev.get_unmapped_area = mlx4_ib_get_unmapped_area;
#endif
ibdev->ib_dev.alloc_pd = mlx4_ib_alloc_pd;
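Net effect in main.c, as the hunks read: the #endif moves up so that mlx4_ib_mmap itself compiles on all platforms, and the registration block ends up guarding only get_unmapped_area, a Linux mm hook with no direct FreeBSD counterpart. The registration now reads:

ibdev->ib_dev.mmap = mlx4_ib_mmap;
#ifdef __linux__
ibdev->ib_dev.get_unmapped_area = mlx4_ib_get_unmapped_area;
#endif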
sys/ofed/include/rdma/ib_umem.h
@@ -57,6 +57,24 @@ struct ib_umem {
unsigned long diff;
};
+struct ib_cmem {
+
+ struct ib_ucontext *context;
+ size_t length;
+ /* Linked list of the contiguous blocks making up this cmem */
+ struct list_head ib_cmem_block;
+
+ /* Order of a cmem block; 2^block_order equals the number of
+  * physical pages per block.
+  */
+ unsigned long block_order;
+ /* Reference counter for this memory area; when it reaches 0,
+  * the pages are returned to the kernel.
+  */
+ struct kref refcount;
+};
+
+
struct ib_umem_chunk {
struct list_head list;
int nents;
@@ -70,4 +88,14 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
void ib_umem_release(struct ib_umem *umem);
int ib_umem_page_count(struct ib_umem *umem);
+int ib_cmem_map_contiguous_pages_to_vma(struct ib_cmem *ib_cmem,
+ struct vm_area_struct *vma);
+struct ib_cmem *ib_cmem_alloc_contiguous_pages(struct ib_ucontext *context,
+ unsigned long total_size,
+ unsigned long page_size_order);
+void ib_cmem_release_contiguous_pages(struct ib_cmem *cmem);
+int ib_umem_map_to_vma(struct ib_umem *umem,
+ struct vm_area_struct *vma);
+
+
#endif /* IB_UMEM_H */
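The refcount field in struct ib_cmem implies the standard kref pattern once the feature is actually implemented. A minimal sketch; ib_cmem_hold, ib_cmem_put, and the release callback are hypothetical names chosen by the editor, not part of this commit:

#include <linux/kref.h>
#include <linux/slab.h>

static void ib_cmem_kref_release(struct kref *kref)
{
	struct ib_cmem *cmem = container_of(kref, struct ib_cmem, refcount);

	/* A real implementation would walk cmem->ib_cmem_block and
	 * hand each contiguous block back to the kernel here. */
	kfree(cmem);
}

static void ib_cmem_hold(struct ib_cmem *cmem)
{
	kref_get(&cmem->refcount);
}

static void ib_cmem_put(struct ib_cmem *cmem)
{
	kref_put(&cmem->refcount, ib_cmem_kref_release);
}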