CHROMIUM: drm/virtio: rebase zero-copy patches to virgl/drm-misc-next
* Adds RESOURCE_MAP/RESOURCE_UNMAP
* Removes guest_memory_type/guest_caching_type in favor of a bitmask
* Removes EXECBUFFER_v2 until Q3
* Renames HOST_COHERENT to HOST_VISIBLE

BUG=chromium:924405
TEST=compile

Test:
- dEQP-VK.smoke* pass with gfxstream and host coherent memory enabled
- launch_cvd with 2d, virgl, and gfxstream modes works with current crosvm
- launch_cvd with 2d, virgl, and gfxstream modes works with crosvm modified
  for host coherent memory
  (https://chromium-review.googlesource.com/c/chromiumos/platform/crosvm/+/2035595)

Signed-off-by: Lingfeng Yang <lfy@google.com>
Bug: 153580313
Change-Id: I04052c3d164c77c713bbc7251c357fd43653fa50
741g authored and adelva1984 committed Apr 17, 2020
1 parent 8c6da98 commit 4d701a3
Showing 11 changed files with 280 additions and 450 deletions.
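
The bitmask mentioned in the commit message replaces the guest_memory_type/caching_type pair with per-resource blob flags, and two new getparams (VIRTGPU_PARAM_RESOURCE_BLOB, VIRTGPU_PARAM_HOST_VISIBLE) let userspace probe for the feature before using it. Below is a minimal userspace sketch, not part of the patch: the flag, param, and field names are taken from the virtgpu_ioctl.c hunks further down, while the ioctl macro names, the header path, and the exact layout of struct drm_virtgpu_resource_create_blob are assumed to follow the usual virtgpu UAPI conventions.

/*
 * Hedged userspace sketch: probe the new params, then allocate a
 * host-visible (host-backed, mappable) blob.  Error handling is elided.
 */
#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include "virtgpu_drm.h"   /* virtio-gpu UAPI header carrying the new struct and flags */

static int virtgpu_get_param(int fd, uint64_t param, int *value)
{
	struct drm_virtgpu_getparam gp = {
		.param = param,
		.value = (uint64_t)(uintptr_t)value,  /* kernel copies an int back here */
	};

	return drmIoctl(fd, DRM_IOCTL_VIRTGPU_GETPARAM, &gp);
}

static int create_host_visible_blob(int fd, uint64_t size, uint64_t memory_id,
				    uint32_t *bo_handle, uint32_t *res_handle)
{
	int has_blob = 0, has_host_visible = 0;
	struct drm_virtgpu_resource_create_blob rc_blob;

	if (virtgpu_get_param(fd, VIRTGPU_PARAM_RESOURCE_BLOB, &has_blob) ||
	    virtgpu_get_param(fd, VIRTGPU_PARAM_HOST_VISIBLE, &has_host_visible) ||
	    !has_blob || !has_host_visible)
		return -1;

	memset(&rc_blob, 0, sizeof(rc_blob));
	rc_blob.size = size;
	rc_blob.memory_id = memory_id;                 /* host allocation backing the blob */
	rc_blob.flags = VIRTGPU_RES_BLOB_USE_MAPPABLE; /* host-backed, guest bits clear */

	if (drmIoctl(fd, DRM_IOCTL_VIRTGPU_RESOURCE_CREATE_BLOB, &rc_blob))
		return -1;

	*bo_handle = rc_blob.bo_handle;    /* GEM handle */
	*res_handle = rc_blob.res_handle;  /* host-side resource id */
	return 0;
}

With VIRTGPU_RES_BLOB_USE_MAPPABLE set and no guest bits, the ioctl path in the virtgpu_ioctl.c hunk skips the guest scatterlist entirely (nents == 0) and issues RESOURCE_MAP against the object's offset in the host-visible region.
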
5 changes: 2 additions & 3 deletions drivers/gpu/drm/virtio/virtgpu_debugfs.c
@@ -47,9 +47,8 @@ static int virtio_gpu_features(struct seq_file *m, void *data)

virtio_add_bool(m, "virgl", vgdev->has_virgl_3d);
virtio_add_bool(m, "edid", vgdev->has_edid);
virtio_add_bool(m, "resource v2", vgdev->has_resource_v2);
virtio_add_bool(m, "shared guest", vgdev->has_shared_guest);
virtio_add_bool(m, "host coherent", vgdev->has_host_coherent);
virtio_add_bool(m, "resource blob", vgdev->has_resource_blob);
virtio_add_bool(m, "host visible", vgdev->has_host_visible);
virtio_add_int(m, "cap sets", vgdev->num_capsets);
virtio_add_int(m, "scanouts", vgdev->num_scanouts);
return 0;
5 changes: 2 additions & 3 deletions drivers/gpu/drm/virtio/virtgpu_drv.c
@@ -178,9 +178,8 @@ static unsigned int features[] = {
VIRTIO_GPU_F_VIRGL,
#endif
VIRTIO_GPU_F_EDID,
VIRTIO_GPU_F_RESOURCE_V2,
VIRTIO_GPU_F_SHARED_GUEST,
VIRTIO_GPU_F_HOST_COHERENT,
VIRTIO_GPU_F_RESOURCE_BLOB,
VIRTIO_GPU_F_HOST_VISIBLE,
};
static struct virtio_driver virtio_gpu_driver = {
.feature_table = features,
54 changes: 22 additions & 32 deletions drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -58,9 +58,8 @@ struct virtio_gpu_object_params {
bool dumb;
/* 3d */
bool virgl;
bool resource_v2;
enum virtio_gpu_memory_type guest_memory_type;
enum virtio_gpu_caching_type caching_type;
bool blob;
uint32_t blob_flags;
uint32_t target;
uint32_t bind;
uint32_t depth;
@@ -85,14 +84,13 @@ struct virtio_gpu_object {
uint32_t mapped;
void *vmap;
bool dumb;
bool resource_v2;
bool blob;
struct ttm_place placement_code;
struct ttm_placement placement;
struct ttm_buffer_object tbo;
struct ttm_bo_kmap_obj kmap;
bool created;
enum virtio_gpu_memory_type guest_memory_type;
enum virtio_gpu_caching_type caching_type;
uint32_t blob_flags;
};
#define gem_to_virtio_gpu_obj(gobj) \
container_of((gobj), struct virtio_gpu_object, gem_base)
@@ -201,12 +199,6 @@ struct virtio_gpu_drv_cap_cache {
atomic_t is_valid;
};

struct virtio_gpu_allocation_metadata_response {
bool callback_done;
struct virtio_gpu_resp_allocation_metadata info;
uint32_t response_data[];
};

struct virtio_gpu_device {
struct device *dev;
struct drm_device *ddev;
@@ -240,9 +232,8 @@ struct virtio_gpu_device {

bool has_virgl_3d;
bool has_edid;
bool has_resource_v2;
bool has_shared_guest;
bool has_host_coherent;
bool has_resource_blob;
bool has_host_visible;

struct work_struct config_changed_work;

@@ -374,24 +365,23 @@ virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
struct virtio_gpu_object *bo,
struct virtio_gpu_object_params *params,
struct virtio_gpu_fence *fence);

void
virtio_gpu_cmd_resource_create_v2(struct virtio_gpu_device *vgdev,
uint32_t resource_id, uint32_t guest_memory_type,
uint32_t caching_type, uint64_t size,
uint64_t pci_addr, uint32_t nents,
uint32_t args_size, void *data, uint32_t data_size,
struct virtio_gpu_fence *fence);
void
virtio_gpu_cmd_resource_v2_unref(struct virtio_gpu_device *vgdev,
uint32_t resource_id,
struct virtio_gpu_fence *fence);
int
virtio_gpu_cmd_allocation_metadata(struct virtio_gpu_device *vgdev,
uint32_t request_id,
uint32_t request_size,
uint32_t response_size,
void *request,
struct virtio_gpu_fence *fence);
virtio_gpu_cmd_resource_create_blob(struct virtio_gpu_device *vgdev,
struct virtio_gpu_object *bo,
uint32_t ctx_id, uint32_t flags,
uint64_t size, uint64_t memory_id,
uint32_t nents,
struct virtio_gpu_mem_entry *ents);

void virtio_gpu_cmd_map(struct virtio_gpu_device *vgdev,
struct virtio_gpu_object *bo,
uint64_t offset,
struct virtio_gpu_fence *fence);

void virtio_gpu_cmd_unmap(struct virtio_gpu_device *vgdev,
uint32_t resource_id);

void virtio_gpu_ctrl_ack(struct virtqueue *vq);
void virtio_gpu_cursor_ack(struct virtqueue *vq);
void virtio_gpu_fence_ack(struct virtqueue *vq);
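
The prototypes above carry the RESOURCE_CREATE_BLOB and RESOURCE_MAP/RESOURCE_UNMAP commands named in the commit message. A hedged in-kernel sketch of the intended call order for a host-backed, mappable blob follows, mirroring the new ioctl path in virtgpu_ioctl.c below; virtio_gpu_fence_alloc() and the bo fields are assumed to be the existing driver helpers, and object setup and error handling are elided.

/*
 * Hedged sketch, not part of the patch: create a host-only blob (no guest
 * pages, so nents == 0 and ents == NULL), then RESOURCE_MAP it at the
 * object's offset in the host-visible region.  RESOURCE_UNMAP is issued
 * before the resource is torn down.
 */
static void example_blob_map_lifecycle(struct virtio_gpu_device *vgdev,
				       struct virtio_gpu_object *bo,
				       uint32_t ctx_id, uint64_t size,
				       uint64_t memory_id)
{
	struct virtio_gpu_fence *fence = virtio_gpu_fence_alloc(vgdev);

	virtio_gpu_cmd_resource_create_blob(vgdev, bo, ctx_id,
					    VIRTGPU_RES_BLOB_USE_MAPPABLE,
					    size, memory_id, 0, NULL);

	/* RESOURCE_MAP: host places the blob at this offset for the guest. */
	virtio_gpu_cmd_map(vgdev, bo, bo->tbo.offset, fence);

	/* ... guest uses the mapping ... */

	/* RESOURCE_UNMAP before destroying the object. */
	virtio_gpu_cmd_unmap(vgdev, bo->hw_res_handle);

	dma_fence_put(&fence->f);
}
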
4 changes: 2 additions & 2 deletions drivers/gpu/drm/virtio/virtgpu_gem.c
@@ -134,7 +134,7 @@ int virtio_gpu_gem_object_open(struct drm_gem_object *obj,
struct virtio_gpu_object *qobj = gem_to_virtio_gpu_obj(obj);
int r;

if (!vgdev->has_virgl_3d || qobj->resource_v2)
if (!vgdev->has_virgl_3d)
return 0;

r = virtio_gpu_object_reserve(qobj, false);
@@ -155,7 +155,7 @@ void virtio_gpu_gem_object_close(struct drm_gem_object *obj,
struct virtio_gpu_object *qobj = gem_to_virtio_gpu_obj(obj);
int r;

if (!vgdev->has_virgl_3d || qobj->resource_v2)
if (!vgdev->has_virgl_3d)
return;

r = virtio_gpu_object_reserve(qobj, false);
186 changes: 47 additions & 139 deletions drivers/gpu/drm/virtio/virtgpu_ioctl.c
@@ -261,6 +261,12 @@ static int virtio_gpu_getparam_ioctl(struct drm_device *dev, void *data,
case VIRTGPU_PARAM_CAPSET_QUERY_FIX:
value = 1;
break;
case VIRTGPU_PARAM_RESOURCE_BLOB:
value = vgdev->has_resource_blob == true ? 1 : 0;
break;
case VIRTGPU_PARAM_HOST_VISIBLE:
value = vgdev->has_host_visible == true ? 1 : 0;
break;
default:
return -EINVAL;
}
@@ -575,29 +581,44 @@ static int virtio_gpu_get_caps_ioctl(struct drm_device *dev,
return 0;
}

static int virtio_gpu_resource_create_v2_ioctl(struct drm_device *dev,
static int virtio_gpu_resource_create_blob_ioctl(struct drm_device *dev,
void *data, struct drm_file *file)
{
void *buf;
int ret, si, nents;
uint32_t handle = 0;
uint64_t pci_addr = 0;
struct scatterlist *sg;
size_t total_size, offset;
struct virtio_gpu_object *obj;
struct virtio_gpu_fence *fence;
struct virtio_gpu_mem_entry *ents;
struct drm_virtgpu_resource_create_v2 *rc_v2 = data;
struct drm_virtgpu_resource_create_blob *rc_blob = data;
struct virtio_gpu_object_params params = { 0 };
struct virtio_gpu_device *vgdev = dev->dev_private;
struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
void __user *args = u64_to_user_ptr(rc_v2->args);
bool mappable = rc_blob->flags & VIRTGPU_RES_BLOB_USE_MAPPABLE;
bool guest = rc_blob->flags & VIRTGPU_RES_BLOB_GUEST_MASK;

params.size = rc_blob->size;
params.blob_flags = rc_blob->flags;
params.blob = true;

if (rc_blob->cmd_size && vfpriv) {
void *buf;
void __user *cmd = u64_to_user_ptr(rc_blob->cmd);

buf = kzalloc(rc_blob->cmd_size, GFP_KERNEL);
if (!buf)
return -ENOMEM;

if (copy_from_user(buf, cmd, rc_blob->cmd_size)) {
kfree(buf);
return -EFAULT;
}

ret = total_size = offset = 0;
params.size = rc_v2->size;
params.guest_memory_type = rc_v2->guest_memory_type;
params.resource_v2 = true;
params.caching_type = rc_v2->caching_type;
virtio_gpu_cmd_submit(vgdev, buf, rc_blob->cmd_size,
vfpriv->ctx_id, NULL);
}

obj = virtio_gpu_alloc_object(dev, &params, NULL);
if (IS_ERR(obj))
@@ -609,7 +630,7 @@ static int virtio_gpu_resource_create_v2_ioctl(struct drm_device *dev,
goto err_free_obj;
}

if (rc_v2->guest_memory_type == VIRTGPU_MEMORY_HOST_COHERENT) {
if (!guest) {
nents = 0;
} else if (use_dma_api) {
obj->mapped = dma_map_sg(vgdev->vdev->dev.parent,
@@ -620,34 +641,14 @@ static int virtio_gpu_resource_create_v2_ioctl(struct drm_device *dev,
nents = obj->pages->nents;
}

total_size = nents * sizeof(struct virtio_gpu_mem_entry) +
rc_v2->args_size;

buf = kzalloc(total_size, GFP_KERNEL);
if (!buf) {
ret = -ENOMEM;
goto err_free_obj;
}

ents = buf;
if (rc_v2->guest_memory_type == VIRTGPU_MEMORY_HOST_COHERENT) {
pci_addr = vgdev->caddr + obj->tbo.offset;
} else {
ents = kzalloc(nents * sizeof(struct virtio_gpu_mem_entry), GFP_KERNEL);
if (guest) {
for_each_sg(obj->pages->sgl, sg, nents, si) {
ents[si].addr = cpu_to_le64(use_dma_api
? sg_dma_address(sg)
: sg_phys(sg));
ents[si].length = cpu_to_le32(sg->length);
ents[si].padding = 0;
offset += sizeof(struct virtio_gpu_mem_entry);
}
}

if (rc_v2->args_size) {
if (copy_from_user(buf + offset, args,
rc_v2->args_size)) {
ret = -EFAULT;
goto err_free_buf;
}
}

@@ -657,15 +658,18 @@ static int virtio_gpu_resource_create_v2_ioctl(struct drm_device *dev,
goto err_free_buf;
}

virtio_gpu_cmd_resource_create_blob(vgdev, obj, vfpriv->ctx_id,
rc_blob->flags, rc_blob->size,
rc_blob->memory_id, nents,
ents);

ret = drm_gem_handle_create(file, &obj->gem_base, &handle);
if (ret)
goto err_fence_put;

virtio_gpu_cmd_resource_create_v2(vgdev, obj->hw_res_handle,
rc_v2->guest_memory_type,
rc_v2->caching_type, rc_v2->size,
pci_addr, nents, rc_v2->args_size,
buf, total_size, fence);
if (!guest && mappable) {
virtio_gpu_cmd_map(vgdev, obj, obj->tbo.offset, fence);
}

/*
* No need to call virtio_gpu_object_reserve since the buffer is not
@@ -677,8 +681,8 @@ static int virtio_gpu_resource_create_v2_ioctl(struct drm_device *dev,
dma_fence_put(&fence->f);
drm_gem_object_put_unlocked(&obj->gem_base);

rc_v2->resource_id = obj->hw_res_handle;
rc_v2->gem_handle = handle;
rc_blob->res_handle = obj->hw_res_handle;
rc_blob->bo_handle = handle;
return 0;

err_fence_put:
@@ -690,94 +694,6 @@ static int virtio_gpu_resource_create_v2_ioctl(struct drm_device *dev,
return ret;
}

static int virtio_gpu_allocation_metadata_request_ioctl(struct drm_device *dev,
void *data, struct drm_file *file)
{
void *request;
uint32_t request_id = 0;
struct drm_virtgpu_allocation_metadata_request *amr = data;
struct virtio_gpu_device *vgdev = dev->dev_private;
struct virtio_gpu_allocation_metadata_response *response;
void __user *params = u64_to_user_ptr(amr->request);

if (!amr->request_size)
return -EINVAL;

request = kzalloc(amr->request_size, GFP_KERNEL);
if (!request) {
return -ENOMEM;
}

if (copy_from_user(request, params,
amr->request_size)) {
kfree(request);
return -EFAULT;
}

if (amr->response_size) {
response = kzalloc(sizeof(struct virtio_gpu_allocation_metadata_response) +
amr->response_size, GFP_KERNEL);
if (!response) {
kfree(request);
return -ENOMEM;
}

response->callback_done = false;
idr_preload(GFP_KERNEL);
spin_lock(&vgdev->request_idr_lock);
request_id = idr_alloc(&vgdev->request_idr, response, 1, 0,
GFP_NOWAIT);
spin_unlock(&vgdev->request_idr_lock);
idr_preload_end();
amr->request_id = request_id;
}

virtio_gpu_cmd_allocation_metadata(vgdev, request_id,
amr->request_size,
amr->response_size,
request,
NULL);
return 0;
}

static int virtio_gpu_allocation_metadata_response_ioctl(struct drm_device *dev,
void *data, struct drm_file *file)
{
int ret = -EINVAL;
struct virtio_gpu_allocation_metadata_response *response;
struct virtio_gpu_device *vgdev = dev->dev_private;
struct drm_virtgpu_allocation_metadata_response *rcr = data;
void __user *user_data = u64_to_user_ptr(rcr->response);

spin_lock(&vgdev->request_idr_lock);
response = idr_find(&vgdev->request_idr, rcr->request_id);
spin_unlock(&vgdev->request_idr_lock);

if (!response)
goto out;

ret = wait_event_interruptible(vgdev->resp_wq,
response->callback_done);
if (ret)
goto out_remove;

if (copy_to_user(user_data, &response->response_data,
rcr->response_size)) {
ret = -EFAULT;
goto out_remove;
}

ret = 0;

out_remove:
spin_lock(&vgdev->request_idr_lock);
response = idr_remove(&vgdev->request_idr, rcr->request_id);
spin_unlock(&vgdev->request_idr_lock);
kfree(response);
out:
return ret;
}

struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS] = {
DRM_IOCTL_DEF_DRV(VIRTGPU_MAP, virtio_gpu_map_ioctl,
DRM_AUTH | DRM_RENDER_ALLOW),
@@ -811,15 +727,7 @@ struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS] = {
DRM_IOCTL_DEF_DRV(VIRTGPU_GET_CAPS, virtio_gpu_get_caps_ioctl,
DRM_AUTH | DRM_RENDER_ALLOW),

DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_CREATE_V2,
virtio_gpu_resource_create_v2_ioctl,
DRM_AUTH | DRM_RENDER_ALLOW),

DRM_IOCTL_DEF_DRV(VIRTGPU_ALLOCATION_METADATA_REQUEST,
virtio_gpu_allocation_metadata_request_ioctl,
DRM_AUTH | DRM_RENDER_ALLOW),

DRM_IOCTL_DEF_DRV(VIRTGPU_ALLOCATION_METADATA_RESPONSE,
virtio_gpu_allocation_metadata_response_ioctl,
DRM_AUTH | DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_CREATE_BLOB,
virtio_gpu_resource_create_blob_ioctl,
DRM_RENDER_ALLOW)
};
