|
5 | 5 |
|
6 | 6 | #include <drm/amdxdna_accel.h> |
7 | 7 | #include <drm/drm_device.h> |
| 8 | +#include <drm/drm_drv.h> |
8 | 9 | #include <drm/drm_gem_shmem_helper.h> |
9 | 10 | #include <drm/drm_managed.h> |
10 | 11 | #include <drm/drm_print.h> |
@@ -525,11 +526,232 @@ static void aie2_fini(struct amdxdna_dev *xdna) |
525 | 526 | pci_free_irq_vectors(pdev); |
526 | 527 | } |
527 | 528 |
|
| 529 | +static int aie2_get_aie_status(struct amdxdna_client *client, |
| 530 | + struct amdxdna_drm_get_info *args) |
| 531 | +{ |
| 532 | + struct amdxdna_drm_query_aie_status status; |
| 533 | + struct amdxdna_dev *xdna = client->xdna; |
| 534 | + struct amdxdna_dev_hdl *ndev; |
| 535 | + int ret; |
| 536 | + |
| 537 | + ndev = xdna->dev_handle; |
| 538 | + if (copy_from_user(&status, u64_to_user_ptr(args->buffer), sizeof(status))) { |
| 539 | + XDNA_ERR(xdna, "Failed to copy AIE request into kernel"); |
| 540 | + return -EFAULT; |
| 541 | + } |
| 542 | + |
| 543 | + if (ndev->metadata.cols * ndev->metadata.size < status.buffer_size) { |
| 544 | + XDNA_ERR(xdna, "Invalid buffer size. Given Size: %u. Need Size: %u.", |
| 545 | + status.buffer_size, ndev->metadata.cols * ndev->metadata.size); |
| 546 | + return -EINVAL; |
| 547 | + } |
| 548 | + |
| 549 | + ret = aie2_query_status(ndev, u64_to_user_ptr(status.buffer), |
| 550 | + status.buffer_size, &status.cols_filled); |
| 551 | + if (ret) { |
| 552 | + XDNA_ERR(xdna, "Failed to get AIE status info. Ret: %d", ret); |
| 553 | + return ret; |
| 554 | + } |
| 555 | + |
| 556 | + if (copy_to_user(u64_to_user_ptr(args->buffer), &status, sizeof(status))) { |
| 557 | + XDNA_ERR(xdna, "Failed to copy AIE request info to user space"); |
| 558 | + return -EFAULT; |
| 559 | + } |
| 560 | + |
| 561 | + return 0; |
| 562 | +} |
| 563 | + |
| 564 | +static int aie2_get_aie_metadata(struct amdxdna_client *client, |
| 565 | + struct amdxdna_drm_get_info *args) |
| 566 | +{ |
| 567 | + struct amdxdna_drm_query_aie_metadata *meta; |
| 568 | + struct amdxdna_dev *xdna = client->xdna; |
| 569 | + struct amdxdna_dev_hdl *ndev; |
| 570 | + int ret = 0; |
| 571 | + |
| 572 | + ndev = xdna->dev_handle; |
| 573 | + meta = kzalloc(sizeof(*meta), GFP_KERNEL); |
| 574 | + if (!meta) |
| 575 | + return -ENOMEM; |
| 576 | + |
| 577 | + meta->col_size = ndev->metadata.size; |
| 578 | + meta->cols = ndev->metadata.cols; |
| 579 | + meta->rows = ndev->metadata.rows; |
| 580 | + |
| 581 | + meta->version.major = ndev->metadata.version.major; |
| 582 | + meta->version.minor = ndev->metadata.version.minor; |
| 583 | + |
| 584 | + meta->core.row_count = ndev->metadata.core.row_count; |
| 585 | + meta->core.row_start = ndev->metadata.core.row_start; |
| 586 | + meta->core.dma_channel_count = ndev->metadata.core.dma_channel_count; |
| 587 | + meta->core.lock_count = ndev->metadata.core.lock_count; |
| 588 | + meta->core.event_reg_count = ndev->metadata.core.event_reg_count; |
| 589 | + |
| 590 | + meta->mem.row_count = ndev->metadata.mem.row_count; |
| 591 | + meta->mem.row_start = ndev->metadata.mem.row_start; |
| 592 | + meta->mem.dma_channel_count = ndev->metadata.mem.dma_channel_count; |
| 593 | + meta->mem.lock_count = ndev->metadata.mem.lock_count; |
| 594 | + meta->mem.event_reg_count = ndev->metadata.mem.event_reg_count; |
| 595 | + |
| 596 | + meta->shim.row_count = ndev->metadata.shim.row_count; |
| 597 | + meta->shim.row_start = ndev->metadata.shim.row_start; |
| 598 | + meta->shim.dma_channel_count = ndev->metadata.shim.dma_channel_count; |
| 599 | + meta->shim.lock_count = ndev->metadata.shim.lock_count; |
| 600 | + meta->shim.event_reg_count = ndev->metadata.shim.event_reg_count; |
| 601 | + |
| 602 | + if (copy_to_user(u64_to_user_ptr(args->buffer), meta, sizeof(*meta))) |
| 603 | + ret = -EFAULT; |
| 604 | + |
| 605 | + kfree(meta); |
| 606 | + return ret; |
| 607 | +} |
| 608 | + |
| 609 | +static int aie2_get_aie_version(struct amdxdna_client *client, |
| 610 | + struct amdxdna_drm_get_info *args) |
| 611 | +{ |
| 612 | + struct amdxdna_drm_query_aie_version version; |
| 613 | + struct amdxdna_dev *xdna = client->xdna; |
| 614 | + struct amdxdna_dev_hdl *ndev; |
| 615 | + |
| 616 | + ndev = xdna->dev_handle; |
| 617 | + version.major = ndev->version.major; |
| 618 | + version.minor = ndev->version.minor; |
| 619 | + |
| 620 | + if (copy_to_user(u64_to_user_ptr(args->buffer), &version, sizeof(version))) |
| 621 | + return -EFAULT; |
| 622 | + |
| 623 | + return 0; |
| 624 | +} |
| 625 | + |
| 626 | +static int aie2_get_clock_metadata(struct amdxdna_client *client, |
| 627 | + struct amdxdna_drm_get_info *args) |
| 628 | +{ |
| 629 | + struct amdxdna_drm_query_clock_metadata *clock; |
| 630 | + struct amdxdna_dev *xdna = client->xdna; |
| 631 | + struct amdxdna_dev_hdl *ndev; |
| 632 | + int ret = 0; |
| 633 | + |
| 634 | + ndev = xdna->dev_handle; |
| 635 | + clock = kzalloc(sizeof(*clock), GFP_KERNEL); |
| 636 | + if (!clock) |
| 637 | + return -ENOMEM; |
| 638 | + |
| 639 | + memcpy(clock->mp_npu_clock.name, ndev->mp_npu_clock.name, |
| 640 | + sizeof(clock->mp_npu_clock.name)); |
| 641 | + clock->mp_npu_clock.freq_mhz = ndev->mp_npu_clock.freq_mhz; |
| 642 | + memcpy(clock->h_clock.name, ndev->h_clock.name, sizeof(clock->h_clock.name)); |
| 643 | + clock->h_clock.freq_mhz = ndev->h_clock.freq_mhz; |
| 644 | + |
| 645 | + if (copy_to_user(u64_to_user_ptr(args->buffer), clock, sizeof(*clock))) |
| 646 | + ret = -EFAULT; |
| 647 | + |
| 648 | + kfree(clock); |
| 649 | + return ret; |
| 650 | +} |
| 651 | + |
/*
 * Snapshot every hardware context of every open client into the user array
 * at args->buffer (one struct amdxdna_drm_query_hwctx per context).
 *
 * If the user buffer is too small, iteration continues without copying so
 * the total size needed can still be computed; in every case args->buffer_size
 * is rewritten on exit to the number of bytes a full snapshot requires, so a
 * caller can retry with a correctly sized buffer.
 *
 * Return: 0 on success, -ENOMEM on allocation failure, -EFAULT if a copy to
 * user space faults, -EINVAL if the supplied buffer was too small.
 */
static int aie2_get_hwctx_status(struct amdxdna_client *client,
				 struct amdxdna_drm_get_info *args)
{
	struct amdxdna_drm_query_hwctx __user *buf;
	struct amdxdna_dev *xdna = client->xdna;
	struct amdxdna_drm_query_hwctx *tmp;
	struct amdxdna_client *tmp_client;
	struct amdxdna_hwctx *hwctx;
	bool overflow = false;
	u32 req_bytes = 0;	/* bytes needed for a complete snapshot */
	u32 hw_i = 0;		/* index of the next user-array slot to fill */
	int ret = 0;
	int next;
	int idx;

	/* Caller must hold dev_lock: it keeps the client list stable. */
	drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock));

	/* Kernel bounce buffer; reused (memset below) for each context. */
	tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;

	buf = u64_to_user_ptr(args->buffer);
	list_for_each_entry(tmp_client, &xdna->client_list, node) {
		/* SRCU read lock protects this client's hwctx IDR while we walk it. */
		idx = srcu_read_lock(&tmp_client->hwctx_srcu);
		next = 0;
		idr_for_each_entry_continue(&tmp_client->hwctx_idr, hwctx, next) {
			req_bytes += sizeof(*tmp);
			if (args->buffer_size < req_bytes) {
				/* Continue iterating to get the required size */
				overflow = true;
				continue;
			}

			memset(tmp, 0, sizeof(*tmp));
			tmp->pid = tmp_client->pid;
			tmp->context_id = hwctx->id;
			tmp->start_col = hwctx->start_col;
			tmp->num_col = hwctx->num_col;
			tmp->command_submissions = hwctx->priv->seq;
			tmp->command_completions = hwctx->priv->completed;

			if (copy_to_user(&buf[hw_i], tmp, sizeof(*tmp))) {
				ret = -EFAULT;
				/* Drop the read lock before bailing out. */
				srcu_read_unlock(&tmp_client->hwctx_srcu, idx);
				goto out;
			}
			hw_i++;
		}
		srcu_read_unlock(&tmp_client->hwctx_srcu, idx);
	}

	if (overflow) {
		XDNA_ERR(xdna, "Invalid buffer size. Given: %u Need: %u.",
			 args->buffer_size, req_bytes);
		ret = -EINVAL;
	}

out:
	kfree(tmp);
	/* Always tell the caller the size a full snapshot needs. */
	args->buffer_size = req_bytes;
	return ret;
}
| 714 | + |
| 715 | +static int aie2_get_info(struct amdxdna_client *client, struct amdxdna_drm_get_info *args) |
| 716 | +{ |
| 717 | + struct amdxdna_dev *xdna = client->xdna; |
| 718 | + int ret, idx; |
| 719 | + |
| 720 | + if (!drm_dev_enter(&xdna->ddev, &idx)) |
| 721 | + return -ENODEV; |
| 722 | + |
| 723 | + switch (args->param) { |
| 724 | + case DRM_AMDXDNA_QUERY_AIE_STATUS: |
| 725 | + ret = aie2_get_aie_status(client, args); |
| 726 | + break; |
| 727 | + case DRM_AMDXDNA_QUERY_AIE_METADATA: |
| 728 | + ret = aie2_get_aie_metadata(client, args); |
| 729 | + break; |
| 730 | + case DRM_AMDXDNA_QUERY_AIE_VERSION: |
| 731 | + ret = aie2_get_aie_version(client, args); |
| 732 | + break; |
| 733 | + case DRM_AMDXDNA_QUERY_CLOCK_METADATA: |
| 734 | + ret = aie2_get_clock_metadata(client, args); |
| 735 | + break; |
| 736 | + case DRM_AMDXDNA_QUERY_HW_CONTEXTS: |
| 737 | + ret = aie2_get_hwctx_status(client, args); |
| 738 | + break; |
| 739 | + default: |
| 740 | + XDNA_ERR(xdna, "Not supported request parameter %u", args->param); |
| 741 | + ret = -EOPNOTSUPP; |
| 742 | + } |
| 743 | + XDNA_DBG(xdna, "Got param %d", args->param); |
| 744 | + |
| 745 | + drm_dev_exit(idx); |
| 746 | + return ret; |
| 747 | +} |
| 748 | + |
528 | 749 | const struct amdxdna_dev_ops aie2_ops = { |
529 | 750 | .init = aie2_init, |
530 | 751 | .fini = aie2_fini, |
531 | 752 | .resume = aie2_hw_start, |
532 | 753 | .suspend = aie2_hw_stop, |
| 754 | + .get_aie_info = aie2_get_info, |
533 | 755 | .hwctx_init = aie2_hwctx_init, |
534 | 756 | .hwctx_fini = aie2_hwctx_fini, |
535 | 757 | .hwctx_config = aie2_hwctx_config, |
|
0 commit comments