This repository was archived by the owner on Jul 24, 2024. It is now read-only.
Merged
1 change: 1 addition & 0 deletions .github/workflows/test-single-config.yml
@@ -21,6 +21,7 @@ on:
 - torch
 - dynamo
 - torch_mlir
+- torch_mlir_xsmm
 - torchscript
 - torchscript_onednn
 - ipex
12 changes: 2 additions & 10 deletions .github/workflows/test.yml
@@ -40,7 +40,8 @@ jobs:
 {device: 'cpu', compiler: 'ipex'},
 {device: 'cpu', compiler: 'ipex_onednn_graph'},
 # {device: 'xpu', compiler: 'ipex'},
-{device: 'cpu', compiler: 'torch_mlir'}
+{device: 'cpu', compiler: 'torch_mlir'},
+{device: 'cpu', compiler: 'torch_mlir_xsmm'}
 ]
 test_script: ${{ fromJson(inputs.test_scripts) }}
 fail-fast: false
@@ -56,12 +57,3 @@ jobs:
 test_script: ${{ matrix.test_script }}
 secrets:
 DB_URL: ${{ secrets.DB_URL }}
-
-shutdown:
-needs: mlp_test
-if: ${{ contains(inputs.runner_type, 'amd') }} && inputs.shutdown_cloud_runner
-runs-on: ${{ inputs.runner_type }}
-steps:
-- name: shutdown
-shell: bash -el {0}
-run: sudo shutdown -h +2
1 change: 1 addition & 0 deletions dl_bench/cli/launcher.py
@@ -85,6 +85,7 @@ def parse_args():
 "ipex",
 "ipex_onednn_graph",
 "torch_mlir",
+"torch_mlir_xsmm",
 ],
 help="Compilation mode to use. No compilation by default.",
 )
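For reference, a minimal sketch of the argparse choice this change enables. Only the choices list and help text come from the hunk above; the flag name "--compiler" and the standalone parser are assumptions for illustration, chosen to match the "compiler" key used in the workflow matrix.

# Hypothetical, self-contained sketch of the launcher's new compile-mode choice.
# The flag name "--compiler" is assumed; only the choices and help text are from the diff.
import argparse

parser = argparse.ArgumentParser(description="dl_bench launcher (sketch)")
parser.add_argument(
    "--compiler",
    default=None,
    choices=[
        "ipex",
        "ipex_onednn_graph",
        "torch_mlir",
        "torch_mlir_xsmm",
    ],
    help="Compilation mode to use. No compilation by default.",
)

# Example: select the new XSMM-backed torch-mlir path.
args = parser.parse_args(["--compiler", "torch_mlir_xsmm"])
print(args.compiler)  # torch_mlir_xsmm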
7 changes: 5 additions & 2 deletions dl_bench/utils.py
@@ -268,7 +268,7 @@ def _compile_model(compile_mode: str, device, model: Module, sample_input, dtype

 compiled_model = dynamo.optimize(be.refbackend_torchdynamo_backend)(model)
 print("Compiled with torch_mlir (torchscript, inference)")
-elif compile_mode == "torch_mlir":
+elif compile_mode == "torch_mlir" or compile_mode == "torch_mlir_xsmm":
 from torch_mlir._dynamo_fx_importer import import_fx_graph_as_func
 from torch_mlir_e2e_test.configs.torchdynamo import jit
 from torch_mlir_e2e_test.framework import TestOptions
@@ -277,6 +277,9 @@ def _compile_model(compile_mode: str, device, model: Module, sample_input, dtype
 from torch_mlir_e2e_test.linalg_on_tensors_backends.cpuprotobackend import (
 CpuProtoLinalgOnTensorsBackend,
 )
+from torch_mlir_e2e_test.linalg_on_tensors_backends.xsmmprotobackend import (
+XsmmProtoLinalgOnTensorsBackend,
+)
 import torch.utils._pytree as pytree

 # debug_timer seems to cause problems:
@@ -290,7 +293,7 @@ def _compile_model(compile_mode: str, device, model: Module, sample_input, dtype
 opts,
 output_type="linalg-on-tensors",
 )
-backend = CpuProtoLinalgOnTensorsBackend(opts)
+backend = CpuProtoLinalgOnTensorsBackend(opts) if compile_mode == "torch_mlir" else XsmmProtoLinalgOnTensorsBackend(opts)
 # backend = RefBackendLinalgOnTensorsBackend()
 module = backend.compile(module)
 backend_module = backend.load(module)
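To summarize the dispatch this diff adds to _compile_model: "torch_mlir" keeps the existing CpuProtoLinalgOnTensorsBackend, while the new "torch_mlir_xsmm" mode sends the same linalg-on-tensors module through XsmmProtoLinalgOnTensorsBackend. Below is a small sketch of that selection step, assuming both backends take the same TestOptions instance as shown above; the helper name select_backend is introduced here for illustration only.

from torch_mlir_e2e_test.linalg_on_tensors_backends.cpuprotobackend import (
    CpuProtoLinalgOnTensorsBackend,
)
from torch_mlir_e2e_test.linalg_on_tensors_backends.xsmmprotobackend import (
    XsmmProtoLinalgOnTensorsBackend,
)


def select_backend(compile_mode, opts):
    # Same dispatch as the inline conditional in the diff: the CPU proto
    # backend for "torch_mlir", the XSMM proto backend for "torch_mlir_xsmm".
    if compile_mode == "torch_mlir":
        return CpuProtoLinalgOnTensorsBackend(opts)
    if compile_mode == "torch_mlir_xsmm":
        return XsmmProtoLinalgOnTensorsBackend(opts)
    raise ValueError(f"unsupported compile_mode: {compile_mode}")

The compiled module is then lowered and loaded exactly as before (backend.compile(module) followed by backend.load(module)), so the only behavioral difference between the two modes is the backend chosen here.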