# Buck build file for the Qualcomm llama3_2 example scripts.
# NOTE(review): this file was recovered from a scraped commit diff; the body
# below is the post-change ("+") side of that diff with the diff markers and
# page artifacts stripped.

# Loads sorted lexicographically (buildifier convention).
# NOTE(review): `get_qnn_library_verision` is the (misspelled) symbol actually
# exported by qnn_version.bzl — do not "fix" the spelling here or the load
# will fail to resolve.
# NOTE(review): `python_library` is loaded but no python_library target is
# visible in this file; confirm it is unused before removing the load.
load("@fbcode_macros//build_defs:python_binary.bzl", "python_binary")
load("@fbcode_macros//build_defs:python_library.bzl", "python_library")
load("@fbsource//xplat/executorch/backends/qualcomm/qnn_version.bzl", "get_qnn_library_verision")
load("@fbsource//xplat/executorch/build:runtime_wrapper.bzl", "runtime")

oncall("executorch")
# Exporter entry point for the Llama 3.2 example on the Qualcomm backend.
# `main_function` resolves to main() inside llama.py in this directory.
python_binary(
    name = "llama",
    srcs = ["llama.py"],
    main_function = "executorch.examples.qualcomm.oss_scripts.llama3_2.llama.main",
    # deps sorted lexicographically (buildifier convention); labels unchanged.
    deps = [
        "//caffe2:torch",
        "//executorch/backends/qualcomm/partition:partition",
        "//executorch/backends/qualcomm/quantizer:quantizer",
        "//executorch/devtools:lib",
        "//executorch/examples/models:models",
        "//executorch/examples/qualcomm:utils",
        # Reuses the static llama model definition from the llama2 example.
        "//executorch/examples/qualcomm/oss_scripts/llama2:static_llama",
        "//executorch/extension/export_util:export_util",
        "//executorch/extension/llm/export:export_lib",
        "//executorch/extension/pybindings:aten_lib",
    ],
)

# Convenience runner: executes :llama with LD_LIBRARY_PATH pointing at the
# QNN offline-compile libraries for the pinned QNN SDK version.
runtime.command_alias(
    name = "llama_qnn",
    env = {
        # `get_qnn_library_verision` deliberately keeps the misspelling of the
        # symbol exported by qnn_version.bzl; renaming only this call site
        # would break against the load statement.
        "LD_LIBRARY_PATH": "$(location fbsource//third-party/qualcomm/qnn/qnn-{0}:qnn_offline_compile_libs)".format(get_qnn_library_verision()),
    },
    exe = ":llama",
)