Skip to content

Commit c3f2ccb

Browse files
cccclai authored and facebook-github-bot committed
Add buck file for static llama
Summary: As title, moves the buck-file-related changes in D66107964 to here.

Differential Revision: D67057242
1 parent 343aa0c commit c3f2ccb

File tree

3 files changed

+85
-5
lines changed

3 files changed

+85
-5
lines changed

examples/qualcomm/TARGETS

Lines changed: 9 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -20,3 +20,12 @@ runtime.python_binary(
2020
"//executorch/extension/export_util:export_util",
2121
],
2222
)
23+
24+
# Shared helper library for the Qualcomm examples. Exposes utils.py so the
# example binaries in this tree can import it as
# //executorch/examples/qualcomm:utils.
python_library(
    name = "utils",
    srcs = ["utils.py"],
    deps = [
        # utils.py drives partitioning and quantization for the QNN backend.
        "//executorch/backends/qualcomm/partition:partition",
        "//executorch/backends/qualcomm/quantizer:quantizer",
    ],
)
Lines changed: 35 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,35 @@
1+
# Any targets that should be shared between fbcode and xplat must be defined in
# targets.bzl. This file can contain xplat-only targets.

load("@fbcode_macros//build_defs:python_binary.bzl", "python_binary")

# NOTE(review): "verision" is a typo, but it matches the symbol actually
# exported by qnn_version.bzl — do not rename here without fixing the .bzl too.
load("@fbsource//xplat/executorch/backends/qualcomm/qnn_version.bzl", "get_qnn_library_verision")
load("@fbsource//xplat/executorch/build:runtime_wrapper.bzl", "runtime")
7+
8+
# Export binary for the static llama (llama3_2) flow on Qualcomm backends.
# NOTE(review): this reuses the model definition from the llama2 example
# (oss_scripts/llama2:static_llama) — confirm the cross-example sharing is
# intentional rather than a stale path.
python_binary(
    name = "llama",
    srcs = ["llama.py"],
    main_function = "executorch.examples.qualcomm.oss_scripts.llama3_2.llama.main",
    # model_sharding_py is preloaded — presumably so its custom ops are
    # registered before the main module imports them; confirm.
    preload_deps = [
        "//executorch/extension/llm/custom_ops:model_sharding_py",
    ],
    deps = [
        "//executorch/examples/qualcomm/oss_scripts/llama2:static_llama",
        "//caffe2:torch",
        "//executorch/extension/pybindings:aten_lib",
        "//executorch/backends/qualcomm/partition:partition",
        "//executorch/backends/qualcomm/quantizer:quantizer",
        "//executorch/devtools:lib",
        "//executorch/examples/models:models",
        "//executorch/examples/qualcomm:utils",
        "//executorch/extension/export_util:export_util",
        "//executorch/extension/llm/export:export_lib",
    ],
)
28+
29+
# Convenience alias: runs :llama with the QNN offline-compile shared libraries
# (for the configured QNN version) placed on LD_LIBRARY_PATH.
runtime.command_alias(
    name = "llama_qnn",
    env = {
        "LD_LIBRARY_PATH": "$(location fbsource//third-party/qualcomm/qnn/qnn-{0}:qnn_offline_compile_libs)".format(get_qnn_library_verision()),
    },
    exe = ":llama",
)
Lines changed: 41 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -1,8 +1,44 @@
1-
# Any targets that should be shared between fbcode and xplat must be defined in
2-
# targets.bzl. This file can contain xplat-only targets.
3-
4-
load(":targets.bzl", "define_common_targets")
1+
load("@fbcode_macros//build_defs:python_binary.bzl", "python_binary")
load("@fbcode_macros//build_defs:python_library.bzl", "python_library")
load("@fbcode_macros//build_defs:python_unittest.bzl", "python_unittest")

# NOTE(review): "verision" is a typo, but it matches the symbol actually
# exported by qnn_version.bzl — keep it consistent with that file.
load("@fbsource//xplat/executorch/backends/qualcomm/qnn_version.bzl", "get_qnn_library_verision")
load("@fbsource//xplat/executorch/build:runtime_wrapper.bzl", "runtime")

oncall("executorch")
78

8-
define_common_targets()
9+
10+
# Static llama model definition (pure PyTorch), shared with the llama3_2
# example binary via this target.
python_library(
    name = "static_llama",
    srcs = [
        "model/static_llama.py",
    ],
    deps = [
        "//caffe2:torch",
    ],
)
19+
20+
# Export binary for the static llama (llama2) flow on Qualcomm backends.
python_binary(
    name = "llama",
    srcs = ["llama.py"],
    main_function = "executorch.examples.qualcomm.oss_scripts.llama2.llama.main",
    deps = [
        # Local model definition plus the export/quantization toolchain.
        ":static_llama",
        "//caffe2:torch",
        "//executorch/extension/pybindings:aten_lib",
        "//executorch/backends/qualcomm/partition:partition",
        "//executorch/backends/qualcomm/quantizer:quantizer",
        "//executorch/devtools:lib",
        "//executorch/examples/models:models",
        "//executorch/examples/qualcomm:utils",
        "//executorch/extension/export_util:export_util",
        "//executorch/extension/llm/export:export_lib",
    ],
)
37+
38+
# Convenience alias: same as :llama but with the QNN offline-compile libraries
# for the configured QNN version exposed via LD_LIBRARY_PATH.
runtime.command_alias(
    name = "llama_qnn",
    env = {
        "LD_LIBRARY_PATH": "$(location fbsource//third-party/qualcomm/qnn/qnn-{0}:qnn_offline_compile_libs)".format(get_qnn_library_verision()),
    },
    exe = ":llama",
)

0 commit comments

Comments
 (0)