
Commit 3311d9c

cccclai authored and facebook-github-bot committed

Add buck file for static llama (#7276)

Summary: As title, moves the buck file related changes in D66107964 to here.

Reviewed By: kirklandsign

Differential Revision: D67057242
1 parent e89e320

File tree: 3 files changed (+95, -7 lines)


examples/qualcomm/TARGETS

Lines changed: 12 additions & 2 deletions
@@ -1,11 +1,12 @@
 # Any targets that should be shared between fbcode and xplat must be defined in
 # targets.bzl. This file can contain fbcode-only targets.

-load("@fbsource//xplat/executorch/build:runtime_wrapper.bzl", "runtime")
+load("@fbcode_macros//build_defs:python_library.bzl", "python_library")
+load("@fbcode_macros//build_defs:python_binary.bzl", "python_binary")

 oncall("executorch")

-runtime.python_binary(
+python_binary(
     name = "export_example",
     srcs = ["scripts/export_example.py"],
     main_function = ".scripts.export_example.main",
@@ -20,3 +21,12 @@ runtime.python_binary(
         "//executorch/extension/export_util:export_util",
     ],
 )
+
+python_library(
+    name = "utils",
+    srcs = ["utils.py"],
+    deps = [
+        "//executorch/backends/qualcomm/partition:partition",
+        "//executorch/backends/qualcomm/quantizer:quantizer",
+    ],
+)
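
The new "utils" library bundles utils.py together with the Qualcomm partitioner and quantizer dependencies, so other example targets can pull all of that in through a single dep. A minimal sketch of a downstream consumer in some other TARGETS file; the target name, source file, and main_function below are illustrative and not part of this commit:

load("@fbcode_macros//build_defs:python_binary.bzl", "python_binary")

# Hypothetical consumer target; only the dep on
# //executorch/examples/qualcomm:utils comes from this commit.
python_binary(
    name = "my_export",                  # illustrative name
    srcs = ["my_export.py"],             # illustrative source
    main_function = ".my_export.main",   # illustrative entry point
    deps = [
        "//executorch/examples/qualcomm:utils",
    ],
)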
Lines changed: 43 additions & 0 deletions (new file)
@@ -0,0 +1,43 @@
+load("@fbcode_macros//build_defs:python_library.bzl", "python_library")
+load("@fbsource//xplat/executorch/backends/qualcomm/qnn_version.bzl", "get_qnn_library_verision")
+load("@fbcode_macros//build_defs:python_binary.bzl", "python_binary")
+load("@fbsource//xplat/executorch/build:runtime_wrapper.bzl", "runtime")
+
+oncall("executorch")
+
+
+python_library(
+    name = "static_llama",
+    srcs = [
+        "model/static_llama.py",
+    ],
+    deps = [
+        "//caffe2:torch",
+    ],
+)
+
+python_binary(
+    name = "llama",
+    srcs = ["llama.py"],
+    main_function = "executorch.examples.qualcomm.oss_scripts.llama2.llama.main",
+    deps = [
+        ":static_llama",
+        "//caffe2:torch",
+        "//executorch/extension/pybindings:aten_lib",
+        "//executorch/backends/qualcomm/partition:partition",
+        "//executorch/backends/qualcomm/quantizer:quantizer",
+        "//executorch/devtools:lib",
+        "//executorch/examples/models:models",
+        "//executorch/examples/qualcomm:utils",
+        "//executorch/extension/export_util:export_util",
+        "//executorch/extension/llm/export:export_lib",
+    ],
+)
+
+runtime.command_alias(
+    name = "llama_qnn",
+    env = {
+        "LD_LIBRARY_PATH": "$(location fbsource//third-party/qualcomm/qnn/qnn-{0}:qnn_offline_compile_libs)".format(get_qnn_library_verision()),
+    },
+    exe = ":llama",
+)
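
The runtime.command_alias target wraps the :llama binary so that running llama_qnn (for example via buck run) sets LD_LIBRARY_PATH to the QNN offline-compile libraries before launching the exporter; the $(location ...) macro resolves to the on-disk path of the qnn_offline_compile_libs target for whichever version string get_qnn_library_verision() (spelled as in the source) returns. The helper itself lives in qnn_version.bzl and is not part of this diff; a rough stand-in sketch, with an assumed version number, could look like:

# Hypothetical stand-in for the helper loaded from qnn_version.bzl.
# The real pinned QNN version in fbsource may differ.
def get_qnn_library_verision():
    # The returned string is interpolated into the qnn-{0} cell path above.
    return "2.26"  # assumed version, not taken from this commit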
Lines changed: 40 additions & 5 deletions
@@ -1,8 +1,43 @@
-# Any targets that should be shared between fbcode and xplat must be defined in
-# targets.bzl. This file can contain xplat-only targets.
-
-load(":targets.bzl", "define_common_targets")
+load("@fbcode_macros//build_defs:python_library.bzl", "python_library")
+load("@fbsource//xplat/executorch/backends/qualcomm/qnn_version.bzl", "get_qnn_library_verision")
+load("@fbcode_macros//build_defs:python_binary.bzl", "python_binary")
+load("@fbsource//xplat/executorch/build:runtime_wrapper.bzl", "runtime")

 oncall("executorch")

-define_common_targets()
+
+python_library(
+    name = "static_llama",
+    srcs = [
+        "model/static_llama.py",
+    ],
+    deps = [
+        "//caffe2:torch",
+    ],
+)
+
+python_binary(
+    name = "llama",
+    srcs = ["llama.py"],
+    main_function = "executorch.examples.qualcomm.oss_scripts.llama2.llama.main",
+    deps = [
+        ":static_llama",
+        "//caffe2:torch",
+        "//executorch/extension/pybindings:aten_lib",
+        "//executorch/backends/qualcomm/partition:partition",
+        "//executorch/backends/qualcomm/quantizer:quantizer",
+        "//executorch/devtools:lib",
+        "//executorch/examples/models:models",
+        "//executorch/examples/qualcomm:utils",
+        "//executorch/extension/export_util:export_util",
+        "//executorch/extension/llm/export:export_lib",
+    ],
+)
+
+runtime.command_alias(
+    name = "llama_qnn",
+    env = {
+        "LD_LIBRARY_PATH": "$(location fbsource//third-party/qualcomm/qnn/qnn-{0}:qnn_offline_compile_libs)".format(get_qnn_library_verision()),
+    },
+    exe = ":llama",
+)
