import os
import re
from typing import List

import setuptools
from setuptools import find_packages

here = os.path.abspath(os.path.dirname(__file__))

# Anchor file reads to the directory containing setup.py (previously `here`
# was computed but unused) so the build works regardless of the current
# working directory.
with open(os.path.join(here, "README.md"), "r", encoding="utf-8") as fh:
    long_description = fh.read()

# Single-source the package version from llm_guard/version.py.
try:
    filepath = os.path.join(here, "llm_guard", "version.py")
    with open(filepath) as version_file:
        (__version__,) = re.findall('__version__ = "(.*)"', version_file.read())
except Exception as error:
    # `assert False` is stripped when Python runs with -O; raise explicitly.
    raise RuntimeError(f"Could not read version from '{filepath}': {error}") from error
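
# For reference, the regex above expects llm_guard/version.py to contain a
# single assignment of this shape (hypothetical contents, not shown here):
#
#     __version__ = "0.0.0"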


def parse_requirements(file_name: str) -> List[str]:
    """Read a pip requirements file, skipping blank lines and comments."""
    with open(file_name) as f:
        lines = [line.strip() for line in f]
        # Strip before testing for "#" so indented comment lines are skipped too.
        return [line for line in lines if line and not line.startswith("#")]
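
# Hypothetical example (not taken from this repo's requirements files): given
#
#     # pinned for stability
#     onnx
#
#     onnxruntime>=1.11.0
#
# parse_requirements returns ["onnx", "onnxruntime>=1.11.0"]; blank and
# comment-only lines are dropped, every other line is kept verbatim.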

TESTS_REQUIRE = parse_requirements("requirements-dev.txt")

EXTRAS_REQUIRE = {
    "onnxruntime": [
        "onnx",
        "onnxruntime>=1.11.0",
        "optimum[onnxruntime]",
    ],
    "tests": TESTS_REQUIRE,
}

setuptools.setup(
    name="llm-guard",
    version=__version__,
    author="Laiyer.ai",
    author_email="hello@laiyer.ai",
    description=(
        "LLM-Guard is a comprehensive tool designed to fortify the security of Large Language Models (LLMs). By "
        "offering sanitization, detection of harmful language, prevention of data leakage, and resistance against "
        "prompt injection attacks, LLM-Guard ensures that your interactions with LLMs remain safe and secure."
    ),
    license="MIT",
    keywords="llm, language model, security, adversarial attacks, prompt injection, prompt leakage, PII detection, "
    "self-hardening, firewall",
    packages=find_packages(
        include=["llm_guard", "llm_guard.*"],
        exclude=["tests", "tests.*", "llm_guard_api", "llm_guard_api.*"],
    ),
    install_requires=parse_requirements("requirements.txt"),
    extras_require=EXTRAS_REQUIRE,
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/laiyer-ai/llm-guard",
    package_data={
        "llm_guard": ["**/*.json"],
    },
    python_requires=">=3.9",
)
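
# A minimal usage sketch (assumed commands; this reflects standard setuptools
# behaviour rather than anything defined in this file):
#
#   pip install .                 # core install, deps from requirements.txt
#   pip install ".[onnxruntime]"  # additionally pulls the ONNX Runtime extra
#   pip install ".[tests]"        # additionally pulls requirements-dev.txt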