Skip to content

Commit

Permalink
chore: added a help message to makefile (#6861)
Browse files Browse the repository at this point in the history
* makefile with help

* format fix with black
  • Loading branch information
jjmachan committed Jul 12, 2023
1 parent 8a45f98 commit 9a48208
Show file tree
Hide file tree
Showing 6 changed files with 10 additions and 11 deletions.
15 changes: 6 additions & 9 deletions Makefile
Original file line number Diff line number Diff line change
@@ -1,18 +1,15 @@
# Self-documenting Makefile: every command target carries a trailing
# `## description` that the `help` target extracts and prints.

GIT_ROOT ?= $(shell git rev-parse --show-toplevel)

# Declare ALL command targets phony so a file with the same name can never
# shadow them (the original declared only `format` and `lint`).
.PHONY: help format lint test watch-docs

help: ## Show all Makefile targets
	@grep -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[33m%-30s\033[0m %s\n", $$1, $$2}'

format: ## Run code formatter: black
	black .

lint: ## Run linters: mypy, black, ruff
	mypy .
	black . --check
	ruff check .

test: ## Run tests
	pytest tests

# Docs
watch-docs: ## Build and watch documentation
	sphinx-autobuild docs/ docs/_build/html --open-browser --watch $(GIT_ROOT)/llama_index/
1 change: 1 addition & 0 deletions docs/examples/agent/openai_agent_query_cookbook.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -254,6 +254,7 @@
" ],\n",
")\n",
"\n",
"\n",
"# define pydantic model for auto-retrieval function\n",
"class AutoRetrieveModel(BaseModel):\n",
" query: str = Field(..., description=\"natural language query string\")\n",
Expand Down
2 changes: 1 addition & 1 deletion docs/examples/evaluation/TestNYC-Evaluation-Query.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -517,11 +517,11 @@
"source": [
"from typing import List\n",
"\n",
"\n",
"# define jupyter display function\n",
"def display_eval_sources(\n",
" query: str, response: Response, eval_result: List[str]\n",
") -> None:\n",
"\n",
" sources = [s.node.get_text() for s in response.source_nodes]\n",
" eval_df = pd.DataFrame(\n",
" {\n",
Expand Down
1 change: 1 addition & 0 deletions examples/async/AsyncQueryDemo.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -278,6 +278,7 @@
" response_mode=\"tree_summarize\",\n",
")\n",
"\n",
"\n",
"# run each query in parallel\n",
"async def async_query(query_engine, questions):\n",
" tasks = [query_engine.aquery(q) for q in questions]\n",
Expand Down
1 change: 1 addition & 0 deletions llama_index/llms/base.py
Original file line number Diff line number Diff line change
Expand Up @@ -43,6 +43,7 @@ def __str__(self) -> str:
ChatResponseGen = Generator[ChatResponse, None, None]
ChatResponseAsyncGen = AsyncGenerator[ChatResponse, None]


# ===== Generic Model Output - Completion =====
class CompletionResponse(BaseModel):
"""Completion response."""
Expand Down
1 change: 0 additions & 1 deletion tests/llm_predictor/test_base.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,6 @@
from llama_index.prompts.prompts import Prompt, SimpleInputPrompt

try:

gptcache_installed = True
except ImportError:
gptcache_installed = False
Expand Down

0 comments on commit 9a48208

Please sign in to comment.