Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Fixed bugs, changed backend port #272

Merged
merged 3 commits into from
Jun 25, 2023
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion application/.env_sample
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,7 @@ EMBEDDINGS_KEY=your_api_key
CELERY_BROKER_URL=redis://localhost:6379/0
CELERY_RESULT_BACKEND=redis://localhost:6379/1
MONGO_URI=mongodb://localhost:27017/docsgpt
API_URL=http://localhost:5001
API_URL=http://localhost:7091

#For OPENAI on Azure
OPENAI_API_BASE=
Expand Down
4 changes: 2 additions & 2 deletions application/Dockerfile
Original file line number Diff line number Diff line change
Expand Up @@ -18,6 +18,6 @@ COPY . /app
ENV FLASK_APP=app.py
ENV FLASK_DEBUG=true

EXPOSE 5001
EXPOSE 7091

CMD ["gunicorn", "-w", "2", "--timeout", "120", "--bind", "0.0.0.0:5001", "wsgi:app"]
CMD ["gunicorn", "-w", "2", "--timeout", "120", "--bind", "0.0.0.0:7091", "wsgi:app"]
25 changes: 15 additions & 10 deletions application/app.py
Original file line number Diff line number Diff line change
Expand Up @@ -43,6 +43,10 @@
# os.environ["LANGCHAIN_HANDLER"] = "langchain"

logger = logging.getLogger(__name__)
if settings.LLM_NAME == "gpt4":
gpt_model = 'gpt-4'
else:
gpt_model = 'gpt-3.5-turbo'

if settings.LLM_NAME == "manifest":
from manifest import Manifest
Expand Down Expand Up @@ -195,7 +199,7 @@ def complete_stream(question, docsearch, chat_history, api_key):
messages_combine.append({"role": "user", "content": i["prompt"]})
messages_combine.append({"role": "system", "content": i["response"]})
messages_combine.append({"role": "user", "content": question})
completion = openai.ChatCompletion.create(model="gpt-3.5-turbo", engine=settings.AZURE_DEPLOYMENT_NAME,
completion = openai.ChatCompletion.create(model=gpt_model, engine=settings.AZURE_DEPLOYMENT_NAME,
messages=messages_combine, stream=True, max_tokens=500, temperature=0)

for line in completion:
Expand All @@ -208,26 +212,27 @@ def complete_stream(question, docsearch, chat_history, api_key):
yield f"data: {data}\n\n"


@app.route("/stream", methods=["POST", "GET"])
@app.route("/stream", methods=["POST"])
def stream():
data = request.get_json()
# read the question and history parameters from the JSON request body
question = request.args.get("question")
history = request.args.get("history")
question = data["question"]
history = data["history"]
# history to json object from string
history = json.loads(history)

# check if active_docs is set

if not api_key_set:
api_key = request.args.get("api_key")
api_key = data["api_key"]
else:
api_key = settings.API_KEY
if not embeddings_key_set:
embeddings_key = request.args.get("embeddings_key")
embeddings_key = data["embeddings_key"]
else:
embeddings_key = settings.EMBEDDINGS_KEY
if "active_docs" in request.args:
vectorstore = get_vectorstore({"active_docs": request.args.get("active_docs")})
if "active_docs" in data:
vectorstore = get_vectorstore({"active_docs": data["active_docs"]})
else:
vectorstore = ""
docsearch = get_docsearch(vectorstore, embeddings_key)
Expand Down Expand Up @@ -279,7 +284,7 @@ def api_answer():
)
else:
logger.debug("plain OpenAI")
llm = ChatOpenAI(openai_api_key=api_key) # optional parameter: model_name="gpt-4"
llm = ChatOpenAI(openai_api_key=api_key, model_name=gpt_model) # optional parameter: model_name="gpt-4"
messages_combine = [SystemMessagePromptTemplate.from_template(chat_combine_template)]
if history:
tokens_current_history = 0
Expand Down Expand Up @@ -597,4 +602,4 @@ def after_request(response):


if __name__ == "__main__":
app.run(debug=True, port=5001)
app.run(debug=True, port=7091)
2 changes: 1 addition & 1 deletion application/core/settings.py
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,7 @@ class Settings(BaseSettings):
MODEL_PATH: str = "./models/gpt4all-model.bin"
TOKENS_MAX_HISTORY: int = 150

API_URL: str = "http://localhost:5001" # backend url for celery worker
API_URL: str = "http://localhost:7091" # backend url for celery worker

API_KEY: str = None # LLM api key
EMBEDDINGS_KEY: str = None  # api key for embeddings (if using openai, just copy API_KEY)
Expand Down
2 changes: 1 addition & 1 deletion application/wsgi.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
from app import app

if __name__ == "__main__":
app.run(debug=True, port=5001)
app.run(debug=True, port=7091)
6 changes: 3 additions & 3 deletions docker-compose-azure.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@ services:
frontend:
build: ./frontend
environment:
- VITE_API_HOST=http://localhost:5001
- VITE_API_HOST=http://localhost:7091
- VITE_API_STREAMING=$VITE_API_STREAMING
ports:
- "5173:5173"
Expand All @@ -25,7 +25,7 @@ services:
- AZURE_DEPLOYMENT_NAME=$AZURE_DEPLOYMENT_NAME
- AZURE_EMBEDDINGS_DEPLOYMENT_NAME=$AZURE_EMBEDDINGS_DEPLOYMENT_NAME
ports:
- "5001:5001"
- "7091:7091"
volumes:
- ./application/indexes:/app/indexes
- ./application/inputs:/app/inputs
Expand All @@ -43,7 +43,7 @@ services:
- CELERY_BROKER_URL=redis://redis:6379/0
- CELERY_RESULT_BACKEND=redis://redis:6379/1
- MONGO_URI=mongodb://mongo:27017/docsgpt
- API_URL=http://backend:5001
- API_URL=http://backend:7091
- OPENAI_API_KEY=$OPENAI_API_KEY
- OPENAI_API_BASE=$OPENAI_API_BASE
- OPENAI_API_VERSION=$OPENAI_API_VERSION
Expand Down
18 changes: 8 additions & 10 deletions docker-compose.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@ services:
frontend:
build: ./frontend
environment:
- VITE_API_HOST=http://localhost:5001
- VITE_API_HOST=http://localhost:7091
- VITE_API_STREAMING=$VITE_API_STREAMING
ports:
- "5173:5173"
Expand All @@ -20,14 +20,14 @@ services:
- CELERY_RESULT_BACKEND=redis://redis:6379/1
- MONGO_URI=mongodb://mongo:27017/docsgpt
ports:
- "5001:5001"
- "7091:7091"
volumes:
- ./application/indexes:/app/indexes
- ./application/inputs:/app/inputs
- ./application/vectors:/app/vectors
depends_on:
- redis
- mongo
- redis
- mongo

worker:
build: ./application
Expand All @@ -38,10 +38,10 @@ services:
- CELERY_BROKER_URL=redis://redis:6379/0
- CELERY_RESULT_BACKEND=redis://redis:6379/1
- MONGO_URI=mongodb://mongo:27017/docsgpt
- API_URL=http://backend:5001
- API_URL=http://backend:7091
depends_on:
- redis
- mongo
- redis
- mongo

redis:
image: redis:6-alpine
Expand All @@ -55,7 +55,5 @@ services:
volumes:
- mongodb_data_container:/data/db



volumes:
mongodb_data_container:
mongodb_data_container:
2 changes: 1 addition & 1 deletion extensions/chrome/popup.js
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,7 @@ document.getElementById("message-form").addEventListener("submit", function(even
}

// send POST request to the backend at http://127.0.0.1:7091/api/answer with the message in the JSON body
fetch('http://127.0.0.1:5001/api/answer', {
fetch('http://127.0.0.1:7091/api/answer', {
method: 'POST',
headers: {
'Content-Type': 'application/json',
Expand Down
2 changes: 1 addition & 1 deletion extensions/discord/bot.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,7 @@
# Replace 'YOUR_BOT_TOKEN' with your bot's token
TOKEN = os.getenv("DISCORD_TOKEN")
PREFIX = '@DocsGPT'
BASE_API_URL = 'http://localhost:5001'
BASE_API_URL = 'http://localhost:7091'

intents = discord.Intents.default()
intents.message_content = True
Expand Down
2 changes: 1 addition & 1 deletion extensions/web-widget/src/js/script.js
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
const API_ENDPOINT = "http://localhost:5001/api/answer"; // Replace with your API endpoint
const API_ENDPOINT = "http://localhost:7091/api/answer"; // Replace with your API endpoint

const widgetInitMessage = document.getElementById("docsgpt-init-message");
const widgetAnswerMessage = document.getElementById("docsgpt-answer");
Expand Down
2 changes: 1 addition & 1 deletion frontend/.env.development
Original file line number Diff line number Diff line change
@@ -1,2 +1,2 @@
# Please put appropriate value
VITE_API_HOST=http://localhost:5001
VITE_API_HOST=http://localhost:7091
68 changes: 55 additions & 13 deletions frontend/src/conversation/conversationApi.ts
Original file line number Diff line number Diff line change
Expand Up @@ -91,22 +91,64 @@ export function fetchAnswerSteaming(
});

return new Promise<Answer>((resolve, reject) => {
const url = new URL(apiHost + '/stream');
url.searchParams.append('question', question);
url.searchParams.append('api_key', apiKey);
url.searchParams.append('embeddings_key', apiKey);
url.searchParams.append('active_docs', docPath);
url.searchParams.append('history', JSON.stringify(history));
const body = {
question: question,
api_key: apiKey,
embeddings_key: apiKey,
active_docs: docPath,
history: JSON.stringify(history),
};

fetch(apiHost + '/stream', {
method: 'POST',
headers: {
'Content-Type': 'application/json',
},
body: JSON.stringify(body),
})
.then((response) => {
if (!response.body) throw Error("No response body");

const reader = response.body.getReader();
const decoder = new TextDecoder('utf-8');
var counterrr = 0
const processStream = ({ done, value }: ReadableStreamReadResult<Uint8Array>) => {
if (done) {
console.log(counterrr);
return;
}

const eventSource = new EventSource(url.href);
counterrr += 1;

const chunk = decoder.decode(value);

eventSource.onmessage = onEvent;
const lines = chunk.split("\n");

eventSource.onerror = (error) => {
console.log('Connection failed.');
eventSource.close();
};
});
for (let line of lines) {
if (line.trim() == "") {
continue;
}
if (line.startsWith('data:')) {
line = line.substring(5);
}

const messageEvent: MessageEvent = new MessageEvent("message", {
data: line,
});

onEvent(messageEvent); // handle each message
}

reader.read().then(processStream).catch(reject);
}

reader.read().then(processStream).catch(reject);
})
.catch((error) => {
console.error('Connection failed:', error);
reject(error);
});
});
}

export function sendFeedback(
Expand Down
8 changes: 6 additions & 2 deletions scripts/parser/file/markdown_parser.py
Original file line number Diff line number Diff line change
Expand Up @@ -119,8 +119,12 @@ def parse_tups(
self, filepath: Path, errors: str = "ignore"
) -> List[Tuple[Optional[str], str]]:
"""Parse file into tuples."""
with open(filepath, "r") as f:
content = f.read()
with open(filepath, "r", encoding='utf8') as f:
try:
content = f.read()
except (Exception,) as e:
print(f'Error a file: "{filepath}"')
raise e
if self._remove_hyperlinks:
content = self.remove_hyperlinks(content)
if self._remove_images:
Expand Down
6 changes: 3 additions & 3 deletions setup.sh
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,7 @@ docker run -d --name redis -p 6379:6379 redis:6-alpine
docker run -d --name mongo -p 27017:27017 -v mongodb_data_container:/data/db mongo:6

# Run backend and worker services
docker run -d --name backend -p 5001:5001 \
docker run -d --name backend -p 7091:7091 \
--link redis:redis --link mongo:mongo \
-v $(pwd)/application/indexes:/app/indexes \
-v $(pwd)/application/inputs:/app/inputs \
Expand All @@ -34,12 +34,12 @@ docker run -d --name worker \
-e CELERY_BROKER_URL=redis://redis:6379/0 \
-e CELERY_RESULT_BACKEND=redis://redis:6379/1 \
-e MONGO_URI=mongodb://mongo:27017/docsgpt \
-e API_URL=http://backend:5001 \
-e API_URL=http://backend:7091 \
backend_image \
celery -A app.celery worker -l INFO

# Run frontend service
docker run -d --name frontend -p 5173:5173 \
-e VITE_API_HOST=http://localhost:5001 \
-e VITE_API_HOST=http://localhost:7091 \
frontend_image