Is it possible to implement a "continue generating" feature like ChatGPT's? For example:
import os
import time
import urllib.request
from llama_cpp import Llama
def download_file(file_link, filename):
    """Fetch *file_link* into *filename*, skipping the download when the file is already on disk."""
    if os.path.isfile(filename):
        # A previous run already saved this file — nothing to do.
        print("File already exists.")
        return
    urllib.request.urlretrieve(file_link, filename)
    print("File downloaded successfully.")
# Downloading the GGML Vicuna-7B (q4_1) model from HuggingFace.
ggml_model_path = "https://huggingface.co/CRD716/ggml-vicuna-1.1-quantized/resolve/main/ggml-vicuna-7b-1.1-q4_1.bin"
filename = "ggml-vicuna-7b-1.1-q4_1.bin"
download_file(ggml_model_path, filename)
# NOTE(review): n_ctx=512 caps the combined prompt + completion token count —
# presumably why long generations cut off early; confirm against the
# llama-cpp-python docs before raising it.
llm = Llama(model_path="ggml-vicuna-7b-1.1-q4_1.bin", n_ctx=512, n_batch=126)
def generate_text(
    prompt="Who is the CEO of Apple?",
    max_tokens=300,
    temperature=0.1,
    top_p=0.5,
    echo=False,
    stop=None,
):
    """Run one completion through the module-level ``llm`` and time it.

    Args:
        prompt: Text sent to the model.
        max_tokens: Upper bound on generated tokens.
        temperature: Sampling temperature (low = more deterministic).
        top_p: Nucleus-sampling cutoff.
        echo: Whether the model echoes the prompt in its output.
        stop: Stop sequences; defaults to ``["#"]`` when not given.

    Returns:
        A ``(text, seconds)`` tuple: the stripped completion text and the
        wall-clock time the call took.
    """
    # Fix: the original used a mutable default argument (stop=["#"]); use the
    # None-sentinel idiom so the default list is not shared across calls.
    if stop is None:
        stop = ["#"]
    start_time = time.time()
    output = llm(
        prompt,
        max_tokens=max_tokens,
        temperature=temperature,
        top_p=top_p,
        echo=echo,
        stop=stop,
    )
    end_time = time.time()
    time_taken = end_time - start_time
    # llama-cpp returns an OpenAI-style dict; take the first choice's text.
    output_text = output["choices"][0]["text"].strip()
    return output_text, time_taken
# Exercise the model with a code-generation prompt and report how long it took.
test, inference_time = generate_text(
    "create a python script that uses the pandas inside a flask app where the route will return a value from a data science model make in sci kit learn."
)
# Split elapsed seconds into a minutes/seconds pair for display.
minutes, seconds = divmod(inference_time, 60)
print(f"Model inference time: {int(minutes)}m {int(seconds)}s")
print(test)
File already exists.
AVX = 1 | AVX2 = 1 | AVX512 = 0 | AVX512_VBMI = 0 | AVX512_VNNI = 0 | FMA = 1 | NEON = 0 | ARM_FMA = 0 | F16C = 1 | FP16_VA = 0 | WASM_SIMD = 0 | BLAS = 0 | SSE3 = 1 | VSX = 0 |
Model inference time: 0m 45s
I have created a simple script using Flask and Pandas to retrieve some data from an API, but I am having trouble figuring out how to use the data to create a prediction model using Scikit-learn. Here is my code:
```python
from flask import Flask, request
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
app = Flask(__name__)
@app.route('/predict')
def predict():
The generation only gets about this far before stopping. Any idea whether a "continue generating" feature like ChatGPT's is possible here?