diff --git a/tools/flaskIfc/flaskIfc.py b/tools/flaskIfc/flaskIfc.py
index 61187c91a09d4..4d65c9a7ffa0e 100644
--- a/tools/flaskIfc/flaskIfc.py
+++ b/tools/flaskIfc/flaskIfc.py
@@ -1,32 +1,60 @@
-from flask import Flask, request
+from flask import Flask, render_template, request
import subprocess
app = Flask(__name__)
-@app.route('/serial', methods=['GET'])
-def serial_command():
+@app.route('/')
+def index():
+ return render_template('index.html')
+
+@app.route('/submit', methods=['POST'])
+def submit():
+    # Example invocation: ./run_platform_test.sh "my cat's name" "10" "tinyllama-vo-5m-para.gguf" "none"
+ model = request.form.get('model')
+ backend = request.form.get('backend')
+ tokens = request.form.get('tokens')
+ prompt = request.form.get('prompt')
+
+    # Map model selections to their GGUF files (update with actual paths)
+ model_paths = {
+ "tiny-llama": "tinyllama-vo-5m-para.gguf",
+ "Tiny-llama-F32": "Tiny-Llama-v0.3-FP32-1.1B-F32.gguf"
+ }
+
+ model_path = model_paths.get(model, "")
+ if not model_path:
+ return f"
Error: Model path not found for '{model}'
"
+
+    # Kept for reference; to be removed later.
+    # Equivalent llama-cli command:
+ #command = [
+ # "./llama-cli",
+ # "-p", prompt,
+ # "-m", model_path,
+ # "--device", backend,
+ # "--temp", "0",
+ # "--n-predict", tokens,
+ # "--repeat-penalty", "1",
+ # "--top-k", "0",
+ # "--top-p", "1"
+ #]
# Currently the port is hard coded to /dev/ttyUSB3 but can be parameterized
port = '/dev/ttyUSB3'
- #port = request.args.get('port')
# Currently the baudrate is hard coded to 921600 but can be parameterized
- #baudrate = request.args.get('baudrate')
baudrate = '921600'
+ script_path = "/usr/bin/tsi/v0.1.1.tsv31_06_06_2025/bin/run_platform_test.sh"
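+    # Build the full command string; it is forwarded to the target over the serial console via serial_script.py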
+ command = f"{script_path} \"{prompt}\" {tokens} {model_path} {backend}"
- # Parse the command and send it to serial.py
- command = request.args.get('command')
-
- #if not all([port, baudrate, command]):
- if not all([command]):
- return "Missing parameters", 400
try:
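+        # Run serial_script.py, which sends the command over the serial port and returns the captured output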
result = subprocess.run(['python3', 'serial_script.py', port, baudrate, command], capture_output=True, text=True, check=True)
- return result.stdout.strip(), 200
+        output = result.stdout  # Keep stdout as-is so newlines are preserved for display
except subprocess.CalledProcessError as e:
- return f"Error executing script: {e.stderr}", 500
+ output = f"Error running model: {e.stderr}"
+ return render_template('result.html', output=output)
if __name__ == '__main__':
app.run(debug=True, port=5000)
diff --git a/tools/flaskIfc/serial_script.py b/tools/flaskIfc/serial_script.py
index e138d19ab7de0..0e1064225921f 100644
--- a/tools/flaskIfc/serial_script.py
+++ b/tools/flaskIfc/serial_script.py
@@ -10,13 +10,12 @@ def send_serial_command(port, baudrate, command):
ser.write('\n'.encode()) # Encode command to bytes
# Wait to read the serial port
- # Need to add a break somewhere for when we see the phrase "root@name"
data = '\0'
while True:
try:
line = ser.readline()
if line: # Check if line is not empty
- data += (line.decode('utf-8').strip()) # Decode and strip to remove extra chars
+ data += line.decode('utf-8') # Keep the line as-is with newline
else:
break # Exit loop if no data is received
except serial.SerialException as e:
@@ -26,6 +25,7 @@ def send_serial_command(port, baudrate, command):
ser.close()
return ("Program interrupted by user")
ser.close()
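+    # Print the collected output so the Flask caller can capture it from stdout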
+    print(data)
return data
except serial.SerialException as e:
@@ -42,4 +42,3 @@ def send_serial_command(port, baudrate, command):
baudrate = int(sys.argv[2])
command = sys.argv[3]
response = send_serial_command(port, baudrate, command)
- print(response)
diff --git a/tools/flaskIfc/templates/index.html b/tools/flaskIfc/templates/index.html
new file mode 100644
index 0000000000000..9152167a86c44
--- /dev/null
+++ b/tools/flaskIfc/templates/index.html
@@ -0,0 +1,38 @@
+<!DOCTYPE html>
+<html>
+<head>
+    <title>TSAVORITE Web UI For Model Inference</title>
+</head>
+<body>
+    <h1>Model Inference Configuration</h1>
+    <!-- Form fields reconstructed to match the /submit handler in flaskIfc.py -->
+    <form action="/submit" method="post">
+        <label>Model:
+            <select name="model">
+                <option value="tiny-llama">tiny-llama</option>
+                <option value="Tiny-llama-F32">Tiny-llama-F32</option>
+            </select>
+        </label><br>
+        <label>Backend: <input type="text" name="backend" value="none"></label><br>
+        <label>Tokens: <input type="text" name="tokens" value="10"></label><br>
+        <label>Prompt: <input type="text" name="prompt"></label><br>
+        <input type="submit" value="Run Inference">
+    </form>
+</body>
+</html>
diff --git a/tools/flaskIfc/templates/result.html b/tools/flaskIfc/templates/result.html
new file mode 100644
index 0000000000000..07c79c409f596
--- /dev/null
+++ b/tools/flaskIfc/templates/result.html
@@ -0,0 +1,12 @@
+<!DOCTYPE html>
+<html>
+<head>
+    <title>Model Output</title>
+</head>
+<body>
+    <h1>Model Response</h1>
+    <pre>{{ output }}</pre>
+
+    <a href="/">⟵ Back to Form</a>
+</body>
+</html>