# ollama/proto.py

import json
import os
import threading

from llama_cpp import Llama
from flask import Flask, Response, stream_with_context, request
from flask_cors import CORS

app = Flask(__name__)
CORS(app)  # enable CORS for all routes

# llms tracks which models are loaded
llms = {}
lock = threading.Lock()
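# `lock` serializes access to `llms`: app.run(threaded=True) below serves
# requests concurrently, so two requests could otherwise race to load the
# same model twice.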


def load(model):
    with lock:
        if not os.path.exists(f"./models/{model}.bin"):
            return {"error": "The model does not exist."}
        if model not in llms:
            llms[model] = Llama(model_path=f"./models/{model}.bin")
        return None


def unload(model):
    with lock:
        if not os.path.exists(f"./models/{model}.bin"):
            return {"error": "The model does not exist."}
        llms.pop(model, None)
        return None
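
# Unloading only drops the reference held in `llms`; the underlying model
# weights should be freed when Python garbage-collects the Llama object
# (there is no explicit close call here).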


def generate(model, prompt):
    # auto load; note this function is a generator, so a bare `return error`
    # would silently end the stream without sending anything. Yield the error
    # to the client instead.
    error = load(model)
    if error is not None:
        yield json.dumps(error)
        return
    stream = llms[model](
        str(prompt),  # TODO: optimize prompt based on model
        max_tokens=4096,
        stop=["Q:", "\n"],
        echo=True,
        stream=True,
    )
    for output in stream:
        yield json.dumps(output)
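
# Each yielded chunk is one llama-cpp-python streaming completion dict
# serialized to JSON, roughly {"choices": [{"text": "...", ...}], ...};
# the exact chunk shape is an assumption based on llama-cpp-python's
# completion API, not something this file pins down.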


def models():
    all_files = os.listdir("./models")
    bin_files = [
        file.replace(".bin", "") for file in all_files if file.endswith(".bin")
    ]
    return bin_files


@app.route("/load", methods=["POST"])
def load_route_handler():
    data = request.get_json()
    model = data.get("model")
    if not model:
        return Response("Model is required", status=400)
    error = load(model)
    if error is not None:
        return error
    return Response(status=204)
@app.route("/unload", methods=["POST"])
2023-06-26 12:08:03 -04:00
def unload_route_handler():
2023-06-23 14:47:57 -04:00
data = request.get_json()
model = data.get("model")
if not model:
return Response("Model is required", status=400)
2023-06-26 12:08:03 -04:00
error = unload(model)
if error is not None:
return error
2023-06-23 14:47:57 -04:00
return Response(status=204)


@app.route("/generate", methods=["POST"])
def generate_route_handler():
    data = request.get_json()
    model = data.get("model")
    prompt = data.get("prompt")
    if not model:
        return Response("Model is required", status=400)
    if not prompt:
        return Response("Prompt is required", status=400)
    if not os.path.exists(f"./models/{model}.bin"):
        return {"error": "The model does not exist."}, 400
    return Response(
        stream_with_context(generate(model, prompt)), mimetype="text/event-stream"
    )
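
# Note: despite the "text/event-stream" mimetype, the chunks yielded by
# generate() are bare JSON objects with no SSE "data:" framing, so clients
# should read the raw stream rather than rely on an EventSource parser.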


@app.route("/models", methods=["GET"])
def models_route_handler():
    bin_files = models()
    return Response(json.dumps(bin_files), mimetype="application/json")


if __name__ == "__main__":
    # the unreachable second app.run() that followed this call has been removed
    app.run(debug=True, threaded=True, port=5001)
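

# Example client usage: a minimal sketch, assuming the server above is running
# on localhost:5001 and a model file ./models/llama.bin exists (the model name
# "llama" is hypothetical).
#
#   import requests
#
#   requests.post("http://localhost:5001/load", json={"model": "llama"})
#   resp = requests.post(
#       "http://localhost:5001/generate",
#       json={"model": "llama", "prompt": "Q: What is 2 + 2? A:"},
#       stream=True,
#   )
#   for chunk in resp.iter_content(chunk_size=None):
#       print(chunk.decode(), end="", flush=True)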