From d34985b9df5e8ee7144ed8b475dfc167548e2eef Mon Sep 17 00:00:00 2001
From: Bruce MacDonald
Date: Mon, 26 Jun 2023 13:41:16 -0400
Subject: [PATCH] add templates to prompt command

---
 .gitignore  |  1 +
 proto.py    |  2 ++
 template.py | 57 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 60 insertions(+)
 create mode 100644 template.py

diff --git a/.gitignore b/.gitignore
index aee3f423..2aaa3fd7 100644
--- a/.gitignore
+++ b/.gitignore
@@ -5,3 +5,4 @@
 *.spec
 build
 dist
+__pycache__
diff --git a/proto.py b/proto.py
index 00139973..f4070564 100644
--- a/proto.py
+++ b/proto.py
@@ -5,6 +5,7 @@ import click
 from llama_cpp import Llama
 from flask import Flask, Response, stream_with_context, request
 from flask_cors import CORS
+from template import template
 
 app = Flask(__name__)
 CORS(app) # enable CORS for all routes
@@ -124,6 +125,7 @@ def generate(model, prompt):
     if prompt == "":
         prompt = input("Prompt: ")
     output = ""
+    prompt = template(model, prompt)
     for generated in query(model, prompt):
         generated_json = json.loads(generated)
         text = generated_json["choices"][0]["text"]
diff --git a/template.py b/template.py
new file mode 100644
index 00000000..9077c99c
--- /dev/null
+++ b/template.py
@@ -0,0 +1,57 @@
+from difflib import SequenceMatcher
+
+model_prompts = {
+    "alpaca": """Below is an instruction that describes a task. Write a response that appropriately completes the request.
+
+### Instruction:
+{prompt}
+
+### Response:
+
+""",
+    "oasst": "<|prompter|>{prompt}<|endoftext|><|assistant|>",
+    "vicuna": """A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions.
+
+USER: {prompt}
+ASSISTANT:""",
+    "hermes": """### Instruction:
+{prompt}
+
+### Response:
+""",
+    "gpt4": """### Instruction:
+{prompt}
+
+### Response:
+""",
+    "qlora": """### Human: {prompt}
+### Assistant:""",
+    "tulu": """<|user|>
+{prompt}
+<|assistant|>
+""",
+    "wizardlm-7b": """{prompt}
+
+### Response:""",
+    "wizardlm-13b": """{prompt}
+
+### Response:""",
+    "wizardlm-30b": """{prompt}
+
+### Response:""",
+}
+
+
+def template(model, prompt):
+    max_ratio = 0
+    closest_key = ""
+    model_name = model.lower()
+    # Find the specialized prompt with the closest name match
+    for key in model_prompts:
+        ratio = SequenceMatcher(None, model_name, key).ratio()
+        if ratio > max_ratio:
+            max_ratio = ratio
+            closest_key = key
+    # Use the closest match, falling back to the raw prompt if nothing matched
+    p = model_prompts.get(closest_key)
+    return p.format(prompt=prompt) if p else prompt
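
For reviewers, a quick sketch of how the closest-name matching is expected to behave. This is illustrative only; the model names below are hypothetical and not part of the change.

    from template import template

    # "vicuna-7b" should be closest to the "vicuna" key by
    # SequenceMatcher ratio, so its chat format wraps the prompt
    print(template("vicuna-7b", "Hello"))

    # Longer names should still resolve to the nearest family;
    # "alpaca-lora-13b" is expected to pick the alpaca template
    print(template("alpaca-lora-13b", "Hello"))

If no key overlaps with the model name at all, template() falls back to returning the prompt unchanged.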