From 1879267ac90550ee26ed7aab891559788c77079e Mon Sep 17 00:00:00 2001
From: Bruce MacDonald
Date: Tue, 27 Jun 2023 14:07:30 -0400
Subject: [PATCH] move model prompts to python dict

---
 model_prompts.json | 12 ------------
 template.py        | 15 ++++++++++++---
 2 files changed, 12 insertions(+), 15 deletions(-)
 delete mode 100644 model_prompts.json

diff --git a/model_prompts.json b/model_prompts.json
deleted file mode 100644
index 3d1e7287..00000000
--- a/model_prompts.json
+++ /dev/null
@@ -1,12 +0,0 @@
-{
-    "alpaca": "Below is an instruction that describes a task. Write a response that appropriately completes the request.\n\n### Instruction:\n{prompt}\n\n### Response:\n\n",
-    "ggml": "A chat between a curious human and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the human's questions.\n\n### Human: Hello, Assistant.\n### Assistant: Hello. How may I help you today?\n### Human: ${prompt}",
-    "gpt4": "### Instruction:\n{prompt}\n\n### Response:\n",
-    "hermes": "### Instruction:\n{prompt}\n\n### Response:\n",
-    "oasst": "{prompt}",
-    "orca": "### System:\nYou are an AI assistant that follows instruction extremely well. Help as much as you can.\n\n### User:\n{prompt}\n\n### Response:",
-    "qlora": "### Human: {prompt}\n### Assistant:",
-    "tulu": "\n{prompt}\n\n(include newline)",
-    "vicuna": "A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions.\n\nUSER: {prompt}\nASSISTANT:",
-    "wizardlm": "{prompt}\n\n### Response:"
-}
\ No newline at end of file

diff --git a/template.py b/template.py
index e03c2feb..9c1ca307 100644
--- a/template.py
+++ b/template.py
@@ -1,8 +1,17 @@
 from difflib import SequenceMatcher
-import json
 
-with open("./model_prompts.json", "r") as f:
-    model_prompts = json.load(f)
+model_prompts = {
+    "alpaca": "Below is an instruction that describes a task. Write a response that appropriately completes the request.\n\n### Instruction:\n{prompt}\n\n### Response:\n\n",
+    "ggml": "A chat between a curious human and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the human's questions.\n\n### Human: Hello, Assistant.\n### Assistant: Hello. How may I help you today?\n### Human: ${prompt}",
+    "gpt4": "### Instruction:\n{prompt}\n\n### Response:\n",
+    "hermes": "### Instruction:\n{prompt}\n\n### Response:\n",
+    "oasst": "{prompt}",
+    "orca": "### System:\nYou are an AI assistant that follows instruction extremely well. Help as much as you can.\n\n### User:\n{prompt}\n\n### Response:",
+    "qlora": "### Human: {prompt}\n### Assistant:",
+    "tulu": "\n{prompt}\n\n(include newline)",
+    "vicuna": "A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions.\n\nUSER: {prompt}\nASSISTANT:",
+    "wizardlm": "{prompt}\n\n### Response:",
+}
 
 
 def template(model, prompt):
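
Note: the template.py hunk ends at the def template(model, prompt) signature, so the function body is not part of this patch. The sketch below is only one plausible way the new model_prompts dict could be consumed with the imported SequenceMatcher: fuzzy-match the requested model name against the dict keys and fill the closest template. The lowercasing, the zero-ratio fallback, and the return values are assumptions, not something this diff specifies.

# Sketch only, assuming the model_prompts dict defined in template.py above is in scope.
from difflib import SequenceMatcher

def template(model, prompt):
    # Find the prompt key that most closely resembles the requested model name.
    best_name = None
    best_ratio = 0.0
    for name in model_prompts:
        ratio = SequenceMatcher(None, model.lower(), name).ratio()
        if ratio > best_ratio:
            best_name, best_ratio = name, ratio
    if best_name is None:
        # Assumed fallback: return the prompt unchanged when nothing matches at all.
        return prompt
    # Fill the chosen template; every entry uses {prompt} as its placeholder.
    return model_prompts[best_name].format(prompt=prompt)

# Example usage: "alpaca-7b" is closest to the "alpaca" key, so that template is applied.
# print(template("alpaca-7b", "Why is the sky blue?"))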