from langchain.prompts import PromptTemplate


def mistral_prompt_qa() -> PromptTemplate:
    """
    Returns a RAG Q&A prompt template in the Mistral instruct format.
    """
    prompt = """[INST]
Answer the following question only based on the provided context:
<context>
{context}
</context>
## Question: {question}
## Answer:
[/INST] """
    return PromptTemplate(template=prompt, input_variables=["context", "question"])
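

# Usage sketch (illustrative, not part of the original module): a PromptTemplate
# is rendered by passing its input variables to .format(). The context and
# question strings below are placeholder assumptions.
#
#   template = mistral_prompt_qa()
#   text = template.format(
#       context="Paris is the capital of France.",
#       question="What is the capital of France?",
#   )
#   # `text` is the final "[INST] ... [/INST]" string sent to the model.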


def llama3_prompt_qa() -> PromptTemplate:
    """
    Returns a RAG Q&A prompt template in the Llama 3 instruct format.
    """
    prompt = """<|begin_of_text|><|start_header_id|>system<|end_header_id|>
Answer the following question only based on the provided context:
<|eot_id|><|start_header_id|>user<|end_header_id|>
## Context: {context}
## Question: {question}
## Answer:
<|eot_id|><|start_header_id|>assistant<|end_header_id|>"""
    return PromptTemplate(template=prompt, input_variables=["context", "question"])


def llama_chat_prompt_qa() -> PromptTemplate:
    """
    Returns a plain-text RAG Q&A prompt template for Llama chat models.
    """
    prompt = """Answer the following question only based on the provided context:
## Context: {context}
## Question: {question}
## Answer: """
    return PromptTemplate(template=prompt, input_variables=["context", "question"])


def load_prompt(model_name: str) -> PromptTemplate:
    """
    Returns the RAG Q&A prompt template matching the given model name.
    """
    # TODO: add a dedicated Llama 2 prompt and a Mixtral prompt
    name = model_name.lower()
    if "mistral" in name or "mixtral" in name:
        return mistral_prompt_qa()
    elif "llama-3" in name:
        return llama3_prompt_qa()
    elif "llama" in name and "chat" in name:
        return llama_chat_prompt_qa()
    else:
        raise ValueError(f"Model name not supported: {model_name}")
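

# Usage sketch (illustrative, not part of the original module): dispatching on a
# model name and filling the selected template. The model name string and the
# `retrieved_docs` / `user_question` values below are placeholder assumptions.
#
#   qa_prompt = load_prompt("mistralai/Mistral-7B-Instruct-v0.2")
#   rag_input = qa_prompt.format(
#       context="\n".join(doc.page_content for doc in retrieved_docs),
#       question=user_question,
#   )
#   # `rag_input` is then passed to the instruct model as a single prompt string.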


def improved_judge_prompt_llama3() -> PromptTemplate:
    """
    Returns the improved judge prompt in the Llama 3 instruct format, for rating RAG Q&A answers.
    Prompt taken from: https://huggingface.co/learn/cookbook/en/llm_judge
    """
    prompt = """<|begin_of_text|><|start_header_id|>system<|end_header_id|>
You will be given a user_question and system_answer couple.
Your task is to provide a 'total rating' scoring how well the system_answer answers the user concerns expressed in the user_question.
Give your answer on a scale of 1 to 4, where 1 means that the system_answer is not helpful at all, and 4 means that the system_answer completely and helpfully addresses the user_question.
Here is the scale you should use to build your answer:
1: The system_answer is terrible: completely irrelevant to the question asked, or very partial
2: The system_answer is mostly not helpful: misses some key aspects of the question
3: The system_answer is mostly helpful: provides support, but still could be improved
4: The system_answer is excellent: relevant, direct, detailed, and addresses all the concerns raised in the question
Provide your feedback as follows:
Total rating: (your rating, as a number between 1 and 4)
You MUST provide values for 'Evaluation:' and 'Total rating:' in your answer.
Now here are the question and answer.
<|eot_id|><|start_header_id|>user<|end_header_id|>
Question: {question}
Answer: {answer}
If you give a correct rating, I'll give you 100 H100 GPUs to start your AI company.
Total rating:
<|eot_id|><|start_header_id|>assistant<|end_header_id|>"""
    return PromptTemplate(template=prompt, input_variables=["answer", "question"])


def improved_judge_prompt_llama_chat() -> PromptTemplate:
    """
    Returns the improved judge prompt as plain text for Llama chat models, for rating RAG Q&A answers.
    Prompt taken from: https://huggingface.co/learn/cookbook/en/llm_judge
    """
    prompt = """
You will be given a user_question and system_answer couple.
Your task is to provide a 'total rating' scoring how well the system_answer answers the user concerns expressed in the user_question.
Give your answer on a scale of 1 to 4, where 1 means that the system_answer is not helpful at all, and 4 means that the system_answer completely and helpfully addresses the user_question.
Here is the scale you should use to build your answer:
1: The system_answer is terrible: completely irrelevant to the question asked, or very partial
2: The system_answer is mostly not helpful: misses some key aspects of the question
3: The system_answer is mostly helpful: provides support, but still could be improved
4: The system_answer is excellent: relevant, direct, detailed, and addresses all the concerns raised in the question
Provide your feedback as follows:
Total rating: (your rating, as a number between 1 and 4)
You MUST provide values for 'Evaluation:' and 'Total rating:' in your answer.
Now here are the question and answer.
Question: {question}
Answer: {answer}
If you give a correct rating, I'll give you 100 H100 GPUs to start your AI company.
Total rating: """
    return PromptTemplate(template=prompt, input_variables=["answer", "question"])


def improved_judge_prompt_llama_chat_mistral() -> PromptTemplate:
    """
    Returns the improved judge prompt in the Mistral instruct format, for rating RAG Q&A answers.
    Prompt taken from: https://huggingface.co/learn/cookbook/en/llm_judge
    """
    prompt = """[INST]
You will be given a user_question and system_answer couple.
Your task is to provide a 'total rating' scoring how well the system_answer answers the user concerns expressed in the user_question.
Give your answer on a scale of 1 to 4, where 1 means that the system_answer is not helpful at all, and 4 means that the system_answer completely and helpfully addresses the user_question.
Here is the scale you should use to build your answer:
1: The system_answer is terrible: completely irrelevant to the question asked, or very partial
2: The system_answer is mostly not helpful: misses some key aspects of the question
3: The system_answer is mostly helpful: provides support, but still could be improved
4: The system_answer is excellent: relevant, direct, detailed, and addresses all the concerns raised in the question
Provide your feedback as follows:
Total rating: (your rating, as a number between 1 and 4)
You MUST provide values for 'Evaluation:' and 'Total rating:' in your answer.
Now here are the question and answer.
Question: {question}
Answer: {answer}
If you give a correct rating, I'll give you 100 H100 GPUs to start your AI company.
Total rating: [/INST]"""
    return PromptTemplate(template=prompt, input_variables=["answer", "question"])


def improved_judge_prompt(model_name: str) -> PromptTemplate:
    """
    Returns the improved judge prompt template matching the given model name.
    """
    name = model_name.lower()
    if "llama-3" in name:
        return improved_judge_prompt_llama3()
    elif "llama" in name and "chat" in name:
        return improved_judge_prompt_llama_chat()
    elif "mistral" in name or "mixtral" in name:
        return improved_judge_prompt_llama_chat_mistral()
    else:
        raise ValueError(f"Model name not supported: {model_name}")
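

# Sketch of how the judge prompt could be consumed (not part of the original
# module): the judge model is asked to emit a line of the form "Total rating: N",
# so a small parser can recover the score. The helper name, the regex, and the
# demo values below are assumptions for illustration only.
def parse_total_rating(judge_output: str):
    """
    Extract the numeric 'Total rating' (1-4) from a judge model's raw output.
    Returns an int, or None if no rating is found. Illustrative helper, not
    from the original module.
    """
    import re  # local import keeps this illustrative helper self-contained

    match = re.search(r"Total rating:\s*([1-4])", judge_output)
    return int(match.group(1)) if match else None


if __name__ == "__main__":
    # Minimal demo with a placeholder model name and a fake judge output.
    judge_prompt = improved_judge_prompt("Meta-Llama-3-8B-Instruct")
    print(judge_prompt.format(question="What is RAG?", answer="Retrieval-augmented generation."))
    print(parse_total_rating("Total rating: 3"))  # -> 3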