# utils.py
import json
import logging
import os
from pathlib import Path
from typing import Any

from langchain.agents import AgentExecutor
from langchain.agents.format_scratchpad.openai_tools import format_to_openai_tool_messages
from langchain.agents.output_parsers.openai_tools import OpenAIToolsAgentOutputParser
from langchain.chains import LLMChain
from langchain_community.adapters.openai import convert_dict_to_message
from langchain_core.output_parsers import BaseOutputParser
from langchain_core.prompts import (
    ChatPromptTemplate,
    HumanMessagePromptTemplate,
    MessagesPlaceholder,
    SystemMessagePromptTemplate,
)
from langchain_core.tools import BaseTool
from langchain_openai import ChatOpenAI
from openai import OpenAI

from global_var import *

logger = logging.getLogger(Path(__file__).stem)

def chater(query: str, history, stream=False, api_key: str | None = None):
    """Stream a chat completion for `query`, with `history` as prior OpenAI-style message dicts."""
    messages = history + [{"role": "user", "content": query}]
    messages = [convert_dict_to_message(m) for m in messages]
    llm = ChatOpenAI(model='gpt-3.5-turbo-0125', streaming=stream, api_key=api_key)  # type: ignore
    for chunk in llm.stream(messages):
        yield chunk.content
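
# Example usage (a minimal sketch; assumes OPENAI_API_KEY is set in the environment
# and that `history` is a list of OpenAI-style message dicts):
#
#     history = [{"role": "system", "content": "You are a helpful assistant."}]
#     for token in chater("Introduce yourself in one sentence.", history, stream=True):
#         print(token, end="", flush=True)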

# Output parser for the module-selection prompt used by task_decider.
class ModuleOutputParser(BaseOutputParser):
    def get_format_instructions(self) -> str:
        output_format = [
            {
                "name": "module's name",
                "function": "module's function name",
                "todo": "Assuming you are the user, write a query telling the model what you want to do with this function",
            }
        ]
        return json.dumps(output_format)

    def parse(self, text: str) -> list:
        return json.loads(text)

def task_decider(user_input: str, module_descriptions):
    # Define the chat prompt.
    system_template = (
        "You are a helpful assistant that can choose which module to execute for the user's input. "
        "The modules' information and functions are in JSON format as below:\n{module_descriptions}."
    )
    system_message_prompt = SystemMessagePromptTemplate.from_template(system_template)
    human_template = (
        "You should generate the modules' names, the functions to execute, and each function's target "
        "based on the user's input. The user's input is as below:\n{text}\n"
        "The output should be formatted as JSON. The format requirement is as below:\n{output_format}\n"
        "Attention: the number of modules can be more than 1."
    )
    human_message_prompt = HumanMessagePromptTemplate.from_template(human_template)
    # Define the output parser.
    output_parser = ModuleOutputParser()
    chat_prompt = ChatPromptTemplate(
        messages=[system_message_prompt, human_message_prompt],
        input_variables=["text"],
        partial_variables={
            "module_descriptions": str(module_descriptions),
            "output_format": output_parser.get_format_instructions(),
        },
    )
    chain = LLMChain(
        llm=ChatOpenAI(temperature=0),
        prompt=chat_prompt,
        output_parser=output_parser,
    )
    response = chain.invoke({"text": user_input})
    logger.info(response['text'])
    return response['text']
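
# Example (a sketch; the exact schema of `module_descriptions` is an assumption —
# pass whatever JSON-serializable description of the available modules you maintain):
#
#     modules = [{"name": "websearch", "function": "search_papers",
#                 "description": "search the web for papers"}]
#     tasks = task_decider("Find recent papers on ViT quantization", modules)
#     # `tasks` is the list parsed by ModuleOutputParser, e.g.:
#     # [{"name": "websearch", "function": "search_papers",
#     #   "todo": "Search for recent papers on ViT quantization"}]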

def result_parser(raw_exe_result, exe_module: str, query: str | None = None, stream=False, api_key: str | None = None):
    '''
    Summarize a module's raw execution result and stream the reply to the user.
    '''
    system_msg = 'You are a useful assistant that can summarize, extract, and generalize information.'
    messages = []
    if exe_module == 'websearch':
        if query is None:
            raise Exception('query is None and exe_module is websearch!')
        user_msg = (
            "Reply to the user's input according to the information, which is in JSON array format and "
            "contains some papers' metadata. You should provide references to the papers you mention. "
            "Answer the question directly, without using any expression similar to \"According to the JSON array\" "
            f"or \"In the given JSON array\" in your reply.\nJSON array:\n{raw_exe_result}\n\nuser's input:\n{query}"
        )
        messages = [
            {"role": "system", "content": system_msg},
            {"role": "user", "content": user_msg},
        ]
    elif exe_module == 'retrieve':
        raise Exception('retrieve does not need a result_parser')
    elif exe_module == 'exception':
        user_msg = (
            "Reply to the user's input according to the information. Answer directly, without using any "
            "expression such as \"according to the information\" or \"in the information\".\n"
            f"information:\n{raw_exe_result}\nuser's input:\n{query}"
        )
        messages = [
            {"role": "system", "content": system_msg},
            {"role": "user", "content": user_msg},
        ]
    messages = [convert_dict_to_message(m) for m in messages]
    llm = ChatOpenAI(model='gpt-3.5-turbo-0125', streaming=stream, api_key=api_key)  # type: ignore
    for chunk in llm.stream(messages):
        yield chunk.content
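
# Example usage (a sketch; the metadata fields below are hypothetical):
#
#     raw = '[{"title": "RepQ-ViT", "authors": ["..."], "url": "https://..."}]'
#     for token in result_parser(raw, "websearch", query="Tell me about RepQ-ViT", stream=True):
#         print(token, end="")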

def judger(history, question):
    client = OpenAI()
    messages = history + [
        {"role": "user", "content": f"Make a \"True\" or \"False\" decision about this question based on the historical information: {question}\nAnswer simply with \"True\" or \"False\"."}
    ]
    response = client.chat.completions.create(
        model="gpt-3.5-turbo",
        temperature=0,
        messages=messages,
    )
    if response.choices[0].message.content not in ("True", "False"):
        raise Exception("judger: response not in (\"True\", \"False\")")
    return response.choices[0].message.content
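
# Example usage (a sketch):
#
#     verdict = judger(history=[], question="Is the user asking to search the web?")
#     if verdict == "True":
#         ...  # route to the websearch module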

def fn_args_generator(query: str, functions, history=None):
    client = OpenAI()
    # Use None instead of a mutable default argument for `history`.
    messages = (history or []) + [{"role": "user", "content": query}]
    response = client.chat.completions.create(
        model="gpt-3.5-turbo-0613",
        temperature=0,
        messages=messages,
        functions=functions,
        function_call="auto",
    )
    response_message = response.choices[0].message
    logger.debug(response_message)
    if response_message.function_call:
        function_args = json.loads(response_message.function_call.arguments)
        return function_args
    else:
        raise Exception("No function call received")
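
# Example usage (a sketch; `functions` follows the OpenAI function-calling schema,
# and `search_papers` is a hypothetical function name):
#
#     functions = [{
#         "name": "search_papers",
#         "description": "Search for papers by keyword",
#         "parameters": {
#             "type": "object",
#             "properties": {"keyword": {"type": "string"}},
#             "required": ["keyword"],
#         },
#     }]
#     args = fn_args_generator("Find papers about quantization", functions)
#     # e.g. {"keyword": "quantization"}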

def translator(src: str):
    client = OpenAI()
    prompt = f"Please translate this sentence into English: {src}"
    messages = [{"role": "user", "content": prompt}]
    response = client.chat.completions.create(
        model="gpt-3.5-turbo",
        messages=messages,  # type: ignore
        temperature=0,
    )
    return response.choices[0].message.content
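
# Example usage (a sketch):
#
#     translator("这篇论文的主要贡献是什么？")
#     # -> an English rendering, e.g. "What is the main contribution of this paper?"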

def auto_extractor(query, history=None):
    client = OpenAI()
    prompt = """
Extract keywords from the query:
[query]: {}
The output should be formatted as below:
keyword1,keyword2,...
""".format(query)
    messages = (history or []) + [{"role": "user", "content": prompt}]
    response = client.chat.completions.create(
        model="gpt-3.5-turbo",
        temperature=0,
        messages=messages,
    )
    keywords = response.choices[0].message.content
    if keywords:
        keywords = [keyword.strip() for keyword in keywords.split(',')]
    else:
        raise Exception('response.choices[0].message.content is None')
    return keywords

if __name__ == '__main__':
    from dotenv import load_dotenv
    import openai
    load_dotenv()
    openai.api_key = os.getenv('OPENAI_API_KEY')
    print(auto_extractor("What are the two components with extreme distributions that RepQ-ViT focuses on?"))

def load_zhipuai_agent_excutor(
    tools_inst: list[BaseTool], model_kwargs: dict[str, Any]
):
    """Build an AgentExecutor over an OpenAI-compatible endpoint.

    `model_kwargs` must contain 'model', 'temperature', 'api_key' and 'base_url';
    any remaining keys are passed through to ChatOpenAI as model_kwargs.
    """
    model = model_kwargs.pop('model')
    temperature = model_kwargs.pop('temperature')
    api_key = model_kwargs.pop('api_key')
    base_url = model_kwargs.pop('base_url')
    llm = ChatOpenAI(model=model, temperature=temperature, api_key=api_key, base_url=base_url, model_kwargs=model_kwargs)
    # Only bind tools when some are provided (an empty tool list would still be sent to the API).
    if len(tools_inst) == 0:
        llm_with_tools = llm
    else:
        llm_with_tools = llm.bind_tools(tools_inst)
    prompt = ChatPromptTemplate.from_messages(
        [
            (
                "system",
                "You are a helpful assistant.",
            ),
            ("user", "{input}"),
            MessagesPlaceholder(variable_name="agent_scratchpad"),
        ]
    )
    agent = (
        {
            "input": lambda x: x["input"],
            "agent_scratchpad": lambda x: format_to_openai_tool_messages(
                x["intermediate_steps"]
            ),
        }
        | prompt
        | llm_with_tools
        | OpenAIToolsAgentOutputParser()
    )
    agent_executor = AgentExecutor(
        agent=agent, tools=tools_inst, handle_parsing_errors=True  # type: ignore
    )
    return agent_executor
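
# Example usage (a hedged sketch; the model name and base_url below are assumptions —
# substitute the values your ZhipuAI-compatible deployment actually uses):
#
#     executor = load_zhipuai_agent_excutor(
#         tools_inst=[],
#         model_kwargs={
#             "model": "glm-4",  # assumed model name
#             "temperature": 0,
#             "api_key": os.getenv("ZHIPUAI_API_KEY"),
#             "base_url": "https://open.bigmodel.cn/api/paas/v4/",  # assumed endpoint
#         },
#     )
#     print(executor.invoke({"input": "Hello!"})["output"])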