Skip to content

Commit d31592b

Browse files
authored
Merge pull request #3 from dev-strender/apply-chatui
채팅과 유사한 UI적용, 약간의 css 추가
2 parents b29ec8e + c77fbc6 commit d31592b

File tree

3 files changed

+278
-15
lines changed

3 files changed

+278
-15
lines changed

app.py

+36-2
Original file line numberDiff line numberDiff line change
@@ -41,18 +41,52 @@ async def summary(request: Request):
4141
resp = es.gpt_answer(q, text_results=text_results)
4242
else:
4343
es_results = es.search(q)
44-
44+
4545
if es_results:
4646
# Generate summaries of the search results
4747
resp = es.gpt_answer(q, es_results=es_results)
4848
else:
4949
resp = es.gpt_answer(q, text_results="No results found")
50-
50+
51+
if resp.status_code != 200:
52+
raise HTTPException(resp.status_code, resp.text)
53+
54+
return StreamingResponse(resp.iter_content(1),
55+
media_type="text/event-stream")
56+
57+
## 추가를 해보았는데 막상 제대로 동작하지 않는 것 같아서 우선은 사용하지 않습니다.
58+
@app.post("/question-suggestion")
59+
async def question_suggestion(request: Request):
60+
payload = await request.json()
61+
text_results = payload.get("text_results", "")
62+
63+
if text_results is not None:
64+
# Generate further questions from the first relevant abstract text
65+
resp = es.gpt_question_generator(text_results=text_results)
66+
67+
if resp.status_code != 200:
68+
raise HTTPException(resp.status_code, resp.text)
69+
else:
70+
return StreamingResponse(resp.iter_content(1), media_type="text/event-stream")
71+
else:
72+
raise HTTPException(400, "Unexpected Error")
73+
74+
@app.post("/question")
75+
async def question(request: Request):
76+
payload = await request.json()
77+
q = payload.get("q", "")
78+
79+
if q:
80+
resp = es.gpt_direct_answer(q)
81+
else:
82+
raise HTTPException(400, "Unexpected Error")
83+
5184
if resp.status_code != 200:
5285
raise HTTPException(resp.status_code, resp.text)
5386

5487
return StreamingResponse(resp.iter_content(1),
5588
media_type="text/event-stream")
89+
5690
# Define the static files route
5791
# Need to set html=True to serve index.html
5892
# Need to put at the end of the routes

es_gpt.py

+40-3
Original file line numberDiff line numberDiff line change
@@ -35,6 +35,10 @@ def __init__(self, index_name):
3535
# Load the cl100k_base tokenizer which is designed to work with the ada-002 model
3636
self.tokenizer = tiktoken.get_encoding("cl100k_base")
3737

38+
self.answer_generation_prompt = "Based on the context below\"\n\nContext: {}\n\n---\n\nPlease provide concise answer for this questions: {}"
39+
self.question_suggestion_prompt = "Based on the context below\"\n\nContext: {}\n\n---\n\nPlease recommend 3 more questions to be curious about {}"
40+
self.just_question_prompt = "{}{}"
41+
3842
def index(self, doc_id, doc, text):
3943
doc["embeddings_dict_list"] = self._create_emb_dict_list(text)
4044
self.es.index(index=self.index_name,
@@ -146,10 +150,17 @@ def _create_context(self, question, df):
146150
# Return the context and the length of the context
147151
return "\n\n###\n\n".join(returns), cur_len
148152

149-
def _gpt_api_call(self, query, input_token_len, context):
153+
def _gpt_api_call(self, query, input_token_len, context, call_type):
154+
if call_type == "answer":
155+
prompt = self.answer_generation_prompt
156+
elif call_type == "question":
157+
prompt = self.just_question_prompt
158+
else:
159+
prompt = self.question_suggestion_prompt
160+
150161
body = {
151162
"model": self.model_engine,
152-
"prompt": f"Based on the context below\"\n\nContext: {context}\n\n---\n\nPlease provide concise answer for this questions: {query}",
163+
"prompt": prompt.format(context, query),
153164
"max_tokens": self.model_max_tokens - input_token_len,
154165
"n": 1,
155166
"temperature": 0.5,
@@ -165,6 +176,7 @@ def _gpt_api_call(self, query, input_token_len, context):
165176
stream=True)
166177
return resp
167178

179+
168180
def gpt_answer(self, query, es_results=None, text_results=None):
169181
# Generate summaries for each search result
170182
if text_results:
@@ -204,7 +216,32 @@ def gpt_answer(self, query, es_results=None, text_results=None):
204216
else:
205217
assert False, "Must provide either es_results or text_results"
206218

207-
return self._gpt_api_call(query, input_token_len, context)
219+
return self._gpt_api_call(query, input_token_len, context, call_type="answer")
220+
221+
def gpt_question_generator(self, text_results=None):
222+
if text_results:
223+
input_token_len = len(self.tokenizer.encode(text_results))
224+
if input_token_len < self.max_tokens:
225+
context = text_results
226+
else:
227+
context = text_results[:self.max_tokens]
228+
input_token_len = self.max_tokens
229+
else:
230+
assert False, "Text results are not found"
231+
232+
return self._gpt_api_call("", input_token_len, context, call_type="suggestion")
233+
234+
def gpt_direct_answer(self, q):
235+
input_token_len = len(self.tokenizer.encode(q))
236+
if input_token_len < self.max_tokens:
237+
query = q
238+
else:
239+
query = q[:self.max_tokens]
240+
input_token_len = self.max_tokens
241+
return self._gpt_api_call(q, input_token_len, "", call_type="question")
242+
243+
244+
208245

209246

210247
# Example usage

0 commit comments

Comments
 (0)