Skip to content

Commit 34a5ba1

Browse files
author
Dinesh Sajwan
committed
feat(visualqa): added default value for verbose
1 parent 6220e48 commit 34a5ba1

File tree

2 files changed

+6
-3
lines changed

2 files changed

+6
-3
lines changed

lambda/aws-qa-appsync-opensearch/question_answering/src/qa_agent/doc_qa.py

+4-2
Original file line numberDiff line numberDiff line change
@@ -124,8 +124,9 @@ def run_qa_agent_rag_no_memory(input_params):
124124
template = """\n\nHuman: {context}
125125
Answer from this text: {question}
126126
\n\nAssistant:"""
127+
verbose = input_params.get('verbose',False)
127128
prompt = PromptTemplate(template=template, input_variables=["context", "question"])
128-
chain = LLMChain(llm=_qa_llm, prompt=prompt, verbose=input_params['verbose'])
129+
chain = LLMChain(llm=_qa_llm, prompt=prompt, verbose=verbose)
129130

130131
try:
131132
tmp = chain.predict(context=source_documents, question=decoded_question)
@@ -219,8 +220,9 @@ def run_qa_agent_from_single_document_no_memory(input_params):
219220
template = """\n\nHuman: {context}
220221
Answer from this text: {question}
221222
\n\nAssistant:"""
223+
verbose = input_params.get('verbose',False)
222224
prompt = PromptTemplate(template=template, input_variables=["context", "question"])
223-
chain = LLMChain(llm=_qa_llm, prompt=prompt, verbose=input_params['verbose'])
225+
chain = LLMChain(llm=_qa_llm, prompt=prompt, verbose=verbose)
224226

225227
try:
226228
logger.info(f'file content is: {_file_content}')

lambda/aws-qa-appsync-opensearch/question_answering/src/qa_agent/image_qa.py

+2-1
Original file line numberDiff line numberDiff line change
@@ -228,8 +228,9 @@ def generate_vision_answer_sagemaker(_qa_llm,input_params,decoded_question,statu
228228

229229
template = """\n\nUser: {question}![]({image})<end_of_utterance>
230230
\n\nAssistant:"""
231+
verbose = input_params.get('verbose',False)
231232
prompt = PromptTemplate(template=template, input_variables=["image", "question"])
232-
chain = LLMChain(llm=_qa_llm, prompt=prompt, verbose=input_params['verbose'])
233+
chain = LLMChain(llm=_qa_llm, prompt=prompt, verbose=verbose)
233234

234235
try:
235236
logger.info(f'decoded_question is: {decoded_question}')

0 commit comments

Comments (0)