@@ -33,11 +33,7 @@ def run_qa_agent_on_image_no_memory(input_params):
     }
     send_job_status(status_variables)
 
-    # 1 : load the document
-    global _file_content
-    global _current_file_name
-
-    bucket_name = os.environ['INPUT_BUCKET']
+
     filename = input_params['filename']
     image_url = input_params['presignedurl']
     logger.info(f"Generating response to question for file {filename}")
@@ -52,15 +48,8 @@ def run_qa_agent_on_image_no_memory(input_params):
         send_job_status(status_variables)
         return
 
-    # 2 : run the question
     streaming = input_params.get("streaming", False)
-    # TODO use streaming in response
-    callback_manager = [StreamingCallbackHandler(status_variables)] if streaming else None
-
-    #_qa_llm = get_llm(callback_manager,"HuggingFaceM4/idefics-80b-instruct")
-    #TODO : Update get_llm to support sagemaker as provider,
-    # this needs to be updated with @alain changes
-    print(f' get LLM Ideficsllm')
+    callback_manager = [StreamingCallbackHandler(status_variables)] if streaming else None
     _qa_llm = Ideficsllm.sagemakerendpoint_llm("idefics")
 
     if (_qa_llm is None):
@@ -71,8 +60,6 @@ def run_qa_agent_on_image_no_memory(input_params):
         send_job_status(status_variables)
         return status_variables
 
-    # 3: run LLM
-    #template="User:{question}<end_of_utterance>\nAssistant:"
     template = """\n\nUser: {question}<end_of_utterance>
 \n\nAssistant:"""
     prompt = PromptTemplate(template=template, input_variables=["image", "question"])
@@ -82,7 +69,6 @@ def run_qa_agent_on_image_no_memory(input_params):
     logger.info(f'image is: {filename}')
     logger.info(f'decoded_question is: {decoded_question}')
     tmp = chain.predict(image=image_url, question=decoded_question)
-    #answer = tmp.removeprefix(' ')
     answer = tmp.split("Assistant:", 1)[1]
 
     logger.info(f'tmp is: {tmp}')
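For context, a minimal sketch of the call path this change settles on, assuming LangChain's classic PromptTemplate/LLMChain API. Ideficsllm and its sagemakerendpoint_llm helper come from this repo; the LLMChain construction is not shown in the hunks above and is inferred from the chain.predict call, so treat it as an assumption:

# Sketch only: Ideficsllm is this repo's SageMaker wrapper; its import
# path is project-specific and omitted here.
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate

# Same IDEFICS chat template as in the diff: the model receives the image
# URL and the user question, then completes after "Assistant:".
template = """\n\nUser: {question}<end_of_utterance>
\n\nAssistant:"""
prompt = PromptTemplate(template=template, input_variables=["image", "question"])

# SageMaker-hosted IDEFICS endpoint named "idefics", per the diff.
_qa_llm = Ideficsllm.sagemakerendpoint_llm("idefics")
chain = LLMChain(llm=_qa_llm, prompt=prompt)  # assumed; only chain.predict appears above

tmp = chain.predict(image=image_url, question=decoded_question)
# The raw completion echoes the prompt, so keep only the text after "Assistant:".
answer = tmp.split("Assistant:", 1)[1]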