VladyslavH committed on
Commit
8d73174
·
1 Parent(s): 2d28c5b

Return of try except

Browse files
Files changed (1) hide show
  1. app.py +38 -40
app.py CHANGED
@@ -197,47 +197,45 @@ def bot(
197
 
198
  # Use processor if images are present
199
  if processor is not None and has_images:
200
- # try:
201
- processor_history = []
202
- for msg in history:
203
- role = msg.get("role", "user")
204
- content = msg.get("content")
205
-
206
- if isinstance(content, str):
207
- processor_history.append({"role": role, "content": [{"type": "text", "text": content}]})
208
- elif isinstance(content, tuple):
209
- formatted_content = []
210
- tmp_path, _ = content
211
- image_input = {
212
- "type": "image",
213
- "url": f"{tmp_path}",
214
- }
215
-
216
- if processor_history[-1].get('role') == 'user':
217
- if isinstance(processor_history[-1].get('content'), str):
218
- previous_message = processor_history[-1].get('content')
219
- formatted_content.append({"type": "text", "text": previous_message})
 
 
 
 
 
220
  formatted_content.append(image_input)
221
- processor_history[-1]['content'] = formatted_content
222
- elif isinstance(processor_history[-1].get('content'), list):
223
- processor_history[-1]['content'].append(image_input)
224
- else:
225
- formatted_content.append(image_input)
226
- processor_history.append({"role": role, "content": formatted_content})
227
-
228
- print(processor_history)
229
-
230
- model_inputs = processor.apply_chat_template(
231
- processor_history,
232
- tokenize=True,
233
- return_dict=True,
234
- return_tensors="pt",
235
- add_generation_prompt=True,
236
- ).to(model.device)
237
- print("Using processor for vision input")
238
- # except Exception as exc:
239
- # print(f"Processor failed: {exc}")
240
- # model_inputs = None
241
 
242
  # Fallback to tokenizer for text-only
243
  if model_inputs is None:
 
197
 
198
  # Use processor if images are present
199
  if processor is not None and has_images:
200
+ try:
201
+ processor_history = []
202
+ for msg in history:
203
+ role = msg.get("role", "user")
204
+ content = msg.get("content")
205
+
206
+ if isinstance(content, str):
207
+ processor_history.append({"role": role, "content": [{"type": "text", "text": content}]})
208
+ elif isinstance(content, tuple):
209
+ formatted_content = []
210
+ tmp_path, _ = content
211
+ image_input = {
212
+ "type": "image",
213
+ "url": f"{tmp_path}",
214
+ }
215
+
216
+ if processor_history[-1].get('role') == 'user':
217
+ if isinstance(processor_history[-1].get('content'), str):
218
+ previous_message = processor_history[-1].get('content')
219
+ formatted_content.append({"type": "text", "text": previous_message})
220
+ formatted_content.append(image_input)
221
+ processor_history[-1]['content'] = formatted_content
222
+ elif isinstance(processor_history[-1].get('content'), list):
223
+ processor_history[-1]['content'].append(image_input)
224
+ else:
225
  formatted_content.append(image_input)
226
+ processor_history.append({"role": role, "content": formatted_content})
227
+
228
+ model_inputs = processor.apply_chat_template(
229
+ processor_history,
230
+ tokenize=True,
231
+ return_dict=True,
232
+ return_tensors="pt",
233
+ add_generation_prompt=True,
234
+ ).to(model.device)
235
+ print("Using processor for vision input")
236
+ except Exception as exc:
237
+ print(f"Processor failed: {exc}")
238
+ model_inputs = None
 
 
 
 
 
 
 
239
 
240
  # Fallback to tokenizer for text-only
241
  if model_inputs is None: