Spaces:
Sleeping
Sleeping
Update mmlu_pro_eval_adapted.py
Browse files- mmlu_pro_eval_adapted.py +1 -1
mmlu_pro_eval_adapted.py
CHANGED
|
@@ -162,7 +162,7 @@ def batch_inference(llm, sampling_params, inference_batch, tokenizer):
|
|
| 162 |
def batch_inference_debug_mode(llm, sampling_params, inference_batch, tokenizer):
|
| 163 |
start = time.time()
|
| 164 |
outputs = llm.generate(inference_batch, sampling_params)
|
| 165 |
-
logging.info(str(len(inference_batch)) + "
|
| 166 |
response_batch = []
|
| 167 |
pred_batch = []
|
| 168 |
input_token_counts = []
|
|
|
|
| 162 |
def batch_inference_debug_mode(llm, sampling_params, inference_batch, tokenizer):
|
| 163 |
start = time.time()
|
| 164 |
outputs = llm.generate(inference_batch, sampling_params)
|
| 165 |
+
logging.info("Batch of size: " + str(len(inference_batch)) + ". Time taken: " + str(time.time() - start))
|
| 166 |
response_batch = []
|
| 167 |
pred_batch = []
|
| 168 |
input_token_counts = []
|