# OfflineArcher / Tasks.py
from lightning import LightningDataModule
import torch.utils.data as data
from Dataset import TrajectoryDataset, EmptyDataset
import random


def rsa_reward(num_feature, min_turns, conv_turn, gamma=2.0):
"""
Nonlinear normalization function, returns u ∈ [0, 1]
- num_feature = min_turns -> u = 1
- num_feature = conv_turn -> u = 0
- The closer to min_turns, the slower it approaches 1
"""
if num_feature == min_turns:
return 1
# Normalize to [0,1]
u = (conv_turn - num_feature) / (min_turns - num_feature)
# Keep direction (support num_feature < min_turns)
return max(0, min(1, u**gamma))
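

# Worked example of the shaping above (illustrative numbers, not from the data):
# with a budget of num_feature = 10, an optimum of min_turns = 6, and a game
# that lasted conv_turn = 8, u = (8 - 10) / (6 - 10) = 0.5, so the shaped
# reward is 0.5 ** 2 = 0.25 at the default gamma.
#
#     rsa_reward(10, 6, 8)   # -> 0.25
#     rsa_reward(10, 6, 6)   # -> 1 (optimal-length game)
#     rsa_reward(10, 6, 10)  # -> 0 (the whole budget was used)
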
class Task(LightningDataModule):
def __init__(self, batch_size: int, n_traj_eval: int, **kwargs):
super().__init__(**kwargs)
self.batch_size = batch_size
self.eval_batch_size = self.batch_size
self.n_traj_eval = n_traj_eval
# Set Defaults
self.shuffle = True
self.drop_last = True # skips last batch to make sure gradient accumulation works as intended
def setup(self, stage: str):
raise NotImplementedError
def train_dataloader(self):
return data.DataLoader(
dataset=self.dataset,
batch_size=self.batch_size,
shuffle=self.shuffle,
drop_last=self.drop_last,
num_workers=8,
pin_memory=True,
persistent_workers=True,
)
def val_dataloader(self):
return data.DataLoader(
dataset=EmptyDataset(length=self.n_traj_eval),
batch_size=self.eval_batch_size,
pin_memory=True,
)
def get_eval_log(self, **kwargs):
pass
def teardown(self, stage: str):
        # Used to clean up when the run is finished.
pass
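

# A minimal sketch of the subclass contract (illustrative only, not part of the
# original training setup): a concrete Task just needs to populate self.dataset
# inside setup(); the shared train_dataloader/val_dataloader above then work
# unchanged.
#
#     class MyOfflineTask(Task):
#         def setup(self, stage: str):
#             self.dataset = TrajectoryDataset()  # fill from your offline data
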
class TwentyQuestions(Task):
def __init__(self, batch_size: int, n_traj_eval: int, word_list=None, **kwargs):
super().__init__(batch_size, n_traj_eval, **kwargs)
self.word_list = word_list
self.max_horizon = 20
def setup(self, stage: str):
self.dataset = self.read_data()
self.dataset.check_consistency()
        print(
            "\n *** Dataset trimming is now disabled. Please call the trimming subroutine."
        )
def read_data(self):
        import json

        # Use a context manager so the file handle is closed; TrajectoryDataset
        # is already imported at module level.
        with open("datasets/20q_train.json") as f:
            data = json.load(f)
        dataset = TrajectoryDataset()
        for game in data:
            assert len(game["lines"]) <= 20
            history = "Questions:\n"  # the consistency assertion also holds with history = ''
            for idx, interaction in enumerate(game["lines"]):
                yes_answer = interaction.endswith(" Yes.")
                no_answer = interaction.endswith(" No.")
                assert yes_answer or no_answer
                observation = history
                # We are done once we reach the last interaction of the game;
                # comparing indices avoids a false positive when the same line
                # appears twice in one game.
                done = idx == len(game["lines"]) - 1
                reward = 0 if done and game["correct"] else -1
                # Strip the oracle's answer suffix to recover the question (the action).
                if yes_answer:
                    action = interaction[: -len(" Yes.")]
                else:
                    action = interaction[: -len(" No.")]
                history += interaction + "\n"
                dataset.append_observation_action_reward(observation, action, reward)
            dataset.append_terminal_observation(
                history,
                trajectory_info={"correct": game["correct"], "word": game["word"]},
            )
dataset.check_consistency()
return dataset
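
    # Shape of the observation string built in read_data above, after two
    # hypothetical interactions (the question texts are invented; the
    # "Questions:" prefix and the " Yes."/" No." suffixes come from the data):
    #
    #     Questions:
    #     Is it an animal? Yes.
    #     Is it smaller than a breadbox? No.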
class RSAGame(Task):
def __init__(
self,
base_model: str,
batch_size: int,
n_traj_eval: int,
word_list=None,
**kwargs,
):
super().__init__(batch_size, n_traj_eval, **kwargs)
self.base_model = base_model
self.word_list = word_list
self.max_horizon = 20
def setup(self, stage: str):
self.dataset = self.read_data()
self.dataset.check_consistency()
        print(
            "\n *** Dataset trimming is now disabled. Please call the trimming subroutine."
        )
def read_data(self):
        import json

        from rsa_game import get_game_outcome, randomly_convert_game_history_to_query
with open(
f"rsa/{self.base_model}_sampling_all_targets_results.json"
) as f:
data = json.load(f)
        with open("rsa/reasoning_dialogs.json") as f:
            # Fold the reasoning dialogs into the same record format as the
            # sampled games above.
            for value in json.load(f).values():
                target = value["target_referent"].split(" ")
                data.append(
                    {
                        "history": value["dialog"],
                        "target": target,
                        "min_turns": len(value["dialog"]),
                        "max_turns": len(target) * 2,
                        "object_list": value["referent_set"],
                    }
                )
dataset = TrajectoryDataset()
for game in random.sample(data, 3200):
is_valid = True
for message in game["history"]:
if message["content"] == "":
is_valid = False
break
if not is_valid:
continue
outcome, history_length = get_game_outcome(
game["history"], game["target"], game["min_turns"]
)
            if outcome != "game wins":
                continue
            # Shaped trajectory reward: 1 when the game ends at
            # history_length == 2 * min_turns, decaying toward 0 as it
            # approaches the full budget of 2 * len(target) messages.
            reward = rsa_reward(
                len(game["target"]) * 2, game["min_turns"] * 2, history_length
            )
            if reward == 0:
                continue
            for idx, interaction in enumerate(game["history"][:history_length]):
                query = randomly_convert_game_history_to_query(
                    game["history"][:idx],
                    target=game["target"],
                    min_turns=game["min_turns"],
                    object_list=game["object_list"],
                )
                action = interaction["content"]
                # The final exchange (last two messages) counts as terminal.
                done = idx >= history_length - 2
                # Terminal steps get 0; earlier steps carry the shaped game
                # reward. A separate name keeps the game-level reward intact.
                step_reward = 0 if done else reward
                dataset.append_observation_action_reward(query, action, step_reward)
history = randomly_convert_game_history_to_query(
game["history"],
target=game["target"],
min_turns=game["min_turns"],
object_list=game["object_list"],
)
dataset.append_terminal_observation(
history,
trajectory_info={
"object_list": game["object_list"],
"target": game["target"],
},
)
print("The length of the dataset is: ", len(dataset))
dataset.check_consistency()
return dataset
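
    # Usage sketch (hypothetical values; assumes the rsa/ result files for the
    # chosen base model exist on disk):
    #
    #     task = RSAGame(base_model="Qwen3-14B", batch_size=8, n_traj_eval=16)
    #     task.setup(stage="fit")
    #     loader = task.train_dataloader()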
class WordTaboo(Task):
def __init__(
self,
base_model: str,
batch_size: int,
n_traj_eval: int,
word_list=None,
**kwargs,
):
super().__init__(batch_size, n_traj_eval, **kwargs)
self.base_model = base_model
self.word_list = word_list
self.max_horizon = 20
def setup(self, stage: str):
self.dataset = self.read_data()
self.dataset.check_consistency()
        print(
            "\n *** Dataset trimming is now disabled. Please call the trimming subroutine."
        )
def read_data(self):
        import json

        from word_taboo import get_game_outcome, randomly_convert_game_history_to_query
with open(
f"wordtaboo/{self.base_model}_sampling_all_targets_results.json", "r"
) as f:
data = json.load(f)
with open(
"wordtaboo/llm_game_top_test_results.json", "r"
) as f:
data.extend(json.load(f))
dataset = TrajectoryDataset()
for game in data:
is_valid = True
for message in game["history"]:
if message["content"] == "":
is_valid = False
break
if not is_valid:
continue
outcome, history_length = get_game_outcome(
game["history"], game["target"], game["max_turns"]
)
            if outcome == "defender wins":
                winner = "defender"
            elif outcome == "attacker wins":
                # Downsample attacker wins: keep ~15% for Qwen3-14B and ~10%
                # otherwise (e.g. Llama3).
                keep_prob = 0.15 if self.base_model == "Qwen3-14B" else 0.1
                if random.random() > keep_prob:
                    continue
                winner = "attacker"
            else:
                continue
            for idx, interaction in enumerate(game["history"][:history_length]):
                # Only train on the winning side's moves.
                if interaction["role"] != winner:
                    continue
                query = randomly_convert_game_history_to_query(
                    game["history"][:idx],
                    target=game["target"],
                    max_turns=game["max_turns"],
                )
                action = interaction["content"]
                # The final exchange (last two messages) counts as terminal;
                # terminal steps get reward 0, earlier winning moves get 1.
                done = idx >= history_length - 2
                reward = 0 if done else 1
                dataset.append_observation_action_reward(query, action, reward)
            history = randomly_convert_game_history_to_query(
                game["history"], target=game["target"], max_turns=game["max_turns"]
            )
dataset.append_terminal_observation(
history, trajectory_info={"target": game["target"]}
)
print("The length of the dataset is: ", len(dataset))
dataset.check_consistency()
return dataset
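
    # Note on the reward scheme: unlike RSAGame's shaped rsa_reward, WordTaboo
    # attaches a flat reward of 1 to every non-terminal move by the winning
    # side and 0 to the terminal exchange.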
class StrategicDialogue(Task):
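    """Combined task: word-taboo games plus RSA reference games in one dataset."""
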
def __init__(
self,
base_model: str,
batch_size: int,
n_traj_eval: int,
word_list=None,
**kwargs,
):
super().__init__(batch_size, n_traj_eval, **kwargs)
self.base_model = base_model
self.word_list = word_list
self.max_horizon = 20
def setup(self, stage: str):
self.dataset = self.read_data()
self.dataset.check_consistency()
        print(
            "\n *** Dataset trimming is now disabled. Please call the trimming subroutine."
        )
def read_data(self):
        import json

        from word_taboo import get_game_outcome, randomly_convert_game_history_to_query
with open(
f"wordtaboo/{self.base_model}_sampling_all_targets_results.json", "r"
) as f:
data = json.load(f)
with open(
"wordtaboo/llm_game_top_test_results.json", "r"
) as f:
data.extend(json.load(f))
dataset = TrajectoryDataset()
for game in data:
is_valid = True
for message in game["history"]:
if message["content"] == "":
is_valid = False
break
if not is_valid:
continue
outcome, history_length = get_game_outcome(
game["history"], game["target"], game["max_turns"]
)
            if outcome == "defender wins":
                winner = "defender"
            elif outcome == "attacker wins":
                # Downsample attacker wins: keep ~15% for Qwen3-14B and ~10%
                # otherwise (e.g. Llama3).
                keep_prob = 0.15 if self.base_model == "Qwen3-14B" else 0.1
                if random.random() > keep_prob:
                    continue
                winner = "attacker"
            else:
                continue
            for idx, interaction in enumerate(game["history"][:history_length]):
                # Only train on the winning side's moves.
                if interaction["role"] != winner:
                    continue
                query = randomly_convert_game_history_to_query(
                    game["history"][:idx],
                    target=game["target"],
                    max_turns=game["max_turns"],
                )
                action = interaction["content"]
                # The final exchange (last two messages) counts as terminal;
                # terminal steps get reward 0, earlier winning moves get 1.
                done = idx >= history_length - 2
                reward = 0 if done else 1
                dataset.append_observation_action_reward(query, action, reward)
            history = randomly_convert_game_history_to_query(
                game["history"], target=game["target"], max_turns=game["max_turns"]
            )
dataset.append_terminal_observation(
history, trajectory_info={"target": game["target"]}
)
        # Rebind the helpers to their RSA variants for the second data source.
        from rsa_game import get_game_outcome, randomly_convert_game_history_to_query
with open(
f"rsa/{self.base_model}_sampling_all_targets_results.json"
) as f:
data = json.load(f)
        with open("rsa/reasoning_dialogs.json") as f:
            # Fold the reasoning dialogs into the same record format as the
            # sampled games above.
            for value in json.load(f).values():
                target = value["target_referent"].split(" ")
                data.append(
                    {
                        "history": value["dialog"],
                        "target": target,
                        "min_turns": len(value["dialog"]),
                        "max_turns": len(target) * 2,
                        "object_list": value["referent_set"],
                    }
                )
for game in random.sample(data, 3200):
is_valid = True
for message in game["history"]:
if message["content"] == "":
is_valid = False
break
if not is_valid:
continue
outcome, history_length = get_game_outcome(
game["history"], game["target"], game["min_turns"]
)
            if outcome != "game wins":
                continue
            # Shaped trajectory reward, computed as in RSAGame.read_data above.
            reward = rsa_reward(
                len(game["target"]) * 2, game["min_turns"] * 2, history_length
            )
            for idx, interaction in enumerate(game["history"][:history_length]):
                query = randomly_convert_game_history_to_query(
                    game["history"][:idx],
                    target=game["target"],
                    min_turns=game["min_turns"],
                    object_list=game["object_list"],
                )
                action = interaction["content"]
                # The final exchange (last two messages) counts as terminal.
                done = idx >= history_length - 2
                # Terminal steps get 0; earlier steps carry the shaped game
                # reward. A separate name keeps the game-level reward intact.
                step_reward = 0 if done else reward
                dataset.append_observation_action_reward(query, action, step_reward)
history = randomly_convert_game_history_to_query(
game["history"],
target=game["target"],
min_turns=game["min_turns"],
object_list=game["object_list"],
)
dataset.append_terminal_observation(
history,
trajectory_info={
"object_list": game["object_list"],
"target": game["target"],
},
)
print("The length of the dataset is: ", len(dataset))
dataset.check_consistency()
return dataset
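

if __name__ == "__main__":
    # Minimal smoke test, a sketch only: it assumes datasets/20q_train.json is
    # available relative to the working directory.
    task = TwentyQuestions(batch_size=4, n_traj_eval=2)
    task.setup(stage="fit")
    print(f"Loaded a dataset of length {len(task.dataset)}.")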