Upload folder using huggingface_hub

- README.md +6 -0
- download_android_control.ipynb +1 -37
README.md
CHANGED
@@ -113,6 +113,12 @@ Original dataset: [Google Research Android Control](https://github.com/google-research/google-research/tree/master/android_control)
 
 The Android Control dataset was created by Google Research for advancing mobile UI understanding and automation research.
 
+### License
+
+This dataset is derived from Google Research's Android Control dataset, which is licensed under the [Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0). The reformatted version for LLaMA-Factory maintains the same Apache 2.0 license terms.
+
+Copyright for the original dataset belongs to Google LLC. Any modifications or reformatting for LLaMA-Factory compatibility are also provided under the Apache License 2.0.
+
 ## Notes
 
 - The images are referenced with relative paths starting with `and_ctrl/`
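For reference, each record in `and_ctrl.json` pairs one screenshot with one step instruction and the mapped action, in LLaMA-Factory's messages/images layout. A minimal sketch of a single record, with hypothetical instruction text, coordinates, and file name (the shape follows `export_messages` in the notebook below):

```python
# One record from and_ctrl.json; the instruction, coordinates,
# and episode/step numbers here are hypothetical.
record = {
    "messages": [
        {"role": "user", "content": "<image>\nOpen the Settings app"},  # step instruction
        # The assistant turn is a Python-dict string (single quotes), not JSON.
        {"role": "assistant", "content": "{'type': 'touch', 'x': 540, 'y': 1200}"},
    ],
    # Relative path, resolved against the dataset root (see Notes above).
    "images": ["and_ctrl/out_episode_1_step_000.png"],
}
```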
download_android_control.ipynb
CHANGED
@@ -28,7 +28,7 @@
 "outputId": "64381a1a-5e3d-4ce4-92dd-6a11e43f61ed"
 },
 "outputs": [],
-"source": "# -*- coding: utf-8 -*-\nimport io\nimport json\nfrom pathlib import Path\nfrom typing import Any, Dict, List, Optional, Tuple\n\nimport tensorflow as tf\nfrom PIL import Image\n\n# =========================\n# TF.Feature -> Python value restoration\n# =========================\ndef feature_to_list_bytes(f: tf.train.Feature) -> List[bytes]:\n    return list(f.bytes_list.value)\n\ndef feature_to_list_int(f: tf.train.Feature) -> List[int]:\n    return list(f.int64_list.value)\n\ndef feature_to_list_float(f: tf.train.Feature) -> List[float]:\n    return list(f.float_list.value)\n\ndef get_feature(example: tf.train.Example, key: str) -> Optional[tf.train.Feature]:\n    fmap = example.features.feature\n    return fmap[key] if key in fmap else None\n\n# =========================\n# Extract coordinates from an action (dict) (normalized/pixel both supported)\n# =========================\ndef extract_points_from_action(\n    action: Dict[str, Any],\n    img_w: int,\n    img_h: int\n) -> List[Tuple[int, int]]:\n    \"\"\"\n    Return: [(x_px, y_px), ...]\n    - length 1 for a single point (tap)\n    - 2 points (start/end) for drag/swipe\n    - [] if none\n    \"\"\"\n    pts: List[Tuple[int, int]] = []\n\n    def to_px(x: float, y: float, normalized: Optional[bool]=None) -> Tuple[int,int]:\n        if normalized is None:\n            normalized = (0.0 <= x <= 1.0 and 0.0 <= y <= 1.0)\n        if normalized:\n            return (int(round(x * img_w)), int(round(y * img_h)))\n        else:\n            return (int(round(x)), int(round(y)))\n\n    # 1) Top-level x, y\n    if \"x\" in action and \"y\" in action:\n        pts.append(to_px(float(action[\"x\"]), float(action[\"y\"]), None))\n\n    # 2) point / click / tap / press / long_press / long_tap\n    for k in [\"point\", \"click\", \"tap\", \"press\", \"long_press\", \"long_tap\"]:\n        if k in action and isinstance(action[k], dict):\n            px = action[k]\n            if \"x\" in px and \"y\" in px:\n                pts.append(to_px(float(px[\"x\"]), float(px[\"y\"]), None))\n        if k in action and isinstance(action[k], list):\n            for px in action[k]:\n                if isinstance(px, dict) and \"x\" in px and \"y\" in px:\n                    pts.append(to_px(float(px[\"x\"]), float(px[\"y\"]), None))\n\n    # 3) from/to, start/end\n    for a, b in [(\"from\", \"to\"), (\"start\", \"end\")]:\n        if a in action and b in action and isinstance(action[a], dict) and isinstance(action[b], dict):\n            ax, ay = action[a].get(\"x\"), action[a].get(\"y\")\n            bx, by = action[b].get(\"x\"), action[b].get(\"y\")\n            if ax is not None and ay is not None and bx is not None and by is not None:\n                pts.append(to_px(float(ax), float(ay), None))\n                pts.append(to_px(float(bx), float(by), None))\n\n    # 4) start_x/start_y/end_x/end_y\n    cand = {\"start_x\": None, \"start_y\": None, \"end_x\": None, \"end_y\": None}\n    found = False\n    for ck in cand.keys():\n        if ck in action:\n            cand[ck] = float(action[ck])\n            found = True\n    if found and cand[\"start_x\"] is not None and cand[\"start_y\"] is not None:\n        pts.append(to_px(cand[\"start_x\"], cand[\"start_y\"], None))\n        if cand[\"end_x\"] is not None and cand[\"end_y\"] is not None:\n            pts.append(to_px(cand[\"end_x\"], cand[\"end_y\"], None))\n\n    # Remove duplicates\n    uniq: List[Tuple[int,int]] = []\n    seen = set()\n    for p in pts:\n        if p not in seen:\n            uniq.append(p)\n            seen.add(p)\n    return uniq\n\n# =========================\n# Episode parsing\n# =========================\ndef load_episode_from_example(ex: tf.train.Example) -> Dict[str, Any]:\n    f = ex.features.feature\n\n    screenshots_bytes = feature_to_list_bytes(f[\"screenshots\"])\n    a11y_bytes_list = feature_to_list_bytes(f[\"accessibility_trees\"])\n    widths = feature_to_list_int(f[\"screenshot_widths\"])\n    heights = feature_to_list_int(f[\"screenshot_heights\"])\n\n    actions_json_list = [b.decode(\"utf-8\") for b in feature_to_list_bytes(f[\"actions\"])]\n    step_insts = [b.decode(\"utf-8\") for b in feature_to_list_bytes(f[\"step_instructions\"])]\n    actions = [json.loads(s) for s in actions_json_list]\n\n    goal = f[\"goal\"].bytes_list.value[0].decode(\"utf-8\")\n    episode_id = int(f[\"episode_id\"].int64_list.value[0]) if f[\"episode_id\"].int64_list.value else int(\n        f[\"episode_id\"].bytes_list.value[0].decode(\"utf-8\")\n    )\n\n    assert len(screenshots_bytes) == len(widths) == len(heights), \"screenshot/width/height length mismatch\"\n    assert len(actions) == len(step_insts) == (len(screenshots_bytes) - 1), \\\n        \"actions/step_instructions must equal screenshots-1\"\n\n    return {\n        \"episode_id\": episode_id,\n        \"goal\": goal,\n        \"screenshots\": screenshots_bytes,\n        \"a11y\": a11y_bytes_list,\n        \"widths\": widths,\n        \"heights\": heights,\n        \"actions\": actions,\n        \"step_instructions\": step_insts,\n    }\n\n# =========================\n# Action mapping & helpers\n# =========================\ndef _center_xy(w: int, h: int) -> Tuple[int,int]:\n    return (int(round(w/2)), int(round(h/2)))\n\ndef _norm_dir(d: Optional[str]) -> str:\n    if not d: return \"down\"\n    d = str(d).lower()\n    if d in [\"up\",\"down\",\"left\",\"right\"]:\n        return d\n    if d in [\"u\",\"top\"]: return \"up\"\n    if d in [\"d\",\"bottom\"]: return \"down\"\n    if d in [\"l\"]: return \"left\"\n    if d in [\"r\"]: return \"right\"\n    return \"down\"\n\ndef map_action(\n    action: Dict[str, Any],\n    w: int,\n    h: int,\n    pts: List[Tuple[int,int]],\n) -> Optional[Dict[str, Any]]:\n    \"\"\"\n    Allowed mappings:\n    click -> {\"type\": \"touch\", \"x\": <x>, \"y\": <y>}\n    long_press -> {\"type\": \"long_touch\", \"x\": <x>, \"y\": <y>}\n    input_text -> {\"type\": \"set_text\", \"text\": \"...\", \"x\": <x>, \"y\": <y>}\n    scroll -> {\"type\": \"scroll\", \"direction\": \"up|down|left|right\", \"x\": <center_x>, \"y\": <center_y>}\n    navigate_home -> {\"type\": \"press\", \"key\": \"home\"}\n    navigate_back -> {\"type\": \"press\", \"key\": \"back\"}\n    \"\"\"\n    atype = (action.get(\"action_type\") or action.get(\"type\") or action.get(\"action\") or \"\").lower()\n    x, y = (pts[0] if pts else _center_xy(w, h))\n\n    if atype in [\"click\", \"tap\", \"press\", \"click_view\"]:\n        return {\"type\": \"touch\", \"x\": x, \"y\": y}\n\n    if atype in [\"long_press\", \"long_tap\", \"long_click\"]:\n        return {\"type\": \"long_touch\", \"x\": x, \"y\": y}\n\n    if atype in [\"input_text\", \"set_text\", \"type_text\", \"enter_text\", \"text\"]:\n        text = action.get(\"text\") or action.get(\"input_text\") or action.get(\"value\") or \"\"\n        return {\"type\": \"set_text\", \"text\": str(text), \"x\": x, \"y\": y}\n\n    if atype in [\"scroll\", \"swipe\"]:\n        if len(pts) >= 2:\n            cx = (pts[0][0] + pts[1][0]) // 2\n            cy = (pts[0][1] + pts[1][1]) // 2\n        else:\n            cx, cy = _center_xy(w, h)\n        return {\"type\": \"scroll\", \"direction\": _norm_dir(action.get(\"direction\")), \"x\": cx, \"y\": cy}\n\n    if atype in [\"navigate_home\", \"home\", \"press_home\"]:\n        return {\"type\": \"press\", \"key\": \"home\"}\n\n    if atype in [\"navigate_back\", \"back\", \"press_back\"]:\n        return {\"type\": \"press\", \"key\": \"back\"}\n\n    # Others (open_app, wait, etc.) → not saved\n    return None\n\ndef save_clean_image(img_bytes: bytes, episode_id: int, step_idx: int, base_dir: str = \"and_ctrl\") -> str:\n    \"\"\"\n    out_episode_{EP}_step_{STEP:03d}.png (no overlay)\n    \"\"\"\n    Path(base_dir).mkdir(parents=True, exist_ok=True)\n    fname = f\"out_episode_{episode_id}_step_{step_idx:03d}.png\"\n    fpath = Path(base_dir) / fname\n    Image.open(io.BytesIO(img_bytes)).convert(\"RGB\").save(fpath)\n    # Return just the relative path from base_dir\n    return f\"{base_dir}/{fname}\"\n\n# =========================\n# Export messages JSON\n# =========================\ndef export_messages(ds, limit_episodes: int = 5, out_json: str = \"and_ctrl.json\", image_dir: str = \"and_ctrl\"):\n    \"\"\"\n    From the given TFRecordDataset, collect only the allowed actions from the steps\n    of the first N episodes and save them to and_ctrl.json in the requested format.\n    \"\"\"\n    all_items: List[Dict[str, Any]] = []\n    ep_cnt = 0\n\n    for raw in ds:\n        ex = tf.train.Example()\n        ex.ParseFromString(raw.numpy())\n        ep = load_episode_from_example(ex)\n\n        ep_id = ep[\"episode_id\"]\n        for i, (action, inst) in enumerate(zip(ep[\"actions\"], ep[\"step_instructions\"])):\n            w, h = ep[\"widths\"][i], ep[\"heights\"][i]\n            img_bytes = ep[\"screenshots\"][i]\n            pts = extract_points_from_action(action, w, h)\n            mapped = map_action(action, w, h, pts)\n            if not mapped:\n                continue  # skip\n\n            img_path = save_clean_image(img_bytes, ep_id, i, base_dir=image_dir)\n\n            all_items.append({\n                \"messages\": [\n                    {\"role\": \"user\", \"content\": f\"<image>\\n{inst}\"},\n                    # Stored as a Python dict string (single quotes), not JSON\n                    {\"role\": \"assistant\", \"content\": str(mapped)}\n                ],\n                \"images\": [img_path]\n            })\n\n        ep_cnt += 1\n        if ep_cnt >= limit_episodes:\n            break\n\n    with open(out_json, \"w\", encoding=\"utf-8\") as f:\n        json.dump(all_items, f, ensure_ascii=False, indent=2)\n\n    print(f\"[DONE] episodes processed: {ep_cnt}, items saved: {len(all_items)} → {out_json}\")\n\n# =========================\n# Execution entry point\n# =========================\ndef main():\n    # Adjust path pattern if necessary\n    filenames = tf.io.gfile.glob('gs://gresearch/android_control/android_control*')\n    ds = tf.data.TFRecordDataset(filenames, compression_type='GZIP')\n    export_messages(ds, limit_episodes=50, out_json=\"and_ctrl.json\", image_dir=\"and_ctrl\")\n\nif __name__ == \"__main__\":\n    main()"
+"source": "# -*- coding: utf-8 -*-\nimport io\nimport json\nfrom pathlib import Path\nfrom typing import Any, Dict, List, Optional, Tuple\n\nimport tensorflow as tf\nfrom PIL import Image\n\n# =========================\n# TF.Feature -> Python value conversion\n# =========================\ndef feature_to_list_bytes(f: tf.train.Feature) -> List[bytes]:\n    return list(f.bytes_list.value)\n\ndef feature_to_list_int(f: tf.train.Feature) -> List[int]:\n    return list(f.int64_list.value)\n\ndef feature_to_list_float(f: tf.train.Feature) -> List[float]:\n    return list(f.float_list.value)\n\ndef get_feature(example: tf.train.Example, key: str) -> Optional[tf.train.Feature]:\n    fmap = example.features.feature\n    return fmap[key] if key in fmap else None\n\n# =========================\n# Extract coordinates from action (supports both normalized/pixel)\n# =========================\ndef extract_points_from_action(\n    action: Dict[str, Any],\n    img_w: int,\n    img_h: int\n) -> List[Tuple[int, int]]:\n    \"\"\"\n    Returns: [(x_px, y_px), ...]\n    - Single point (tap) returns length 1\n    - Drag/swipe returns 2 points (start/end)\n    - Empty list if no points\n    \"\"\"\n    pts: List[Tuple[int, int]] = []\n\n    def to_px(x: float, y: float, normalized: Optional[bool]=None) -> Tuple[int,int]:\n        if normalized is None:\n            normalized = (0.0 <= x <= 1.0 and 0.0 <= y <= 1.0)\n        if normalized:\n            return (int(round(x * img_w)), int(round(y * img_h)))\n        else:\n            return (int(round(x)), int(round(y)))\n\n    # 1) Top-level x, y\n    if \"x\" in action and \"y\" in action:\n        pts.append(to_px(float(action[\"x\"]), float(action[\"y\"]), None))\n\n    # 2) point / click / tap / press / long_press / long_tap\n    for k in [\"point\", \"click\", \"tap\", \"press\", \"long_press\", \"long_tap\"]:\n        if k in action and isinstance(action[k], dict):\n            px = action[k]\n            if \"x\" in px and \"y\" in px:\n                pts.append(to_px(float(px[\"x\"]), float(px[\"y\"]), None))\n        if k in action and isinstance(action[k], list):\n            for px in action[k]:\n                if isinstance(px, dict) and \"x\" in px and \"y\" in px:\n                    pts.append(to_px(float(px[\"x\"]), float(px[\"y\"]), None))\n\n    # 3) from/to, start/end\n    for a, b in [(\"from\", \"to\"), (\"start\", \"end\")]:\n        if a in action and b in action and isinstance(action[a], dict) and isinstance(action[b], dict):\n            ax, ay = action[a].get(\"x\"), action[a].get(\"y\")\n            bx, by = action[b].get(\"x\"), action[b].get(\"y\")\n            if ax is not None and ay is not None and bx is not None and by is not None:\n                pts.append(to_px(float(ax), float(ay), None))\n                pts.append(to_px(float(bx), float(by), None))\n\n    # 4) start_x/start_y/end_x/end_y\n    cand = {\"start_x\": None, \"start_y\": None, \"end_x\": None, \"end_y\": None}\n    found = False\n    for ck in cand.keys():\n        if ck in action:\n            cand[ck] = float(action[ck])\n            found = True\n    if found and cand[\"start_x\"] is not None and cand[\"start_y\"] is not None:\n        pts.append(to_px(cand[\"start_x\"], cand[\"start_y\"], None))\n        if cand[\"end_x\"] is not None and cand[\"end_y\"] is not None:\n            pts.append(to_px(cand[\"end_x\"], cand[\"end_y\"], None))\n\n    # Remove duplicates\n    uniq: List[Tuple[int,int]] = []\n    seen = set()\n    for p in pts:\n        if p not in seen:\n            uniq.append(p)\n            seen.add(p)\n    return uniq\n\n# =========================\n# Parse episode from TF Example\n# =========================\ndef load_episode_from_example(ex: tf.train.Example) -> Dict[str, Any]:\n    f = ex.features.feature\n\n    screenshots_bytes = feature_to_list_bytes(f[\"screenshots\"])\n    a11y_bytes_list = feature_to_list_bytes(f[\"accessibility_trees\"])\n    widths = feature_to_list_int(f[\"screenshot_widths\"])\n    heights = feature_to_list_int(f[\"screenshot_heights\"])\n\n    actions_json_list = [b.decode(\"utf-8\") for b in feature_to_list_bytes(f[\"actions\"])]\n    step_insts = [b.decode(\"utf-8\") for b in feature_to_list_bytes(f[\"step_instructions\"])]\n    actions = [json.loads(s) for s in actions_json_list]\n\n    goal = f[\"goal\"].bytes_list.value[0].decode(\"utf-8\")\n    episode_id = int(f[\"episode_id\"].int64_list.value[0]) if f[\"episode_id\"].int64_list.value else int(\n        f[\"episode_id\"].bytes_list.value[0].decode(\"utf-8\")\n    )\n\n    assert len(screenshots_bytes) == len(widths) == len(heights), \"screenshot/width/height length mismatch\"\n    assert len(actions) == len(step_insts) == (len(screenshots_bytes) - 1), \\\n        \"actions/step_instructions must equal screenshots-1\"\n\n    return {\n        \"episode_id\": episode_id,\n        \"goal\": goal,\n        \"screenshots\": screenshots_bytes,\n        \"a11y\": a11y_bytes_list,\n        \"widths\": widths,\n        \"heights\": heights,\n        \"actions\": actions,\n        \"step_instructions\": step_insts,\n    }\n\n# =========================\n# Action mapping & utilities\n# =========================\ndef _center_xy(w: int, h: int) -> Tuple[int,int]:\n    return (int(round(w/2)), int(round(h/2)))\n\ndef _norm_dir(d: Optional[str]) -> str:\n    if not d: return \"down\"\n    d = str(d).lower()\n    if d in [\"up\",\"down\",\"left\",\"right\"]:\n        return d\n    if d in [\"u\",\"top\"]: return \"up\"\n    if d in [\"d\",\"bottom\"]: return \"down\"\n    if d in [\"l\"]: return \"left\"\n    if d in [\"r\"]: return \"right\"\n    return \"down\"\n\ndef map_action(\n    action: Dict[str, Any],\n    w: int,\n    h: int,\n    pts: List[Tuple[int,int]],\n) -> Optional[Dict[str, Any]]:\n    \"\"\"\n    Allowed mappings:\n    click -> {\"type\": \"touch\", \"x\": <x>, \"y\": <y>}\n    long_press -> {\"type\": \"long_touch\", \"x\": <x>, \"y\": <y>}\n    input_text -> {\"type\": \"set_text\", \"text\": \"...\", \"x\": <x>, \"y\": <y>}\n    scroll -> {\"type\": \"scroll\", \"direction\": \"up|down|left|right\", \"x\": <center_x>, \"y\": <center_y>}\n    navigate_home -> {\"type\": \"press\", \"key\": \"home\"}\n    navigate_back -> {\"type\": \"press\", \"key\": \"back\"}\n    \"\"\"\n    atype = (action.get(\"action_type\") or action.get(\"type\") or action.get(\"action\") or \"\").lower()\n    x, y = (pts[0] if pts else _center_xy(w, h))\n\n    if atype in [\"click\", \"tap\", \"press\", \"click_view\"]:\n        return {\"type\": \"touch\", \"x\": x, \"y\": y}\n\n    if atype in [\"long_press\", \"long_tap\", \"long_click\"]:\n        return {\"type\": \"long_touch\", \"x\": x, \"y\": y}\n\n    if atype in [\"input_text\", \"set_text\", \"type_text\", \"enter_text\", \"text\"]:\n        text = action.get(\"text\") or action.get(\"input_text\") or action.get(\"value\") or \"\"\n        return {\"type\": \"set_text\", \"text\": str(text), \"x\": x, \"y\": y}\n\n    if atype in [\"scroll\", \"swipe\"]:\n        if len(pts) >= 2:\n            cx = (pts[0][0] + pts[1][0]) // 2\n            cy = (pts[0][1] + pts[1][1]) // 2\n        else:\n            cx, cy = _center_xy(w, h)\n        return {\"type\": \"scroll\", \"direction\": _norm_dir(action.get(\"direction\")), \"x\": cx, \"y\": cy}\n\n    if atype in [\"navigate_home\", \"home\", \"press_home\"]:\n        return {\"type\": \"press\", \"key\": \"home\"}\n\n    if atype in [\"navigate_back\", \"back\", \"press_back\"]:\n        return {\"type\": \"press\", \"key\": \"back\"}\n\n    # Others (open_app, wait, etc.) → skip saving\n    return None\n\ndef save_clean_image(img_bytes: bytes, episode_id: int, step_idx: int, base_dir: str = \"and_ctrl\") -> str:\n    \"\"\"\n    Save image as: out_episode_{EP}_step_{STEP:03d}.png (without overlay)\n    \"\"\"\n    Path(base_dir).mkdir(parents=True, exist_ok=True)\n    fname = f\"out_episode_{episode_id}_step_{step_idx:03d}.png\"\n    fpath = Path(base_dir) / fname\n    Image.open(io.BytesIO(img_bytes)).convert(\"RGB\").save(fpath)\n    # Return just the relative path from base_dir\n    return f\"{base_dir}/{fname}\"\n\n# =========================\n# Export messages to JSON\n# =========================\ndef export_messages(ds, limit_episodes: int = 5, out_json: str = \"and_ctrl.json\", image_dir: str = \"and_ctrl\"):\n    \"\"\"\n    Extract allowed actions from the first N episodes in the TFRecordDataset\n    and save them in the requested format to and_ctrl.json.\n    \"\"\"\n    all_items: List[Dict[str, Any]] = []\n    ep_cnt = 0\n\n    for raw in ds:\n        ex = tf.train.Example()\n        ex.ParseFromString(raw.numpy())\n        ep = load_episode_from_example(ex)\n\n        ep_id = ep[\"episode_id\"]\n        for i, (action, inst) in enumerate(zip(ep[\"actions\"], ep[\"step_instructions\"])):\n            w, h = ep[\"widths\"][i], ep[\"heights\"][i]\n            img_bytes = ep[\"screenshots\"][i]\n            pts = extract_points_from_action(action, w, h)\n            mapped = map_action(action, w, h, pts)\n            if not mapped:\n                continue  # Skip\n\n            img_path = save_clean_image(img_bytes, ep_id, i, base_dir=image_dir)\n\n            all_items.append({\n                \"messages\": [\n                    {\"role\": \"user\", \"content\": f\"<image>\\n{inst}\"},\n                    # Save as Python dict string (single quotes), not JSON\n                    {\"role\": \"assistant\", \"content\": str(mapped)}\n                ],\n                \"images\": [img_path]\n            })\n\n        ep_cnt += 1\n        if ep_cnt >= limit_episodes:\n            break\n\n    with open(out_json, \"w\", encoding=\"utf-8\") as f:\n        json.dump(all_items, f, ensure_ascii=False, indent=2)\n\n    print(f\"[DONE] episodes processed: {ep_cnt}, items saved: {len(all_items)} → {out_json}\")\n\n# =========================\n# Main entry point\n# =========================\ndef main():\n    # Adjust path pattern if needed\n    filenames = tf.io.gfile.glob('gs://gresearch/android_control/android_control*')\n    ds = tf.data.TFRecordDataset(filenames, compression_type='GZIP')\n    export_messages(ds, limit_episodes=50, out_json=\"and_ctrl.json\", image_dir=\"and_ctrl\")\n\nif __name__ == \"__main__\":\n    main()"
 },
 {
 "cell_type": "code",
@@ -52,42 +52,6 @@
 "source": [
 "!gsutil du -sh gs://gresearch/android_control/android_control*\n"
 ]
-},
-{
-"cell_type": "markdown",
-"metadata": {},
-"source": [
-"# Download Images and episodes"
-]
-},
-{
-"cell_type": "code",
-"execution_count": null,
-"metadata": {
-"id": "eP8n5YdK8Mns"
-},
-"outputs": [
-{
-"ename": "",
-"evalue": "",
-"output_type": "error",
-"traceback": [
-"\u001b[1;31mRunning cells with 'and_ctrl (Python 3.12.11)' requires the ipykernel package.\n",
-"\u001b[1;31mInstall 'ipykernel' into the Python environment. \n",
-"\u001b[1;31mCommand: '/home/work/kyochul/and_ctrl/bin/python -m pip install ipykernel -U --force-reinstall'"
-]
-}
-],
-"source": [
-"import os"
-]
-},
-{
-"cell_type": "code",
-"execution_count": null,
-"metadata": {},
-"outputs": [],
-"source": []
 }
 ],
 "metadata": {
|