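"""Streamlit Space for browsing the lion-ai/pl_med_data instruction datasets.

The page lets the user pick a split ("raw" or "processed"), shows the total
number of instructions across all configs, plots how many examples each config
contributes, and previews the first rows of a selected config.
"""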
import os
from typing import Dict

import plotly.express as px
import streamlit as st
from datasets import Dataset, DatasetInfo, SplitInfo, get_dataset_infos, load_dataset
BASE_DATASET: str = "lion-ai/pl_med_data"
read_key = os.environ.get("HF_TOKEN", None)

# Human-readable (Polish) display names for each dataset config.
dataset_names_map: Dict[str, str] = {
    "znany_lekarz": "Porady - pytania i odpowiedzi",
    "kor_epikryzy_qa": "Dokumentacja medyczna - pytania i odpowiedzi",
    "wikipedia": "Ogólna wiedza medyczna - pytania i odpowiedzi",
}
reverse_dataset_names_map: Dict[str, str] = {v: k for k, v in dataset_names_map.items()}
def list_datasets() -> Dict[str, DatasetInfo]:
    """
    Retrieve info for every config of the base dataset.

    Returns:
        Dict[str, DatasetInfo]: Mapping of config name to its dataset info.
    """
    return get_dataset_infos(BASE_DATASET, token=read_key)
def show_examples(dataset_name: str, split: str) -> None:
    """Preview the first 10 rows of the selected config in an editable table."""
    # The selectbox shows display names, so map back to the config name if needed.
    dataset_name = reverse_dataset_names_map.get(dataset_name, dataset_name)
    dataset: Dataset = load_dataset(BASE_DATASET, dataset_name, split=f"{split}[:10]", token=read_key)
    st.data_editor(dataset.to_pandas(), use_container_width=True)
def count_all_examples(split_infos: Dict[str, SplitInfo]) -> None:
    """Show the total number of examples across all configs for the chosen split."""
    count: int = sum(split_info.num_examples for split_info in split_infos.values())
    st.metric(label="Total no. of instructions", value=f"{count:,}")
def filter_splits(dataset_infos: Dict[str, DatasetInfo], split: str) -> Dict[str, SplitInfo]:
    """
    Keep only the configs that contain the requested split.

    Args:
        dataset_infos (Dict[str, DatasetInfo]): Mapping of config name to dataset info.
        split (str): The split to filter by (e.g. "raw" or "processed").

    Returns:
        Dict[str, SplitInfo]: Mapping of display name to that config's split info.
    """
    dataset_splits: Dict[str, SplitInfo] = {}
    for dataset_name, dataset_info in dataset_infos.items():
        if split in dataset_info.splits:
            dataset_name = dataset_names_map.get(dataset_name, dataset_name)
            dataset_splits[dataset_name] = dataset_info.splits[split]
    return dataset_splits
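# --- Page layout: split selector, total counter, per-config pie chart, row preview ---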
split: str = st.selectbox("Split", ["raw", "processed"])
datasets: Dict[str, DatasetInfo] = list_datasets()
# st.write(datasets)
filtered_datasets: Dict[str, SplitInfo] = filter_splits(datasets, split)
# st.write(filtered_datasets)
count_all_examples(filtered_datasets)
# Pie chart of the number of examples contributed by each config.
fig = px.pie(
    values=[split_info.num_examples for split_info in filtered_datasets.values()],
    names=list(filtered_datasets.keys()),
    # title=f"Number of Examples per Dataset ({split} split)",
    labels={"label": "Dataset", "value": "Number of Examples"},
)
# Update layout for better readability.
fig.update_traces(textposition="inside", textinfo="value+label")
fig.update_layout(legend_title_text="Datasets", uniformtext_minsize=12, uniformtext_mode="hide")
st.plotly_chart(fig, use_container_width=True)
dataset_name = st.selectbox("Select a dataset", list(filtered_datasets.keys()))
show_examples(dataset_name, split)
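# To run locally (assuming this file is saved as app.py and HF_TOKEN is set to a
# token with read access to the dataset):
#   streamlit run app.py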