habdine commited on
Commit
ea465a5
·
verified ·
1 Parent(s): d39085a

Upload 7 files

Browse files
Files changed (8) hide show
  1. .gitattributes +2 -0
  2. README.md +6 -7
  3. app.py +313 -0
  4. logo_dascim.png +3 -0
  5. model.png +3 -0
  6. pre-commit-config.yaml +60 -0
  7. requirements.txt +8 -0
  8. style.css +188 -0
.gitattributes CHANGED
@@ -33,3 +33,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ logo_dascim.png filter=lfs diff=lfs merge=lfs -text
37
+ model.png filter=lfs diff=lfs merge=lfs -text
README.md CHANGED
@@ -1,14 +1,13 @@
1
  ---
2
- title: Prot2Text V2
3
- emoji: 🏢
4
- colorFrom: red
5
- colorTo: indigo
6
  sdk: gradio
7
- sdk_version: 5.49.1
8
  app_file: app.py
9
  pinned: false
10
- license: apache-2.0
11
- short_description: Protein Function
12
  ---
13
 
14
  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
1
  ---
2
+ title: Prot2Text-V2
3
+ emoji: 🧬
4
+ colorFrom: indigo
5
+ colorTo: pink
6
  sdk: gradio
7
+ sdk_version: 5.1.0
8
  app_file: app.py
9
  pinned: false
10
+ short_description: Protein function prediction
 
11
  ---
12
 
13
  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,313 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from threading import Thread
from typing import Iterator, List, Tuple

import gradio as gr
from gradio.themes import Soft
import spaces
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

# Static branding asset hosted by the DASCIM team site.
TEAM_LOGO_URL = "http://nlp.polytechnique.fr/static/images/logo_dascim.png"

# FIX: the original value was a pre-signed xethub CDN URL carrying
# `X-Amz-Expires=3600`, i.e. it stops resolving within an hour of the commit
# and the hero image silently breaks. The repository ships `model.png`, so
# serve the bundled file from the Space itself instead.
# NOTE(review): Gradio 5 serves allowed static files under
# `/gradio_api/file=...` — confirm the route against the pinned sdk_version
# if the image does not render.
gr.set_static_paths(paths=["model.png"])
PROTEIN_VISUAL_URL = "/gradio_api/file=model.png"

# HTML card wrapping the model-architecture figure shown above the intro text.
PROTEIN_HERO = f"""
<div class="visual-card hero-card">
  <img src="{PROTEIN_VISUAL_URL}" alt="Protein rendering" class="protein-visual">
</div>
"""

DESCRIPTION = f"""\
### Prot2Text-V2 Demo

{PROTEIN_HERO}

Prot2Text-V2 treats a protein sequence as if it were another language and translates it into English. Supply a raw amino acid sequence and the model returns a clear, human-readable paragraph describing what the protein does.

The paper describing Prot2Text-V2 has been accepted to the NeurIPS 2025 main conference and pairs fast experimentation with explainability-minded outputs.

- **Input**: protein sequence using IUPAC single-letter amino acid codes (20 canonical amino acids).
- **Output**: polished descriptions of predicted function, localization cues, and structural hints.
- **Why it matters**: accelerate protein characterization, lab annotations, or downstream hypothesis building.

**Model architecture at a glance**
- Protein language model encoder: facebook/esm2_t36_3B_UR50D.
- Modality adapter: lightweight bridge aligning protein embeddings with the language model.
- Natural language decoder: meta-llama/Llama-3.1-8B-Instruct for articulate descriptions.

**Resources**
- [Paper (NeurIPS 2025)](https://arxiv.org/abs/2505.11194)
- [Code repository](https://github.com/ColinFX/Prot2Text-V2)
- [Training data](https://huggingface.co/datasets/habdine/Prot2Text-Data)
"""

# Clickable sample inputs for the gr.Examples widget (one sequence each).
EXAMPLE_SEQUENCES = [
    ["AEQAERYEEMVEFMEKL"],
    [
        "MAVVLPAVVEELLSEMAAAVQESARIPDEYLLSLKFLFGSSATQALDLVDRQSITLISSPSGRRVYQVLGSSSKTYTCLASCHYCSCPAFAFSVLRKSDSILCKHLLAVYLSQVMRTCQQLSVSDKQLTDILLMEKKQEA"
    ],
    [
        "MCYSANGNTFLIVDNTQKRIPEEKKPDFVRENVGDLDGVIFVELVDGKYFMDYYNRDGSMAAFCGNGARAFSQYLIDRGWIKEKEFTFLSRAGEIKVIVDDSIWVRMPGVSEKKEMKVDGYEGYFVVVGVPHFVMEVKGIDELDVEKLGRDLRYKTGANVDFYEVLPDRLKVRTYERGVERETKACGTGVTSVFVVYRDKTGAKEVKIQVPGGTLFLKEENGEIFLRGDVKRCSEE"
    ],
    [
        "MTQEERFEQRIAQETAIEPQDWMPDAYRKTLIRQIGQHAHSEIVGMLPEGNWITRAPTLRRKAILLAKVQDEAGHGLYLYSAAETLGCAREDIYQKMLDGRMKYSSIFNYPTLSWADIGVIGWLVDGAAIVNQVALCRTSYGPYARAMVKICKEESFHQRQGFEACMALAQGSEAQKQMLQDAINRFWWPALMMFGPNDDNSPNSARSLTWKIKRFTNDELRQRFVDNTVPQVEMLGMTVPDPDLHFDTESGHYRFGEIDWQEFNEVINGRGICNQERLDAKRKAWEEGTWVREAALAHAQKQHARKVA"
    ],
    [
        "MTTRMIILNGGSSAGKSGIVRCLQSVLPEPWLAFGVDSLIEAMPLKMQSAEGGIEFDADGGVSIGPEFRALEGAWAEGVVAMARAGARIIIDDVFLGGAAAQERWRSFVGDLDVLWVGVRCDGAVAEGRETARGDRVAGMAAKQAYVVHEGVEYDVEVDTTHKESIECAWAIAAHVVP"
    ],
]

# Hard ceiling and default for the "Max new tokens" slider / generation budget.
MAX_MAX_NEW_TOKENS = 256
DEFAULT_MAX_NEW_TOKENS = 100


# On Spaces the @spaces.GPU decorator attaches a GPU for the call duration.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

# System prompt fixed for every request; the user turn only carries the
# protein-embedding placeholder tokens.
system_message = (
    "You are a scientific assistant specialized in protein function "
    "predictions. Given the sequence embeddings and other information "
    "of a protein, describe its function clearly and concisely in "
    "professional language. "
)

# Llama reserved token that the model replaces with projected ESM embeddings.
placeholder = '<|reserved_special_token_1|>'

# Tokenizer for the protein encoder (ESM2 3B).
esm_tokenizer = AutoTokenizer.from_pretrained("facebook/esm2_t36_3B_UR50D")
# Tokenizer for the language decoder; a reserved token stands in as pad.
llama_tokenizer = AutoTokenizer.from_pretrained(
    pretrained_model_name_or_path="meta-llama/Llama-3.1-8B-Instruct",
    pad_token='<|reserved_special_token_0|>'
)
# Custom fused encoder/decoder checkpoint; trust_remote_code is required for
# the model's bespoke forward() that consumes protein_input_ids.
model = AutoModelForCausalLM.from_pretrained('xiao-fei/Prot2Text-V2-11B-Instruct-hf',
                                             trust_remote_code=True,
                                             torch_dtype=torch.bfloat16,).to(device)
model.eval()
83
+
84
+
85
@spaces.GPU(duration=90)
def stream_response(
    message: str,
    max_new_tokens: int = 1024,
    do_sample: bool = False,
    temperature: float = 0.6,
    top_p: float = 0.9,
    top_k: int = 50,
    repetition_penalty: float = 1.2,
) -> Iterator[str]:
    """Stream a generated function description for one protein sequence.

    Args:
        message: Raw amino-acid sequence in single-letter IUPAC codes.
        max_new_tokens: Generation budget; clamped to ``MAX_MAX_NEW_TOKENS``.
        do_sample: Enable sampling; greedy decoding when False.
        temperature, top_p, top_k, repetition_penalty: Standard sampling
            knobs forwarded verbatim to ``model.generate``.

    Yields:
        The cumulative decoded text after each newly streamed chunk.
    """
    # FIX: the default (1024) exceeded the advertised MAX_MAX_NEW_TOKENS
    # ceiling (256); clamp so direct callers cannot blow the GPU-time budget.
    max_new_tokens = min(max_new_tokens, MAX_MAX_NEW_TOKENS)

    streamer = TextIteratorStreamer(
        llama_tokenizer, timeout=20.0, skip_prompt=True, skip_special_tokens=True
    )

    # One placeholder token per residue plus two for the special tokens the
    # ESM tokenizer adds; the model swaps these for protein embeddings.
    user_message = "Sequence embeddings: " + placeholder * (len(message) + 2)
    tokenized_prompt = llama_tokenizer.apply_chat_template(
        [
            {"role": "system", "content": system_message},
            {"role": "user", "content": user_message},
        ],
        add_generation_prompt=True,
        tokenize=True,
        return_tensors="pt",
        return_dict=True,
    )
    tokenized_sequence = esm_tokenizer(
        message,
        return_tensors="pt",
    )
    # (Removed redundant model.eval() here — already set once at load time.)
    generate_kwargs = dict(
        inputs=tokenized_prompt["input_ids"].to(model.device),
        attention_mask=tokenized_prompt["attention_mask"].to(model.device),
        protein_input_ids=tokenized_sequence["input_ids"].to(model.device),
        protein_attention_mask=tokenized_sequence["attention_mask"].to(model.device),
        eos_token_id=128009,  # Llama 3.1 end-of-turn id
        pad_token_id=128002,  # matches pad_token set on llama_tokenizer above
        return_dict_in_generate=False,
        num_beams=1,  # beam search is incompatible with token streaming
        streamer=streamer,
        max_new_tokens=max_new_tokens,
        do_sample=do_sample,
        top_p=top_p,
        top_k=top_k,
        temperature=temperature,
        repetition_penalty=repetition_penalty,
    )
    # generate() blocks until completion, so run it on a worker thread and
    # drain the streamer from this generator.
    t = Thread(target=model.generate, kwargs=generate_kwargs)
    t.start()

    outputs = []
    for text in streamer:
        outputs.append(text)
        yield "".join(outputs)
140
+
141
+
142
# Chat history as the tuples format used by gr.Chatbot: (user, assistant).
ChatHistory = List[Tuple[str, str]]


def handle_submit(
    message: str,
    history: ChatHistory,
    max_new_tokens: int,
    do_sample: bool,
    temperature: float,
    top_p: float,
    top_k: int,
    repetition_penalty: float,
):
    """Stream one prediction into the chat history.

    Yields (chatbot value, history state, textbox update) triples so the UI
    refreshes after every streamed chunk and the input box is cleared.
    Yields nothing for an effectively empty input.
    """
    history = list(history or [])
    # FIX: pasted sequences often carry spaces/newlines or lowercase letters;
    # the ESM tokenizer expects bare uppercase single-letter codes, so
    # normalize instead of forwarding the raw paste.
    message = "".join(message.split()).upper()
    if not message:
        return

    conversation = history.copy()
    conversation.append((message, ""))

    for partial_response in stream_response(
        message=message,
        max_new_tokens=max_new_tokens,
        do_sample=do_sample,
        temperature=temperature,
        top_p=top_p,
        top_k=top_k,
        repetition_penalty=repetition_penalty,
    ):
        conversation[-1] = (message, partial_response)
        # Yield a fresh copy so Gradio sees a new object and re-renders.
        snapshot = conversation.copy()
        yield snapshot, snapshot, gr.update(value="")
175
+
176
+
177
def clear_conversation():
    """Reset the demo: empty chatbot, empty history state, blank input box."""
    fresh: ChatHistory = []
    return fresh, fresh, gr.update(value="")
180
+
181
# Muted gray palette chosen to match the custom style.css.
theme = Soft(
    primary_hue="slate",
    secondary_hue="stone",
    neutral_hue="gray",
)

with gr.Blocks(theme=theme, css_paths="style.css", fill_height=True) as demo:
    with gr.Row(equal_height=True):
        # Left column: team branding and the project description/figure.
        with gr.Column(scale=5, min_width=320):
            gr.HTML(
                f"""
                <div class="brand-header center">
                    <a href="https://www.lix.polytechnique.fr/dascim/" target="_blank" rel="noopener">
                        <img src="{TEAM_LOGO_URL}" alt="DASCIM team logo" class="team-logo">
                    </a>
                </div>
                """
            )
            gr.Markdown(DESCRIPTION)
        # Right column: input box, controls, and the streamed output chat.
        with gr.Column(scale=7, min_width=400, elem_classes="interaction-column"):
            # Server-side copy of the conversation, list of (sequence, text)
            # tuples; kept in sync with the chatbot by handle_submit.
            history_state = gr.State([])

            chatbot = gr.Chatbot(
                label="Generated Function",
                height=350,
                show_copy_button=True,
            )
            with gr.Group(elem_classes="input-card"):
                # Single-line input: protein sequences have no line structure.
                sequence_input = gr.Textbox(
                    placeholder="Paste your amino acid sequence here (e.g. MAVVLPAVVEELLSEMAAAVQESA...)",
                    label="Protein sequence",
                    lines=1,
                    max_lines=1,
                    autofocus=True,
                )
                with gr.Row(elem_classes="button-row"):
                    submit_button = gr.Button("Predict function", variant="primary", elem_classes="primary-btn")
                    stop_button = gr.Button("Stop generation", variant="stop", elem_classes="stop-btn")
                # Clickable samples only fill the textbox (run_on_click=False);
                # the user still presses "Predict function" explicitly.
                gr.Examples(
                    examples=EXAMPLE_SEQUENCES,
                    inputs=sequence_input,
                    label="Sample sequences",
                    cache_examples=False,
                    run_on_click=False,
                )

            # Advanced decoding knobs, collapsed by default.
            with gr.Accordion("Generation controls", open=False):
                max_new_tokens_slider = gr.Slider(
                    label="Max new tokens",
                    minimum=1,
                    maximum=MAX_MAX_NEW_TOKENS,
                    step=1,
                    value=DEFAULT_MAX_NEW_TOKENS,
                )
                do_sample_checkbox = gr.Checkbox(label="Enable sampling", value=False)
                # NOTE(review): the sampling sliders below are always passed to
                # stream_response but only take effect when sampling is enabled.
                temperature_slider = gr.Slider(
                    label="Temperature",
                    minimum=0.1,
                    maximum=4.0,
                    step=0.1,
                    value=0.6,
                )
                top_p_slider = gr.Slider(
                    label="Top-p (nucleus sampling)",
                    minimum=0.05,
                    maximum=1.0,
                    step=0.05,
                    value=0.9,
                )
                top_k_slider = gr.Slider(
                    label="Top-k",
                    minimum=1,
                    maximum=1000,
                    step=1,
                    value=50,
                )
                repetition_penalty_slider = gr.Slider(
                    label="Repetition penalty",
                    minimum=1.0,
                    maximum=2.0,
                    step=0.05,
                    value=1.0,
                )

    # Pressing Enter in the textbox triggers the same pipeline as the button.
    enter_event = sequence_input.submit(
        handle_submit,
        inputs=[
            sequence_input,
            history_state,
            max_new_tokens_slider,
            do_sample_checkbox,
            temperature_slider,
            top_p_slider,
            top_k_slider,
            repetition_penalty_slider,
        ],
        outputs=[chatbot, history_state, sequence_input],
        queue=True,  # queued so streaming updates reach the client
    )

    submit_event = submit_button.click(
        handle_submit,
        inputs=[
            sequence_input,
            history_state,
            max_new_tokens_slider,
            do_sample_checkbox,
            temperature_slider,
            top_p_slider,
            top_k_slider,
            repetition_penalty_slider,
        ],
        outputs=[chatbot, history_state, sequence_input],
        queue=True,
    )

    # The stop button runs no function of its own; it only cancels the two
    # in-flight generation events above.
    stop_button.click(
        None,
        inputs=None,
        outputs=None,
        cancels=[submit_event, enter_event],
    )
    with gr.Accordion("Model & usage notes", open=False):
        gr.Markdown(
            "- **Model stack**: Facebook ESM2 encoder + Llama 3.1 8B instruction-tuned decoder.\n"
            "- **Token budget**: the generator truncates after the configured `Max new tokens`.\n"
            "- **Attribution**: Outputs are predictions; validate experimentally before publication.\n"
        )
    gr.DuplicateButton(value="Duplicate Space for private use", elem_id="duplicate-button")

if __name__ == "__main__":
    # Bounded queue so concurrent visitors wait rather than overload the GPU.
    demo.queue(max_size=20).launch()
logo_dascim.png ADDED

Git LFS Details

  • SHA256: 2feefdd5320a2c1af774492f057a8d216e12f0bf719736fd532e9ab5294e9f81
  • Pointer size: 131 Bytes
  • Size of remote file: 200 kB
model.png ADDED

Git LFS Details

  • SHA256: faff935131de00279a4a6232ff4ae1fdaa73fd8ed15f35bfd64b84298f45bcdd
  • Pointer size: 131 Bytes
  • Size of remote file: 451 kB
pre-commit-config.yaml ADDED
@@ -0,0 +1,60 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ repos:
2
+ - repo: https://github.com/pre-commit/pre-commit-hooks
3
+ rev: v4.6.0
4
+ hooks:
5
+ - id: check-executables-have-shebangs
6
+ - id: check-json
7
+ - id: check-merge-conflict
8
+ - id: check-shebang-scripts-are-executable
9
+ - id: check-toml
10
+ - id: check-yaml
11
+ - id: end-of-file-fixer
12
+ - id: mixed-line-ending
13
+ args: ["--fix=lf"]
14
+ - id: requirements-txt-fixer
15
+ - id: trailing-whitespace
16
+ - repo: https://github.com/myint/docformatter
17
+ rev: v1.7.5
18
+ hooks:
19
+ - id: docformatter
20
+ args: ["--in-place"]
21
+ - repo: https://github.com/pycqa/isort
22
+ rev: 5.13.2
23
+ hooks:
24
+ - id: isort
25
+ args: ["--profile", "black"]
26
+ - repo: https://github.com/pre-commit/mirrors-mypy
27
+ rev: v1.10.1
28
+ hooks:
29
+ - id: mypy
30
+ args: ["--ignore-missing-imports"]
31
+ additional_dependencies:
32
+ [
33
+ "types-python-slugify",
34
+ "types-requests",
35
+ "types-PyYAML",
36
+ "types-pytz",
37
+ ]
38
+ - repo: https://github.com/psf/black
39
+ rev: 24.4.2
40
+ hooks:
41
+ - id: black
42
+ language_version: python3.10
43
+ args: ["--line-length", "119"]
44
+ - repo: https://github.com/kynan/nbstripout
45
+ rev: 0.7.1
46
+ hooks:
47
+ - id: nbstripout
48
+ args:
49
+ [
50
+ "--extra-keys",
51
+ "metadata.interpreter metadata.kernelspec cell.metadata.pycharm",
52
+ ]
53
+ - repo: https://github.com/nbQA-dev/nbQA
54
+ rev: 1.8.5
55
+ hooks:
56
+ - id: nbqa-black
57
+ - id: nbqa-pyupgrade
58
+ args: ["--py37-plus"]
59
+ - id: nbqa-isort
60
+ args: ["--float-to-top"]
requirements.txt ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ accelerate>=0.25.0
2
+ einops>=0.7.0
3
+ gradio>=5.0.0
4
+ huggingface-hub>=0.23.0
5
+ mistral-common>=1.4.0
6
+ sentencepiece>=0.1.99
7
+ torch>=2.1.0
8
+ transformers>=4.38.0
style.css ADDED
@@ -0,0 +1,188 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ body {
2
+ background: radial-gradient(circle at top, #f7f7f8 0%, #e6e7e9 45%, #d6d8da 100%);
3
+ color: #2e3238;
4
+ }
5
+
6
+ .gradio-container.dark {
7
+ background: radial-gradient(circle at top, #1a1b1d 0%, #161719 50%, #121315 100%);
8
+ color: #f1f2f4;
9
+ }
10
+
11
+ #duplicate-button {
12
+ margin: auto;
13
+ color: #2e3238;
14
+ background: linear-gradient(135deg, #dadde1 0%, #c6c9cd 100%);
15
+ border-radius: 100vh;
16
+ border: 1px solid rgba(57, 62, 69, 0.25);
17
+ }
18
+
19
+ .gradio-container.dark #duplicate-button {
20
+ color: #f3f4f6;
21
+ background: linear-gradient(135deg, #2e3034 0%, #3a3d43 100%);
22
+ border: 1px solid rgba(176, 182, 190, 0.25);
23
+ }
24
+
25
+ .brand-header {
26
+ display: flex;
27
+ align-items: center;
28
+ justify-content: center;
29
+ margin: 0 0 1.25rem 0;
30
+ }
31
+
32
+ .interaction-column {
33
+ gap: 1rem;
34
+ }
35
+
36
+ .visual-card {
37
+ display: flex;
38
+ align-items: center;
39
+ justify-content: center;
40
+ background: linear-gradient(120deg, rgba(103, 109, 116, 0.18), rgba(189, 193, 198, 0.24));
41
+ border-radius: 20px;
42
+ padding: 0.75rem;
43
+ box-shadow: 0 12px 30px rgba(35, 43, 55, 0.16);
44
+ max-width: 100%;
45
+ margin: 0 auto;
46
+ }
47
+
48
+ .gradio-container.dark .visual-card {
49
+ background: linear-gradient(120deg, rgba(58, 60, 64, 0.45), rgba(32, 34, 37, 0.45));
50
+ box-shadow: 0 16px 32px rgba(0, 0, 0, 0.32);
51
+ }
52
+
53
+ .visual-card img.protein-visual {
54
+ width: auto;
55
+ max-width: 100%;
56
+ border-radius: 12px;
57
+ box-shadow: 0 8px 20px rgba(40, 50, 60, 0.24);
58
+ }
59
+
60
+ .gradio-container.dark .visual-card img.protein-visual {
61
+ box-shadow: 0 10px 24px rgba(0, 0, 0, 0.45);
62
+ }
63
+
64
+ .hero-card {
65
+ margin-bottom: 1.25rem;
66
+ }
67
+
68
+ .brand-header.center {
69
+ margin: 0 auto 1.5rem auto;
70
+ }
71
+
72
+ .input-card {
73
+ padding: 0.85rem;
74
+ background: transparent;
75
+ border-radius: 16px;
76
+ border: 1px solid rgba(45, 52, 60, 0.12);
77
+ box-shadow: none;
78
+ }
79
+
80
+ .gradio-container.dark .input-card {
81
+ border: 1px solid rgba(142, 148, 158, 0.25);
82
+ }
83
+
84
+ .input-card .gr-examples {
85
+ margin-bottom: 1rem;
86
+ background: transparent;
87
+ box-shadow: none;
88
+ }
89
+
90
+ .input-card .gr-examples>div,
91
+ .input-card .gr-examples table {
92
+ background: transparent !important;
93
+ }
94
+
95
+ .input-card .gr-textbox textarea {
96
+ border-radius: 10px;
97
+ border: 1px solid rgba(60, 67, 78, 0.45);
98
+ box-shadow: none;
99
+ resize: none;
100
+ height: 42px;
101
+ padding: 0.45rem 0.75rem;
102
+ }
103
+
104
+ .input-card .gr-textbox textarea:focus-visible {
105
+ outline: 2px solid rgba(60, 67, 78, 0.45);
106
+ }
107
+
108
+ .gradio-container.dark .input-card .gr-textbox textarea {
109
+ background: rgba(28, 29, 31, 0.85);
110
+ border: 1px solid rgba(149, 155, 166, 0.4);
111
+ color: #f5f6f8;
112
+ }
113
+
114
+ .gradio-container.dark .input-card .gr-textbox textarea:focus-visible {
115
+ outline: 2px solid rgba(193, 197, 205, 0.55);
116
+ }
117
+
118
+ .button-row {
119
+ display: flex;
120
+ gap: 0.75rem;
121
+ margin: 0.75rem 0;
122
+ }
123
+
124
+ .primary-btn,
125
+ .stop-btn {
126
+ display: inline-flex;
127
+ border-radius: 999px !important;
128
+ overflow: hidden;
129
+ }
130
+
131
+ .primary-btn button,
132
+ .stop-btn button {
133
+ border-radius: 999px !important;
134
+ border: none;
135
+ padding: 0.55rem 1.4rem;
136
+ transition: transform 0.12s ease, box-shadow 0.12s ease;
137
+ }
138
+
139
+ .primary-btn button {
140
+ background: rgba(255, 255, 255, 0.75);
141
+ color: #2d3138;
142
+ border: 1px solid rgba(60, 67, 78, 0.4);
143
+ box-shadow: 0 6px 14px rgba(49, 54, 61, 0.15);
144
+ }
145
+
146
+ .primary-btn button:hover {
147
+ background: rgba(255, 255, 255, 0.88);
148
+ }
149
+
150
+ .stop-btn button {
151
+ background: rgba(250, 232, 230, 0.75);
152
+ color: #7c2f2c;
153
+ border: 1px solid rgba(141, 61, 56, 0.45);
154
+ box-shadow: 0 6px 14px rgba(120, 54, 50, 0.18);
155
+ }
156
+
157
+ .stop-btn button:hover {
158
+ background: rgba(247, 219, 215, 0.92);
159
+ }
160
+
161
+ .gradio-container.dark .primary-btn button {
162
+ background: rgba(53, 56, 60, 0.85);
163
+ color: #f1f3f5;
164
+ border: 1px solid rgba(167, 173, 183, 0.45);
165
+ box-shadow: 0 8px 18px rgba(10, 12, 14, 0.45);
166
+ }
167
+
168
+ .gradio-container.dark .primary-btn button:hover {
169
+ background: rgba(64, 68, 74, 0.92);
170
+ }
171
+
172
+ .gradio-container.dark .stop-btn button {
173
+ background: rgba(96, 52, 48, 0.75);
174
+ color: #f7ddda;
175
+ border: 1px solid rgba(181, 96, 90, 0.6);
176
+ box-shadow: 0 8px 18px rgba(0, 0, 0, 0.45);
177
+ }
178
+
179
+ .gradio-container.dark .stop-btn button:hover {
180
+ background: rgba(108, 63, 58, 0.9);
181
+ }
182
+
183
+ body,
184
+ .gr-block *,
185
+ .gradio-container * {
186
+ font-family: "Times New Roman", Times, serif;
187
+ font-size: 0.9rem;
188
+ }