# NOTE(review): the lines here ("Spaces:" / "Sleeping") were residue from
# scraping the HuggingFace Spaces web UI, not part of the app source.
# Cache/config locations and Streamlit behaviour, set via environment
# variables. These assignments must run before streamlit/transformers are
# imported, because both libraries read their configuration at import time.
# Everything is pointed at /tmp, the only writable path on a HF Space.
import os

_ENV_DEFAULTS = {
    "MPLCONFIGDIR": "/tmp",
    "STREAMLIT_BROWSER_GATHER_USAGE_STATS": "false",
    "STREAMLIT_SERVER_HEADLESS": "true",
    "STREAMLIT_SERVER_ENABLE_FILE_WATCHER": "false",
    "STREAMLIT_CONFIG_DIR": "/tmp/.streamlit",
    "HF_HOME": "/tmp/huggingface",
    "TRANSFORMERS_CACHE": "/tmp/huggingface",
}
os.environ.update(_ENV_DEFAULTS)
# Third-party imports. These must come after the environment setup above so
# that Streamlit and Hugging Face pick up the /tmp cache locations.
import streamlit as st
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
# --- App config and title ---------------------------------------------------
# st.set_page_config must be the first Streamlit call in the script.
# NOTE: the original UI text said "DeepSeek-R1", but the model actually loaded
# below is deepseek-ai/DeepSeek-Coder-1.3B-base — the titles now match it.
st.set_page_config(page_title="DeepSeek-Coder Chatbot", page_icon="🤖")
st.title("🧠 DeepSeek-Coder CPU Chatbot")
st.caption("Running entirely on CPU using Hugging Face Transformers")
@st.cache_resource
def load_model():
    """Load the tokenizer and model once per server process.

    ``@st.cache_resource`` keeps the (tokenizer, model) pair alive across
    Streamlit reruns; without it, the ~1.3B-parameter model would be
    re-instantiated on every widget interaction (every button click).

    Returns:
        tuple: ``(tokenizer, model)`` for
        ``deepseek-ai/DeepSeek-Coder-1.3B-base``.
    """
    model_id = "deepseek-ai/DeepSeek-Coder-1.3B-base"
    tokenizer = AutoTokenizer.from_pretrained(model_id)
    model = AutoModelForCausalLM.from_pretrained(model_id)
    return tokenizer, model


tokenizer, model = load_model()
# --- Chat UI -----------------------------------------------------------------
user_input = st.text_area("📥 Enter your prompt here:", "Explain what a neural network is.")

if st.button("🧠 Generate Response"):
    with st.spinner("Thinking..."):
        inputs = tokenizer(user_input, return_tensors="pt")
        # Inference only: disabling autograd avoids storing activations,
        # which matters on a CPU-only, memory-constrained Space.
        with torch.no_grad():
            outputs = model.generate(**inputs, max_new_tokens=100)
        # Decode only the newly generated tokens — decoding outputs[0]
        # in full would echo the user's prompt back inside the response.
        prompt_len = inputs["input_ids"].shape[1]
        response = tokenizer.decode(outputs[0][prompt_len:], skip_special_tokens=True)
    st.markdown("### 🤖 Response:")
    st.write(response)