import streamlit as st
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline


# Load the LLaMA 2 chat model once and cache it, so Streamlit does not
# re-initialise the weights on every interaction.
@st.cache_resource
def load_llm():
    tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-chat-hf")
    model = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-2-7b-chat-hf")
    return pipeline('text-generation', model=model, tokenizer=tokenizer)


# Function to get a response from the LLaMA 2 model
def getLLamaresponse(input_text, keywords, blog_style):
    llm = load_llm()

    # Prompt template
    template = """
        Generate a project idea on the topic of {input_text} for a {blog_style}, using keywords like {keywords}.
    """

    # Format the prompt
    prompt = template.format(blog_style=blog_style, input_text=input_text, keywords=keywords)

    # Generate the response from the LLaMA 2 model
    response = llm(prompt, max_length=250, do_sample=True, temperature=0.01)
    return response[0]['generated_text']


st.set_page_config(page_title="Generate Project Idea",
                   page_icon='🤖',
                   layout='centered',
                   initial_sidebar_state='collapsed')

st.header("Generate Project Idea 🤖")

input_text = st.text_input("Enter the Topic")

# Two more columns for the additional input fields
col1, col2 = st.columns([5, 5])

with col1:
    keywords = st.text_input('Keywords')
with col2:
    blog_style = st.selectbox('Generating project idea for',
                              ('Researchers', 'Data Scientist', 'Software Developer', 'Common People'),
                              index=0)

submit = st.button("Generate")

# Final response
if submit:
    response = getLLamaresponse(input_text, keywords, blog_style)
    st.write(response)
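
# Usage note (an assumption, not part of the original script): if this file is
# saved as app.py and you have been granted access to the gated
# meta-llama/Llama-2-7b-chat-hf checkpoint on Hugging Face, the app can be
# launched locally with:
#   streamlit run app.py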