import os

import requests
from transformers import Tool


class TextGenerationTool(Tool):
    name = "text_generator"
    description = (
        "This is a tool for text generation. It takes a prompt as input and returns the generated text."
    )
    inputs = ["text"]
    outputs = ["text"]

    def __call__(self, prompt: str):
        # Query the Hugging Face Inference API for the openchat/openchat_3.5 model.
        API_URL = "https://api-inference.huggingface.co/models/openchat/openchat_3.5"
        # The API token is read from the 'hf' environment variable.
        headers = {"Authorization": "Bearer " + os.environ["hf"]}
        payload = {
            "inputs": prompt  # Adjust this based on your model's input format
        }

        # Make the request to the API; the text-generation endpoint returns a list
        # of objects of the form {"generated_text": "..."}.
        generated_text = requests.post(API_URL, headers=headers, json=payload).json()
        print(generated_text)
        return generated_text[0]["generated_text"]

        # Alternative: generate locally with a transformers pipeline instead of the API.
        # text_generator = pipeline(model="gpt2")
        # generated_text = text_generator(prompt, max_length=500, num_return_sequences=1, temperature=0.7)
        # print(generated_text)
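
# Minimal usage sketch (an assumption, not part of the original Space): the tool
# can be invoked directly since __call__ is defined. It requires a valid Hugging
# Face API token in the 'hf' environment variable before running.
if __name__ == "__main__":
    tool = TextGenerationTool()
    print(tool("Tell me a short story about a robot learning to paint."))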