import os

import gradio as gr
from huggingface_hub import login
from transformers import pipeline

# Read the Hugging Face access token from the environment and authenticate.
api_key = os.getenv("TOKEN")
login(token=api_key)


def greet(name):
    return "Hello " + name + "!!"


# Use a pipeline as a high-level helper. The token is needed because
# meta-llama/Llama-3.1-8B-Instruct is a gated model.
pipe = pipeline(
    "text-generation",
    model="meta-llama/Llama-3.1-8B-Instruct",
    token=api_key,
)

# Run a quick test generation at startup with a chat-style prompt.
messages = [
    {"role": "user", "content": "Who are you?"},
]
pipe(messages)

# demo = gr.Interface(fn=greet, inputs="text", outputs="text")
# demo.launch()