from transformers import pipeline
from transformers import AutoTokenizer, AutoModelForCausalLM, AutoModelForSequenceClassification
import torch
import torch.nn.functional as F
import os
from huggingface_hub import login
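
# Demo script for a Hugging Face Space: logs in with an access token, loads a
# Llama-2 chat model, and generates text for a couple of prompts. Earlier
# experiments (sentiment analysis, zero-shot classification, and generation
# from a local checkpoint) are kept commented out below for reference.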
def run_model(input_text):
    #classifier = pipeline('sentiment-analysis')
    #response = classifier('I have been waiting for a HuggingFace course my whole life!')
    #print(response)
    #outputs = response

    #generator = pipeline("text-generation", model="distilgpt2")
    #response = generator("In this course, we will teach you how to",
    #                     max_length=30,
    #                     num_return_sequences=2
    #                     )
    #print(response)

    #classifier = pipeline("zero-shot-classification")
    #response = classifier(
    #    "This is a course about Python list comprehension",
    #    candidate_labels=["education", "politics", "business"]
    #    )
    #print(response)
    #tokenizer = AutoTokenizer.from_pretrained(r"D:\gouri_docs\generative_AI\llama\llama")
    # .generate() needs a causal-LM head, not a sequence-classification head:
    #model = AutoModelForCausalLM.from_pretrained(r"D:\gouri_docs\generative_AI\llama\llama")
    ##input_text = "Hello Llama! How are you?"
    #inputs = tokenizer.encode(input_text, return_tensors="pt")
    #outputs = model.generate(
    #    inputs,
    #    max_length=50,
    #    num_return_sequences=5,
    #    do_sample=True,  # required for temperature and multiple sequences
    #    temperature=0.7
    #    )
    #print("Generated Text:")
    #for i, output in enumerate(outputs):
    #    print(f"{i}: {tokenizer.decode(output)}")
    model_name = "meta-llama/Llama-2-70b-chat-hf"  # previously "distilbert-base-uncased-finetuned-sst-2-english"
    HF_TOKEN = os.environ["HF_TOKEN"]  # access token for the gated Llama-2 repo
    #os.environ["HF_ACCESS_TOKEN"] = HF_TOKEN
    login(token=HF_TOKEN)
    # Llama-2 is a causal language model; text generation needs
    # AutoModelForCausalLM, not AutoModelForSequenceClassification.
    model = AutoModelForCausalLM.from_pretrained(model_name, token=HF_TOKEN)
    tokenizer = AutoTokenizer.from_pretrained(model_name, token=HF_TOKEN)
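
    # Note: the 70B chat checkpoint needs far more memory than a typical Space
    # provides. A hedged alternative (a sketch, assuming the `accelerate`
    # package is installed) would shard the model across available devices in
    # half precision:
    #model = AutoModelForCausalLM.from_pretrained(
    #    model_name,
    #    token=HF_TOKEN,
    #    torch_dtype=torch.float16,
    #    device_map="auto"
    #    )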
    text_completion = pipeline(
        'text-generation',  # previously 'sentiment-analysis'
        model=model,
        tokenizer=tokenizer)
    prompts = ['I have been waiting for a HuggingFace course my whole life!',
               'Python is great!']
    response = text_completion(prompts)
    print(response)
    outputs = response
    # (classification path from the earlier sentiment-analysis setup:
    # logits -> softmax -> argmax only makes sense for a classifier head)
    #batch = tokenizer(prompts, padding=True, truncation=True, max_length=512, return_tensors='pt')
    #print('\n\nbatch =', batch, '\n\n')
    #with torch.no_grad():
    #    outputs = model(**batch)
    #    print('outputs =', outputs)
    #    predictions = F.softmax(outputs.logits, dim=1)
    #    print('predictions =', predictions)
    #    labels = torch.argmax(predictions, dim=1)
    #    print('labels =', labels, '\n\n')
    return outputs
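

# Minimal smoke test (a sketch: assumes the HF_TOKEN environment variable is
# set and the machine can hold the model; note that the input_text argument is
# currently unused by run_model, which generates from its own hard-coded
# prompts).
if __name__ == "__main__":
    print(run_model("Hello Llama! How are you?"))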