Spaces: Running on Zero
Update app.py
app.py CHANGED
@@ -56,7 +56,7 @@ print(HF_TOKEN)
 
 
 m = AutoModelForCausalLM.from_pretrained(
-    "
+    "ibm-granite/granite-guardian-3.0-2b",
     torch_dtype=torch.bfloat16 if torch.cuda.is_available() else torch.float32,
     trust_remote_code=True,token=HF_TOKEN
 )
@@ -64,7 +64,7 @@ m = AutoModelForCausalLM.from_pretrained(
 embedding_func=m.get_input_embeddings()
 embedding_func.weight.requires_grad=False
 
-tok = AutoTokenizer.from_pretrained("
+tok = AutoTokenizer.from_pretrained("ibm-granite/granite-guardian-3.0-2b",
     trust_remote_code=True,token=HF_TOKEN
 )
 tok.padding_side = "left"
@@ -100,7 +100,7 @@ def embedding_shift(original_embedding,shift_embeddings,prefix_embedding,suffix_
     )
     return input_embeddings
 
-@spaces.GPU(duration=
+@spaces.GPU(duration=30)
 def engine(input_embeds):
     m.to("cuda")
     output_text = []
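In effect, the commit pins both the model and the tokenizer to the ibm-granite/granite-guardian-3.0-2b checkpoint and caps the ZeroGPU allocation for engine at 30 seconds per call. Below is a minimal, self-contained sketch of the resulting loading pattern; it assumes HF_TOKEN is supplied via the Space's secrets and uses a plain generate() call in place of the Space's actual engine loop.

# Sketch only: assumed details are the HF_TOKEN environment variable and the
# simplified generate() body; everything else mirrors the diff above.
import os

import spaces
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

HF_TOKEN = os.environ.get("HF_TOKEN")
MODEL_ID = "ibm-granite/granite-guardian-3.0-2b"

# Load the pinned checkpoint once at startup (on CPU under ZeroGPU).
m = AutoModelForCausalLM.from_pretrained(
    MODEL_ID,
    torch_dtype=torch.bfloat16 if torch.cuda.is_available() else torch.float32,
    trust_remote_code=True,
    token=HF_TOKEN,
)
tok = AutoTokenizer.from_pretrained(MODEL_ID, trust_remote_code=True, token=HF_TOKEN)
tok.padding_side = "left"

@spaces.GPU(duration=30)  # hold the ZeroGPU allocation for at most ~30 s per call
def engine(input_embeds):
    m.to("cuda")
    with torch.no_grad():
        out = m.generate(inputs_embeds=input_embeds, max_new_tokens=32)
    return tok.batch_decode(out, skip_special_tokens=True)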