XufengDuan committed
Commit 1ac6033 • 1 Parent(s): 9fca7e9
update scripts
Files changed:
- .DS_Store (+0 -0)
- src/backend/model_operations.py (+4 -3)
- src/envs.py (+1 -0)
.DS_Store CHANGED
Binary files a/.DS_Store and b/.DS_Store differ
src/backend/model_operations.py CHANGED
@@ -29,6 +29,7 @@ import src.envs as envs
 # import scipy
 from scipy.spatial.distance import jensenshannon
 import numpy as np
+import spacy_transformers
 
 
 
@@ -45,7 +46,7 @@ logging.basicConfig(level=logging.INFO,
 try:
     nlp1 = spacy.load("en_core_web_trf")
 except OSError:
-    print("Downloading language model for the spaCy '
+    print("Downloading language model for the spaCy 'en_core_web_trf'...")
     spacy.cli.download("en_core_web_trf")
     nlp = spacy.load("en_core_web_trf")
 
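The two hunks above add an explicit import of spacy_transformers (the en_core_web_trf pipeline relies on the transformer components that package registers) and restore the full download message. For reference, the fallback they touch boils down to the pattern below; this is a minimal standalone sketch mirroring the code in the diff (with a single nlp variable for simplicity), not the exact surrounding module:

import spacy
import spacy_transformers  # registers the transformer pipeline factories used by en_core_web_trf

try:
    nlp = spacy.load("en_core_web_trf")
except OSError:
    # Model not installed yet (e.g. first start of the Space): download it, then load.
    print("Downloading language model for the spaCy 'en_core_web_trf'...")
    spacy.cli.download("en_core_web_trf")
    nlp = spacy.load("en_core_web_trf")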
@@ -454,10 +455,10 @@ class SummaryGenerator:
         # result = response['choices'][0]['message']['content']
         # print(result)
         from huggingface_hub import InferenceClient
-
+        print("token_for_request:",envs.TOKEN)
         client = InferenceClient(self.model_id,api_key=envs.TOKEN,headers={"X-use-cache": "false"})
         messages = [{"role": "system", "content": system_prompt},{"role": "user", "content": user_prompt}]
-        outputs = client.chat_completion(messages, max_tokens=50)
+        # outputs = client.chat_completion(messages, max_tokens=50)
         result = None
         while result is None:
             outputs = client.chat_completion(messages, max_tokens=50)
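In the last hunk the one-shot chat_completion call is commented out, the token is printed before the client is built, and the request is issued only inside the while result is None: loop that follows. A minimal standalone sketch of that call pattern is shown below; the model id, token, prompts, response handling, and retry back-off are placeholders and assumptions, since the diff does not show how result is assigned:

import time
from huggingface_hub import InferenceClient

MODEL_ID = "meta-llama/Meta-Llama-3-8B-Instruct"  # placeholder for self.model_id
TOKEN = "hf_..."                                  # placeholder for envs.TOKEN
system_prompt = "You are a helpful assistant."    # placeholder
user_prompt = "Summarize the following text: ..." # placeholder

client = InferenceClient(MODEL_ID, api_key=TOKEN, headers={"X-use-cache": "false"})
messages = [{"role": "system", "content": system_prompt},
            {"role": "user", "content": user_prompt}]

result = None
while result is None:
    try:
        outputs = client.chat_completion(messages, max_tokens=50)
        result = outputs.choices[0].message.content  # assumed response shape
    except Exception as err:
        # e.g. the model is still loading on the Inference API: wait and retry
        print("request failed, retrying:", err)
        time.sleep(5)

The "X-use-cache": "false" header asks the Inference API not to return a cached response, so each retry triggers a fresh generation.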
src/envs.py CHANGED
@@ -6,6 +6,7 @@ from huggingface_hub import HfApi
 # replace this with our token
 # TOKEN = os.environ.get("HF_TOKEN", None)
 TOKEN = os.getenv("H4_TOKEN")
+print("token:", TOKEN)
 # print(TOKEN)
 # OWNER = "vectara"
 # REPO_ID = f"{OWNER}/Humanlike"