Doron Adler committed on
Commit 07c279b • 1 Parent(s): 6522282

Updated App name and text to reflect the model being used

Files changed (3)
  1. README.md +1 -1
  2. app.py +6 -4
  3. requirements.txt +1 -1
README.md CHANGED
@@ -1,5 +1,5 @@
 ---
-title: Hebrew Poetry - GPT Neo (Small)
+title: Hebrew GPT Neo (Small)
 emoji: 📚
 colorFrom: blue
 colorTo: gray
app.py CHANGED
@@ -74,10 +74,10 @@ def extend(input_text, max_size=20, top_k=50, top_p=0.95):
 
 
 if __name__ == "__main__":
-    st.title("Hebrew Poetry - GPT Neo (Small)")
+    st.title("Hebrew GPT Neo (Small)")
 
-    model, tokenizer = load_model("Norod78/hebrew-gpt_neo-small")
-    #model, tokenizer = load_model("Norod78/hebrew_poetry-gpt_neo-tiny")
+    tokenizer = AutoTokenizer.from_pretrained("Norod78/hebrew-gpt_neo-small")
+    model = AutoModelForCausalLM.from_pretrained("Norod78/hebrew-gpt_neo-small")
 
     stop_token = "<|endoftext|>"
     new_lines = "\n\n\n"
@@ -101,7 +101,7 @@ if __name__ == "__main__":
     top_p = st.sidebar.slider("Top-P", 0.0, 1.0, 0.95, help="If set to float < 1, only the most probable tokens with probabilities that add up to top_p or higher are kept for generation.")
 
     st.markdown(
-        """Hebrew poetry text generation model based on EleutherAI's gpt-neo. Each was trained on a TPUv3-8 which was made available to me via the [TPU Research Cloud Program](https://sites.research.google/trc/). """
+        """Hebrew text generation model based on EleutherAI's gpt-neo. Each was trained on a TPUv3-8 which was made available to me via the [TPU Research Cloud Program](https://sites.research.google/trc/). """
     )
 
     prompt = "האיש האחרון בעולם ישב לבד בחדרו כשלפתע נשמעה נקישה"
@@ -121,4 +121,6 @@ if __name__ == "__main__":
     st.markdown(f"<p dir=\"rtl\" style=\"text-align:right;\"> {result} </p>", unsafe_allow_html=True)
     st.write("\n\nResult length: " + str(len(result)) + " bytes")
     print(f"\"{result}\"")
+
+    st.markdown("<footer><hr><p style=\"font-size:12px\">By <a href=\"https://linktr.ee/Norod78\">Doron Adler</a></p></footer> ", unsafe_allow_html=True)
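The new app.py lines drop the old load_model() helper in favour of the standard transformers Auto classes. As a rough sketch of how those pieces fit together (this is not the Space's actual extend() implementation; the generate() arguments below mirror the sidebar defaults shown in the diff and are otherwise assumptions), loading the model and sampling a continuation could look like:

```python
# Minimal sketch: load Norod78/hebrew-gpt_neo-small with the transformers Auto
# classes (as in the updated app.py) and sample a continuation with top-k/top-p.
# The generate() arguments are illustrative assumptions, not the Space's code.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("Norod78/hebrew-gpt_neo-small")
model = AutoModelForCausalLM.from_pretrained("Norod78/hebrew-gpt_neo-small")
model.eval()

stop_token = "<|endoftext|>"
prompt = "האיש האחרון בעולם ישב לבד בחדרו כשלפתע נשמעה נקישה"

input_ids = tokenizer.encode(prompt, return_tensors="pt")
with torch.no_grad():
    output_ids = model.generate(
        input_ids,
        do_sample=True,                       # sample rather than decode greedily
        max_length=input_ids.shape[1] + 20,   # roughly the app's max_size default
        top_k=50,                             # sidebar Top-K default
        top_p=0.95,                           # sidebar Top-P default
        pad_token_id=tokenizer.eos_token_id,
    )

text = tokenizer.decode(output_ids[0], skip_special_tokens=False)
# Keep only the text before the stop token, presumably what extend() does with stop_token.
result = text.split(stop_token)[0]
print(result)
```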
requirements.txt CHANGED
@@ -1,4 +1,4 @@
-streamlit==0.80.0
+streamlit
 transformers
 tokenizers
 torch