Update app.py
Browse files
app.py
CHANGED
@@ -95,7 +95,7 @@ response = requests.get(url)
|
|
95 |
with open(local_filename, 'wb') as f:
|
96 |
f.write(response.content)
|
97 |
'''
|
98 |
-
model = tf.keras.
|
99 |
|
100 |
|
101 |
|
@@ -116,7 +116,7 @@ def generate_arabic_text(seed_text, next_words=50):
|
|
116 |
for _ in range(next_words):
|
117 |
token_list = tokenizer.encode(generated_text, add_special_tokens=False)
|
118 |
token_list = pad_sequences([token_list], maxlen=max_sequence_len-1, padding='pre')
|
119 |
-
predicted = np.argmax(model(token_list), axis=-1)
|
120 |
output_word = tokenizer.decode(predicted[0])
|
121 |
generated_text += " " + output_word
|
122 |
reconnected_text = generated_text.replace(" ##", "")
|
|
|
95 |
with open(local_filename, 'wb') as f:
|
96 |
f.write(response.content)
|
97 |
'''
|
98 |
+
model = tf.keras.models.load_model('my_model')
|
99 |
|
100 |
|
101 |
|
|
|
116 |
for _ in range(next_words):
|
117 |
token_list = tokenizer.encode(generated_text, add_special_tokens=False)
|
118 |
token_list = pad_sequences([token_list], maxlen=max_sequence_len-1, padding='pre')
|
119 |
+
predicted = np.argmax(model.predict(token_list), axis=-1)
|
120 |
output_word = tokenizer.decode(predicted[0])
|
121 |
generated_text += " " + output_word
|
122 |
reconnected_text = generated_text.replace(" ##", "")
|