sashtech committed on
Commit
0f2a23a
1 Parent(s): 6d60f72

Update app.py

Files changed (1)
  app.py +129 -21
app.py CHANGED
@@ -1,26 +1,134 @@
+ import os
  import gradio as gr
+ from transformers import pipeline
+ import spacy
+ import subprocess
+ import nltk
+ from nltk.corpus import wordnet
  from gramformer import Gramformer

- # Initialize the Gramformer model (using default settings for now)
- gf = Gramformer(models=1, use_gpu=False)
+ # Initialize the English text classification pipeline for AI detection
+ pipeline_en = pipeline(task="text-classification", model="Hello-SimpleAI/chatgpt-detector-roberta")
+
+ # Function to predict the label and score for English text (AI Detection)
+ def predict_en(text):
+     res = pipeline_en(text)[0]
+     return res['label'], res['score']
+
+ # Ensure necessary NLTK data is downloaded for Humanifier
+ nltk.download('wordnet')
+ nltk.download('omw-1.4')
+
+ # Ensure the SpaCy model is installed for Humanifier
+ try:
+     nlp = spacy.load("en_core_web_sm")
+ except OSError:
+     subprocess.run(["python", "-m", "spacy", "download", "en_core_web_sm"])
+     nlp = spacy.load("en_core_web_sm")
+
+ # Initialize Gramformer for grammar correction
+ gf = Gramformer(models=1, use_gpu=False)  # You can set use_gpu=True if running on a machine with a GPU

+ # Function to correct grammar using Gramformer
  def correct_grammar(text):
-     # Correct the input text using Gramformer
-     corrected_sentences = gf.correct(text)
-     return " ".join(corrected_sentences)
-
- # Gradio Interface
- def main():
-     interface = gr.Interface(
-         fn=correct_grammar,
-         inputs=gr.Textbox(lines=2, placeholder="Enter text here..."),
-         outputs="text",
-         title="Grammar Correction App",
-         description="This app corrects grammar using the Gramformer model. Enter a sentence to correct its grammar.",
-     )
-
-     # Launch the Gradio interface
-     interface.launch()
-
- if __name__ == "__main__":
-     main()
+     corrections = gf.correct(text)
+     return ' '.join(corrections)
+
+ # Function to get synonyms using NLTK WordNet (Humanifier)
+ def get_synonyms_nltk(word, pos):
+     synsets = wordnet.synsets(word, pos=pos)
+     if synsets:
+         lemmas = synsets[0].lemmas()
+         return [lemma.name() for lemma in lemmas]
+     return []
+
+ # Function to capitalize the first letter of sentences and proper nouns (Humanifier)
+ def capitalize_sentences_and_nouns(text):
+     doc = nlp(text)
+     corrected_text = []
+
+     for sent in doc.sents:
+         sentence = []
+         for token in sent:
+             if token.i == sent.start:  # First word of the sentence
+                 sentence.append(token.text.capitalize())
+             elif token.pos_ == "PROPN":  # Proper noun
+                 sentence.append(token.text.capitalize())
+             else:
+                 sentence.append(token.text)
+         corrected_text.append(' '.join(sentence))
+
+     return ' '.join(corrected_text)
+
+ # Paraphrasing function using SpaCy and NLTK (Humanifier)
+ def paraphrase_with_spacy_nltk(text):
+     doc = nlp(text)
+     paraphrased_words = []
+
+     for token in doc:
+         # Map SpaCy POS tags to WordNet POS tags
+         pos = None
+         if token.pos_ in {"NOUN"}:
+             pos = wordnet.NOUN
+         elif token.pos_ in {"VERB"}:
+             pos = wordnet.VERB
+         elif token.pos_ in {"ADJ"}:
+             pos = wordnet.ADJ
+         elif token.pos_ in {"ADV"}:
+             pos = wordnet.ADV
+
+         synonyms = get_synonyms_nltk(token.text.lower(), pos) if pos else []
+
+         # Replace with a synonym only if it makes sense
+         if synonyms and token.pos_ in {"NOUN", "VERB", "ADJ", "ADV"} and synonyms[0] != token.text.lower():
+             paraphrased_words.append(synonyms[0])
+         else:
+             paraphrased_words.append(token.text)
+
+     # Join the words back into a sentence
+     paraphrased_sentence = ' '.join(paraphrased_words)
+
+     # Capitalize sentences and proper nouns
+     corrected_text = capitalize_sentences_and_nouns(paraphrased_sentence)
+
+     return corrected_text
+
+ # Combined function: Paraphrase -> Capitalization (Humanifier)
+ def paraphrase_and_correct(text):
+     # Step 1: Paraphrase the text
+     paraphrased_text = paraphrase_with_spacy_nltk(text)
+
+     # Step 2: Capitalize sentences and proper nouns
+     final_text = capitalize_sentences_and_nouns(paraphrased_text)
+
+     return final_text
+
+ # Gradio app setup with three tabs
+ with gr.Blocks() as demo:
+     with gr.Tab("AI Detection"):
+         t1 = gr.Textbox(lines=5, label='Text')
+         button1 = gr.Button("🤖 Predict!")
+         label1 = gr.Textbox(lines=1, label='Predicted Label 🎃')
+         score1 = gr.Textbox(lines=1, label='Prob')
+
+         # Connect the prediction function to the button
+         button1.click(predict_en, inputs=[t1], outputs=[label1, score1], api_name='predict_en')
+
+     with gr.Tab("Humanifier"):
+         text_input = gr.Textbox(lines=5, label="Input Text")
+         paraphrase_button = gr.Button("Paraphrase & Correct")
+         output_text = gr.Textbox(label="Paraphrased Text")
+
+         # Connect the paraphrasing function to the button
+         paraphrase_button.click(paraphrase_and_correct, inputs=text_input, outputs=output_text)
+
+     with gr.Tab("Grammar Correction"):
+         grammar_input = gr.Textbox(lines=5, label="Input Text")
+         grammar_button = gr.Button("Correct Grammar")
+         grammar_output = gr.Textbox(label="Corrected Text")
+
+         # Connect the grammar correction function to the button
+         grammar_button.click(correct_grammar, inputs=grammar_input, outputs=grammar_output)
+
+ # Launch the app with all functionalities
+ demo.launch()
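
Note for reviewers: the new correct_grammar simply joins whatever gf.correct() yields, which works because Gramformer returns an iterable of candidate correction strings. A minimal sketch for sanity-checking that behaviour outside the Gradio UI (not part of app.py; assumes Gramformer and its torch/transformers dependencies are installed, and the example sentence and printed output are illustrative only):

    # Standalone check of the Gramformer call used by correct_grammar()
    from gramformer import Gramformer

    gf = Gramformer(models=1, use_gpu=False)  # same settings as in app.py: corrector model, CPU

    candidates = gf.correct("he are moving here")  # iterable of candidate corrections
    print(candidates)              # e.g. {'He is moving here.'}
    print(' '.join(candidates))    # the string correct_grammar() would return

Separately, because the AI-detection button registers api_name='predict_en', that tab should also be callable programmatically once the app is running, e.g. via Gradio's client library with Client(app_url).predict(text, api_name='/predict_en'); treat the exact client call as an assumption to verify against the Gradio version used by the Space.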