|
import gradio as gr |
|
from io import BytesIO |
|
import torch |
|
import os |
|
import pdfplumber |
|
import re |
|
from transformers import pipeline, AutoModelForQuestionAnswering, AutoTokenizer |
|
from transformers import BertTokenizer, EncoderDecoderModel |
|
|
|
def process_pdf(path):
    """Extract the report sections of interest from a PDF given as raw bytes.

    Args:
        path: raw PDF file content as bytes (despite the name, not a filesystem path).

    Returns:
        dict mapping a human-readable section key to the extracted section text.
    """
    # (result key, start heading, end heading) — headings as they appear in the report.
    section_specs = (
        ("1. Kurzbeschreibung",
         "1. Kurzbeschreibung",
         "2. Einordnung des Moduls"),
        ("4.1 Bewertungen von Zielen, Zielgruppen, Wirkungshypothesen und Indikatoren",
         "4.1 Bewertungen von Zielen, Zielgruppen, Wirkungshypothesen und Indikatoren",
         "4.2 "),
        ("4.2 Umgesetzte Maßnahmen / Aktivitäten während des Berichtszeitraums",
         "4.2 ",
         "4.3 "),
        ("4.6 Bewertung der Wirkungen und Risiken",
         "4.6 ",
         "5. Übergeordnete Empfehlungen"),
        ("5. Übergeordnete Empfehlungen",
         "5. Übergeordnete Empfehlungen",
         "5.2 Lernerfahrungen, die für die Länderstrategie und zukünftige"),
    )
    return {
        key: read_section(path, start_heading, end_heading)
        for key, start_heading, end_heading in section_specs
    }
|
|
|
def read_section(path, wanted_section, next_section):
    """Extract the text of one section from a PDF given as raw bytes.

    Scans every page for the two headings, takes the page range spanning the
    LAST occurrence of each (headings also appear earlier, presumably in a
    table of contents — hence max(); TODO confirm), and slices the exact
    section body out of the joined page text with extract_between.

    Args:
        path: raw PDF file content as bytes (despite the name, not a path).
        wanted_section: heading that starts the section.
        next_section: heading that terminates the section.

    Returns:
        wanted_section concatenated with the section body; if the body cannot
        be sliced out of the page text this is the string "None" (original
        behavior, kept).

    Raises:
        ValueError: if either heading is not found on any page.
    """
    # Context manager guarantees the PDF handle is closed even on error
    # (the original implementation leaked the pdfplumber document).
    with pdfplumber.open(BytesIO(path)) as doc:
        start_pages = []
        end_pages = []

        for page_idx in range(len(doc.pages)):
            if len(doc.pages[page_idx].search(wanted_section, return_chars=False, case=False)) > 0:
                start_pages.append(page_idx)
            if len(doc.pages[page_idx].search(next_section, return_chars=False, case=False)) > 0:
                end_pages.append(page_idx)

        # Fail with a clear message instead of letting max() raise an opaque
        # "max() arg is an empty sequence" ValueError on a missing heading.
        if not start_pages or not end_pages:
            raise ValueError(
                "Section markers not found in PDF: "
                f"start={wanted_section!r} ({len(start_pages)} pages), "
                f"end={next_section!r} ({len(end_pages)} pages)"
            )

        # Debug trace kept from the original implementation.
        print(wanted_section)
        print(max(start_pages))
        print(max(end_pages) + 1)

        pages_text = []
        for page_num in range(max(start_pages), max(end_pages) + 1):
            # NOTE(review): extract_text() may return None for image-only
            # pages in some pdfplumber versions — verify with target PDFs.
            pages_text.append(doc.pages[page_num].extract_text())
        text = " ".join(pages_text).replace("\n", " ")

    return wanted_section + str(extract_between(text, wanted_section, next_section))
|
|
|
def extract_between(text, start_string, end_string):
    """Return the substring of *text* lying strictly between the first
    occurrence of *start_string* and the first *end_string* that follows it.

    Returns None when either delimiter cannot be found in order.
    """
    begin_at = text.find(start_string)
    if begin_at == -1:
        return None
    body_start = begin_at + len(start_string)
    stop_at = text.find(end_string, body_start)
    if stop_at == -1:
        return None
    return text[body_start:stop_at]
|
|
|
def format_section1(section1_text):
    """Split the '1. Kurzbeschreibung' section text into labelled fields.

    Each field is the text between a pair of label markers as they appear in
    the report; fields whose markers are absent map to None.
    """
    # NOTE(review): TOPIC and PROGRAM use identical markers, so they always
    # hold the same value — confirm whether PROGRAM should use different ones.
    field_markers = {
        'TOPIC': ("Sektor", "EZ-Programm"),
        'PROGRAM': ("Sektor", "EZ-Programm"),
        'PROJECT DESCRIPTION': ("EZ-Programmziel", "Datum der letzten BE"),
        'PROJECT NAME': ("Modul", "Modulziel"),
        'OBJECTIVE': ("Modulziel", "Berichtszeitraum"),
        'PROGRESS': ("Zielerreichung des Moduls", "Massnahme im Zeitplan"),
        'STATUS': ("Massnahme im Zeitplan", "Risikoeinschätzung"),
        'RECOMMENDATIONS': ("Vorschläge zur Modulanpas-", "Voraussichtliche"),
    }
    return {
        field: extract_between(section1_text, start_marker, end_marker)
        for field, (start_marker, end_marker) in field_markers.items()
    }
|
|
|
def initialize_question_answering():
    """Build a German extractive question-answering pipeline.

    Downloads the deepset/gelectra-large-germanquad checkpoint on first use.
    """
    checkpoint = "deepset/gelectra-large-germanquad"
    return pipeline(
        "question-answering",
        model=AutoModelForQuestionAnswering.from_pretrained(checkpoint),
        tokenizer=AutoTokenizer.from_pretrained(checkpoint),
    )
|
|
|
def answer_questions_section_1(text, language="de"):
    """Run a fixed set of German questions against *text* with extractive QA.

    Args:
        text: context passage the answers are extracted from.
        language: currently unused; kept for interface compatibility.

    Returns:
        dict mapping each question to the model's answer string. Also prints
        each question/answer pair as a progress trace.
    """
    qa = initialize_question_answering()
    questions = (
        "Welches ist das Titel des Moduls?",
        "Welches ist das Sektor oder das Kernthema?",
        "Welches ist das Land?",
        "Zu welchem Program oder Programm gehort das Projekt?",
        "Wurde das Ziel des Moduls erreicht?",
        "Welche ist die Risikoeinschätzung des Moduls?",
        "Ist die Maßnahme im Zeitplan?",
    )
    answers = {}
    for question in questions:
        prediction = qa(question=question, context=text)
        answer = prediction['answer']
        print(f"Question: {question}")
        print(f"Answer: {answer}\n")
        answers[question] = answer
    return answers
|
|
|
def summarize_german_text(text):
    """Summarize German *text* with a bert2bert summarization checkpoint.

    Input is truncated/padded to 512 tokens; the summary is produced with
    beam search (4 beams, up to 500 tokens).
    """
    checkpoint = "mrm8488/bert2bert_shared-german-finetuned-summarization"
    tok = BertTokenizer.from_pretrained(checkpoint)
    seq2seq = EncoderDecoderModel.from_pretrained(checkpoint)
    encoded = tok(
        text,
        padding="max_length",
        truncation=True,
        max_length=512,
        return_tensors="pt",
    )
    generated = seq2seq.generate(
        encoded['input_ids'],
        num_beams=4,
        max_length=500,
        early_stopping=True,
    )
    return tok.decode(generated[0], skip_special_tokens=True)
|
|
|
def extract_details(path):
    """Summarize the key sections of a report PDF given as raw bytes.

    Returns a dict mapping a summary label to the model-generated summary of
    the corresponding extracted section.
    """
    sections = process_pdf(path)
    # (output label, key into the sections dict produced by process_pdf)
    summary_specs = (
        ("Section 4.1 summary",
         "4.1 Bewertungen von Zielen, Zielgruppen, Wirkungshypothesen und Indikatoren"),
        ("Section 4.2 summary",
         "4.2 Umgesetzte Maßnahmen / Aktivitäten während des Berichtszeitraums"),
        ("Section 4.6 summary",
         "4.6 Bewertung der Wirkungen und Risiken"),
        ("Section 5.1 summary",
         "5. Übergeordnete Empfehlungen"),
    )
    return {
        label: summarize_german_text(sections[section_key])
        for label, section_key in summary_specs
    }
|
|
|
|
|
|
|
if __name__ == "__main__":
    # Wire the PDF-summarization pipeline into a simple Gradio UI:
    # upload a PDF (delivered to extract_details as raw bytes), show the result.
    app = gr.Interface(
        fn=extract_details,
        inputs=gr.File(type="binary", label="Upload PDF"),
        outputs=gr.Textbox(label="Extracted Text"),
        title="PDF Text Extractor",
        description="Upload a PDF file to extract.",
    )
    app.launch()
|
|