from haystack.nodes import TransformersDocumentClassifier
from haystack.schema import Document
from typing import List, Tuple
import configparser
import logging
from pandas import DataFrame, Series
from utils.preprocessing import processingpipeline
try:
    import streamlit as st
except ImportError:
    st = None
    logging.info("Streamlit not installed")

config = configparser.ConfigParser()
try:
    config.read_file(open('paramconfig.cfg'))
except Exception:
    logging.warning("paramconfig file not found")
    # st.info would fail if streamlit itself is missing, so guard the call
    if st is not None:
        st.info("Please place the paramconfig file in the same directory as app.py")


@st.cache(allow_output_mutation=True)
def load_sdgClassifier():
    """
    loads the document classifier using haystack, where the name/path of model
    in HF-hub as string is used to fetch the model object.
    1. https://docs.haystack.deepset.ai/reference/document-classifier-api
    2. https://docs.haystack.deepset.ai/docs/document_classifier

    Return: document classifier model
    """
    logging.info("Loading classifier")
    doc_classifier_model = config.get('sdg','MODEL')
    doc_classifier = TransformersDocumentClassifier(
        model_name_or_path=doc_classifier_model,
        task="text-classification")
    return doc_classifier
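
# Minimal usage sketch (hypothetical snippet, assuming paramconfig provides a
# valid [sdg] MODEL entry):
#   classifier = load_sdgClassifier()
#   tagged = classifier.predict([Document(content="Access to clean water ...")])
#   print(tagged[0].meta['classification']['label'])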



@st.cache(allow_output_mutation=True)
def sdg_classification(haystackdoc: List[Document]) -> Tuple[DataFrame, Series]:
    """
    Text classification on the list of texts provided. The classifier assigns
    the most appropriate label to each text, identifying which Sustainable
    Development Goal (SDG) the text belongs to.

    Params
    ---------
    haystackdoc: List of Haystack Documents. The output of the preprocessing
    pipeline contains the list of paragraphs in different formats; here the
    list of Haystack Documents is used.

    Returns
    ----------
    df: DataFrame with two columns ['SDG:int', 'text']
    x: Series with the unique SDGs covered in the uploaded document and the
    number of paragraphs in which each is discussed.

    """
    logging.info("Working on SDG Classification")
    threshold = float(config.get('sdg','THRESHOLD'))

    
    classifier = load_sdgClassifier()
    results = classifier.predict(haystackdoc)


    labels_= [(l.meta['classification']['label'],
            l.meta['classification']['score'],l.content,) for l in results]

    df = DataFrame(labels_, columns=["SDG","Relevancy","text"])
    
    df = df.sort_values(by="Relevancy", ascending=False).reset_index(drop=True)  
    df.index += 1
    df =df[df['Relevancy']>threshold]
    x = df['SDG'].value_counts()
    df= df.drop(['Relevancy'], axis = 1)
    

    return df, x
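
# Example of how the two return values might be consumed in the streamlit app
# (hypothetical snippet, not part of the original file):
#   df, x = sdg_classification(docs)
#   st.dataframe(df)    # paragraphs with their SDG label
#   st.bar_chart(x)     # paragraph counts per SDG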

def runSDGPreprocessingPipeline(file_path, file_name) -> dict:
    """
    Creates the pipeline and runs the preprocessing on the file; the params
    for the pipeline are fetched from paramconfig.

    Params
    ------------
    file_name: filename; in a streamlit application use
    st.session_state['filename']
    file_path: filepath; in a streamlit application use
    st.session_state['filepath']

    Return
    --------------
    dict: The preprocessing pipeline output is a dictionary with four
    objects. For the Haystack implementation of SDG classification we need
    the list of Haystack Documents, which can be fetched with
    key = 'documents' on the output.

    """

    sdg_processing_pipeline = processingpipeline()
    split_by = config.get('sdg', 'SPLIT_BY')
    split_length = int(config.get('sdg', 'SPLIT_LENGTH'))
    split_overlap = int(config.get('sdg', 'SPLIT_OVERLAP'))
    remove_punc = bool(int(config.get('sdg', 'REMOVE_PUNC')))

    output_sdg_pre = sdg_processing_pipeline.run(
        file_paths=file_path,
        params={"FileConverter": {"file_path": file_path,
                                  "file_name": file_name},
                "UdfPreProcessor": {"removePunc": remove_punc,
                                    "split_by": split_by,
                                    "split_length": split_length,
                                    "split_overlap": split_overlap}})

    return output_sdg_pre
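
# End-to-end sketch tying the two steps together (hedged example with
# hypothetical paths; the real app wires file_path/file_name through
# streamlit session state):
#   output = runSDGPreprocessingPipeline(file_path='sample/report.pdf',
#                                        file_name='report.pdf')
#   docs = output['documents']    # list of Haystack Documents
#   df, x = sdg_classification(docs)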