import streamlit as st

import appStore.target as target_extraction
import appStore.netzero as netzero
import appStore.sector as sector
import appStore.adapmit as adapmit
import appStore.ghg as ghg
import appStore.doc_processing as processing
from utils.uploadAndExample import add_upload
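
# Streamlit entry point for the Climate Policy Understanding App: the sidebar
# lets the user upload a document or try the example document, and the
# "Analyze Document" button runs the processing and classification modules.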

st.set_page_config(page_title='Climate Policy Intelligence',
                   initial_sidebar_state='expanded', layout='wide')

with st.sidebar:
    # Choose between uploading a document and using the example document
    choice = st.radio(label='Select the Document',
                      help='You can upload a document or try the example document',
                      options=('Upload Document', 'Try Example'),
                      horizontal=True)
    add_upload(choice)

with st.container():
    st.markdown("<h2 style='text-align: center; color: black;'> Climate Policy Understanding App </h2>", unsafe_allow_html=True)
    st.write(' ')

with st.expander("ℹ️ - About this app", expanded=False):
    st.write(
        """
        The Climate Policy Understanding App is an open-source digital tool
        which aims to assist policy analysts and other users in extracting
        and filtering relevant information from public documents.

        What happens in the background?

        - Step 1: Once the document is provided to the app, it undergoes
        *pre-processing*: the document is broken into smaller paragraphs
        (based on word/sentence count).
        - Step 2: The paragraphs are fed to the **Target Classifier**, which
        detects whether a paragraph contains any *target*-related information.
        - Step 3: Paragraphs found to contain target-related information are
        then fed to multiple classifiers to enrich the information extraction.

        Classifiers:
          - **Netzero**: Detects whether a net-zero commitment is present in the paragraph.
          - **GHG**: Detects whether any GHG-related information is present in the paragraph.
          - **Sector**: Detects which sectors are discussed in the paragraph.
          - **Adaptation-Mitigation**: Detects whether the paragraph is related to Adaptation and/or Mitigation.
        """)
    st.write("")
apps = [processing.app, target_extraction.app, netzero.app, ghg.app,
        sector.app, adapmit.app]
# Each module advances the progress bar by an equal share.
multiplier_val = 1 / len(apps)

if st.button("Analyze Document"):
    prg = st.progress(0.0)
    for i, func in enumerate(apps):
        func()
        prg.progress((i + 1) * multiplier_val)

    # Display the extracted targets once results are available; the target module
    # is assumed to store them in st.session_state under 'key1'.
    if 'key1' in st.session_state:
        target_extraction.target_display()
        # st.write(st.session_state.key1)