import re
import argparse
from datetime import datetime
from typing import List, Dict, Any

import torch
import gradio as gr
import pandas as pd
import plotly.express as px
from dateutil import parser

from data import load_tokenizer
from model import load_model
from demo_assets import *


def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument('--data_dir', default='/data/mohamed/data')
    parser.add_argument('--aim_repo', default='/data/mohamed/')
    parser.add_argument('--ckpt', default='electra-base.pt')
    parser.add_argument('--aim_exp', default='mimic-decisions-1215')
    parser.add_argument('--label_encoding', default='multiclass')
    parser.add_argument('--multiclass', action='store_true')
    parser.add_argument('--debug', action='store_true')
    parser.add_argument('--save_losses', action='store_true')
    parser.add_argument('--task', default='token', choices=['seq', 'token'])
    parser.add_argument('--max_len', type=int, default=512)
    parser.add_argument('--num_layers', type=int, default=3)
    parser.add_argument('--kernels', nargs=3, type=int, default=[1, 2, 3])
    parser.add_argument('--model', default='roberta-base')
    parser.add_argument('--model_name', default='google/electra-base-discriminator')
    parser.add_argument('--gpu', default='0')
    parser.add_argument('--grad_accumulation', default=2, type=int)
    parser.add_argument('--pheno_id', type=int)
    parser.add_argument('--unseen_pheno', type=int)
    parser.add_argument('--text_subset')
    parser.add_argument('--pheno_n', type=int, default=500)
    parser.add_argument('--hidden_size', type=int, default=100)
    parser.add_argument('--emb_size', type=int, default=400)
    parser.add_argument('--total_steps', type=int, default=5000)
    parser.add_argument('--train_log', type=int, default=500)
    parser.add_argument('--val_log', type=int, default=1000)
    parser.add_argument('--seed', default='0')
    parser.add_argument('--num_phenos', type=int, default=10)
    parser.add_argument('--num_decs', type=int, default=9)
    parser.add_argument('--num_umls_tags', type=int, default=33)
    parser.add_argument('--batch_size', type=int, default=8)
    parser.add_argument('--pos_weight', type=float, default=1.25)
    parser.add_argument('--alpha_distil', type=float, default=1)
    parser.add_argument('--distil', action='store_true')
    parser.add_argument('--distil_att', action='store_true')
    parser.add_argument('--distil_ckpt')
    parser.add_argument('--use_umls', action='store_true')
    parser.add_argument('--include_nolabel', action='store_true')
    parser.add_argument('--truncate_train', action='store_true')
    parser.add_argument('--truncate_eval', action='store_true')
    parser.add_argument('--load_ckpt', action='store_true')
    parser.add_argument('--gradio', action='store_true')
    parser.add_argument('--optuna', action='store_true')
    parser.add_argument('--mimic_data', action='store_true')
    parser.add_argument('--eval_only', action='store_true')
    parser.add_argument('--lr', type=float, default=4e-5)
    parser.add_argument('--resample', default='')
    parser.add_argument('--verbose', type=bool, default=True)
    parser.add_argument('--use_crf', type=bool)
    parser.add_argument('--print_spans', action='store_true')
    return parser.parse_args()


args = get_args()

# Derive the size of the label space from the task configuration.
if args.task == 'seq' and args.pheno_id is not None:
    args.num_labels = 1
elif args.task == 'seq':
    args.num_labels = args.num_phenos
elif args.task == 'token':
    if args.use_umls:
        args.num_labels = args.num_umls_tags
    else:
        args.num_labels = args.num_decs
    if args.label_encoding == 'multiclass':
        args.num_labels = args.num_labels * 2 + 1
    elif args.label_encoding == 'bo':
        args.num_labels *= 2
    elif args.label_encoding == 'boe':
        args.num_labels *= 3
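# Worked example under the defaults above: task='token' with use_umls unset
# gives num_labels = num_decs = 9; the 'multiclass' encoding then expands this
# to a begin/inside pair per class plus one catch-all "other" tag,
# 9 * 2 + 1 = 19 labels, so "other" sits at the last index, 18 (OTHERS_ID below).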
categories = ['Contact related', 'Gathering additional information',
              'Defining problem', 'Treatment goal', 'Drug related',
              'Therapeutic procedure related', 'Evaluating test result',
              'Deferment', 'Advice and precaution',
              'Legal and insurance related']
unicode_symbols = [
    "\U0001F91D",  # Handshake
    "\U0001F50D",  # Magnifying glass
    "\U0001F9E9",  # Puzzle piece
    "\U0001F3AF",  # Target
    "\U0001F48A",  # Pill
    "\U00002702",  # Surgical scissors
    "\U0001F9EA",  # Test tube
    "\U000023F0",  # Alarm clock
    "\U000026A0",  # Warning sign
    "\U0001F4C4",  # Document
]

# Index of the "no decision" class in the multiclass label encoding.
OTHERS_ID = 18


def postprocess_labels(text, logits, t2c):
    """Map per-token predictions back onto the characters of the input text."""
    tags = [None for _ in text]
    labels = logits.argmax(-1)
    for i, cat in enumerate(labels):
        if cat != OTHERS_ID:
            char_ids = t2c(i)
            if char_ids is None:
                continue
            for idx in range(char_ids.start, char_ids.end):
                if tags[idx] is None and idx < len(text):
                    tags[idx] = categories[cat // 2]
    # Bridge single spaces between two spans of the same category so the
    # highlighting is contiguous.
    for i in range(len(text) - 1):
        if text[i] == ' ' and (text[i + 1] == ' ' or tags[i - 1] == tags[i + 1]):
            tags[i] = tags[i - 1]
    return tags


def indicators_to_spans(labels, t2c=None):
    """Collapse begin/inside token labels into (category, start, end) character spans."""
    def add_span(c, start, end):
        if t2c(start) is None or t2c(end) is None:
            start, end = -1, -1
        else:
            start = t2c(start).start
            end = t2c(end).end
        spans.add((c, start, end))

    spans = set()
    num_tokens = len(labels)
    start = None
    cls = None
    for t in range(num_tokens):
        if start is not None and labels[t] == cls + 1:
            # Inside the current span; keep extending it.
            continue
        elif start is not None:
            add_span(cls // 2, start, t - 1)
            start = None
        if start is None and labels[t] != OTHERS_ID:
            start = t
            cls = int(labels[t]) // 2 * 2
    # Close a span that runs to the end of the sequence.
    if start is not None:
        add_span(cls // 2, start, num_tokens - 1)
    return spans


def extract_date(text):
    """Pull the note date following a 'Date:' marker (de-identified or literal)."""
    pattern = r'(?<=Date: )\s*(\[\*\*.*?\*\*\]|\d{1,4}[-/]\d{1,2}[-/]\d{1,4})'
    match = re.search(pattern, text).group(1)
    # Strip de-identification brackets, keeping the numeric date inside.
    start, end = None, None
    for i, c in enumerate(match):
        if start is None and c.isnumeric():
            start = i
        elif c.isnumeric():
            end = i + 1
    return match[start:end]


device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
tokenizer = load_tokenizer(args.model_name)
model = load_model(args, device)[0]
model.eval()
torch.set_grad_enabled(False)


def predict(text):
    encoding = tokenizer.encode_plus(text)
    x = torch.tensor(encoding['input_ids']).unsqueeze(0).to(device)
    mask = torch.ones_like(x)
    output = model.generate(x, mask)[0]
    return output, encoding.token_to_chars


def process(text):
    if text is not None:
        output, t2c = predict(text)
        tags = postprocess_labels(text, output, t2c)
        with open('log.csv', 'a') as f:
            f.write(f'{datetime.now()},{text}\n')
        return list(zip(text, tags))
    else:
        return text


def process_sum(*inputs):
    """Summarize decisions from several notes, grouped by date and category."""
    dates = {}
    for i in range(sum_c):
        text = inputs[i]
        output, t2c = predict(text)
        spans = indicators_to_spans(output.argmax(-1), t2c)
        date = extract_date(text)
        present_decs = set(cat for cat, _, _ in spans)
        decs = {k: [] for k in sorted(present_decs)}
        for c, s, e in spans:
            decs[c].append(text[s:e])
        dates[date] = decs

    out = ""
    for date in sorted(dates.keys(), key=lambda x: parser.parse(x)):
        out += f'## **[{date}]**\n\n'
        decs = dates[date]
        for c in decs:
            out += f'### {unicode_symbols[c]} ***{categories[c]}***\n\n'
            for dec in decs[c]:
                out += f'{dec}\n\n'
    return out


def get_structured_data(*inputs):
    """Extract one record per predicted decision span across the input notes."""
    data = []
    for i in range(sum_c):
        text = inputs[i]
        output, t2c = predict(text)
        spans = indicators_to_spans(output.argmax(-1), t2c)
        date = extract_date(text)
        for c, s, e in spans:
            data.append({'date': date,
                         'timestamp': parser.parse(date),
                         'decision_type': categories[c],
                         'details': text[s:e]})
    return data
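# Shape of each record returned by get_structured_data (illustrative values,
# assuming a MIMIC-style de-identified date such as [**2112-3-5**]):
#   {'date': '2112-3-5', 'timestamp': datetime(2112, 3, 5),
#    'decision_type': 'Drug related', 'details': 'started warfarin'}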
def update_inputs(inputs):
    """Populate the note textboxes from uploaded files and cache structured data."""
    global sum_c, structured_data
    outputs = []
    if inputs is None:
        c = 0
    else:
        inputs = [open(f.name).read() for f in inputs]
        for text in inputs:
            outputs.append(gr.update(value=text, visible=True))
        c = len(inputs)
    # Hide the remaining textboxes.
    for _ in range(SUM_INPUTS - c):
        outputs.append(gr.update(value='', visible=False))
    sum_c = c
    structured_data = get_structured_data(*inputs) if inputs is not None else []
    return outputs


def add_ex(*inputs):
    """Reveal one more note textbox, up to SUM_INPUTS."""
    global sum_c
    new_idx = sum_c
    if new_idx < SUM_INPUTS:
        out = inputs[:new_idx] + (gr.update(visible=True),) + inputs[new_idx + 1:]
        sum_c += 1
    else:
        out = inputs
    return out


def sub_ex(*inputs):
    """Hide the last visible note textbox, keeping at least one."""
    global sum_c
    new_idx = sum_c - 1
    if new_idx > 0:
        out = inputs[:new_idx] + (gr.update(visible=False),) + inputs[new_idx + 1:]
        sum_c -= 1
    else:
        out = inputs
    return out


def create_timeline_plot(data: List[Dict[str, Any]]):
    df = pd.DataFrame(data)
    # A jittered strip plot keeps same-day decisions of one category visible.
    fig = px.strip(df, x='date', y='decision_type', color='decision_type',
                   hover_data=['details'], stripmode="overlay",
                   title='Patient Timeline')
    fig.update_traces(jitter=1.0, marker=dict(size=10, opacity=0.6))
    fig.update_layout(height=600)
    return fig


def filter_timeline(decision_types: List[str], start_date: str, end_date: str):
    filtered_data = structured_data
    if 'All' not in decision_types:
        filtered_data = [event for event in filtered_data
                         if event['decision_type'] in decision_types]
    start = parser.parse(start_date)
    end = parser.parse(end_date)
    filtered_data = [event for event in filtered_data
                     if start <= event['timestamp'] <= end]
    return create_timeline_plot(filtered_data)


def generate_summary(*inputs):
    global structured_data
    structured_data = get_structured_data(*inputs)
    decision_types = {}
    for event in structured_data:
        decision_type = event['decision_type']
        decision_types[decision_type] = decision_types.get(decision_type, 0) + 1
    summary = "Decision Type Summary:\n"
    for decision_type, count in decision_types.items():
        summary += f"{decision_type}: {count}\n"
    return summary, create_timeline_plot(structured_data)


sum_c = 1
SUM_INPUTS = 20
structured_data = []
device = model.backbone.device

colors = ['#8dd3c7', '#ffffb3', '#bebada', '#fb8072', '#80b1d3',
          '#fdb462', '#b3de69', '#fccde5', '#d9d9d9', '#bc80bd']
color_map = {cat: colors[i] for i, cat in enumerate(categories)}

det_desc = ['Admit, discharge, follow-up, referral',
            'Ordering test, consulting colleague, seeking external information',
            'Diagnostic conclusion, evaluation of health state, etiological inference, prognostic judgment',
            'Quantitative or qualitative',
            'Start, stop, alter, maintain, refrain',
            'Start, stop, alter, maintain, refrain',
            'Positive, negative, ambiguous test results',
            'Transfer responsibility, wait and see, change subject',
            'Advice or precaution',
            'Sick leave, drug refund, insurance, disability']

desc = '### Zones (categories)\n'
desc += '| | |\n| --- | --- |\n'
for i, cat in enumerate(categories):
    desc += f'| {unicode_symbols[i]} **{cat}** | {det_desc[i]}|\n'
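# Minimal sketch for previewing the timeline figure without running the model
# (hypothetical records, matching the schema produced by get_structured_data):
#   fig = create_timeline_plot([
#       {'date': '2112-3-5', 'timestamp': parser.parse('2112-3-5'),
#        'decision_type': 'Drug related', 'details': 'started warfarin'}])
#   fig.show()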
css = (
    '.category-legend {border:1px dashed black;}'
    '.text-sm {font-size: 1.5rem; line-height: 200%;}'
    '.gr-sample-textbox {width: 1000px; white-space: nowrap; overflow: hidden; text-overflow: ellipsis;}'
    '.text-limit label textarea {height: 150px !important; overflow: scroll;}'
    '.text-gray-500 {color: #111827; font-weight: 600; font-size: 1.25em; '
    'margin-top: 1.6em; margin-bottom: 0.6em; line-height: 1.6;}'
    '#sum-out {border: 2px solid #007bff; padding: 20px; border-radius: 10px; '
    'box-shadow: 0 0 10px rgba(0, 0, 0, 0.1);}'
)
title = 'Clinical Decision Zoning'

with gr.Blocks(title=title, css=css) as demo:
    gr.Markdown(f'# {title}')
    with gr.Tab("Label a Clinical Note"):
        with gr.Row():
            with gr.Column():
                gr.Markdown("## Enter a Discharge Summary or Clinical Note")
                text_input = gr.Textbox(
                    # value=examples[0],
                    label="", placeholder="Enter text here...")
                text_btn = gr.Button('Run')
            with gr.Column():
                gr.Markdown("## Labeled Summary or Note")
                text_out = gr.Highlight(label="", combine_adjacent=True,
                                        show_legend=False, color_map=color_map)
        gr.Examples(text_examples, inputs=text_input)
    with gr.Tab("Summarize Patient History"):
        with gr.Row():
            with gr.Column():
                sum_inputs = [gr.Text(label='Clinical Note 1', elem_classes='text-limit')]
                sum_inputs.extend([gr.Text(label='Clinical Note %d' % i, visible=False,
                                           elem_classes='text-limit')
                                   for i in range(2, SUM_INPUTS + 1)])
                sum_btn = gr.Button('Run')
                with gr.Row():
                    ex_add = gr.Button("+")
                    ex_sub = gr.Button("-")
                upload = gr.File(label='Upload clinical notes',
                                 file_types=['text'], file_count='multiple')
                gr.Examples(sum_examples, inputs=upload, fn=update_inputs,
                            outputs=sum_inputs, run_on_click=True)
            with gr.Column():
                gr.Markdown("## Summarized Clinical Decision History")
                sum_out = gr.Markdown(elem_id='sum-out')
    with gr.Tab("Timeline Visualization Tool"):
        with gr.Column():
            sum_inputs2 = [gr.Text(label='Clinical Note 1', elem_classes='text-limit')]
            sum_inputs2.extend([gr.Text(label='Clinical Note %d' % i, visible=False,
                                        elem_classes='text-limit')
                                for i in range(2, SUM_INPUTS + 1)])
            with gr.Row():
                ex_add2 = gr.Button("+")
                ex_sub2 = gr.Button("-")
            upload2 = gr.File(label='Upload clinical notes',
                              file_types=['text'], file_count='multiple')
            gr.Examples(sum_examples, inputs=upload2, fn=update_inputs,
                        outputs=sum_inputs2, run_on_click=True)
        with gr.Column():
            with gr.Row():
                decision_type = gr.Dropdown(["All"] + categories, multiselect=True,
                                            label="Decision Type", value="All")
                start_date = gr.Textbox(label="Start Date (MM/DD/YYYY)", value="01/01/2006")
                end_date = gr.Textbox(label="End Date (MM/DD/YYYY)", value="12/31/2024")
            filter_button = gr.Button("Filter Timeline")
            timeline_plot = gr.Plot()
            summary_button = gr.Button("Generate Summary")
            summary_output = gr.Textbox(label="Summary")
    gr.Markdown(desc)

    # Event wiring
    text_input.submit(process, inputs=text_input, outputs=text_out)
    text_btn.click(process, inputs=text_input, outputs=text_out)
    upload.change(update_inputs, inputs=upload, outputs=sum_inputs)
    upload2.change(update_inputs, inputs=upload2, outputs=sum_inputs2)
    ex_add.click(add_ex, inputs=sum_inputs, outputs=sum_inputs)
    ex_sub.click(sub_ex, inputs=sum_inputs, outputs=sum_inputs)
    ex_add2.click(add_ex, inputs=sum_inputs2, outputs=sum_inputs2)
    ex_sub2.click(sub_ex, inputs=sum_inputs2, outputs=sum_inputs2)
    sum_btn.click(process_sum, inputs=sum_inputs, outputs=sum_out)
    filter_button.click(filter_timeline,
                        inputs=[decision_type, start_date, end_date],
                        outputs=timeline_plot)
    summary_button.click(generate_summary, inputs=sum_inputs2,
                         outputs=[summary_output, timeline_plot])

demo.launch(share=True)
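# share=True asks Gradio to open a temporary public gradio.live tunnel in
# addition to the local server; remove it (or pass server_name/server_port)
# to serve locally only.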