import trafilatura
import requests
import lzma
import os
import re
import time
from datetime import datetime
import json
from pprint import pprint
import subprocess
import config
from utils import *
from text_utils import *
from llm import *
## To capture the cookies and headers below, use https://curlconverter.com
cookies = {
'ASP.NET_SessionId': '42i3ivvgk14yd2tnxmddybvq',
'Culture': 'vi',
'Cookie_VB': 'close',
'ruirophaply-covi19': '24',
'SLG_G_WPT_TO': 'vi',
'G_ENABLED_IDPS': 'google',
'SLG_GWPT_Show_Hide_tmp': '1',
'SLG_wptGlobTipTmp': '1',
'__zlcmid': '1NOmxyopHgawxjN',
'45C5EF': '96780c17-dee3-49b2-9bf7-6335c4348d4f',
'vqc': '0',
}
headers = {
'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
'accept-language': 'en-US,en;q=0.9',
'cache-control': 'max-age=0',
'priority': 'u=0, i',
'sec-ch-ua': '"Opera GX";v="111", "Chromium";v="125", "Not.A/Brand";v="24"',
'sec-ch-ua-mobile': '?0',
'sec-ch-ua-platform': '"Windows"',
'sec-fetch-dest': 'document',
'sec-fetch-mode': 'navigate',
'sec-fetch-site': 'none',
'sec-fetch-user': '?1',
'sec-gpc': '1',
'upgrade-insecure-requests': '1',
'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/125.0.0.0 Safari/537.36 OPR/111.0.0.0',
# 'User-Agent': "Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/113.0.0.0 Mobile Safari/537.36",
}
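# A quick smoke test for the session values above (a sketch; the hardcoded
# cookies expire, so re-export them from the browser via curlconverter first):
#
#   resp = requests.get("https://thuvienphapluat.vn", cookies=cookies, headers=headers)
#   print(resp.status_code, len(resp.text))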
def get_url_content(url):
_, filename = norm_url_and_gen_filename(url)
text = open(filename + ".txt", "rt").read()
meta = get_meta(url)
summ = meta["llm_generated"]["summary"]
    # Strip leftover XML/HTML tags from the LLM summary (the original pattern
    # here was garbled; a generic tag-stripper is assumed)
    summ = re.sub(r'<[^>]+>', '', summ, flags = re.IGNORECASE | re.MULTILINE)
return f"""
{summ}
{text}
"""
def docchat(urls_input, prompt):
urls = get_urls(urls_input)
documents = "\n".join([ get_url_content(x) for x in urls ])
return query_documents(documents, prompt)
def get_urls(urls_input):
return urls_input.strip().split()
def got_all_urls(urls_input):
urls = get_urls(urls_input)
for x in urls:
_, filename = norm_url_and_gen_filename(x)
if not os.path.exists(filename + ".txt"):
return False
return True
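# Illustrative usage (a sketch; assumes the .cache/ folder is already populated,
# e.g. by earlier url_content() calls, so got_all_urls() returns True):
#
#   urls_input = "https://example.com/a https://example.com/b"
#   if got_all_urls(urls_input):
#       answer = docchat(urls_input, "Summarize the documents above")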
def norm_url_and_gen_filename(url):
    url = url.strip() # strip leading/trailing whitespace
    if url[-1] == "/": url = url[:-1] # drop a trailing "/"
    # derive the cache filename (sub-folders are created by the caller)
filename = f'.cache/{url}'
return url, filename
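# Example of the normalization above:
#
#   norm_url_and_gen_filename(" https://example.com/a/ ")
#   # -> ("https://example.com/a", ".cache/https://example.com/a")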
def reset_content(url):
_, filename = norm_url_and_gen_filename(url)
text_filename = filename + ".txt"
json_filename = filename + ".json"
html_filename = filename + ".html"
xz_filename = filename + ".xz"
    for cached_file in [
        text_filename,
        json_filename,
        html_filename,
        xz_filename,
    ]:
        cmd = f"rm -rf '{cached_file}'"; print(cmd)
        subprocess.run(cmd, shell = True)
# from functools import lru_cache
# @lru_cache(maxsize=128)
def url_content(url, update_text=None):
url, filename = norm_url_and_gen_filename(url)
    # create the nested cache sub-folders for this filename, one level at a time
    parts = filename.split("/")
for i in range(1, len(parts)):
path = "/".join(parts[:i])
# print(path) # DEBUG
os.makedirs(path, exist_ok=True)
text_filename = filename + ".txt"
json_filename = filename + ".json"
html_filename = filename + ".html"
xz_filename = filename + ".xz"
    # If asked to update the url's cached text, do it here
if update_text is not None:
print("\nUPDATE TEXT", filename)
text, noise_ratio, max_noise = normalize_text(update_text, get_noise_info = True)
        # Update meta, then save both text and meta
meta = json.load(open(json_filename))
meta["updated_at"] = str(datetime.now())
meta["updates_count"] += 1
meta["noise_ratio"] = noise_ratio
meta["max_noise"] = max_noise
        # Truncate the text if the input is too long
text = cut_if_too_long(text, meta)
with open(json_filename, "wt") as f:
f.write(json.dumps(meta, ensure_ascii = False))
with open(text_filename, "wt") as f:
f.write(text)
        # run gen_clear_view.py (slow) in a separate process
get_clear_view(filename)
# Re-gen llm contents
get_llm_gen_contents(url, use_cache = False)
print(CYAN,end=""); print(meta); print(RESET,end="", flush=True)
    # If text_filename exists, return it right away:
    # it may hold manually edited text, so it takes priority
if os.path.exists(text_filename):
print("\nGOT TEXT", filename, flush=True)
norm_text = open(text_filename, "rt").read()
return norm_text
html = None
    # Try the cached HTML content first
if os.path.exists(xz_filename):
try:
html = lzma.open(xz_filename,"rt").read()
print("\nGOT HTML", filename, flush=True)
        except Exception:
pass
blacklist = """
Your connection is not private
-----BEGIN CERTIFICATE-----
""".strip().split("\n")
    ## Ways to fetch the HTML: requests vs crawl4ai vs reader
get_html_method = "requests"
if html is None:
        # Attempt 1: plain requests
print("\nGET HTML", filename, flush=True)
try:
            resp = requests.get(url, cookies=cookies, headers=headers)
            html = resp.text
            # Cache the HTML content into xz_filename
with lzma.open(xz_filename, "wt") as f: f.write(html)
except Exception as e:
print(f"!!! REQUESTS Error {e} !!!")
if isinstance(html, str):
for x in blacklist:
if x in html:
print(f"--{x}--")
i = html.find(x)
print(f"{RED}!!! REQUESTS đọc lỗi {html[i-30:i+200]} !!!{RESET}")
html = None
break
meta = None
    # Fetch meta / html by other methods ...
    # crawl4ai is dropped for now since it cannot be installed on HF
if html is None or len(html) < 500:
        # Attempt 3: the reader API (attempt 2, crawl4ai, is currently disabled)
print("GET HTML READER", filename, flush=True)
get_html_method = "reader"
try:
reader_url = "https://r.jina.ai/" + url
            # The 'X-Return-Format' header below makes https://jina.ai/reader return raw HTML instead of the default markdown
html = requests.get(reader_url, headers = { 'X-Return-Format': 'html', }).text
            # Cache the HTML content into xz_filename
with lzma.open(xz_filename, "wt") as f: f.write(html)
except Exception as e:
print(f"!!! READER Error {e} !!!")
if isinstance(html, str):
for x in blacklist:
if x in html:
i = html.find(x)
print(f"{RED}!!! READER đọc lỗi {html[i-30:i+200]} !!!{RESET}")
html = None
break
    ## Ways to extract the text: trafilatura vs llm vs reader
extract_method = "trafilatura"
# https://trafilatura.readthedocs.io/en/latest/corefunctions.html#extract
try:
text = trafilatura.extract(html,
# favor_recall = True,
include_tables = True,
include_comments = False,
with_metadata = False,
)
    except Exception:
text = ""
    if meta is None: # meta may already have been fetched by crawl4ai (currently disabled)
try:
meta = trafilatura.extract(html, only_with_metadata = True)
if meta and len(meta) > 0:
# print(meta); input() # DEBUG
meta = meta.split("---")[1]
splits = re.findall(r'\S+: [^\n]+', meta)
meta = { x.split(": ", 1)[0].strip() : x.split(": ", 1)[1].strip() for x in splits }
else:
meta = {}
        except Exception:
meta = {}
    # Normalize the text
if text is None: text = ""
text, noise_ratio, max_noise = normalize_text(text, get_noise_info = True)
print(f">>> {RED}noise_ratio {pretty_num(noise_ratio)}, max_noise {max_noise}{RESET}")
MEANINGFUL = 500
MAX_NOISE_RATIO = 0.3
too_short = ( len(text) < MEANINGFUL )
too_noise = ( noise_ratio > MAX_NOISE_RATIO or max_noise > MEANINGFUL )
    # no text at all, text too short (failed scrape), or text too noisy
if text is None or too_short or too_noise:
        # Fetch the text by another method
        print("!!! The text below, extracted by trafilatura, looks problematic:")
print("too short", too_short)
print("too noise", too_noise)
print("- - - "*6)
print(f"{YELLOW}{text}{RESET}")
print("- - - "*6)
print("!!! Dùng Jina Reader ...")
reader_url = "https://r.jina.ai/" + url
        # The 'X-Return-Format' header below makes https://jina.ai/reader return plain text instead of the default markdown
reader_text = requests.get(reader_url, headers = { 'X-Return-Format': 'text', }).text
        # Normalize the reader text
        reader_text, reader_noise_ratio, reader_max_noise = normalize_text(reader_text, get_noise_info = True)
reader_too_noise = ( reader_noise_ratio > MAX_NOISE_RATIO or reader_max_noise > MEANINGFUL )
print(f">>> {RED}reader_noise_ratio {pretty_num(reader_noise_ratio)}, reader_max_noise {reader_max_noise}{RESET}")
print(f">>> {RED}reader_too_noise {reader_too_noise}{RESET}")
signal = int( len(text) * (1 - noise_ratio) ) + 1
reader_signal = int( len(reader_text) * (1 - reader_noise_ratio) ) + 1
samesame = ( abs(signal - reader_signal) / reader_signal ) < 0.2
print(f">>> {RED}samesame {samesame}, original_signal {pretty_num(signal)}, \
reader_signal {pretty_num(reader_signal)}{RESET}")
        # If the original is too short but the reader version is too noisy, the short one is still the better pick
        original_too_short_but_reader_too_noise = (
too_short and (samesame or reader_noise_ratio >= 0.5 )
)
original_too_noise_but_reader_even_more_noise = (
too_noise and noise_ratio < reader_noise_ratio and max_noise < reader_max_noise
)
        if original_too_short_but_reader_too_noise:
            print("!!! reader output is too noisy; keeping trafilatura's too-short version instead.")
if original_too_noise_but_reader_even_more_noise:
print("!!! reader còn noise hơn bản trafilatura, bỏ qua.")
        if not original_too_short_but_reader_too_noise and \
not original_too_noise_but_reader_even_more_noise:
extract_method = "reader"
text = reader_text
noise_ratio = reader_noise_ratio
max_noise = reader_max_noise
    # Update meta, then save both text and meta
meta["url"] = url
meta["get_html_method"] = get_html_method
meta["extract_method"] = extract_method
meta["created_at"] = str(datetime.now())
meta["updates_count"] = 0
meta["noise_ratio"] = noise_ratio
meta["max_noise"] = max_noise
meta["text_origin_len"] = len(text)
if "hostname" in meta: meta.pop("hostname")
if "sitename" in meta: meta.pop("sitename")
    # Prepend title and description to the text (if present)
norm_text = normalize_text(text)
text = add_title_desc_to_text(norm_text, meta)
    # Truncate the text if the input is too long
text = cut_if_too_long(text, meta)
print(CYAN,end=""); print(meta); print(RESET,end="")
with open(json_filename, "wt") as f:
f.write(json.dumps(meta, ensure_ascii = False))
with open(text_filename, "wt") as f:
f.write(text)
get_clear_view(filename)
get_llm_gen_contents(url, use_cache = False)
return text
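# Typical call patterns (a sketch; https://example.com/article is a placeholder):
#
#   text = url_content("https://example.com/article")   # fetch, cache, extract
#   text = url_content("https://example.com/article",   # overwrite the cached text
#                      update_text="manually corrected text")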
def get_clear_view(filename):
    # run gen_clear_view.py (slow) in a separate process
    subprocess.run(f"nohup python3 gen_clear_view.py '{filename}' &", shell = True)
    time.sleep(1) # wait a second for the process to start (time is imported at module level)
def cut_if_too_long(text, meta, max_words = config.text_max_words):
words = text.split()
if len(words) > max_words:
words = words[ : max_words]
threshold = len(" ".join(words))
meta["text_cutoff"] = True
meta["text_cutoff_len"] = threshold
return text[ : threshold ]
else:
return text
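# Worked example (hypothetical numbers, overriding max_words for illustration):
#
#   meta = {}
#   cut_if_too_long("one two three four five", meta, max_words = 3)
#   # -> "one two three"; meta == {"text_cutoff": True, "text_cutoff_len": 13}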
def add_title_desc_to_text(text, meta):
content = []
title = meta["title"] if "title" in meta else None
description = meta["description"] if "description" in meta else None
if title is not None and len(title) > 5:
content.append(f"**title**: {title}")
if description is not None and len(description) > 10:
content.append(f"**description**: {description}")
content.append(text)
return "\n\n".join(content)
def normalize_text(text, get_noise_info = False):
text = text.strip()
chunks = re.split(r'\s*(?:\n\s*)+', text, flags = re.MULTILINE)
text = "\n\n".join([ x for x in chunks if len(x) > 20 ])
    if get_noise_info:
        # Noise heuristic: paragraphs shorter than 80 chars (menus, captions,
        # share widgets, ...) count as noise; max_noise tracks the longest
        # consecutive run of such paragraphs
        noise_len = 1   # both counters start at 1 to avoid division by zero
        total_len = 1
        max_noise = 0
        continuous_noise = 0
for x in chunks:
n = len(x)
total_len += n
if n < 80:
noise_len += n
continuous_noise += n
if continuous_noise > max_noise:
max_noise = continuous_noise
else:
continuous_noise = 0
noise_ratio = noise_len / total_len
return text, noise_ratio, max_noise
else:
return text
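# Worked example of the heuristic above (only the 120-char body survives;
# the three short chunks count as noise):
#
#   body = "x" * 120
#   text, noise_ratio, max_noise = normalize_text(f"nav\n\n{body}\n\nfooter\n\nlogin", get_noise_info = True)
#   # text == body; noise_ratio == (1 + 3 + 6 + 5) / (1 + 3 + 120 + 6 + 5) ≈ 0.11; max_noise == 11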
def get_clean_view(url):
url, filename = norm_url_and_gen_filename(url)
clean_view_filename = filename + "__clean_view.txt"
if os.path.exists(clean_view_filename):
return open(clean_view_filename, "rt").read()
else:
return None
def get_meta(url):
url, filename = norm_url_and_gen_filename(url)
json_filename = filename + ".json"
return json.load(open(json_filename))
TAGS = "keyphrases figures summary".split()
###
def get_llm_gen_contents(url, use_cache = True):
url, filename = norm_url_and_gen_filename(url)
json_filename = filename + ".json"
text_filename = filename + ".txt"
if os.path.exists(json_filename):
meta = json.load(open(json_filename, "rt"))
generated = ( "llm_generated" in meta )
if not use_cache or not generated:
text = open(text_filename, "rt").read()
marked_text, chunks = add_chunk_markers(text, para = True)
raw = extract_keyphrases_figures_summary(marked_text)
result = extract_xmls(raw, TAGS)
result["raw"] = raw
meta["llm_generated"] = result
with open(json_filename, "wt") as f:
f.write(json.dumps(meta, ensure_ascii = False))
return meta["llm_generated"]
else:
return {
"summary": "Tóm tắt nội dung ... văn bản nói về ...",
"keyphrases": ["keywords 1", "keywords 2", "keywords 3"]
}
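# Usage sketch: force-regenerate the LLM fields after the cached text changes
# (assumes .cache/<url>.json and .cache/<url>.txt already exist):
#
#   gen = get_llm_gen_contents(url, use_cache = False)
#   print(gen["summary"], gen["keyphrases"])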
default_urls_input = """
https://thuvienphapluat.vn/phap-luat/ho-so-dien-tu-thuc-hien-thu-tuc-hanh-chinh-la-gi-huong-dan-chuan-bi-va-nop-ho-so-dien-tu-khi-thuc-h-155754-140107.html
https://video.vnexpress.net/bon-ngay-chong-choi-lu-ngap-gan-3-m-cua-nguoi-dan-thai-nguyen-4791440.html
http://danvan.vn/Home/Tin-hoat-dong/Ban-dan-van/18706/Ban-Dan-van-Trung-uong-va-Hoi-Chu-thap-do-Viet-Nam-tham-tang-qua-nhan-dan-bi-anh-huong-bao-so-3-tai-Thai-Nguyen
https://baodauthau.vn/thai-nguyen-144-ty-dong-nang-cap-duong-cach-mang-thang-8-tp-song-cong-post164486.html
https://baothainguyen.vn/chinh-tri/202409/chu-tich-quoc-hoi-tran-thanh-man-lam-viec-voi-tinh-thai-nguyen-ve-cong-tackhac-phuc-hau-qua-bao-so-3-3f9253f/
https://baothainguyen.vn/giao-duc/202409/dam-bao-dieu-kien-de-hoc-sinh-tro-lai-truong-cham-nhat-ngay-16-9-9742985/
https://baothainguyen.vn/tai-nguyen-moi-truong/202409/khu-khuan-dien-rong-nhung-vung-bi-ngap-lut-tai-tp-thai-nguyen-585273d/
https://baothainguyen.vn/thoi-su-thai-nguyen/202409/dien-luc-tp-thai-nguyen-no-luccap-dien-tro-lai-cho-tren-2000-hotrong-ngay-12-9-da21a20/
https://baothainguyen.vn/xa-hoi/202409/tao-sinh-ke-giam-ngheo-vung-dong-bao-dan-toc-thieu-so-b8f041c/
https://baotintuc.vn/xa-hoi/ngap-ung-va-thiet-hai-tren-202000-ha-lua-20240913095621343.htm
https://daidoanket.vn/thai-nguyen-hai-nguoi-tu-vong-thiet-hai-hon-600-ty-dong-do-bao-yagi-10290104.html
https://dangcongsan.vn/xay-dung-dang/thai-nguyen-cong-bo-cac-quyet-dinh-ve-cong-tac-can-bo-677747.html
https://danviet.vn/62-y-bac-si-cua-binh-dinh-den-thai-nguyen-yen-bai-quyet-tam-cung-dong-bao-vuot-qua-kho-khan-20240913101402511.htm
https://laodong.vn/thoi-su/chu-tich-quoc-hoi-kiem-tra-cong-tac-khac-phuc-hau-qua-mua-lu-o-thai-nguyen-1393445.ldo
https://nhandan.vn/anh-chu-tich-quoc-hoi-tran-thanh-man-kiem-tra-cong-tac-khac-phuc-hau-qua-bao-so-3-tai-tinh-thai-nguyen-post830447.html
https://nld.com.vn/toi-7-gio-13-9-336-nguoi-chet-va-mat-tich-hon-130-ngan-nguoi-dan-phai-di-doi-do-bao-lu-196240913101124546.htm
https://phunuvietnam.vn/thai-nguyen-hoi-vien-phu-nu-chung-tay-khac-phuc-hau-qua-ngap-lut-20240912154801867.htm
https://phunuvietnam.vn/thai-nguyen-trien-khai-cong-tac-phong-chong-dich-sau-thien-tai-20240912174641866.htm
https://thainguyen.dcs.vn/hoat-dong-cua-cac-dang-bo/dang-bo-tp-thai-nguyen/hoi-nghi-ban-thuong-vu-thanh-uy-thai-nguyen-lan-thu-102-857.html
https://thainguyen.dms.gov.vn/tin-chi-tiet/-/chi-tiet/thai-nguyen-%C4%91am-bao-nguon-hang-hoa-phuc-vu-nhan-dan-89820-1404.html
https://thuonghieucongluan.com.vn/thai-nguyen-tiep-nhan-5-tan-gao-ho-tro-nhan-dan-bi-anh-huong-ngap-lut-a235642.html
https://tienphong.vn/nam-thanh-nien-o-thai-nguyen-bi-lu-cuon-khi-di-bat-ca-post1672693.tpo
https://tienphong.vn/ngan-hang-dau-tien-cong-bo-giam-lai-suat-cho-vay-sau-bao-so-3-post1672728.tpo
https://tuoitre.vn/chu-tich-quoc-hoi-tran-thanh-man-trao-30-ti-dong-ho-tro-khac-phuc-bao-lu-tai-thai-nguyen-20240912191724375.htm
https://tuoitre.vn/sau-lu-nguoi-dan-thai-nguyen-noi-chua-bao-gio-bun-ngap-nhieu-den-vay-202409121653144.htm
https://vietnamnet.vn/muc-nuoc-song-cau-o-thai-nguyen-giam-dan-nguoi-dan-tat-bat-don-dep-sau-lu-2321461.html
https://vtcnews.vn/trieu-nu-cuoi-huong-ve-thai-nguyen-sau-con-bao-ar895714.html
""".strip()
if os.getenv("test", 0) == "1":
default_urls_input = """
https://vnexpress.net/sam-altman-ai-thong-minh-hon-con-nguoi-trong-vai-nghin-ngay-toi-4796649.html
https://vnexpress.net/may-tram-chay-ai-gia-tram-trieu-dong-tai-viet-nam-4796490.html
https://www.vngcloud.vn/blog/what-are-large-language-models
https://arxiv.org/pdf/2305.13673
""".strip()