Tuchuanhuhuhu committed on
Commit
5457fcd
1 Parent(s): 0d9e8c6

修复依赖

Browse files
Files changed (1) hide show
  1. overwrites.py +4 -61
overwrites.py CHANGED
@@ -1,70 +1,13 @@
1
  from __future__ import annotations
2
- import os
3
-
4
- import llama_index
5
-
6
- from llama_index import (
7
- LLMPredictor,
8
- GPTTreeIndex,
9
- Document,
10
- GPTSimpleVectorIndex,
11
- SimpleDirectoryReader,
12
- RefinePrompt,
13
- QuestionAnswerPrompt,
14
- GPTListIndex,
15
- PromptHelper,
16
- )
17
- from pathlib import Path
18
- from docx import Document as DocxDocument
19
- from tqdm import tqdm
20
- import re
21
- from langchain.llms import OpenAIChat, OpenAI
22
- from llama_index.composability import ComposableGraph
23
- from IPython.display import Markdown, display
24
- import json
25
- from llama_index import Prompt
26
- from typing import TYPE_CHECKING, Any, Callable, Dict, List, Tuple, Type
27
-
28
- import logging
29
- import sys
30
-
31
- from typing import TYPE_CHECKING, Any, Callable, Dict, List, Tuple, Type
32
  import logging
33
- import json
34
- import gradio as gr
35
-
36
- # import openai
37
- import os
38
- import traceback
39
- import requests
40
 
41
- # import markdown
42
- import csv
43
  import mdtex2html
44
- from pypinyin import lazy_pinyin
45
  from presets import *
46
  from llama_func import *
47
- import tiktoken
48
- from tqdm import tqdm
49
- import colorama
50
- import os
51
- from llama_index import (
52
- GPTSimpleVectorIndex,
53
- GPTTreeIndex,
54
- GPTKeywordTableIndex,
55
- GPTListIndex,
56
- )
57
- from llama_index import SimpleDirectoryReader, download_loader
58
- from llama_index import (
59
- Document,
60
- LLMPredictor,
61
- PromptHelper,
62
- QuestionAnswerPrompt,
63
- RefinePrompt,
64
- )
65
- from langchain.llms import OpenAIChat, OpenAI
66
- from duckduckgo_search import ddg
67
- import datetime
68
 
69
  def compact_text_chunks(self, prompt: Prompt, text_chunks: List[str]) -> List[str]:
70
  logging.debug("Compacting text chunks...🚀🚀🚀")
 
1
  from __future__ import annotations
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2
  import logging
 
 
 
 
 
 
 
3
 
4
+ from llama_index import Prompt
5
+ from typing import List, Tuple
6
  import mdtex2html
7
+
8
  from presets import *
9
  from llama_func import *
10
+
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
11
 
12
  def compact_text_chunks(self, prompt: Prompt, text_chunks: List[str]) -> List[str]:
13
  logging.debug("Compacting text chunks...🚀🚀🚀")