monra committed on
Commit
53cd5d4
1 Parent(s): f7379ea

Synced repo using 'sync_with_huggingface' Github Action

Browse files
client/html/index.html CHANGED
@@ -93,9 +93,9 @@
93
  <div class="buttons">
94
  <div class="field">
95
  <select class="dropdown" name="model" id="model">
96
- <option value="gpt-3.5-turbo" selected>GPT-3.5</option>
97
  <option value="gpt-3.5-turbo-16k">GPT-3.5-turbo-16k</option>
98
- <option value="gpt-4">GPT-4</option>
99
  </select>
100
  </div>
101
  <div class="field">
 
93
  <div class="buttons">
94
  <div class="field">
95
  <select class="dropdown" name="model" id="model">
96
+ <option value="gpt-3.5-turbo">GPT-3.5</option>
97
  <option value="gpt-3.5-turbo-16k">GPT-3.5-turbo-16k</option>
98
+ <option value="gpt-4" selected>GPT-4</option>
99
  </select>
100
  </div>
101
  <div class="field">
g4f/Provider/Providers/AiService.py ADDED
@@ -0,0 +1,40 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Provider backed by the aiservice.vercel.app chat endpoint.
import os
import requests
from ...typing import get_type_hints

# Endpoint that answers a plain-text prompt with a single JSON payload.
url = "https://aiservice.vercel.app/api/chat/answer"
# Models this provider can serve.
model = ['gpt-3.5-turbo']
# The endpoint returns one complete answer, not a token stream.
supports_stream = False
# No API key or login is required.
needs_auth = False
9
+
10
+
11
def _create_completion(model: str, messages: list, stream: bool, **kwargs):
    """Flatten *messages* into a plain-text transcript, POST it to the
    aiservice endpoint, and yield the answer text on success.

    On a non-200 response the status code is printed and nothing is yielded
    (best-effort provider: no exception is raised).
    """
    # Serialize the chat history as "role: content" lines, ending with an
    # open "assistant:" turn for the model to complete.
    prompt = ''.join('%s: %s\n' % (msg['role'], msg['content']) for msg in messages)
    prompt += 'assistant:'

    request_headers = {
        "accept": "*/*",
        "content-type": "text/plain;charset=UTF-8",
        "sec-fetch-dest": "empty",
        "sec-fetch-mode": "cors",
        "sec-fetch-site": "same-origin",
        "Referer": "https://aiservice.vercel.app/chat",
    }
    payload = {
        "input": prompt
    }

    response = requests.post(url, headers=request_headers, json=payload)
    if response.status_code != 200:
        print(f"Error Occurred::{response.status_code}")
        return None
    yield response.json()['data']
35
+
36
+
37
+
38
# Human-readable summary of _create_completion's positional signature,
# e.g. "g4f.Providers.AiService supports: (model: str, messages: list, stream: bool)".
params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
    '(%s)' % ', '.join(
        [f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
g4f/Provider/Providers/Better.py CHANGED
@@ -4,13 +4,13 @@ import requests
4
  from typing import Dict, get_type_hints
5
 
6
  url = 'https://openai-proxy-api.vercel.app/v1/'
7
- model = {
8
  'gpt-3.5-turbo',
9
- 'gpt-3.5-turbo-0613'
10
  'gpt-3.5-turbo-16k',
11
  'gpt-3.5-turbo-16k-0613',
12
  'gpt-4',
13
- }
14
 
15
  supports_stream = True
16
  needs_auth = False
 
4
  from typing import Dict, get_type_hints
5
 
6
  url = 'https://openai-proxy-api.vercel.app/v1/'
7
+ model = [
8
  'gpt-3.5-turbo',
9
+ 'gpt-3.5-turbo-0613',
10
  'gpt-3.5-turbo-16k',
11
  'gpt-3.5-turbo-16k-0613',
12
  'gpt-4',
13
+ ]
14
 
15
  supports_stream = True
16
  needs_auth = False
g4f/Provider/Providers/ChatFree.py ADDED
@@ -0,0 +1,48 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Provider backed by the v.chatfree.cc OpenAI-compatible proxy.
import os, requests
from ...typing import sha256, Dict, get_type_hints
import json

url = "https://v.chatfree.cc"
# Models exposed by this provider.
model = ['gpt-3.5-turbo', 'gpt-3.5-turbo-16k']
# NOTE(review): the request body below asks for a streamed response
# ('stream': True) while this flag advertises no streaming — confirm
# which is intended.
supports_stream = False
# No API key or login is required.
needs_auth = False
9
+
10
+
11
def _create_completion(model: str, messages: list, stream: bool, **kwargs):
    """POST *messages* to the chatfree OpenAI-compatible endpoint and yield
    the text of each SSE delta chunk as it arrives.

    Parameters:
        model: model name forwarded verbatim to the endpoint.
        messages: OpenAI-style chat messages ([{'role': ..., 'content': ...}]).
        stream: ignored; the upstream request always asks for a streamed body.

    Yields:
        str: incremental content fragments from the assistant's reply.
    """
    headers = {
        'authority': 'chat.dfehub.com',
        'accept': '*/*',
        'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
        'content-type': 'application/json',
        'origin': 'https://v.chatfree.cc',
        'referer': 'https://v.chatfree.cc/',
        'sec-ch-ua': '"Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114"',
        'sec-ch-ua-mobile': '?0',
        'sec-ch-ua-platform': '"macOS"',
        'sec-fetch-dest': 'empty',
        'sec-fetch-mode': 'cors',
        'sec-fetch-site': 'same-origin',
        'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36',
        'x-requested-with': 'XMLHttpRequest',
    }

    json_data = {
        'messages': messages,
        'stream': True,
        'model': model,
        'temperature': 0.5,
        'presence_penalty': 0,
        'frequency_penalty': 0,
        'top_p': 1,
    }

    # stream=True so iter_lines() consumes the SSE response incrementally
    # instead of buffering the entire body before the first yield.
    response = requests.post('https://v.chatfree.cc/api/openai/v1/chat/completions',
                             headers=headers, json=json_data, stream=True)

    for chunk in response.iter_lines():
        if b'content' not in chunk:
            continue
        # SSE lines look like "data: {...json...}"; guard against lines that
        # mention "content" but carry no data payload (the original
        # split(...)[1] raised IndexError on those).
        _, sep, payload = chunk.decode().partition('data: ')
        if not sep:
            continue
        data = json.loads(payload)
        delta = data['choices'][0]['delta']
        if 'content' in delta:
            yield delta['content']
46
+
47
# Human-readable summary of _create_completion's positional signature,
# e.g. "g4f.Providers.ChatFree supports: (model: str, messages: list, stream: bool)".
params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
    '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
g4f/Provider/Providers/Fakeopen.py CHANGED
@@ -6,7 +6,7 @@ from typing import Dict, get_type_hints
6
  url = 'https://ai.fakeopen.com/v1/'
7
  model = [
8
  'gpt-3.5-turbo',
9
- 'gpt-3.5-turbo-0613'
10
  'gpt-3.5-turbo-16k',
11
  'gpt-3.5-turbo-16k-0613',
12
  ]
 
6
  url = 'https://ai.fakeopen.com/v1/'
7
  model = [
8
  'gpt-3.5-turbo',
9
+ 'gpt-3.5-turbo-0613',
10
  'gpt-3.5-turbo-16k',
11
  'gpt-3.5-turbo-16k-0613',
12
  ]
g4f/Provider/Providers/Wewordle.py ADDED
@@ -0,0 +1,73 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Provider that impersonates the wewordle.org Android client's GPT endpoint.
import os
import requests
import json
import random
import time
import string
from ...typing import sha256, Dict, get_type_hints

# Android "turbo" endpoint used by the wewordle app.
url = "https://wewordle.org/gptapi/v1/android/turbo"
model = ['gpt-3.5-turbo']
# The endpoint returns one complete JSON payload; no token streaming.
supports_stream = False
# No API key or login is required (a random anonymous subscriber is forged).
needs_auth = False
13
+
14
+
15
def _create_completion(model: str, messages: list, stream: bool, **kwargs):
    """Impersonate the wewordle Android client: forge an anonymous
    subscriber record, send the flattened conversation, and yield the
    assistant's reply on success.

    On a non-200 response the status code is printed and nothing is yielded.
    """
    # Serialize the chat history as "role: content" lines, ending with an
    # open "assistant:" turn for the model to complete.
    prompt = ''.join('%s: %s\n' % (msg['role'], msg['content']) for msg in messages)
    prompt += 'assistant:'

    # Random identifiers so each request looks like a fresh anonymous client.
    alphabet = f'{string.ascii_lowercase}{string.digits}'
    user_id = ''.join(random.choices(alphabet, k=16))
    app_id = ''.join(random.choices(alphabet, k=31))
    # Current time formatted as a UTC ISO-8601 timestamp.
    request_date = time.strftime("%Y-%m-%dT%H:%M:%S.000Z", time.gmtime())

    headers = {
        'accept': '*/*',
        'pragma': 'no-cache',
        'Content-Type': 'application/json',
        'Connection': 'keep-alive'
    }
    # Payload mimics the app's RevenueCat-style anonymous subscriber object.
    payload = {
        "user": user_id,
        "messages": [
            {"role": "user", "content": prompt}
        ],
        "subscriber": {
            "originalPurchaseDate": None,
            "originalApplicationVersion": None,
            "allPurchaseDatesMillis": {},
            "entitlements": {
                "active": {},
                "all": {}
            },
            "allPurchaseDates": {},
            "allExpirationDatesMillis": {},
            "allExpirationDates": {},
            "originalAppUserId": f"$RCAnonymousID:{app_id}",
            "latestExpirationDate": None,
            "requestDate": request_date,
            "latestExpirationDateMillis": None,
            "nonSubscriptionTransactions": [],
            "originalPurchaseDateMillis": None,
            "managementURL": None,
            "allPurchasedProductIdentifiers": [],
            "firstSeen": request_date,
            "activeSubscriptions": []
        }
    }

    response = requests.post(url, headers=headers, data=json.dumps(payload))
    if response.status_code != 200:
        print(f"Error Occurred::{response.status_code}")
        return None
    body = response.json()
    if 'message' in body:
        yield body['message']['content']
69
+
70
+
71
# Human-readable summary of _create_completion's positional signature,
# e.g. "g4f.Providers.Wewordle supports: (model: str, messages: list, stream: bool)".
params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
    '(%s)' % ', '.join(
        [f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
g4f/Provider/__init__.py CHANGED
@@ -2,9 +2,11 @@ from . import Provider
2
  from .Providers import (
3
  Aichat,
4
  Ails,
 
5
  Bard,
6
  Better,
7
  Bing,
 
8
  ChatgptAi,
9
  ChatgptLogin,
10
  ChatgptLogin,
@@ -27,7 +29,8 @@ from .Providers import (
27
  Xiaor,
28
  Yqcloud,
29
  You,
30
- Zeabur
 
31
  )
32
 
33
  Palm = Bard
 
2
  from .Providers import (
3
  Aichat,
4
  Ails,
5
+ AiService,
6
  Bard,
7
  Better,
8
  Bing,
9
+ ChatFree,
10
  ChatgptAi,
11
  ChatgptLogin,
12
  ChatgptLogin,
 
29
  Xiaor,
30
  Yqcloud,
31
  You,
32
+ Zeabur,
33
+ Wewordle
34
  )
35
 
36
  Palm = Bard
g4f/models.py CHANGED
@@ -30,7 +30,7 @@ class Model:
30
  class gpt_35_turbo_16k:
31
  name: str = 'gpt-3.5-turbo-16k'
32
  base_provider: str = 'openai'
33
- best_provider: Provider.Provider = Provider.Ezcht
34
 
35
  class gpt_4_dev:
36
  name: str = 'gpt-4-for-dev'
@@ -163,10 +163,6 @@ class Model:
163
  base_provider: str = 'google'
164
  best_provider: Provider.Provider = Provider.Bard
165
 
166
- """ 'falcon-40b': Model.falcon_40b,
167
- 'falcon-7b': Model.falcon_7b,
168
- 'llama-13b': Model.llama_13b,"""
169
-
170
  class falcon_40b:
171
  name: str = 'falcon-40b'
172
  base_provider: str = 'huggingface'
 
30
  class gpt_35_turbo_16k:
31
  name: str = 'gpt-3.5-turbo-16k'
32
  base_provider: str = 'openai'
33
+ best_provider: Provider.Provider = Provider.ChatFree
34
 
35
  class gpt_4_dev:
36
  name: str = 'gpt-4-for-dev'
 
163
  base_provider: str = 'google'
164
  best_provider: Provider.Provider = Provider.Bard
165
 
 
 
 
 
166
  class falcon_40b:
167
  name: str = 'falcon-40b'
168
  base_provider: str = 'huggingface'
server/backend.py CHANGED
@@ -37,7 +37,6 @@ class Backend_Api:
37
  # Generate response
38
  response = ChatCompletion.create(
39
  model=model,
40
- stream=True,
41
  chatId=conversation_id,
42
  messages=messages
43
  )
 
37
  # Generate response
38
  response = ChatCompletion.create(
39
  model=model,
 
40
  chatId=conversation_id,
41
  messages=messages
42
  )