author     ⲘrṨhส∂ow <71973368+MrShadowDev@users.noreply.github.com>  2023-10-23 09:46:25 +0200
committer  GitHub <noreply@github.com>                               2023-10-23 09:46:25 +0200
commit     3982f39424ea037aca1086d45c6f657b4bfc457c (patch)
tree       987290c5dc5822cb0197e789df68488536b1637c /g4f/Provider/deprecated
parent     ~ | g4f `v-0.1.7.5` (diff)
Diffstat (limited to 'g4f/Provider/deprecated')
-rw-r--r--  g4f/Provider/deprecated/AiService.py     9
-rw-r--r--  g4f/Provider/deprecated/CodeLinkAva.py  23
-rw-r--r--  g4f/Provider/deprecated/EasyChat.py     40
-rw-r--r--  g4f/Provider/deprecated/Equing.py       11
-rw-r--r--  g4f/Provider/deprecated/FastGpt.py      13
-rw-r--r--  g4f/Provider/deprecated/H2o.py           2
-rw-r--r--  g4f/Provider/deprecated/Lockchat.py      7
-rw-r--r--  g4f/Provider/deprecated/V50.py          21
-rw-r--r--  g4f/Provider/deprecated/Vitalentum.py   23
-rw-r--r--  g4f/Provider/deprecated/Wuguokai.py     19
10 files changed, 88 insertions, 80 deletions
diff --git a/g4f/Provider/deprecated/AiService.py b/g4f/Provider/deprecated/AiService.py
index d1d15859..325af670 100644
--- a/g4f/Provider/deprecated/AiService.py
+++ b/g4f/Provider/deprecated/AiService.py
@@ -18,9 +18,12 @@ class AiService(BaseProvider):
stream: bool,
**kwargs: Any,
) -> CreateResult:
- base = "\n".join(f"{message['role']}: {message['content']}" for message in messages)
- base += "\nassistant: "
-
+ base = (
+ "\n".join(
+ f"{message['role']}: {message['content']}" for message in messages
+ )
+ + "\nassistant: "
+ )
headers = {
"accept": "*/*",
"content-type": "text/plain;charset=UTF-8",
diff --git a/g4f/Provider/deprecated/CodeLinkAva.py b/g4f/Provider/deprecated/CodeLinkAva.py
index 8407ebb9..64ce1af9 100644
--- a/g4f/Provider/deprecated/CodeLinkAva.py
+++ b/g4f/Provider/deprecated/CodeLinkAva.py
@@ -20,18 +20,18 @@ class CodeLinkAva(AsyncGeneratorProvider):
**kwargs
) -> AsyncGenerator:
headers = {
- "User-Agent" : "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36",
- "Accept" : "*/*",
- "Accept-language" : "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3",
- "Origin" : cls.url,
- "Referer" : cls.url + "/",
- "Sec-Fetch-Dest" : "empty",
- "Sec-Fetch-Mode" : "cors",
- "Sec-Fetch-Site" : "same-origin",
+ "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36",
+ "Accept": "*/*",
+ "Accept-language": "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3",
+ "Origin": cls.url,
+ "Referer": f"{cls.url}/",
+ "Sec-Fetch-Dest": "empty",
+ "Sec-Fetch-Mode": "cors",
+ "Sec-Fetch-Site": "same-origin",
}
async with ClientSession(
- headers=headers
- ) as session:
+ headers=headers
+ ) as session:
data = {
"messages": messages,
"temperature": 0.6,
@@ -46,8 +46,7 @@ class CodeLinkAva(AsyncGeneratorProvider):
if line.startswith("data: [DONE]"):
break
line = json.loads(line[6:-1])
- content = line["choices"][0]["delta"].get("content")
- if content:
+ if content := line["choices"][0]["delta"].get("content"):
yield content
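
The last hunk above swaps a separate assignment plus truthiness check for an assignment expression (the walrus operator, Python 3.8+); the same substitution recurs in Equing, FastGpt, Lockchat and Vitalentum below. A small sketch of the equivalence, using a made-up delta payload:

    line = {"choices": [{"delta": {"content": "Hel"}}]}

    # before: assign, then test
    content = line["choices"][0]["delta"].get("content")
    if content:
        print(content)

    # after: assign and test in one expression; a missing or empty delta is skipped
    if content := line["choices"][0]["delta"].get("content"):
        print(content)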
diff --git a/g4f/Provider/deprecated/EasyChat.py b/g4f/Provider/deprecated/EasyChat.py
index ffe9a785..bd49c09c 100644
--- a/g4f/Provider/deprecated/EasyChat.py
+++ b/g4f/Provider/deprecated/EasyChat.py
@@ -30,7 +30,7 @@ class EasyChat(BaseProvider):
"https://chat4.fastgpt.me",
"https://gxos1h1ddt.fastgpt.me"
]
-
+
server = active_servers[kwargs.get("active_server", random.randint(0, 5))]
headers = {
"authority" : f"{server}".replace("https://", ""),
@@ -68,30 +68,26 @@ class EasyChat(BaseProvider):
        response = session.post(f"{server}/api/openai/v1/chat/completions",
            headers=headers, json=json_data, stream=stream)
-
-        if response.status_code == 200:
-
-            if stream == False:
-                json_data = response.json()
-
-                if "choices" in json_data:
-                    yield json_data["choices"][0]["message"]["content"]
-                else:
-                    raise Exception("No response from server")
-
+
+        if response.status_code != 200:
+            raise Exception(f"Error {response.status_code} from server : {response.reason}")
+        if not stream:
+            json_data = response.json()
+
+            if "choices" in json_data:
+                yield json_data["choices"][0]["message"]["content"]
            else:
-                for chunk in response.iter_lines():
-                    if b"content" in chunk:
-                        splitData = chunk.decode().split("data:")
-
-                        if len(splitData) > 1:
-                            yield json.loads(splitData[1])["choices"][0]["delta"]["content"]
-                        else:
-                            continue
-        else:
-            raise Exception(f"Error {response.status_code} from server : {response.reason}")
+                raise Exception("No response from server")
+
+        else:
+            for chunk in response.iter_lines():
+                if b"content" in chunk:
+                    splitData = chunk.decode().split("data:")
+
+                    if len(splitData) > 1:
+                        yield json.loads(splitData[1])["choices"][0]["delta"]["content"]
@classmethod
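
The EasyChat rewrite (and the Wuguokai one at the end) inverts the status check into a guard clause, so the error path exits first and the success path loses a nesting level. A schematic sketch of the control-flow change; the `response` stub below stands in for a real requests.Response:

    from types import SimpleNamespace

    # stand-in for a requests.Response; the provider gets this from session.post(...)
    response = SimpleNamespace(status_code=500, reason="Internal Server Error")

    # before: the success branch is nested and the error branch sits at the bottom
    def handle_nested(response):
        if response.status_code == 200:
            return "handle the streamed or non-streamed body here"
        else:
            raise Exception(f"Error {response.status_code} from server : {response.reason}")

    # after: fail fast, then handle the body without the extra indentation level
    def handle_guarded(response):
        if response.status_code != 200:
            raise Exception(f"Error {response.status_code} from server : {response.reason}")
        return "handle the streamed or non-streamed body here"

    # both raise the same exception for this stub and return the same value for a 200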
diff --git a/g4f/Provider/deprecated/Equing.py b/g4f/Provider/deprecated/Equing.py
index 794274f2..5ba125a3 100644
--- a/g4f/Provider/deprecated/Equing.py
+++ b/g4f/Provider/deprecated/Equing.py
@@ -56,18 +56,17 @@ class Equing(BaseProvider):
response = requests.post('https://next.eqing.tech/api/openai/v1/chat/completions',
headers=headers, json=json_data, stream=stream)
-
+
if not stream:
yield response.json()["choices"][0]["message"]["content"]
return
-
+
for line in response.iter_content(chunk_size=1024):
if line:
if b'content' in line:
- line_json = json.loads(line.decode('utf-8').split('data: ')[1])
- token = line_json['choices'][0]['delta'].get('content')
- if token:
- yield token
+ line_json = json.loads(line.decode('utf-8').split('data: ')[1])
+ if token := line_json['choices'][0]['delta'].get('content'):
+ yield token
@classmethod
@property
diff --git a/g4f/Provider/deprecated/FastGpt.py b/g4f/Provider/deprecated/FastGpt.py
index 65efa29d..17b21b37 100644
--- a/g4f/Provider/deprecated/FastGpt.py
+++ b/g4f/Provider/deprecated/FastGpt.py
@@ -55,7 +55,7 @@ class FastGpt(BaseProvider):
'frequency_penalty' : kwargs.get('frequency_penalty', 0),
'top_p' : kwargs.get('top_p', 1),
}
-
+
subdomain = random.choice([
'jdaen979ew',
'chat9'
@@ -63,15 +63,16 @@ class FastGpt(BaseProvider):
response = requests.post(f'https://{subdomain}.fastgpt.me/api/openai/v1/chat/completions',
headers=headers, json=json_data, stream=stream)
-
+
for line in response.iter_lines():
if line:
try:
if b'content' in line:
- line_json = json.loads(line.decode('utf-8').split('data: ')[1])
- token = line_json['choices'][0]['delta'].get('content')
- if token:
- yield token
+ line_json = json.loads(line.decode('utf-8').split('data: ')[1])
+ if token := line_json['choices'][0]['delta'].get(
+ 'content'
+ ):
+ yield token
except:
continue
diff --git a/g4f/Provider/deprecated/H2o.py b/g4f/Provider/deprecated/H2o.py
index 47290a3e..cead17e1 100644
--- a/g4f/Provider/deprecated/H2o.py
+++ b/g4f/Provider/deprecated/H2o.py
@@ -22,7 +22,7 @@ class H2o(AsyncGeneratorProvider):
**kwargs
) -> AsyncResult:
model = model if model else cls.model
- headers = {"Referer": cls.url + "/"}
+ headers = {"Referer": f"{cls.url}/"}
async with ClientSession(
headers=headers
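
The only change in H2o, like the Referer lines in CodeLinkAva and Vitalentum, replaces string concatenation with an f-string; both forms build the same header value. For example, with an illustrative base URL:

    url = "https://example-provider.test"  # stands in for cls.url
    assert url + "/" == f"{url}/" == "https://example-provider.test/"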
diff --git a/g4f/Provider/deprecated/Lockchat.py b/g4f/Provider/deprecated/Lockchat.py
index 4bd7c5fe..5acfbfbf 100644
--- a/g4f/Provider/deprecated/Lockchat.py
+++ b/g4f/Provider/deprecated/Lockchat.py
@@ -33,7 +33,7 @@ class Lockchat(BaseProvider):
}
response = requests.post("http://supertest.lockchat.app/v1/chat/completions",
json=payload, headers=headers, stream=True)
-
+
response.raise_for_status()
for token in response.iter_lines():
if b"The model: `gpt-4` does not exist" in token:
@@ -44,11 +44,10 @@ class Lockchat(BaseProvider):
stream = stream,
temperature = temperature,
**kwargs)
-
+
if b"content" in token:
token = json.loads(token.decode("utf-8").split("data: ")[1])
- token = token["choices"][0]["delta"].get("content")
- if token:
+ if token := token["choices"][0]["delta"].get("content"):
yield (token)
@classmethod
diff --git a/g4f/Provider/deprecated/V50.py b/g4f/Provider/deprecated/V50.py
index 9a8b032c..f4f4d823 100644
--- a/g4f/Provider/deprecated/V50.py
+++ b/g4f/Provider/deprecated/V50.py
@@ -21,9 +21,12 @@ class V50(BaseProvider):
messages: list[dict[str, str]],
stream: bool, **kwargs: Any) -> CreateResult:
- conversation = "\n".join(f"{message['role']}: {message['content']}" for message in messages)
- conversation += "\nassistant: "
-
+ conversation = (
+ "\n".join(
+ f"{message['role']}: {message['content']}" for message in messages
+ )
+ + "\nassistant: "
+ )
payload = {
"prompt" : conversation,
"options" : {},
@@ -33,7 +36,7 @@ class V50(BaseProvider):
"model" : model,
"user" : str(uuid.uuid4())
}
-
+
headers = {
'authority' : 'p5.v50.ltd',
'accept' : 'application/json, text/plain, */*',
@@ -47,9 +50,13 @@ class V50(BaseProvider):
'sec-fetch-site' : 'same-origin',
'user-agent' : 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36'
}
- response = requests.post("https://p5.v50.ltd/api/chat-process",
- json=payload, headers=headers, proxies=kwargs['proxy'] if 'proxy' in kwargs else {})
-
+ response = requests.post(
+ "https://p5.v50.ltd/api/chat-process",
+ json=payload,
+ headers=headers,
+ proxies=kwargs.get('proxy', {}),
+ )
+
if "https://fk1.v50.ltd" not in response.text:
yield response.text
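
V50 (and Wuguokai below) also swaps the `kwargs['proxy'] if 'proxy' in kwargs else {}` conditional for `dict.get`, which does the same lookup with a default in a single call. A minimal illustration:

    kwargs = {"stream": True}                              # no proxy supplied
    assert kwargs.get("proxy", {}) == {}

    kwargs = {"proxy": {"https": "http://127.0.0.1:8080"}}
    assert kwargs.get("proxy", {}) == {"https": "http://127.0.0.1:8080"}
    assert kwargs.get("proxy", {}) == (kwargs["proxy"] if "proxy" in kwargs else {})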
diff --git a/g4f/Provider/deprecated/Vitalentum.py b/g4f/Provider/deprecated/Vitalentum.py
index 8deb1f50..d6ba9336 100644
--- a/g4f/Provider/deprecated/Vitalentum.py
+++ b/g4f/Provider/deprecated/Vitalentum.py
@@ -20,14 +20,14 @@ class Vitalentum(AsyncGeneratorProvider):
**kwargs
) -> AsyncResult:
headers = {
- "User-Agent" : "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36",
- "Accept" : "text/event-stream",
- "Accept-language" : "de,en-US;q=0.7,en;q=0.3",
- "Origin" : cls.url,
- "Referer" : cls.url + "/",
- "Sec-Fetch-Dest" : "empty",
- "Sec-Fetch-Mode" : "cors",
- "Sec-Fetch-Site" : "same-origin",
+ "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36",
+ "Accept": "text/event-stream",
+ "Accept-language": "de,en-US;q=0.7,en;q=0.3",
+ "Origin": cls.url,
+ "Referer": f"{cls.url}/",
+ "Sec-Fetch-Dest": "empty",
+ "Sec-Fetch-Mode": "cors",
+ "Sec-Fetch-Site": "same-origin",
}
conversation = json.dumps({"history": [{
"speaker": "human" if message["role"] == "user" else "bot",
@@ -39,8 +39,8 @@ class Vitalentum(AsyncGeneratorProvider):
**kwargs
}
async with ClientSession(
- headers=headers
- ) as session:
+ headers=headers
+ ) as session:
async with session.post(f"{cls.url}/api/converse-edge", json=data, proxy=proxy) as response:
response.raise_for_status()
async for line in response.content:
@@ -49,8 +49,7 @@ class Vitalentum(AsyncGeneratorProvider):
if line.startswith("data: [DONE]"):
break
line = json.loads(line[6:-1])
- content = line["choices"][0]["delta"].get("content")
- if content:
+ if content := line["choices"][0]["delta"].get("content"):
yield content
diff --git a/g4f/Provider/deprecated/Wuguokai.py b/g4f/Provider/deprecated/Wuguokai.py
index 311131cf..079f0541 100644
--- a/g4f/Provider/deprecated/Wuguokai.py
+++ b/g4f/Provider/deprecated/Wuguokai.py
@@ -41,15 +41,20 @@ class Wuguokai(BaseProvider):
"userId": f"#/chat/{random.randint(1,99999999)}",
"usingContext": True
}
- response = requests.post("https://ai-api20.wuguokai.xyz/api/chat-process", headers=headers, timeout=3, json=data, proxies=kwargs['proxy'] if 'proxy' in kwargs else {})
+ response = requests.post(
+ "https://ai-api20.wuguokai.xyz/api/chat-process",
+ headers=headers,
+ timeout=3,
+ json=data,
+ proxies=kwargs.get('proxy', {}),
+ )
_split = response.text.split("> 若回答失败请重试或多刷新几次界面后重试")
- if response.status_code == 200:
- if len(_split) > 1:
- yield _split[1].strip()
- else:
- yield _split[0].strip()
- else:
+ if response.status_code != 200:
raise Exception(f"Error: {response.status_code} {response.reason}")
+ if len(_split) > 1:
+ yield _split[1].strip()
+ else:
+ yield _split[0].strip()
@classmethod
@property