summary refs log tree commit diff stats
path: root/g4f/Provider
diff options
context:
space:
mode:
Diffstat (limited to '')
-rw-r--r--	g4f/Provider/Providers/H2o.py	198
1 file changed, 93 insertions(+), 105 deletions(-)
diff --git a/g4f/Provider/Providers/H2o.py b/g4f/Provider/Providers/H2o.py
index eabf94e2..4400f3a9 100644
--- a/g4f/Provider/Providers/H2o.py
+++ b/g4f/Provider/Providers/H2o.py
@@ -1,106 +1,94 @@
-from requests import Session
-from uuid import uuid4
-from json import loads
-import os
-import json
-import requests
-from ...typing import sha256, Dict, get_type_hints
-
-url = 'https://gpt-gm.h2o.ai'
-model = ['falcon-40b', 'falcon-7b', 'llama-13b']
-supports_stream = True
-needs_auth = False
-
-models = {
- 'falcon-7b': 'h2oai/h2ogpt-gm-oasst1-en-2048-falcon-7b-v3',
- 'falcon-40b': 'h2oai/h2ogpt-gm-oasst1-en-2048-falcon-40b-v1',
- 'llama-13b': 'h2oai/h2ogpt-gm-oasst1-en-2048-open-llama-13b'
-}
-
-def _create_completion(model: str, messages: list, stream: bool, **kwargs):
- conversation = 'instruction: this is a conversation beween, a user and an AI assistant, respond to the latest message, referring to the conversation if needed\n'
- for message in messages:
- conversation += '%s: %s\n' % (message['role'], message['content'])
- conversation += 'assistant:'
-
- client = Session()
- client.headers = {
- 'authority': 'gpt-gm.h2o.ai',
- 'origin': 'https://gpt-gm.h2o.ai',
- 'referer': 'https://gpt-gm.h2o.ai/',
- 'sec-ch-ua': '"Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114"',
- 'sec-ch-ua-mobile': '?0',
- 'sec-ch-ua-platform': '"Windows"',
- 'sec-fetch-dest': 'document',
- 'sec-fetch-mode': 'navigate',
- 'sec-fetch-site': 'same-origin',
- 'sec-fetch-user': '?1',
- 'upgrade-insecure-requests': '1',
- 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36',
- }
-
- client.get('https://gpt-gm.h2o.ai/')
- response = client.post('https://gpt-gm.h2o.ai/settings', data={
- 'ethicsModalAccepted': 'true',
- 'shareConversationsWithModelAuthors': 'true',
- 'ethicsModalAcceptedAt': '',
- 'activeModel': 'h2oai/h2ogpt-gm-oasst1-en-2048-falcon-40b-v1',
- 'searchEnabled': 'true',
- })
-
- headers = {
- 'authority': 'gpt-gm.h2o.ai',
- 'accept': '*/*',
- 'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
- 'origin': 'https://gpt-gm.h2o.ai',
- 'referer': 'https://gpt-gm.h2o.ai/',
- 'sec-ch-ua': '"Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114"',
- 'sec-ch-ua-mobile': '?0',
- 'sec-ch-ua-platform': '"Windows"',
- 'sec-fetch-dest': 'empty',
- 'sec-fetch-mode': 'cors',
- 'sec-fetch-site': 'same-origin',
- 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36',
- }
-
- json_data = {
- 'model': models[model]
- }
-
- response = client.post('https://gpt-gm.h2o.ai/conversation',
- headers=headers, json=json_data)
- conversationId = response.json()['conversationId']
-
-
- completion = client.post(f'https://gpt-gm.h2o.ai/conversation/{conversationId}', stream=True, json = {
- 'inputs': conversation,
- 'parameters': {
- 'temperature': kwargs.get('temperature', 0.4),
- 'truncate': kwargs.get('truncate', 2048),
- 'max_new_tokens': kwargs.get('max_new_tokens', 1024),
- 'do_sample': kwargs.get('do_sample', True),
- 'repetition_penalty': kwargs.get('repetition_penalty', 1.2),
- 'return_full_text': kwargs.get('return_full_text', False)
- },
- 'stream': True,
- 'options': {
- 'id': kwargs.get('id', str(uuid4())),
- 'response_id': kwargs.get('response_id', str(uuid4())),
- 'is_retry': False,
- 'use_cache': False,
- 'web_search_id': ''
- }
- })
-
- for line in completion.iter_lines():
- if b'data' in line:
- line = loads(line.decode('utf-8').replace('data:', ''))
- token = line['token']['text']
-
- if token == '<|endoftext|>':
- break
- else:
- yield (token)
-
-params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
+from requests import Session
+from uuid import uuid4
+from json import loads
+import os
+import json
+import requests
+from ...typing import sha256, Dict, get_type_hints
+
# Provider endpoint and capability flags consumed by the g4f loader.
url = 'https://gpt-gm.h2o.ai'
model = ['falcon-40b', 'falcon-7b', 'llama-13b']
supports_stream = True
needs_auth = False

# Maps the short aliases above to the full h2oai checkpoint names
# that the conversation endpoint expects.
models = {
    'falcon-7b': 'h2oai/h2ogpt-gm-oasst1-en-2048-falcon-7b-v3',
    'falcon-40b': 'h2oai/h2ogpt-gm-oasst1-en-2048-falcon-40b-v1',
    'llama-13b': 'h2oai/h2ogpt-gm-oasst1-en-2048-open-llama-13b',
}
+
def _create_completion(model: str, messages: list, stream: bool, **kwargs):
    """Request a chat completion from the gpt-gm.h2o.ai service.

    Args:
        model: short model alias; must be a key of ``models``.
        messages: chat history as dicts with 'role' and 'content' keys.
        stream: accepted for interface compatibility — the reply is returned
            as one string regardless.
        **kwargs: optional overrides for generation parameters
            (temperature, truncate, max_new_tokens, do_sample,
            repetition_penalty, return_full_text, id, response_id).

    Returns:
        The generated reply text (str).

    Raises:
        RuntimeError: if the service response contains no generated text.
    """
    # Flatten the chat history into the single prompt string the API expects.
    conversation = ''
    for message in messages:
        conversation += '%s: %s\n' % (message['role'], message['content'])
    conversation += 'assistant: '

    session = requests.Session()

    # Initial GET establishes the session cookies required by later calls.
    session.get("https://gpt-gm.h2o.ai/")

    form_headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:109.0) Gecko/20100101 Firefox/115.0",
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,*/*;q=0.8",
        "Accept-Language": "ru-RU,ru;q=0.8,en-US;q=0.5,en;q=0.3",
        "Content-Type": "application/x-www-form-urlencoded",
        "Upgrade-Insecure-Requests": "1",
        "Sec-Fetch-Dest": "document",
        "Sec-Fetch-Mode": "navigate",
        "Sec-Fetch-Site": "same-origin",
        "Sec-Fetch-User": "?1",
        "Referer": "https://gpt-gm.h2o.ai/r/jGfKSwU"
    }
    settings = {
        "ethicsModalAccepted": "true",
        "shareConversationsWithModelAuthors": "true",
        "ethicsModalAcceptedAt": "",
        # Fix: activate the model that was actually requested instead of a
        # hard-coded falcon-40b, keeping this consistent with the
        # /conversation call below.
        "activeModel": models[model],
        "searchEnabled": "true"
    }
    session.post("https://gpt-gm.h2o.ai/settings", headers=form_headers, data=settings)

    json_headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:109.0) Gecko/20100101 Firefox/115.0",
        "Accept": "*/*",
        "Accept-Language": "ru-RU,ru;q=0.8,en-US;q=0.5,en;q=0.3",
        "Content-Type": "application/json",
        "Sec-Fetch-Dest": "empty",
        "Sec-Fetch-Mode": "cors",
        "Sec-Fetch-Site": "same-origin",
        "Referer": "https://gpt-gm.h2o.ai/"
    }

    conversation_id = session.post(
        "https://gpt-gm.h2o.ai/conversation",
        headers=json_headers,
        json={"model": models[model]},
    ).json()["conversationId"]

    payload = {
        "inputs": conversation,
        "parameters": {
            "temperature": kwargs.get('temperature', 0.4),
            "truncate": kwargs.get('truncate', 2048),
            "max_new_tokens": kwargs.get('max_new_tokens', 1024),
            "do_sample": kwargs.get('do_sample', True),
            "repetition_penalty": kwargs.get('repetition_penalty', 1.2),
            "return_full_text": kwargs.get('return_full_text', False)
        },
        "stream": True,
        "options": {
            "id": kwargs.get('id', str(uuid4())),
            "response_id": kwargs.get('response_id', str(uuid4())),
            "is_retry": False,
            "use_cache": False,
            "web_search_id": ""
        }
    }

    response = session.post(
        f"https://gpt-gm.h2o.ai/conversation/{conversation_id}",
        headers=json_headers,
        json=payload,
    )

    # The reply is a server-sent-event style body.  The final well-formed
    # "data:" chunk carries the whole generated text; scan from the end so a
    # stray keep-alive or malformed chunk does not raise a cryptic
    # JSONDecodeError as the previous blind `split(...)[-1]` parse did.
    for chunk in reversed(response.text.replace("\n", "").split("data:")):
        try:
            event = json.loads(chunk)
        except ValueError:
            continue
        if isinstance(event, dict) and "generated_text" in event:
            return event["generated_text"]

    raise RuntimeError("H2o: response contained no generated_text")
+
# Human-readable capability string: provider name plus the typed signature
# of _create_completion, built by introspecting its code object.
_arg_names = _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]
_hints = get_type_hints(_create_completion)
params = 'g4f.Providers.%s supports: (%s)' % (
    os.path.basename(__file__)[:-3],
    ', '.join(f'{name}: {_hints[name].__name__}' for name in _arg_names),
)