path: root/g4f/Provider/DeepInfra.py

from __future__ import annotations

import json
from ..typing           import AsyncResult, Messages
from .base_provider     import AsyncGeneratorProvider
from ..requests         import StreamSession

class DeepInfra(AsyncGeneratorProvider):
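    """Async generator provider that streams chat completions from DeepInfra's
    OpenAI-compatible endpoint (https://api.deepinfra.com/v1/openai/chat/completions)."""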
    url = "https://deepinfra.com"
    working = True
    supports_stream = True
    supports_message_history = True

    @staticmethod
    async def create_async_generator(
        model: str,
        messages: Messages,
        stream: bool,
        proxy: str = None,
        timeout: int = 120,
        auth: str = None,
        **kwargs
    ) -> AsyncResult:
        if not model:
            model = 'meta-llama/Llama-2-70b-chat-hf'
        headers = {
            'Accept-Encoding': 'gzip, deflate, br',
            'Accept-Language': 'en-US',
            'Connection': 'keep-alive',
            'Content-Type': 'application/json',
            'Origin': 'https://deepinfra.com',
            'Referer': 'https://deepinfra.com/',
            'Sec-Fetch-Dest': 'empty',
            'Sec-Fetch-Mode': 'cors',
            'Sec-Fetch-Site': 'same-site',
            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36',
            'X-Deepinfra-Source': 'web-embed',
            'accept': 'text/event-stream',
            'sec-ch-ua': '"Google Chrome";v="119", "Chromium";v="119", "Not?A_Brand";v="24"',
            'sec-ch-ua-mobile': '?0',
            'sec-ch-ua-platform': '"macOS"',
        }
        if auth:
            headers['Authorization'] = f"bearer {auth}"

        async with StreamSession(headers=headers,
            timeout=timeout,
            proxies={"https": proxy},
            impersonate="chrome110"
        ) as session:
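            # The upstream request always streams (see json_data below); the
            # `stream` argument is accepted for interface compatibility only.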
            json_data = {
                'model'   : model,
                'messages': messages,
                'stream'  : True
            }
            async with session.post('https://api.deepinfra.com/v1/openai/chat/completions',
                                    json=json_data) as response:
                response.raise_for_status()
                first = True
                async for line in response.iter_lines():
                    if not line.startswith(b"data: "):
                        continue

                    try:
                        # Slice off the SSE "data: " prefix; str.lstrip() strips a
                        # character set, not a literal prefix, and can eat content.
                        decoded_line = line[len(b"data: "):].decode()
                        if decoded_line == "[DONE]":
                            break
                        json_line = json.loads(decoded_line)

                        choices = json_line.get("choices", [{}])
                        finish_reason = choices[0].get("finish_reason", "")
                        if finish_reason:
                            break
                        token = choices[0].get("delta", {}).get("content", "")

                        if token:
                            if first:
                                token = token.lstrip()
                                first = False
                            yield token
                    except Exception as e:
                        # Surface the offending SSE line while preserving the original error.
                        raise RuntimeError(f"Response: {line}") from e
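

if __name__ == "__main__":
    # Minimal usage sketch, not part of the provider: stream tokens from DeepInfra
    # directly through this class. Assumes the g4f package (for StreamSession) is
    # installed and network access is available; many models may also require a
    # DeepInfra API token passed via `auth=...`.
    import asyncio

    async def _demo():
        messages = [{"role": "user", "content": "Say hello in one sentence."}]
        async for token in DeepInfra.create_async_generator(
            model="meta-llama/Llama-2-70b-chat-hf",
            messages=messages,
            stream=True,
        ):
            print(token, end="", flush=True)
        print()

    asyncio.run(_demo())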