-rw-r--r--  etc/testing/test_api.py | 27
-rw-r--r--  g4f/api/__init__.py     | 32
2 files changed, 39 insertions, 20 deletions
diff --git a/etc/testing/test_api.py b/etc/testing/test_api.py
new file mode 100644
index 00000000..57e2f117
--- /dev/null
+++ b/etc/testing/test_api.py
@@ -0,0 +1,27 @@
+import openai
+
+# Set your Hugging Face token as the API key if you use embeddings
+# If you don't use embeddings, leave it empty
+openai.api_key = "YOUR_HUGGING_FACE_TOKEN"  # Replace with your actual token
+
+# Set the API base URL if needed, e.g., for a local development environment
+openai.api_base = "http://localhost:1337/v1"
+
+def main():
+    response = openai.ChatCompletion.create(
+        model="gpt-3.5-turbo",
+        messages=[{"role": "user", "content": "write a poem about a tree"}],
+        stream=True,
+    )
+    if isinstance(response, dict):
+        # Not streaming
+        print(response.choices[0].message.content)
+    else:
+        # Streaming
+        for token in response:
+            content = token["choices"][0]["delta"].get("content")
+            if content is not None:
+                print(content, end="", flush=True)
+
+if __name__ == "__main__":
+    main()
\ No newline at end of file
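The test above exercises the interference API through the legacy openai<1.0 module-level interface. For reference, a minimal sketch of the same request with the openai>=1.0 client follows; it is not part of this commit, the base URL and model simply mirror the test, and the placeholder key is an assumption (per the comment in the test, a real token is only needed for embeddings).

# Sketch only: same call via the openai>=1.0 client (assumption, not in this commit)
from openai import OpenAI

client = OpenAI(
    api_key="YOUR_HUGGING_FACE_TOKEN",  # placeholder; only needed for embeddings
    base_url="http://localhost:1337/v1",
)

stream = client.chat.completions.create(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "write a poem about a tree"}],
    stream=True,
)
for chunk in stream:
    content = chunk.choices[0].delta.content
    if content is not None:
        print(content, end="", flush=True)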
diff --git a/g4f/api/__init__.py b/g4f/api/__init__.py
index a79da7b0..8369d70f 100644
--- a/g4f/api/__init__.py
+++ b/g4f/api/__init__.py
@@ -83,28 +83,17 @@ class Api:
         model = item_data.get('model')
         stream = True if item_data.get("stream") == "True" else False
         messages = item_data.get('messages')
-        conversation = item_data.get('conversation') if item_data.get('conversation') != None else None
-        provider = item_data.get('provider').replace('g4f.Provider.', '')
+        provider = item_data.get('provider', '').replace('g4f.Provider.', '')
         provider = provider if provider and provider != "Auto" else None
-        if provider != None:
-            provider = g4f.Provider.ProviderUtils.convert.get(provider)
 
         try:
-            if model == 'pi':
-                response = g4f.ChatCompletion.create(
-                    model=model,
-                    stream=stream,
-                    messages=messages,
-                    conversation=conversation,
-                    provider = provider,
-                    ignored=self.list_ignored_providers)
-            else:
-                response = g4f.ChatCompletion.create(
-                    model=model,
-                    stream=stream,
-                    messages=messages,
-                    provider = provider,
-                    ignored=self.list_ignored_providers)
+            response = g4f.ChatCompletion.create(
+                model=model,
+                stream=stream,
+                messages=messages,
+                provider = provider,
+                ignored=self.list_ignored_providers
+            )
         except Exception as e:
             logging.exception(e)
             return Response(content=json.dumps({"error": "An error occurred while generating the response."}, indent=4), media_type="application/json")
@@ -179,9 +168,12 @@ class Api:
                 content = json.dumps(end_completion_data, separators=(',', ':'))
                 yield f'data: {content}\n\n'
-            except GeneratorExit: pass
+            except Exception as e:
+                logging.exception(e)
+                content=json.dumps({"error": "An error occurred while generating the response."}, indent=4)
+                yield f'data: {content}\n\n'
 
         return StreamingResponse(streaming(), media_type="text/event-stream")
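With this change the streaming branch reports failures as a regular SSE data event carrying an "error" key instead of silently swallowing GeneratorExit, so clients can surface the failure mid-stream. A hedged sketch of a raw consumer that handles both token deltas and that error event is below; the /v1/chat/completions path is inferred from the test's api_base, and the chunk layout is assumed to be OpenAI-compatible.

# Sketch only: consume the text/event-stream response with requests.
# Assumptions: route is /v1/chat/completions (inferred from the test's
# api_base) and chunks follow the OpenAI delta format; the "error" key
# matches the JSON yielded by the new except-branch above.
import json
import requests

resp = requests.post(
    "http://localhost:1337/v1/chat/completions",
    json={
        "model": "gpt-3.5-turbo",
        "stream": "True",  # the handler compares against the string "True"
        "messages": [{"role": "user", "content": "write a poem about a tree"}],
    },
    stream=True,
)
for raw in resp.iter_lines():
    if not raw or not raw.startswith(b"data: "):
        continue  # skip blank separator lines between events
    event = json.loads(raw[len(b"data: "):])
    if "error" in event:  # emitted by the new exception handler
        raise RuntimeError(event["error"])
    choices = event.get("choices") or []
    if choices:
        delta = choices[0].get("delta", {}).get("content")
        if delta:
            print(delta, end="", flush=True)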