summaryrefslogtreecommitdiffstats
path: root/gui
diff options
context:
space:
mode:
authornoptuno <repollo.marrero@gmail.com>2023-04-28 02:29:30 +0200
committernoptuno <repollo.marrero@gmail.com>2023-04-28 02:29:30 +0200
commit355dee533bb34a571b9367820a63cccb668cf866 (patch)
tree838af886b4fec07320aeb10f0d1e74ba79e79b5c /gui
parentadded pyproject.toml file (diff)
downloadgpt4free-355dee533bb34a571b9367820a63cccb668cf866.tar
gpt4free-355dee533bb34a571b9367820a63cccb668cf866.tar.gz
gpt4free-355dee533bb34a571b9367820a63cccb668cf866.tar.bz2
gpt4free-355dee533bb34a571b9367820a63cccb668cf866.tar.lz
gpt4free-355dee533bb34a571b9367820a63cccb668cf866.tar.xz
gpt4free-355dee533bb34a571b9367820a63cccb668cf866.tar.zst
gpt4free-355dee533bb34a571b9367820a63cccb668cf866.zip
Diffstat (limited to 'gui')
-rw-r--r--gui/README.md67
-rw-r--r--gui/__init__.py0
-rw-r--r--gui/image1.pngbin0 -> 172284 bytes
-rw-r--r--gui/image2.pngbin0 -> 343564 bytes
-rw-r--r--gui/query_methods.py103
-rw-r--r--gui/streamlit_chat_app.py96
6 files changed, 263 insertions, 3 deletions
diff --git a/gui/README.md b/gui/README.md
index 133abe4b..c638c4dc 100644
--- a/gui/README.md
+++ b/gui/README.md
@@ -1,11 +1,72 @@
# gpt4free gui
-mode `streamlit_app.py` into base folder to run
+This code provides a Graphical User Interface (GUI) for gpt4free. Users can ask questions and get answers from GPT-4 API's, utilizing multiple API implementations. The project contains two different Streamlit applications: `streamlit_app.py` and `streamlit_chat_app.py`.
+Installation
+------------
+
+1. Clone the repository.
+2. Install the required dependencies with: `pip install -r requirements.txt`.
+3. To use `streamlit_chat_app.py`, note that it depends on a pull request (PR #24) from the https://github.com/AI-Yash/st-chat/ repository, which may change in the future. The current dependency library can be found at https://github.com/AI-Yash/st-chat/archive/refs/pull/24/head.zip.
+
+Usage
+-----
+
+Choose one of the Streamlit applications to run:
+
+### streamlit\_app.py
+
+This application provides a simple interface for asking GPT-4 questions and receiving answers.
+
+To run the application:
+
+run:
+```bash
+streamlit run gui/streamlit_app.py
+```
+<br>
+
+<img width="724" alt="image" src="https://user-images.githubusercontent.com/98614666/234232449-0d5cd092-a29d-4759-8197-e00ba712cb1a.png">
+
+<br>
+<br>
preview:
+
<img width="1125" alt="image" src="https://user-images.githubusercontent.com/98614666/234232398-09e9d3c5-08e6-4b8a-b4f2-0666e9790c7d.png">
-run:
-<img width="724" alt="image" src="https://user-images.githubusercontent.com/98614666/234232449-0d5cd092-a29d-4759-8197-e00ba712cb1a.png">
+### streamlit\_chat\_app.py
+
+This application provides a chat-like interface for asking GPT-4 questions and receiving answers. It supports multiple query methods, and users can select the desired API for their queries. The application also maintains a conversation history.
+
+To run the application:
+
+```bash
+streamlit run gui/streamlit_chat_app.py
+```
+
+<br>
+
+<img width="724" alt="image" src="image1.png">
+
+<br>
+<br>
+
+preview:
+
+<img width="1125" alt="image" src="image2.png">
+
+Contributing
+------------
+
+Feel free to submit pull requests, report bugs, or request new features by opening issues on the GitHub repository.
+
+Bug
+----
+There is a bug in `streamlit_chat_app.py` that I haven't pinpointed yet; it is probably something simple, but I haven't had the time to track it down. Whenever you open a new conversation or access an old conversation, it only starts prompt-answering after the second time you submit text to the input field. Other than that, everything else seems to work as expected.
+
+License
+-------
+
+This project is licensed under the MIT License. \ No newline at end of file
diff --git a/gui/__init__.py b/gui/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/gui/__init__.py
diff --git a/gui/image1.png b/gui/image1.png
new file mode 100644
index 00000000..fb84ae7f
--- /dev/null
+++ b/gui/image1.png
Binary files differ
diff --git a/gui/image2.png b/gui/image2.png
new file mode 100644
index 00000000..c5fc91c9
--- /dev/null
+++ b/gui/image2.png
Binary files differ
diff --git a/gui/query_methods.py b/gui/query_methods.py
new file mode 100644
index 00000000..733c3a07
--- /dev/null
+++ b/gui/query_methods.py
@@ -0,0 +1,103 @@
+import openai_rev
+from openai_rev import forefront, quora, theb, you
+import random
+
+
def query_forefront(question: str) -> str:
    """Ask the Forefront GPT-4 backend *question* and return the answer text.

    A throwaway Forefront account is created for every call. Any failure is
    converted into a human-readable error string instead of raising.
    """
    # create a throwaway account for this single query
    token = forefront.Account.create(logging=True)

    try:
        # BUG FIX: the original sent the hard-coded prompt 'hello world'
        # instead of the user's question.
        result = forefront.StreamingCompletion.create(token=token, prompt=question, model='gpt-4')

        return result['response']

    except Exception as e:
        # Return error message if an exception occurs
        return f'An error occurred: {e}. Please make sure you are using a valid cloudflare clearance token and user agent.'
+
+
def query_quora(question: str) -> str:
    """Ask Poe (Quora) *question* via a throwaway account and return the text.

    Unlike the other ``query_*`` helpers, this one deliberately lets
    exceptions propagate; ``query()`` catches them and falls back.
    """
    token = quora.Account.create(logging=False, enable_bot_creation=True)
    response = quora.Completion.create(
        model='gpt-4',
        prompt=question,
        token=token
    )

    # BUG FIX: was `.tex`, which would raise AttributeError on every call.
    return response.completion.choices[0].text
+
+
def query_theb(question: str) -> str:
    """Ask the Theb GPT-4 backend *question* and return the answer text.

    Any failure is turned into a human-readable error string rather than
    propagating the exception to the caller.
    """
    try:
        # The 'response' field of the completion holds the answer text.
        return theb.Completion.create(prompt=question)['response']
    except Exception as error:
        # Surface the failure to the caller as plain text.
        return f'An error occurred: {error}. Please make sure you are using a valid cloudflare clearance token and user agent.'
+
+
def query_you(question: str) -> str:
    """Ask the You.com GPT-4 backend *question* and return the answer text.

    Any failure is turned into a human-readable error string rather than
    propagating the exception to the caller.
    """
    try:
        # You.com completions expose the answer on the `.text` attribute.
        return you.Completion.create(prompt=question).text
    except Exception as error:
        # Surface the failure to the caller as plain text.
        return f'An error occurred: {error}. Please make sure you are using a valid cloudflare clearance token and user agent.'
+
# Maps the human-readable provider name (shown in the UI) to the function
# that queries it. `query()` below uses this both for direct dispatch and
# for its random-fallback path.
avail_query_methods = {
    "Forefront": query_forefront,
    "Poe": query_quora,
    "Theb": query_theb,
    "You": query_you,
    # Providers below are not currently wired up:
    # "Writesonic": query_writesonic,
    # "T3nsor": query_t3nsor,
    # "Phind": query_phind,
    # "Ora": query_ora,
}
+
def query(user_input: str, selected_method: str = "Random") -> str:
    """Send *user_input* to a provider and return the answer text.

    Args:
        user_input: The question to ask.
        selected_method: A key of ``avail_query_methods`` to pin a specific
            provider, or "Random" (default, also the fallback for unknown
            values) to try providers in random order until one succeeds.

    Returns:
        The provider's answer, or an apologetic error string when the chosen
        provider (or, in random mode, every provider) fails.
    """
    # Direct dispatch when a specific, known provider was requested.
    if selected_method != "Random" and selected_method in avail_query_methods:
        try:
            return avail_query_methods[selected_method](user_input)
        except Exception as e:
            print(f"Error with {selected_method}: {e}")
            return "😵 Sorry, some error occurred please try again."

    # Random mode: try each provider at most once, in shuffled order,
    # stopping at the first success. Shuffling `.items()` up front keeps the
    # provider name paired with its function, removing the original O(n)
    # reverse lookup and list mutation on every failure.
    candidates = list(avail_query_methods.items())
    random.shuffle(candidates)
    for name, method in candidates:
        try:
            return method(user_input)
        except Exception as e:
            print(f"Error with {name}: {e}")

    # Every provider failed.
    return "😵 Sorry, some error occurred please try again."


__all__ = ['query', 'avail_query_methods']
diff --git a/gui/streamlit_chat_app.py b/gui/streamlit_chat_app.py
new file mode 100644
index 00000000..f64d43f0
--- /dev/null
+++ b/gui/streamlit_chat_app.py
@@ -0,0 +1,96 @@
import os
import sys

# Make the repository root importable so `openai_rev` (one directory up from
# this file) resolves when the app is launched from inside gui/.
sys.path.append(os.path.join(os.path.dirname(__file__), os.path.pardir))

import streamlit as st
from streamlit_chat import message
from query_methods import query, avail_query_methods
import pickle
import openai_rev

# Pickled conversation history, resolved relative to the current working
# directory at launch time.
conversations_file = "conversations.pkl"
+
def load_conversations():
    """Load the saved conversation list from ``conversations_file``.

    Returns:
        The unpickled list of conversation dicts, or an empty list when no
        history exists yet — or when the file is empty/corrupted (e.g. the
        app was killed mid-write), instead of crashing the app at startup.
    """
    try:
        with open(conversations_file, "rb") as f:
            return pickle.load(f)
    except FileNotFoundError:
        # First run: nothing saved yet.
        return []
    except (EOFError, pickle.UnpicklingError):
        # Truncated or corrupt history file — start fresh rather than crash.
        return []
+
def save_conversations(conversations, current_conversation):
    """Insert or refresh *current_conversation* in *conversations*, then
    pickle the whole list to ``conversations_file``.

    An entry equal to *current_conversation* is overwritten in place;
    otherwise the conversation is appended as a new entry.
    """
    for index, existing in enumerate(conversations):
        if existing == current_conversation:
            conversations[index] = current_conversation
            break
    else:
        # No matching entry found — this is a new conversation.
        conversations.append(current_conversation)

    with open(conversations_file, "wb") as f:
        pickle.dump(conversations, f)
+
st.header("Chat Placeholder")

# --- Session-state bootstrap ------------------------------------------------
# Streamlit reruns this script top-to-bottom on every interaction, so all
# persistent state must live in st.session_state.

if 'conversations' not in st.session_state:
    st.session_state['conversations'] = load_conversations()

if 'input_text' not in st.session_state:
    st.session_state['input_text'] = ''

if 'selected_conversation' not in st.session_state:
    st.session_state['selected_conversation'] = None

if 'input_field_key' not in st.session_state:
    st.session_state['input_field_key'] = 0

if 'query_method' not in st.session_state:
    # NOTE(review): initialised to the `query` *function*, while the sidebar
    # selectbox below stores a provider-name string — confirm which type
    # query(user_input, selected_method) is actually meant to receive. Either
    # way, an unrecognised value falls through to query()'s random path.
    st.session_state['query_method'] = query

# Initialize new conversation
if 'current_conversation' not in st.session_state or st.session_state['current_conversation'] is None:
    st.session_state['current_conversation'] = {'user_inputs': [], 'generated_responses': []}


# The text input's key changes with the number of stored inputs, so a fresh
# (empty) widget instance appears after every submission.
input_placeholder = st.empty()
user_input = input_placeholder.text_input('You:', key=f'input_text_{len(st.session_state["current_conversation"]["user_inputs"])}')
submit_button = st.button("Submit")

if user_input or submit_button:
    # Dispatch the question; st.session_state['query_method'] selects the API.
    output = query(user_input, st.session_state['query_method'])

    st.session_state.current_conversation['user_inputs'].append(user_input)
    st.session_state.current_conversation['generated_responses'].append(output)
    save_conversations(st.session_state.conversations, st.session_state.current_conversation)
    user_input = input_placeholder.text_input('You:', value='', key=f'input_text_{len(st.session_state["current_conversation"]["user_inputs"])}') # Clear the input field


# Add a button to create a new conversation
if st.sidebar.button("New Conversation"):
    st.session_state['selected_conversation'] = None
    st.session_state['current_conversation'] = {'user_inputs': [], 'generated_responses': []}
    st.session_state['input_field_key'] += 1

# NOTE(review): options come from openai_rev.Provider member names, but
# query_methods.avail_query_methods is keyed "Forefront"/"Poe"/"Theb"/"You" —
# verify these sets match, otherwise every selection silently falls back to
# query()'s random-provider path.
st.session_state['query_method'] = st.sidebar.selectbox(
    "Select API:",
    options=openai_rev.Provider.__members__.keys(),
    index=0
)

# Sidebar
st.sidebar.header("Conversation History")

# One button per stored conversation, labelled with its first user input.
for i, conversation in enumerate(st.session_state.conversations):
    if st.sidebar.button(f"Conversation {i + 1}: {conversation['user_inputs'][0]}", key=f"sidebar_btn_{i}"):
        st.session_state['selected_conversation'] = i
        st.session_state['current_conversation'] = st.session_state.conversations[i]

if st.session_state['selected_conversation'] is not None:
    conversation_to_display = st.session_state.conversations[st.session_state['selected_conversation']]
else:
    conversation_to_display = st.session_state.current_conversation

# Render the conversation newest-first: each generated answer is shown above
# the user input that produced it.
if conversation_to_display['generated_responses']:
    for i in range(len(conversation_to_display['generated_responses']) - 1, -1, -1):
        message(conversation_to_display["generated_responses"][i], key=f"display_generated_{i}")
        message(conversation_to_display['user_inputs'][i], is_user=True, key=f"display_user_{i}")