Diffstat (limited to 'g4f/.v1/gui')
-rw-r--r--  g4f/.v1/gui/README.md                        |  78
-rw-r--r--  g4f/.v1/gui/__init__.py                      |   0
-rw-r--r--  g4f/.v1/gui/image1.png                       | bin 0 -> 172284 bytes
-rw-r--r--  g4f/.v1/gui/image2.png                       | bin 0 -> 343564 bytes
-rw-r--r--  g4f/.v1/gui/pywebio-gui/README.md            |  24
-rw-r--r--  g4f/.v1/gui/pywebio-gui/pywebio-usesless.py  |  59
-rw-r--r--  g4f/.v1/gui/query_methods.py                 | 100
-rw-r--r--  g4f/.v1/gui/streamlit_app.py                 |  52
-rw-r--r--  g4f/.v1/gui/streamlit_chat_app.py            | 156
9 files changed, 469 insertions(+), 0 deletions(-)
diff --git a/g4f/.v1/gui/README.md b/g4f/.v1/gui/README.md
new file mode 100644
index 00000000..c0406216
--- /dev/null
+++ b/g4f/.v1/gui/README.md
@@ -0,0 +1,78 @@
+# gpt4free gui
+
+This code provides a Graphical User Interface (GUI) for gpt4free. Users can ask questions and get answers from GPT-4 APIs, choosing among multiple API implementations. The project contains two different Streamlit applications: `streamlit_app.py` and `streamlit_chat_app.py`.
+
+In addition, a new GUI script implemented with PyWebIO has been added; it can be found in the pywebio-gui folder. If you run into errors with the Streamlit version, try the PyWebIO version instead.
+
+Installation
+------------
+
+1. Clone the repository.
+2. Install the required dependencies with: `pip install -r requirements.txt`.
+3. To use `streamlit_chat_app.py`, note that it depends on a pull request (PR #24) from the https://github.com/AI-Yash/st-chat/ repository, which may change in the future. The current dependency archive lives at https://github.com/AI-Yash/st-chat/archive/refs/pull/24/head.zip and can be installed directly, as shown below.
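+
+For step 3, a minimal sketch of installing that pinned dependency straight from the archive URL above (assuming pip is allowed to fetch remote archives):
+
+```bash
+pip install https://github.com/AI-Yash/st-chat/archive/refs/pull/24/head.zip
+```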
+
+Analytics Disclaimer
+-----
+The Streamlit browser app collects extensive analytics even when running locally: an event for every page load and form submission (including metadata on queries, such as their length), plus browser and client information including host IPs. All of this is transmitted to a third-party analytics service, Segment.com.
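+
+If you want to limit this, Streamlit's own usage statistics can be turned off at launch. A sketch, assuming a current Streamlit release (the `browser.gatherUsageStats` option is documented in Streamlit's configuration docs and may vary by version):
+
+```bash
+streamlit run gui/streamlit_app.py --browser.gatherUsageStats=false
+```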
+
+Usage
+-----
+
+Choose one of the Streamlit applications to run:
+
+### streamlit\_app.py
+
+This application provides a simple interface for asking GPT-4 questions and receiving answers.
+
+To run the application:
+
+```bash
+streamlit run gui/streamlit_app.py
+```
+<br>
+
+<img width="724" alt="image" src="https://user-images.githubusercontent.com/98614666/234232449-0d5cd092-a29d-4759-8197-e00ba712cb1a.png">
+
+<br>
+<br>
+
+preview:
+
+<img width="1125" alt="image" src="https://user-images.githubusercontent.com/98614666/234232398-09e9d3c5-08e6-4b8a-b4f2-0666e9790c7d.png">
+
+
+### streamlit\_chat\_app.py
+
+This application provides a chat-like interface for asking GPT-4 questions and receiving answers. It supports multiple query methods, and users can select the desired API for their queries. The application also maintains a conversation history.
+
+To run the application:
+
+```bash
+streamlit run gui/streamlit_chat_app.py
+```
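+
+Under the hood, prompts are dispatched through `query_methods.query`, which calls the selected backend or falls back to trying random ones until one succeeds. A minimal sketch of calling it directly (assuming the `gpt4free` package and its providers are importable):
+
+```python
+from query_methods import query, avail_query_methods
+
+print(list(avail_query_methods))  # ['Forefront', 'Poe', 'Theb', 'You']
+answer = query("Explain quantum computing in 50 words", selected_method="You")
+print(answer)
+```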
+
+<br>
+
+<img width="724" alt="image" src="image1.png">
+
+<br>
+<br>
+
+preview:
+
+<img width="1125" alt="image" src="image2.png">
+
+Contributing
+------------
+
+Feel free to submit pull requests, report bugs, or request new features by opening issues on the GitHub repository.
+
+Bug
+----
+There is a known bug in `streamlit_chat_app.py` that has not been pinpointed yet; it is probably something simple, but there has not been time to track it down. Whenever you open a new conversation or access an old one, prompt-answering only starts after the second submission to the text input. Other than that, everything seems to work as expected.
+
+License
+-------
+
+This project is licensed under the MIT License.
diff --git a/g4f/.v1/gui/__init__.py b/g4f/.v1/gui/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/g4f/.v1/gui/__init__.py
diff --git a/g4f/.v1/gui/image1.png b/g4f/.v1/gui/image1.png
new file mode 100644
index 00000000..fb84ae7f
--- /dev/null
+++ b/g4f/.v1/gui/image1.png
Binary files differ
diff --git a/g4f/.v1/gui/image2.png b/g4f/.v1/gui/image2.png
new file mode 100644
index 00000000..c5fc91c9
--- /dev/null
+++ b/g4f/.v1/gui/image2.png
Binary files differ
diff --git a/g4f/.v1/gui/pywebio-gui/README.md b/g4f/.v1/gui/pywebio-gui/README.md
new file mode 100644
index 00000000..2b99c075
--- /dev/null
+++ b/g4f/.v1/gui/pywebio-gui/README.md
@@ -0,0 +1,24 @@
+# GUI with PyWebIO
+Simple, fast, and with fewer errors. It only requires:
+```bash
+pip install gpt4free
+pip install pywebio
+```
+Double-clicking `pywebio-usesless.py` will run it.
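+
+Alternatively, start it from a terminal (assuming you are inside the pywebio-gui folder):
+
+```bash
+python pywebio-usesless.py
+```
+
+The script starts a PyWebIO server on port 8099 and opens the chat page in your browser.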
+
+PS: Currently, only 'usesless' is implemented, and the GUI is expected to be updated infrequently, with a focus on stability.
+
+↓ Below is the zh-Hans-CN introduction, translated to English:
+
+# A minimal GUI implemented with PyWebIO
+Simple, fast, and with few errors.
+It only requires:
+```bash
+pip install gpt4free
+pip install pywebio
+```
+
+Double-click pywebio-usesless.py to run it.
+
+PS: Currently only 'usesless' is implemented; this GUI will likely be updated infrequently, with stability as the goal.
diff --git a/g4f/.v1/gui/pywebio-gui/pywebio-usesless.py b/g4f/.v1/gui/pywebio-gui/pywebio-usesless.py
new file mode 100644
index 00000000..177fa7c8
--- /dev/null
+++ b/g4f/.v1/gui/pywebio-gui/pywebio-usesless.py
@@ -0,0 +1,59 @@
+from gpt4free import usesless
+import time
+from pywebio import start_server, config
+from pywebio.input import *
+from pywebio.output import *
+from pywebio.session import local
+
+message_id = ""
+
+
+def status():
+    # Send a quick "hello" request to check that the API is reachable before chatting.
+    try:
+        req = usesless.Completion.create(prompt="hello", parentMessageId=message_id)
+        print(f"Answer: {req['text']}")
+        put_success(f"Answer: {req['text']}", scope="body")
+    except Exception:
+        put_error("Program Error", scope="body")
+
+
+def ask(prompt):
+    # Continue the thread by passing the previous message id back to the API.
+    req = usesless.Completion.create(prompt=prompt, parentMessageId=local.message_id)
+    rp = req['text']
+    local.message_id = req["id"]
+    print("AI:\n" + rp)
+    local.conversation.extend([
+        {"role": "user", "content": prompt},
+        {"role": "assistant", "content": rp},
+    ])
+    print(local.conversation)
+    return rp
+
+
+def msg():
+    while True:
+        text = input_group("You:", [textarea('You:', name='text', rows=3, placeholder='Enter your question')])
+        if not text:
+            break
+        if not text["text"]:
+            continue
+        time.sleep(0.5)
+        put_code("You:" + text["text"], scope="body")
+        print("Question:" + text["text"])
+        # Show a loading spinner in the 'foot' scope while waiting for the answer.
+        with use_scope('foot'):
+            put_loading(color="info")
+            rp = ask(text["text"])
+        clear(scope="foot")
+        time.sleep(0.5)
+        put_markdown("Bot:\n" + rp, scope="body")
+        time.sleep(0.7)
+
+
+@config(title="AIchat", theme="dark")
+def main():
+    put_scope("heads")
+    with use_scope('heads'):
+        put_html("<h1><center>AI Chat</center></h1>")
+    put_scope("body")
+    put_scope("foot")
+    status()
+    local.conversation = []
+    local.message_id = ""
+    msg()
+
+
+print("Click link to chat page")
+start_server(main, port=8099, allowed_origins="*", auto_open_webbrowser=True, debug=True)
diff --git a/g4f/.v1/gui/query_methods.py b/g4f/.v1/gui/query_methods.py
new file mode 100644
index 00000000..2d6adacd
--- /dev/null
+++ b/g4f/.v1/gui/query_methods.py
@@ -0,0 +1,100 @@
+import os
+import sys
+from typing import Optional
+
+sys.path.append(os.path.join(os.path.dirname(__file__), os.path.pardir))
+
+from gpt4free import quora, forefront, theb, you
+import random
+
+
+def query_forefront(question: str, proxy: Optional[str] = None) -> str:
+    # create an account
+    token = forefront.Account.create(logging=False, proxy=proxy)
+
+    # get a response to the question
+    try:
+        return forefront.Completion.create(token=token, prompt=question, model='gpt-4', proxy=proxy).text
+    except Exception as e:
+        # Return error message if an exception occurs
+        return (
+            f'An error occurred: {e}. Please make sure you are using a valid cloudflare clearance token and user agent.'
+        )
+
+
+def query_quora(question: str, proxy: Optional[str] = None) -> str:
+    token = quora.Account.create(logging=False, enable_bot_creation=True, proxy=proxy)
+    return quora.Completion.create(model='gpt-4', prompt=question, token=token, proxy=proxy).text
+
+
+def query_theb(question: str, proxy: Optional[str] = None) -> str:
+    # Get an answer from the GPT-4 model via theb
+    try:
+        return ''.join(theb.Completion.create(prompt=question, proxy=proxy))
+
+    except Exception as e:
+        # Return error message if an exception occurs
+        return (
+            f'An error occurred: {e}. Please make sure you are using a valid cloudflare clearance token and user agent.'
+        )
+
+
+def query_you(question: str, proxy: Optional[str] = None) -> str:
+    # Get an answer from the GPT-4 model via you.com
+    try:
+        result = you.Completion.create(prompt=question, proxy=proxy)
+        return result.text
+
+    except Exception as e:
+        # Return error message if an exception occurs
+        return (
+            f'An error occurred: {e}. Please make sure you are using a valid cloudflare clearance token and user agent.'
+        )
+
+
+# Define a dictionary containing all query methods
+avail_query_methods = {
+    "Forefront": query_forefront,
+    "Poe": query_quora,
+    "Theb": query_theb,
+    "You": query_you,
+    # "Writesonic": query_writesonic,
+    # "T3nsor": query_t3nsor,
+    # "Phind": query_phind,
+    # "Ora": query_ora,
+}
+
+
+def query(user_input: str, selected_method: str = "Random", proxy: Optional[str] = None) -> str:
+    # If a specific query method is selected (not "Random") and the method is in the dictionary, try to call it
+    if selected_method != "Random" and selected_method in avail_query_methods:
+        try:
+            return avail_query_methods[selected_method](user_input, proxy=proxy)
+        except Exception as e:
+            print(f"Error with {selected_method}: {e}")
+            return "😵 Sorry, an error occurred. Please try again."
+
+    # Initialize variables for determining success and storing the result
+    success = False
+    result = "😵 Sorry, an error occurred. Please try again."
+    # Create a list of available query methods
+    query_methods_list = list(avail_query_methods.values())
+
+    # Keep trying random methods until one succeeds or all have been tried
+    while not success and query_methods_list:
+        # Choose a random method from the list
+        chosen_query = random.choice(query_methods_list)
+        # Find the name of the chosen method
+        chosen_query_name = [k for k, v in avail_query_methods.items() if v == chosen_query][0]
+        try:
+            # Try to call the chosen method with the user input
+            result = chosen_query(user_input, proxy=proxy)
+            success = True
+        except Exception as e:
+            print(f"Error with {chosen_query_name}: {e}")
+            # Remove the failed method from the list of available methods
+            query_methods_list.remove(chosen_query)
+
+    return result
diff --git a/g4f/.v1/gui/streamlit_app.py b/g4f/.v1/gui/streamlit_app.py
new file mode 100644
index 00000000..2dba0a7b
--- /dev/null
+++ b/g4f/.v1/gui/streamlit_app.py
@@ -0,0 +1,52 @@
+import os
+import sys
+
+sys.path.append(os.path.join(os.path.dirname(__file__), os.path.pardir))
+
+import streamlit as st
+from gpt4free import you
+
+
+def get_answer(question: str) -> str:
+    # Ask the you.com backend and return the answer text
+    try:
+        result = you.Completion.create(prompt=question)
+
+        return result.text
+
+    except Exception as e:
+        # Return error message if an exception occurs
+        return (
+            f'An error occurred: {e}. Please make sure you are using a valid cloudflare clearance token and user agent.'
+        )
+
+
+# Set page configuration and add header
+st.set_page_config(
+    page_title="gpt4freeGUI",
+    initial_sidebar_state="expanded",
+    page_icon="🧠",
+    menu_items={
+        'Get Help': 'https://github.com/xtekky/gpt4free/blob/main/README.md',
+        'Report a bug': "https://github.com/xtekky/gpt4free/issues",
+        'About': "### gptfree GUI",
+    },
+)
+st.header('GPT4free GUI')
+
+# Add text area for user input and button to get answer
+question_text_area = st.text_area('🤖 Ask Any Question :', placeholder='Explain quantum computing in 50 words')
+if st.button('🧠 Think'):
+    answer = get_answer(question_text_area)
+    # Un-escape literal "\n"-style sequences that some backends return
+    escaped = answer.encode('utf-8').decode('unicode-escape')
+    # Display answer
+    st.caption("Answer :")
+    st.markdown(escaped)
+
+# Hide Streamlit footer
+hide_streamlit_style = """
+    <style>
+    footer {visibility: hidden;}
+    </style>
+"""
+st.markdown(hide_streamlit_style, unsafe_allow_html=True)
diff --git a/g4f/.v1/gui/streamlit_chat_app.py b/g4f/.v1/gui/streamlit_chat_app.py
new file mode 100644
index 00000000..af3969e6
--- /dev/null
+++ b/g4f/.v1/gui/streamlit_chat_app.py
@@ -0,0 +1,156 @@
+import atexit
+import Levenshtein
+import os
+import sys
+
+sys.path.append(os.path.join(os.path.dirname(__file__), os.path.pardir))
+
+import streamlit as st
+from streamlit_chat import message
+from query_methods import query, avail_query_methods
+import pickle
+
+conversations_file = "conversations.pkl"
+
+def load_conversations():
+    # Load the pickled conversation history; start fresh if the file is missing or empty.
+    try:
+        with open(conversations_file, "rb") as f:
+            return pickle.load(f)
+    except (FileNotFoundError, EOFError):
+        return []
+
+
+def save_conversations(conversations, current_conversation):
+    # Update the current conversation in place if it already exists, otherwise append it.
+    updated = False
+    for idx, conversation in enumerate(conversations):
+        if conversation == current_conversation:
+            conversations[idx] = current_conversation
+            updated = True
+            break
+    if not updated:
+        conversations.append(current_conversation)
+
+    # Write to a temporary file first, then atomically replace the real one.
+    temp_conversations_file = "temp_" + conversations_file
+    with open(temp_conversations_file, "wb") as f:
+        pickle.dump(conversations, f)
+
+    os.replace(temp_conversations_file, conversations_file)
+
+def delete_conversation(conversations, current_conversation):
+    # Remove the conversation from the list, then persist the change.
+    if current_conversation in conversations:
+        conversations.remove(current_conversation)
+
+    temp_conversations_file = "temp_" + conversations_file
+    with open(temp_conversations_file, "wb") as f:
+        pickle.dump(conversations, f)
+
+    os.replace(temp_conversations_file, conversations_file)
+
+def exit_handler():
+    print("Exiting, saving data...")
+    # Perform cleanup operations here, like saving data or closing open files.
+    save_conversations(st.session_state.conversations, st.session_state.current_conversation)
+
+
+# Register the exit_handler function to be called when the program is closing.
+atexit.register(exit_handler)
+
+st.header("Chat Placeholder")
+
+if 'conversations' not in st.session_state:
+    st.session_state['conversations'] = load_conversations()
+
+if 'input_text' not in st.session_state:
+    st.session_state['input_text'] = ''
+
+if 'selected_conversation' not in st.session_state:
+    st.session_state['selected_conversation'] = None
+
+if 'input_field_key' not in st.session_state:
+    st.session_state['input_field_key'] = 0
+
+if 'query_method' not in st.session_state:
+    # Default to "Random" so query() picks any available backend until the user selects one.
+    st.session_state['query_method'] = "Random"
+
+if 'search_query' not in st.session_state:
+    st.session_state['search_query'] = ''
+
+# Initialize new conversation
+if 'current_conversation' not in st.session_state or st.session_state['current_conversation'] is None:
+    st.session_state['current_conversation'] = {'user_inputs': [], 'generated_responses': []}
+
+input_placeholder = st.empty()
+user_input = input_placeholder.text_input(
+    'You:', value=st.session_state['input_text'], key=f'input_text_{st.session_state["input_field_key"]}'
+)
+submit_button = st.button("Submit")
+
+if (user_input and user_input != st.session_state['input_text']) or submit_button:
+    # Forward the proxy from the sidebar, if one was provided.
+    output = query(user_input, st.session_state['query_method'], proxy=st.session_state.get('proxy') or None)
+
+    # Un-escape literal "\n"-style sequences that some backends return
+    escaped_output = output.encode('utf-8').decode('unicode-escape')
+
+    st.session_state['current_conversation']['user_inputs'].append(user_input)
+    st.session_state.current_conversation['generated_responses'].append(escaped_output)
+    save_conversations(st.session_state.conversations, st.session_state.current_conversation)
+    st.session_state['input_text'] = ''
+    st.session_state['input_field_key'] += 1  # Increment key value for new widget
+    user_input = input_placeholder.text_input(
+        'You:', value=st.session_state['input_text'], key=f'input_text_{st.session_state["input_field_key"]}'
+    )  # Clear the input field
+
+# Add a button to create a new conversation
+if st.sidebar.button("New Conversation"):
+    st.session_state['selected_conversation'] = None
+    st.session_state['current_conversation'] = {'user_inputs': [], 'generated_responses': []}
+    st.session_state['input_field_key'] += 1  # Increment key value for new widget
+    st.session_state['query_method'] = st.sidebar.selectbox("Select API:", options=avail_query_methods, index=0)
+
+# Proxy
+st.session_state['proxy'] = st.sidebar.text_input("Proxy: ")
+
+# Searchbar
+search_query = st.sidebar.text_input("Search Conversations:", value=st.session_state.get('search_query', ''), key='search')
+
+if search_query:
+    # Keep conversations whose first user input contains the query,
+    # ranked by edit distance between the query and that first input.
+    filtered_conversations = [
+        conversation
+        for conversation in st.session_state.conversations
+        if conversation['user_inputs'] and search_query in conversation['user_inputs'][0]
+    ]
+    conversations = sorted(
+        filtered_conversations,
+        key=lambda conversation: Levenshtein.distance(search_query, conversation['user_inputs'][0]),
+    )
+
+    sidebar_header = f"Search Results ({len(conversations)})"
+else:
+    conversations = st.session_state.conversations
+    sidebar_header = "Conversation History"
+
+# Sidebar
+st.sidebar.header(sidebar_header)
+sidebar_col1, sidebar_col2 = st.sidebar.columns([5, 1])
+for idx, conversation in enumerate(conversations):
+    if sidebar_col1.button(f"Conversation {idx + 1}: {conversation['user_inputs'][0]}", key=f"sidebar_btn_{idx}"):
+        st.session_state['selected_conversation'] = idx
+        st.session_state['current_conversation'] = conversation
+    if sidebar_col2.button('🗑️', key=f"sidebar_btn_delete_{idx}"):
+        if st.session_state['selected_conversation'] == idx:
+            st.session_state['selected_conversation'] = None
+            st.session_state['current_conversation'] = {'user_inputs': [], 'generated_responses': []}
+        # Delete from the full history, not just the (possibly filtered) view.
+        delete_conversation(st.session_state.conversations, conversation)
+        st.experimental_rerun()
+if st.session_state['selected_conversation'] is not None:
+    conversation_to_display = conversations[st.session_state['selected_conversation']]
+else:
+    conversation_to_display = st.session_state.current_conversation
+
+if conversation_to_display['generated_responses']:
+    # Render newest exchange first: the response, then the user input that produced it.
+    for i in range(len(conversation_to_display['generated_responses']) - 1, -1, -1):
+        message(conversation_to_display["generated_responses"][i], key=f"display_generated_{i}")
+        message(conversation_to_display['user_inputs'][i], is_user=True, key=f"display_user_{i}")
\ No newline at end of file