From 03dc8532f9c83edcec0214f83445028f4766cf6f Mon Sep 17 00:00:00 2001
From: unknown
Date: Fri, 27 Oct 2023 23:35:10 +0300
Subject: [PATCH] added context

---
 g4f/gui/client/css/style.css | 33 +++++++++++++++++++++++
 g4f/gui/client/js/chat.v1.js | 52 +++++++++++++++++++++++++-----------
 g4f/gui/server/backend.py    | 48 ++++++++++-----------------------
 3 files changed, 83 insertions(+), 50 deletions(-)

diff --git a/g4f/gui/client/css/style.css b/g4f/gui/client/css/style.css
index 59efeda0..c494de3a 100644
--- a/g4f/gui/client/css/style.css
+++ b/g4f/gui/client/css/style.css
@@ -260,6 +260,32 @@ body {
     z-index: 10000;
 }
 
+.message .assistant{
+    max-width: 48px;
+    max-height: 48px;
+    flex-shrink: 0;
+}
+
+.message .assistant img {
+    width: 100%;
+    height: 100%;
+    object-fit: cover;
+    border-radius: 8px;
+    outline: 1px solid var(--blur-border);
+}
+
+.message .assistant:after {
+    content: "63";
+    position: absolute;
+    bottom: 0;
+    right: 0;
+    height: 60%;
+    width: 60%;
+    background: var(--colour-3);
+    filter: blur(10px) opacity(0.5);
+    z-index: 10000;
+}
+
 .message .content {
     display: flex;
     flex-direction: column;
@@ -280,6 +306,13 @@ body {
     z-index: 1000;
 }
 
+.message .assistant i {
+    position: absolute;
+    bottom: -6px;
+    right: -6px;
+    z-index: 1000;
+}
+
 .new_convo {
     padding: 8px 12px;
     display: flex;
diff --git a/g4f/gui/client/js/chat.v1.js b/g4f/gui/client/js/chat.v1.js
index 233347ff..5b7a0bf0 100644
--- a/g4f/gui/client/js/chat.v1.js
+++ b/g4f/gui/client/js/chat.v1.js
@@ -31,12 +31,37 @@ const handle_ask = async () => {
     message_input.style.height = `80px`;
     message_input.focus();
 
+    let txtMsgs = [];
+    const divTags = document.getElementsByClassName("message");
+    for(let i=0;i
 0) {
         message_input.value = ``;
-        await ask_gpt(message);
+        await ask_gpt(txtMsgs);
     }
 };
 
@@ -49,13 +74,13 @@ const remove_cancel_button = async () => {
     }, 300);
 };
 
-const ask_gpt = async (message) => {
+const ask_gpt = async (txtMsgs) => {
     try {
         message_input.value = ``;
         message_input.innerHTML = ``;
         message_input.innerText = ``;
 
-        add_conversation(window.conversation_id, message);
+        add_conversation(window.conversation_id, txtMsgs[0].content);
         window.scrollTo(0, 0);
         window.controller = new AbortController();
 
@@ -75,7 +100,7 @@ const ask_gpt = async (message) => {
-                    ${format(message)}
+                    ${format(txtMsgs[txtMsgs.length-1].content)}
         `;
@@ -87,7 +112,7 @@ const ask_gpt = async (message) => {
         message_box.innerHTML += `
-
+
                     ${gpt_image}
@@ -120,12 +145,7 @@ const ask_gpt = async (message) => {
                     conversation: await get_conversation(window.conversation_id),
                     internet_access: document.getElementById(`switch`).checked,
                     content_type: `text`,
-                    parts: [
-                        {
-                            content: message,
-                            role: `user`,
-                        },
-                    ],
+                    parts: txtMsgs,
                 },
             },
         }),
@@ -154,7 +174,7 @@ const ask_gpt = async (message) => {
             document.getElementById(`gpt_${window.token}`).innerHTML = "An error occured, please try again, if the problem persists, please reload / refresh cache or use a differnet browser";
         }
 
-        add_message(window.conversation_id, "user", message);
+        add_message(window.conversation_id, "user", txtMsgs[txtMsgs.length-1].content);
         add_message(window.conversation_id, "assistant", text);
 
         message_box.scrollTop = message_box.scrollHeight;
@@ -165,7 +185,7 @@ const ask_gpt = async (message) => {
 
         window.scrollTo(0, 0);
     } catch (e) {
-        add_message(window.conversation_id, "user", message);
+        add_message(window.conversation_id, "user", txtMsgs[txtMsgs.length-1].content);
 
         message_box.scrollTop = message_box.scrollHeight;
         await remove_cancel_button();
@@ -279,7 +299,7 @@ const load_conversation = async (conversation_id) => {
     for (item of conversation.items) {
         message_box.innerHTML += `
-
+
                     ${item.role == "assistant" ? gpt_image : user_image}
                     ${item.role == "assistant" ? ``
@@ -316,7 +336,7 @@ const get_conversation = async (conversation_id) => {
 
 const add_conversation = async (conversation_id, content) => {
     if (content.length > 17) {
-        title = content.substring(0, 17) + '..'
+        title = content.substring(0, 17) + '...'
     } else {
         title = content + ' '.repeat(19 - content.length)
     }
@@ -461,7 +481,7 @@ const say_hello = async () => {
     message_box.innerHTML += `
-
+
                 ${gpt_image}
diff --git a/g4f/gui/server/backend.py b/g4f/gui/server/backend.py
index cf6d6358..304b9fc8 100644
--- a/g4f/gui/server/backend.py
+++ b/g4f/gui/server/backend.py
@@ -1,5 +1,4 @@
 import g4f
-import json
 
 from flask import request
 from .internet import search
@@ -44,45 +43,26 @@ class Backend_Api:
         }
 
     def _conversation(self):
-        config = None
-        proxy = None
         try:
-            config = json.load(open("config.json","r",encoding="utf-8"))
-            proxy = config["proxy"]
-
-        except Exception:
-            pass
-
-        try:
-            jailbreak = request.json['jailbreak']
-            internet_access = request.json['meta']['content']['internet_access']
-            conversation = request.json['meta']['content']['conversation']
-            prompt = request.json['meta']['content']['parts'][0]
+            #jailbreak = request.json['jailbreak']
+            #internet_access = request.json['meta']['content']['internet_access']
+            #conversation = request.json['meta']['content']['conversation']
+            prompt = request.json['meta']['content']['parts']
             model = request.json['model']
             provider = request.json.get('provider').split('g4f.Provider.')[1]
 
-            messages = special_instructions[jailbreak] + conversation + search(internet_access, prompt) + [prompt]
+            messages = prompt
+            print(messages)
 
             def stream():
-                if proxy != None:
-                    yield from g4f.ChatCompletion.create(
-                        model=model,
-                        provider=get_provider(provider),
-                        messages=messages,
-                        stream=True,
-                        proxy=proxy
-                    ) if provider else g4f.ChatCompletion.create(
-                        model=model, messages=messages, stream=True, proxy=proxy
-                    )
-                else:
-                    yield from g4f.ChatCompletion.create(
-                        model=model,
-                        provider=get_provider(provider),
-                        messages=messages,
-                        stream=True,
-                    ) if provider else g4f.ChatCompletion.create(
-                        model=model, messages=messages, stream=True
-                    )
+                yield from g4f.ChatCompletion.create(
+                    model=g4f.models.gpt_35_long,
+                    provider=get_provider(provider),
+                    messages=messages,
+                    stream=True,
+                ) if provider else g4f.ChatCompletion.create(
+                    model=model, messages=messages, stream=True
+                )
 
             return self.app.response_class(stream(), mimetype='text/event-stream')