Add G4F_PROXY environment variable

Add regenerate button in gui
This commit is contained in:
Heiner Lohaus 2023-12-10 21:46:11 +01:00
parent c3ccc4e819
commit bf41cfc5d1
6 changed files with 121 additions and 122 deletions

View File

@ -22,7 +22,7 @@ docker pull hlohaus789/g4f
## 📚 Table of Contents
- [🆕 What's New](#-what-s-new)
- [🆕 What's New](#-whats-new)
- [📚 Table of Contents](#-table-of-contents)
- [🛠️ Getting Started](#-getting-started)
+ [Docker container](#docker-container)
@ -253,7 +253,7 @@ for message in response:
##### Using Browser
Some providers using a a browser to bypass the bot protection. They using the selenium webdriver to control the browser. The browser settings and the login data are saved in a custom directory. If the headless mode is enabled, the browser windows are loaded invisibly. For performance reasons, it is recommended to reuse the browser instances and close them yourself at the end:
Some providers use a browser to bypass the bot protection. They use the Selenium webdriver to control the browser. The browser settings and the login data are saved in a custom directory. If the headless mode is enabled, the browser windows are loaded invisibly. For performance reasons, it is recommended to reuse the browser instances and close them yourself at the end:
```python
import g4f
@ -335,6 +335,12 @@ response = g4f.ChatCompletion.create(
print(f"Result:", response)
```
You can also set a proxy globally via an environment variable:
```sh
export G4F_PROXY="http://host:port"
```
### Interference openai-proxy API (Use with openai python package)
#### Run interference API from PyPi package

View File

@ -96,6 +96,10 @@ class ChatCompletion:
if auth:
kwargs['auth'] = auth
proxy = os.environ.get("G4F_PROXY")
if proxy and "proxy" not in kwargs:
kwargs['proxy'] = proxy
result = provider.create_completion(model.name, messages, stream, **kwargs)
return result if stream else ''.join(result)
@ -112,7 +116,7 @@ class ChatCompletion:
if stream:
if isinstance(provider, type) and issubclass(provider, AsyncGeneratorProvider):
return await provider.create_async_generator(model.name, messages, **kwargs)
raise ValueError(f'{provider.__name__} does not support "stream" argument')
raise ValueError(f'{provider.__name__} does not support "stream" argument in "create_async"')
return await provider.create_async(model.name, messages, **kwargs)

View File

@ -301,6 +301,9 @@ body {
font-size: 15px;
line-height: 1.3;
}
.message .content pre {
white-space: pre-wrap;
}
.message .user i {
position: absolute;
@ -338,19 +341,15 @@ body {
font-size: 14px;
}
.stop_generating {
.stop_generating, .regenerate {
position: absolute;
bottom: 118px;
/* left: 10px;
bottom: 125px;
right: 8px; */
bottom: 158px;
left: 50%;
transform: translateX(-50%);
z-index: 1000000;
}
.stop_generating button {
.stop_generating button, .regenerate button{
backdrop-filter: blur(20px);
-webkit-backdrop-filter: blur(20px);
background-color: var(--blur-bg);
@ -380,11 +379,8 @@ body {
}
}
.stop_generating-hiding button {
.stop_generating-hidden #cancelButton, .regenerate-hidden #regenerateButton {
animation: hide_popup 0.4s;
}
.stop_generating-hidden button {
display: none;
}

View File

@ -101,6 +101,12 @@
<i class="fa-regular fa-stop"></i>
</button>
</div>
<div class="regenerate regenerate-hidden">
<button id="regenerateButton">
<span>Regenerate</span>
<i class="fa-solid fa-rotate"></i>
</button>
</div>
<div class="box" id="messages">
</div>
<div class="user-input">

View File

@ -5,15 +5,12 @@ const message_input = document.getElementById(`message-input`);
const box_conversations = document.querySelector(`.top`);
const spinner = box_conversations.querySelector(".spinner");
const stop_generating = document.querySelector(`.stop_generating`);
const regenerate = document.querySelector(`.regenerate`);
const send_button = document.querySelector(`#send-button`);
let prompt_lock = false;
hljs.addPlugin(new CopyButtonPlugin());
const format = (text) => {
return text.replace(/(?:\r\n|\r|\n)/g, "<br>");
};
message_input.addEventListener("blur", () => {
window.scrollTo(0, 0);
});
@ -22,6 +19,10 @@ message_input.addEventListener("focus", () => {
document.documentElement.scrollTop = document.documentElement.scrollHeight;
});
const markdown_render = (content) => {
return markdown.render(content).replace("<a href=", '<a target="_blank" href=').replace('<code>', '<code class="language-plaintext">')
}
const delete_conversations = async () => {
localStorage.clear();
await new_conversation();
@ -30,38 +31,25 @@ const delete_conversations = async () => {
const handle_ask = async () => {
message_input.style.height = `80px`;
message_input.focus();
let txtMsgs = [];
const divTags = document.getElementsByClassName("message");
for(let i=0;i<divTags.length;i++){
if(!divTags[i].children[1].classList.contains("welcome-message")){
if(divTags[i].children[0].className == "assistant"){
const msg = {
role: "assistant",
content: divTags[i].children[1].textContent+" "
};
txtMsgs.push(msg);
}else{
const msg = {
role: "user",
content: divTags[i].children[1].textContent+" "
};
txtMsgs.push(msg);
}
}
}
window.scrollTo(0, 0);
let message = message_input.value;
const msg = {
role: "user",
content: message
};
txtMsgs.push(msg);
message = message_input.value
if (message.length > 0) {
message_input.value = ``;
await ask_gpt(txtMsgs);
message_input.value = '';
await add_conversation(window.conversation_id, message);
await add_message(window.conversation_id, "user", message);
window.token = message_id();
message_box.innerHTML += `
<div class="message">
<div class="user">
${user_image}
<i class="fa-regular fa-phone-arrow-up-right"></i>
</div>
<div class="content" id="user_${token}">
${markdown_render(message)}
</div>
</div>
`;
await ask_gpt();
}
};
@ -74,13 +62,10 @@ const remove_cancel_button = async () => {
}, 300);
};
const ask_gpt = async (txtMsgs) => {
const ask_gpt = async () => {
regenerate.classList.add(`regenerate-hidden`);
messages = await get_messages(window.conversation_id);
try {
message_input.value = ``;
message_input.innerHTML = ``;
message_input.innerText = ``;
add_conversation(window.conversation_id, txtMsgs[0].content);
window.scrollTo(0, 0);
window.controller = new AbortController();
@ -89,22 +74,9 @@ const ask_gpt = async (txtMsgs) => {
model = document.getElementById("model");
prompt_lock = true;
window.text = ``;
window.token = message_id();
stop_generating.classList.remove(`stop_generating-hidden`);
message_box.innerHTML += `
<div class="message">
<div class="user">
${user_image}
<i class="fa-regular fa-phone-arrow-up-right"></i>
</div>
<div class="content" id="user_${token}">
${format(txtMsgs[txtMsgs.length-1].content)}
</div>
</div>
`;
message_box.scrollTop = message_box.scrollHeight;
window.scrollTo(0, 0);
await new Promise((r) => setTimeout(r, 500));
@ -138,14 +110,13 @@ const ask_gpt = async (txtMsgs) => {
action: `_ask`,
model: model.options[model.selectedIndex].value,
jailbreak: jailbreak.options[jailbreak.selectedIndex].value,
internet_access: document.getElementById(`switch`).checked,
provider: provider.options[provider.selectedIndex].value,
meta: {
id: window.token,
content: {
conversation: await get_conversation(window.conversation_id),
internet_access: document.getElementById(`switch`).checked,
content_type: `text`,
parts: txtMsgs,
parts: messages,
},
},
}),
@ -161,7 +132,7 @@ const ask_gpt = async (txtMsgs) => {
text += chunk;
document.getElementById(`gpt_${window.token}`).innerHTML = markdown.render(text).replace("<a href=", '<a target="_blank" href=');
document.getElementById(`gpt_${window.token}`).innerHTML = markdown_render(text);
document.querySelectorAll(`code`).forEach((el) => {
hljs.highlightElement(el);
});
@ -171,10 +142,9 @@ const ask_gpt = async (txtMsgs) => {
}
if (text.includes(`G4F_ERROR`)) {
document.getElementById(`gpt_${window.token}`).innerHTML = "An error occured, please try again, if the problem persists, please reload / refresh cache or use a differnet browser";
document.getElementById(`gpt_${window.token}`).innerHTML = "An error occured, please try again, if the problem persists, please use a other model or provider";
}
add_message(window.conversation_id, "user", txtMsgs[txtMsgs.length-1].content);
add_message(window.conversation_id, "assistant", text);
message_box.scrollTop = message_box.scrollHeight;
@ -185,7 +155,6 @@ const ask_gpt = async (txtMsgs) => {
window.scrollTo(0, 0);
} catch (e) {
add_message(window.conversation_id, "user", txtMsgs[txtMsgs.length-1].content);
message_box.scrollTop = message_box.scrollHeight;
await remove_cancel_button();
@ -210,6 +179,7 @@ const ask_gpt = async (txtMsgs) => {
window.scrollTo(0, 0);
}
regenerate.classList.remove(`regenerate-hidden`);
};
const clear_conversations = async () => {
@ -280,7 +250,6 @@ const set_conversation = async (conversation_id) => {
};
const new_conversation = async () => {
history.pushState({}, null, `/chat/`);
window.conversation_id = uuid();
@ -291,12 +260,9 @@ const new_conversation = async () => {
};
const load_conversation = async (conversation_id) => {
let conversation = await JSON.parse(
localStorage.getItem(`conversation:${conversation_id}`)
);
console.log(conversation, conversation_id);
let messages = await get_messages(conversation_id);
for (item of conversation.items) {
for (item of messages) {
message_box.innerHTML += `
<div class="message">
<div class=${item.role == "assistant" ? "assistant" : "user"}>
@ -308,7 +274,7 @@ const load_conversation = async (conversation_id) => {
</div>
<div class="content">
${item.role == "assistant"
? markdown.render(item.content).replace("<a href=", '<a target="_blank" href=')
? markdown_render(item.content)
: item.content
}
</div>
@ -331,6 +297,11 @@ const get_conversation = async (conversation_id) => {
let conversation = await JSON.parse(
localStorage.getItem(`conversation:${conversation_id}`)
);
return conversation;
};
const get_messages = async (conversation_id) => {
let conversation = await get_conversation(conversation_id);
return conversation.items;
};
@ -351,21 +322,32 @@ const add_conversation = async (conversation_id, content) => {
})
);
}
history.pushState({}, null, `/chat/${conversation_id}`);
};
const remove_last_message = async (conversation_id) => {
const conversation = await get_conversation(conversation_id)
conversation.items.pop();
localStorage.setItem(
`conversation:${conversation_id}`,
JSON.stringify(conversation)
);
};
const add_message = async (conversation_id, role, content) => {
before_adding = JSON.parse(
localStorage.getItem(`conversation:${conversation_id}`)
);
const conversation = await get_conversation(conversation_id);
before_adding.items.push({
conversation.items.push({
role: role,
content: content,
});
localStorage.setItem(
`conversation:${conversation_id}`,
JSON.stringify(before_adding)
JSON.stringify(conversation)
);
};
@ -404,6 +386,12 @@ document.getElementById(`cancelButton`).addEventListener(`click`, async () => {
console.log(`aborted ${window.conversation_id}`);
});
document.getElementById(`regenerateButton`).addEventListener(`click`, async () => {
await remove_last_message(window.conversation_id);
window.token = message_id();
await ask_gpt();
});
const uuid = () => {
return `xxxxxxxx-xxxx-4xxx-yxxx-${Date.now().toString(16)}`.replace(
/[xy]/g,
@ -485,17 +473,16 @@ const say_hello = async () => {
${gpt_image}
<i class="fa-regular fa-phone-arrow-down-left"></i>
</div>
<div class="content welcome-message">
<div class="content">
<p class=" welcome-message"></p>
</div>
</div>
`;
content = ``
to_modify = document.querySelector(`.welcome-message`);
for (token of tokens) {
await new Promise(resolve => setTimeout(resolve, (Math.random() * (100 - 200) + 100)))
content += token;
to_modify.innerHTML = markdown.render(content);
to_modify.textContent += token;
}
}
@ -542,14 +529,12 @@ window.onload = async () => {
load_conversations(20, 0);
}, 1);
if (!window.location.href.endsWith(`#`)) {
if (/\/chat\/.+/.test(window.location.href)) {
await load_conversation(window.conversation_id);
}
if (/\/chat\/.+/.test(window.location.href)) {
await load_conversation(window.conversation_id);
} else {
await say_hello()
}
await say_hello()
message_input.addEventListener(`keydown`, async (evt) => {
if (prompt_lock) return;
if (evt.keyCode === 13 && !evt.shiftKey) {

View File

@ -1,14 +1,15 @@
import g4f
from g4f.Provider import __providers__
from flask import request
import json
from flask import request, Flask
from .internet import get_search_message
g4f.debug.logging = True
class Backend_Api:
def __init__(self, app) -> None:
self.app = app
def __init__(self, app: Flask) -> None:
self.app: Flask = app
self.routes = {
'/backend-api/v2/models': {
'function': self.models,
@ -61,19 +62,18 @@ class Backend_Api:
}
def _conversation(self):
try:
#jailbreak = request.json['jailbreak']
web_search = request.json['meta']['content']['internet_access']
messages = request.json['meta']['content']['parts']
if web_search:
messages[-1]["content"] = get_search_message(messages[-1]["content"])
model = request.json.get('model')
model = model if model else g4f.models.default
provider = request.json.get('provider').replace('g4f.Provider.', '')
provider = provider if provider and provider != "Auto" else None
if provider != None:
provider = g4f.Provider.ProviderUtils.convert.get(provider)
#jailbreak = request.json['jailbreak']
messages = request.json['meta']['content']['parts']
if request.json['internet_access']:
messages[-1]["content"] = get_search_message(messages[-1]["content"])
model = request.json.get('model')
model = model if model else g4f.models.default
provider = request.json.get('provider').replace('g4f.Provider.', '')
provider = provider if provider and provider != "Auto" else None
if provider != None:
provider = g4f.Provider.ProviderUtils.convert.get(provider)
def try_response():
response = g4f.ChatCompletion.create(
model=model,
provider=provider,
@ -81,13 +81,15 @@ class Backend_Api:
stream=True,
ignore_stream_and_auth=True
)
try:
yield from response
except Exception as e:
print(e)
yield json.dumps({
'code' : 'G4F_ERROR',
'_action': '_ask',
'success': False,
'error' : f'an error occurred {str(e)}'
})
return self.app.response_class(response, mimetype='text/event-stream')
except Exception as e:
print(e)
return {
'code' : 'G4F_ERROR',
'_action': '_ask',
'success': False,
'error' : f'an error occurred {str(e)}'}, 400
return self.app.response_class(try_response(), mimetype='text/event-stream')