From d76747fbf646a971f3c4424d19955651347351b5 Mon Sep 17 00:00:00 2001
From: xiangsx <1984871009@qq.com>
Date: Thu, 4 May 2023 22:16:38 +0800
Subject: [PATCH 1/5] gpt4free typescript version

---
 README.md | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/README.md b/README.md
index 434af30b..95172f98 100644
--- a/README.md
+++ b/README.md
@@ -51,6 +51,13 @@ Just API's from some language model sites.
     Issues
     Pull Requests
+
+    gpt4free-ts
+    Stars
+    Forks
+    Issues
+    Pull Requests
+
     ChatGPT-Clone
     Stars

From a23381833365a0dd217587b956f413c9e0a20715 Mon Sep 17 00:00:00 2001
From: Mishal <91066601+mishalhossin@users.noreply.github.com>
Date: Thu, 4 May 2023 20:25:51 +0600
Subject: [PATCH 2/5] Update __init__.py

---
 gpt4free/theb/__init__.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/gpt4free/theb/__init__.py b/gpt4free/theb/__init__.py
index b162811e..852fe17b 100644
--- a/gpt4free/theb/__init__.py
+++ b/gpt4free/theb/__init__.py
@@ -65,4 +65,4 @@ class Completion:
 
     @staticmethod
     def handle_stream_response(response):
-        Completion.message_queue.put(response.decode())
+        Completion.message_queue.put(response.decode(errors='replace'))
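Note: the `decode(errors='replace')` change above matters because a streamed response chunk can end partway through a multi-byte UTF-8 character, and a strict `decode()` then raises `UnicodeDecodeError`, killing the consumer of `message_queue`. A minimal, dependency-free sketch of the failure mode (the truncated chunk is a contrived stand-in for a real stream boundary):

```python
# A streamed chunk that ends mid-character: 'é' is two bytes in UTF-8,
# so slicing off the last byte simulates an unlucky chunk boundary.
chunk = 'café'.encode('utf-8')[:-1]  # b'caf\xc3'

try:
    chunk.decode()  # strict decoding raises on the incomplete sequence
except UnicodeDecodeError as exc:
    print('strict decode failed:', exc)

# errors='replace' substitutes U+FFFD for the broken byte instead of raising,
# so the queue consumer keeps running.
print(chunk.decode(errors='replace'))  # -> 'caf�'
```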
From c60d33c4a000ee6eae2663d08ea963d3e6295011 Mon Sep 17 00:00:00 2001
From: Hamza
Date: Thu, 4 May 2023 16:05:45 +0100
Subject: [PATCH 3/5] Forefront fixed

---
 .vscode/settings.json          |  4 ++
 gpt4free/forefront/__init__.py | 78 ++++++++++------------------------
 2 files changed, 27 insertions(+), 55 deletions(-)
 create mode 100644 .vscode/settings.json

diff --git a/.vscode/settings.json b/.vscode/settings.json
new file mode 100644
index 00000000..5af1e3ee
--- /dev/null
+++ b/.vscode/settings.json
@@ -0,0 +1,4 @@
+{
+    "editor.tabCompletion": "on",
+    "diffEditor.codeLens": true
+}
\ No newline at end of file
diff --git a/gpt4free/forefront/__init__.py b/gpt4free/forefront/__init__.py
index 23978501..a3912fb4 100644
--- a/gpt4free/forefront/__init__.py
+++ b/gpt4free/forefront/__init__.py
@@ -1,49 +1,26 @@
-import os
-import pickle
 from json import loads
+from xtempmail import Email
 from re import findall
+from faker import Faker
 from time import time, sleep
-from typing import Generator, Optional
 from uuid import uuid4
-
 from fake_useragent import UserAgent
-from pymailtm import MailTm, Message
 from requests import post
 from tls_client import Session
-
 from .typing import ForeFrontResponse
 
 
 class Account:
-    COOKIES_FILE_NAME = 'cookies.pickle'
-
     @staticmethod
-    def login(proxy: Optional[str] = None, logging: bool = False) -> str:
-        if not os.path.isfile(Account.COOKIES_FILE_NAME):
-            return Account.create(proxy, logging)
-
-        with open(Account.COOKIES_FILE_NAME, 'rb') as f:
-            cookies = pickle.load(f)
-            proxies = {'http': 'http://' + proxy, 'https': 'http://' + proxy} if proxy else False
-
-        client = Session(client_identifier='chrome110')
-        client.proxies = proxies
-        client.cookies.update(cookies)
-
-        if Account.is_cookie_enabled(client):
-            response = client.get('https://clerk.forefront.ai/v1/client?_clerk_js_version=4.38.4')
-            return response.json()['response']['sessions'][0]['last_active_token']['jwt']
-        else:
-            return Account.create(proxy, logging)
-
-    @staticmethod
-    def create(proxy: Optional[str] = None, logging: bool = False, save_cookies: bool = False) -> str:
+    def create(proxy: Optional[str] = None, logging: bool = False):
        proxies = {'http': 'http://' + proxy, 'https': 'http://' + proxy} if proxy else False
+        faker = Faker()
+        name = (faker.name().replace(' ', '_')).lower()
 
         start = time()
-        mail_client = MailTm().get_account()
-        mail_address = mail_client.address
+        mail_client = Email(name=name)
+        mail_address = mail_client.email
 
         client = Session(client_identifier='chrome110')
         client.proxies = proxies
@@ -66,7 +43,10 @@ class Account:
 
         response = client.post(
             f'https://clerk.forefront.ai/v1/client/sign_ups/{trace_token}/prepare_verification?_clerk_js_version=4.38.4',
-            data={'strategy': 'email_link', 'redirect_url': 'https://accounts.forefront.ai/sign-up/verify'},
+            data={
+                'strategy': 'email_link',
+                'redirect_url': 'https://accounts.forefront.ai/sign-up/verify'
+            },
         )
 
         if logging:
@@ -74,31 +54,23 @@ class Account:
 
         if 'sign_up_attempt' not in response.text:
             return 'Failed to create account!'
-
-        while True:
-            sleep(1)
-            new_message: Message = mail_client.wait_for_message()
-            if logging:
-                print(new_message.data['id'])
-
-            verification_url = findall(r'https:\/\/clerk\.forefront\.ai\/v1\/verify\?token=\w.+', new_message.text)[0]
-
+        verification_url = None
+        new_message = mail_client.get_new_message(5)
+        for msg in new_message:
+            verification_url = next(iter(findall(r'https:\/\/clerk\.forefront\.ai\/v1\/verify\?token=\w.+', msg.text)), None)
             if verification_url:
                 break
-
+
+        if not verification_url:
+            raise RuntimeError('Error while obtaining verification URL!')
         if logging:
             print(verification_url)
-
         response = client.get(verification_url)
 
         response = client.get('https://clerk.forefront.ai/v1/client?_clerk_js_version=4.38.4')
         token = response.json()['response']['sessions'][0]['last_active_token']['jwt']
 
-        if save_cookies:
-            with open(Account.COOKIES_FILE_NAME, 'wb') as f:
-                pickle.dump(client.cookies, f)
-
         with open('accounts.txt', 'a') as f:
             f.write(f'{mail_address}:{token}\n')
 
@@ -107,11 +79,6 @@ class Account:
 
         return token
 
-    @staticmethod
-    def is_cookie_enabled(client: Session) -> bool:
-        response = client.get('https://chat.forefront.ai/')
-        return 'window.startClerk' in response.text
-
 
 class StreamingCompletion:
     @staticmethod
@@ -122,14 +89,14 @@ def create(
         token=None,
         chat_id=None,
         prompt='',
         action_type='new',
         default_persona='607e41fe-95be-497e-8e97-010a59b2e2c0',  # default
         model='gpt-4',
-        proxy=None,
+        proxy=None
     ) -> Generator[ForeFrontResponse, None, None]:
         if not token:
             raise Exception('Token is required!')
         if not chat_id:
             chat_id = str(uuid4())
 
-        proxies = {'http': 'http://' + proxy, 'https': 'http://' + proxy} if proxy else None
+        proxies = { 'http': 'http://' + proxy, 'https': 'http://' + proxy } if proxy else None
 
         headers = {
             'authority': 'chat-server.tenant-forefront-default.knative.chi.coreweave.com',
@@ -197,7 +164,7 @@ class Completion:
         action_type='new',
         default_persona='607e41fe-95be-497e-8e97-010a59b2e2c0',  # default
         model='gpt-4',
-        proxy=None,
+        proxy=None
     ) -> ForeFrontResponse:
         text = ''
         final_response = None
@@ -208,7 +175,7 @@ class Completion:
             action_type=action_type,
             default_persona=default_persona,
             model=model,
-            proxy=proxy,
+            proxy=proxy
         ):
             if response:
                 final_response = response
@@ -220,3 +187,4 @@ class Completion:
             raise Exception('Unable to get the response, Please try again')
 
         return final_response
+
\ No newline at end of file
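Note: the verification loop above pulls the first regex hit out of each incoming mail; `next(iter(...), None)` is used so that an email without a matching link yields `None` instead of raising `IndexError` the way a bare `findall(...)[0]` would. A minimal, self-contained sketch of the extraction (the email body is a made-up stand-in for a real Clerk sign-up message):

```python
from re import findall

# Hypothetical email body for illustration only.
sample_body = (
    'Welcome to Forefront!\n'
    'Verify your address: https://clerk.forefront.ai/v1/verify?token=abc123DEF456\n'
    'If you did not sign up, you can ignore this email.'
)

verification_url = next(
    iter(findall(r'https:\/\/clerk\.forefront\.ai\/v1\/verify\?token=\w.+', sample_body)),
    None,  # no IndexError when nothing matches
)
print(verification_url)  # the full URL, or None if no link was found
```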
From 64ab4770dd84088d5465edd3a37b69529fa8d372 Mon Sep 17 00:00:00 2001
From: Hamza
Date: Thu, 4 May 2023 16:30:34 +0100
Subject: [PATCH 4/5] Fixed imports

---
 gpt4free/forefront/__init__.py | 1 +
 gpt4free/test.py               | 4 ++++
 2 files changed, 5 insertions(+)
 create mode 100644 gpt4free/test.py

diff --git a/gpt4free/forefront/__init__.py b/gpt4free/forefront/__init__.py
index a3912fb4..dbf1730b 100644
--- a/gpt4free/forefront/__init__.py
+++ b/gpt4free/forefront/__init__.py
@@ -1,6 +1,7 @@
 from json import loads
 from xtempmail import Email
 from re import findall
+from typing import Optional, Generator
 from faker import Faker
 from time import time, sleep
 from uuid import uuid4
diff --git a/gpt4free/test.py b/gpt4free/test.py
new file mode 100644
index 00000000..b2516748
--- /dev/null
+++ b/gpt4free/test.py
@@ -0,0 +1,4 @@
+import forefront
+token = forefront.Account.create()
+response = forefront.Completion.create(token=token, prompt='Hello!')
+print(response)
\ No newline at end of file
From 824d7259d030f7c5d35004729eee68a0e4359304 Mon Sep 17 00:00:00 2001
From: "t.me/xtekky" <98614666+xtekky@users.noreply.github.com>
Date: Thu, 4 May 2023 17:29:09 +0100
Subject: [PATCH 5/5] updates

---
 gpt4free/italygpt/README.md          | 18 --------
 gpt4free/italygpt/__init__.py        | 28 ------------
 gpt4free/quora/__init__.py           |  4 +-
 pyproject.toml                       |  2 +-
 unfinished/test.py                   |  8 ----
 unfinished/vercelai/__init__.py      | 41 -----------------
 unfinished/vercelai/test.js          | 33 --------------
 unfinished/vercelai/test.py          | 67 ----------------------------
 unfinished/vercelai/token.py         |  0
 unfinished/vercelai/vercelai_test.py |  5 ---
 10 files changed, 3 insertions(+), 203 deletions(-)
 delete mode 100644 gpt4free/italygpt/README.md
 delete mode 100644 gpt4free/italygpt/__init__.py
 delete mode 100644 unfinished/test.py
 delete mode 100644 unfinished/vercelai/__init__.py
 delete mode 100644 unfinished/vercelai/test.js
 delete mode 100644 unfinished/vercelai/test.py
 delete mode 100644 unfinished/vercelai/token.py
 delete mode 100644 unfinished/vercelai/vercelai_test.py

diff --git a/gpt4free/italygpt/README.md b/gpt4free/italygpt/README.md
deleted file mode 100644
index 984eff3a..00000000
--- a/gpt4free/italygpt/README.md
+++ /dev/null
@@ -1,18 +0,0 @@
-### Example: `italygpt`
-
-```python
-# create an instance
-from gpt4free import italygpt
-italygpt = italygpt.Completion()
-
-# initialize api
-italygpt.init()
-
-# get an answer
-italygpt.create(prompt="What is the meaning of life?")
-print(italygpt.answer) # html formatted
-
-# keep the old conversation
-italygpt.create(prompt="Are you a human?", messages=italygpt.messages)
-print(italygpt.answer)
-```
\ No newline at end of file
diff --git a/gpt4free/italygpt/__init__.py b/gpt4free/italygpt/__init__.py
deleted file mode 100644
index 27a965f1..00000000
--- a/gpt4free/italygpt/__init__.py
+++ /dev/null
@@ -1,28 +0,0 @@
-import requests, time, ast, json
-from bs4 import BeautifulSoup
-from hashlib import sha256
-
-
-class Completion:
-    # answer is returned with html formatting
-    next_id = None
-    messages = []
-    answer = None
-
-    def init(self):
-        r = requests.get("https://italygpt.it")
-        soup = BeautifulSoup(r.text, "html.parser")
-        self.next_id = soup.find("input", {"name": "next_id"})["value"]
-
-    def create(self, prompt: str, messages: list = []):
-        try:
-            r = requests.get("https://italygpt.it/question", params={"hash": sha256(self.next_id.encode()).hexdigest(), "prompt": prompt, "raw_messages": json.dumps(messages)}).json()
-        except:
-            r = requests.get("https://italygpt.it/question", params={"hash": sha256(self.next_id.encode()).hexdigest(), "prompt": prompt, "raw_messages": json.dumps(messages)}).text
-            if "too many requests" in r.lower():
-                # rate limit is 17 requests per 1 minute
-                time.sleep(20)
-                return self.create(prompt, messages)
-        self.next_id = r["next_id"]
-        self.messages = ast.literal_eval(r["raw_messages"])
-        self.answer = r["response"]
-        return self
\ No newline at end of file
diff --git a/gpt4free/quora/__init__.py b/gpt4free/quora/__init__.py
index bc17ea5d..5d9e80c1 100644
--- a/gpt4free/quora/__init__.py
+++ b/gpt4free/quora/__init__.py
@@ -104,8 +104,8 @@ class Model:
     def create(
         token: str,
         model: str = 'gpt-3.5-turbo',  # claude-instant
-        system_prompt: str = 'You are ChatGPT a large language model developed by Openai. Answer as consisely as possible',
-        description: str = 'gpt-3.5 language model from openai, skidded by poe.com',
+        system_prompt: str = 'You are ChatGPT a large language model. Answer as concisely as possible',
+        description: str = 'gpt-3.5 language model',
         handle: str = None,
     ) -> ModelResponse:
         if not handle:
diff --git a/pyproject.toml b/pyproject.toml
index 83df5dc5..37ec92d2 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,5 +1,5 @@
 [tool.poetry]
-name = "openai-rev"
+name = "gpt4free"
 version = "0.1.0"
 description = ""
 authors = []
diff --git a/unfinished/test.py b/unfinished/test.py
deleted file mode 100644
index a5f038c5..00000000
--- a/unfinished/test.py
+++ /dev/null
@@ -1,8 +0,0 @@
-# asyncio.run(gptbz.test())
-
-import requests
-
-image = '/9j/4AAQSkZJRgABAQEAYABgAAD/2wBDAAgGBgcGBQgHBwcJCQgKDBQNDAsLDBkSEw8UHRofHh0aHBwgJC4nICIsIxwcKDcpLDAxNDQ0Hyc5PTgyPC4zNDL/2wBDAQkJCQwLDBgNDRgyIRwhMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjL/wAARCAAoALQDASIAAhEBAxEB/8QAHwAAAQUBAQEBAQEAAAAAAAAAAAECAwQFBgcICQoL/8QAtRAAAgEDAwIEAwUFBAQAAAF9AQIDAAQRBRIhMUEGE1FhByJxFDKBkaEII0KxwRVS0fAkM2JyggkKFhcYGRolJicoKSo0NTY3ODk6Q0RFRkdISUpTVFVWV1hZWmNkZWZnaGlqc3R1dnd4eXqDhIWGh4iJipKTlJWWl5iZmqKjpKWmp6ipqrKztLW2t7i5usLDxMXGx8jJytLT1NXW19jZ2uHi4+Tl5ufo6erx8vP09fb3+Pn6/8QAHwEAAwEBAQEBAQEBAQAAAAAAAAECAwQFBgcICQoL/8QAtREAAgECBAQDBAcFBAQAAQJ3AAECAxEEBSExBhJBUQdhcRMiMoEIFEKRobHBCSMzUvAVYnLRChYkNOEl8RcYGRomJygpKjU2Nzg5OkNERUZHSElKU1RVVldYWVpjZGVmZ2hpanN0dXZ3eHl6goOEhYaHiImKkpOUlZaXmJmaoqOkpaanqKmqsrO0tba3uLm6wsPExcbHyMnK0tPU1dbX2Nna4uPk5ebn6Onq8vP09fb3+Pn6/9oADAMBAAIRAxEAPwD3+iiigDkZP+EhS4W0k1S+VntQPtEWmRsgkNwBu4ZsHYQNvTbls5BA6DS7uW6S6E0VwjQ3UsQM0Pl71DZUrydy4IAbvg8CsTx3DbHQLi4uVs9scWzdd+dsAaWI4PlfNjKjpzkDtmpoNSgbWYpLR7Ty5bq5trw/vd3nIowBxtzti53Y6fKT3z2djra56fNbv07HR1z13ZRX/jDyby0+02f9nfdmsEeHd5o/5anndwPkxjjPWuhrh9Mvra88RLqccmnOHtvLEqfaN+1r1lUcjbg4PbO4H+Cqk+hnRi9ZI29E0uC2N1eG3Am+13DITZRwuqlsYG0ZYEKCGJywwT2AtWTapcW1vcPPCiyrE5ils2SRQV+dW/ecMT/3zgj5utZtpdwL4e190e02W9xeb9vm7FOWY78/NnnJ28f3ahkgtptD8JRlbMos9s8QPnbcrEzDy/4sgDjzOMdeaSZbi23f8vmbfn6hBFuktmuWWPJWCNELNuxgbpcDj1Pbr2qJ9bMVyIZNK1JVLyr5qwB1AjUNu+Uk4bovGSRjAqCTwdoElv5B02MReT5G1HZfk8zzMcEfx81YlsJ7NJX0tolZzNK8dyZJA8jDIwd3yjcBkAHjOAM09SP3b/q36mkjiSNXAYBgCNykH8QeRWdfaw1ldSW66XqN0UgE++3iBRsvt2BiQN/8WPQZqharF9oN5osVml1NLbLqUbmUFY/L4CrgYYKy4yoGM5xjhlnc2OoeMrfULV7aQXGkExyYlErJ5oPQ/Jtye/zZ9qLgqaTba0NyzvPtizH7NcQeVM8OJ49u/acbl9VPY96s1geFjF/xOhF9m41Wfd9n8z73BO7f/Fzzt+X0q7c6mWvRY2DwSXcUsQuUff8Auo2ySflB+YqrYyQOmTyARPQmVP32kLqF1cbmsrJZkuni3rcfZ98UfzKvJJUE4JOM5wpODwDl3Meuf2rHbRatcBJXuj5iachjhUovlBmZudrNkEZ3HIOMGlhREhbS9He2a8MO6a4fzmGDMQ3zAk5yZ8DzMgj0yRuWdha2CzLawrEJpnnkx/G7HLMfc0bl3VNf5pff/kVLS8uxFHHJZ3s5Xyo2mZI4y2VBZyN44B6gDrwAcVZ069Go2EV2Le5t/MBPlXMZjkXnGGU9OlULSdbfTt8LWy5mt0JAkK4YRLjnnODx26Z71TXULEWn/CUWDwmxeDbM4WbkCXJbaB23SnlM5PUDNF7CcObZf12OlpCcDoTz2oVlcZVgRkjIPccGo7hgsSk7ceYg+bP94elUYpamda64915GdH1SESxiTM0KjZmTZtbDHB53Y/u89eK1qw4xD9l0mIC3wLdCg/eYwHh+73x0+9znb71uUkXUSWyCiiimZhRRRQBieL5Hj8LXjxySxuNmGivFtWHzr0lbhfx69O9MvHdZpbKKWYnUluNji+VGikVFULHnkdGbjO05JHPEviyF5/DF7HGkjuQpCx2i3THDA8RNw3Tv069qR0kk0i4uFilF3bSXTwE2a+YGzIAUQnnIPByN46kbjUPc6YNKC9X+SLtjeB9Mt5ZyqzbI1lQzK5R2C/KWGAT8w6dcjHUVzemSyxeCba9e5uWfzIgxl1aOTgXPebGw5BwR3ACdalna8+0R3Kx3nk6jc2MvkjTI2MH97zDnI+4uWOSny4z2Lqxmt/hytvHHIZhFHJsj0yJnyXDEfZ87M9cjPB56ik2y4xSsu7XcnjMsejeJszXBZZrgozaihZAYwQFfGIQM8Bvu9ehrTKuJtOg3y5gKs/8ApAy2Y5B846uMj8Tz/CaqzROH1C3EchW6uHGRZIVx9nHXs4yPvN1PydBV2Lc+u3eUkCJBDtZoAFJzJna/VjgjI/h/4EaaM5PS/wDXRF+iiirOcy7RZE8RanukmKPFA6q9yHVfvg7Y+qfd5J4Y9OhrJ8Nm4FxYJNNdORaXCsJtTS4yVnAyQoG5sfxfw/dPJrUslmGt6rcymQxM0MMStahMALk4cfM65c9cBSGA7mqmi2k9t/ZZuDJJKbSdpHNjHEdzyRvhtv3G5PyjIbBJOVqDpurP5d+zGWtzeLdahZQLNK895PiV7+N/IURKQQMEqNzKAm1tucnggG4Fkhs4INNuJL145oEuHa7BcIAuWOQRkrhiAFzkkEE8rNDJPczWtnG1rG7yfapvsqESsY1AIJPP3hztbPllTjHKvpv2CWKbTUSHdJCk8cVtH+8jUFOSNpGAynOTgJgL1BNRNxf9fmWNGa3fR7U2ty9zDswJZJxMzHvlwSCc5BwccVerBZ3tLf8Atqyguvsxt/n02OyUSsxk3FsHa24bnyM4ycgE9d1WDDIz1I5BHQ471SM6i1uY8cjjSIWLyFjLbDJu1J5Mefn6HryP4snH3hRdmTS5f7T82aS2WBY5Y5LpVjX94Pn+YYzhmydw4UDB4wio/wDY8K+XLuE1qcfY1B4MWfk6DHOT/Bg4+6K1zGkkHlSoroy7WVlGCCOQRSsU5JGUrPo96EZ5p7O7mmmlubm7XFqQoYIobB2fK3Aztwe3TQvX2QKQSMyxDiQJ1dR1P8u/TvWb5bWty2m3KTXlvqMs7Ky2ieVbqVBKSEcHJL4JB3ZwfeLfcQRnTpY7mT7PLZiOdbJSkillzgA44KMScLsBBAOBkuNxu0/6epcQv9s0+LfJzauxBuVJJDRckdXPJ+YcDJH8QrTrN2sNcsxsk2LZyjd9nXaCWj439VPH3RwcZ/hFaVNGc+gUUUUyAooooAxfFVxZxeG9RS7ltVQ25ytwzbCCQBkJ82MkD5eeah0G7tYLi/sZJrKO4fUbjy4oncM/SQ5D9Ww4J25Xniiis2/eO2FNOhf1/CxmamsEGp2+nzx2CwxajYyWKN9o3KdpX+Ebd2I2287ePm973i3UdMg0W+0y4mtUkNqJPKuBJ5ewuEBYx8gbiBxz+FFFS3ZM1p01OdNN/wBaFfVtU0qHxHplx9qsSkEl2853SvIjxwjdtCZXIX7wbt05q7YJdS6nc6vYxWEtpfi2KS+bKsjQhCSWBBG4bhtAAyCcmiinF3k0RWgqdKMl1VvxZfM2s+VkWFh5nl5x9tfG/djGfK6bec468Y/irN1CeUCeHXbrTItPc3O6GN5PNltxHx0I+YKXLYB42455ooqpaIwo2lO1rE1rZjUYrcCO2Giw/Zp7BYzKrkKu4bh8oAB2EA56HIz0u3uxL+1kbygQpQFt2fmki4GOOuOvfHbNFFPpcTu6nKFpsTU75V8oNJKXIXduOI4hk54zjHTjGO+a0KKKaM59PQxLqNNBMuoQpDFYJEfPQLISp8zcWAXIxh5CcLnOMnHQaFNKkkvtOFoli0k9xqP32Zn24LIFyM7kwRg98c5yUVL3No6xTfV2/IrxyW0vh21kQ2phaexKn97s5aErj+LPTbnj7u7+KujoopxZNZW+9/oQXdpBfWk1rcxiSGVGjdSSMhgQeRyOCRxWOtvbXU0Ol6mIHksJbea0IMoJYISGy3U5ST+JuB83uUUMVJuz121JnaL/AITOBSYPOGnyEA7/ADdvmJnH8G3IHX5s4xxmtmiihdRVFZR9AoooqjI//9k='
-
-response = requests.get('https://ocr.holey.cc/ncku?base64_str=%s' % image)  # .split('base64,')[1])
-print(response.content)
diff --git a/unfinished/vercelai/__init__.py b/unfinished/vercelai/__init__.py
deleted file mode 100644
index 1dcb5b39..00000000
--- a/unfinished/vercelai/__init__.py
+++ /dev/null
@@ -1,41 +0,0 @@
-import requests
-
-class Completion:
-    def create(prompt: str,
-               model: str = 'openai:gpt-3.5-turbo',
-               temperature: float = 0.7,
-               max_tokens: int = 200,
-               top_p: float = 1,
-               top_k: int = 1,
-               frequency_penalty: float = 1,
-               presence_penalty: float = 1,
-               stopSequences: list = []):
-
-        token = requests.get('https://play.vercel.ai/openai.jpeg', headers={
-            'authority': 'play.vercel.ai',
-            'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
-            'referer': 'https://play.vercel.ai/',
-            'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36'}).text.replace('=', '')
-
-        print(token)
-
-        headers = {
-            'authority': 'play.vercel.ai',
-            'custom-encoding': token,
-            'origin': 'https://play.vercel.ai',
-            'referer': 'https://play.vercel.ai/',
-            'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36'
-        }
-
-        for chunk in requests.post('https://play.vercel.ai/api/generate', headers=headers, stream=True, json={
-            'prompt': prompt,
-            'model': model,
-            'temperature': temperature,
-            'maxTokens': max_tokens,
-            'topK': top_p,
-            'topP': top_k,
-            'frequencyPenalty': frequency_penalty,
-            'presencePenalty': presence_penalty,
-            'stopSequences': stopSequences}).iter_lines():
-
-            yield (chunk)
\ No newline at end of file
diff --git a/unfinished/vercelai/test.js b/unfinished/vercelai/test.js
deleted file mode 100644
index 0f822cfd..00000000
--- a/unfinished/vercelai/test.js
+++ /dev/null
@@ -1,33 +0,0 @@
-(async () => {
-
-    let response = await fetch("https://play.vercel.ai/openai.jpeg", {
-        "headers": {
-            "accept": "*/*",
-            "accept-language": "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3",
-            "sec-ch-ua": "\"Chromium\";v=\"112\", \"Google Chrome\";v=\"112\", \"Not:A-Brand\";v=\"99\"",
-            "sec-ch-ua-mobile": "?0",
-            "sec-ch-ua-platform": "\"macOS\"",
-            "sec-fetch-dest": "empty",
-            "sec-fetch-mode": "cors",
-            "sec-fetch-site": "same-origin"
-        },
-        "referrer": "https://play.vercel.ai/",
-        "referrerPolicy": "strict-origin-when-cross-origin",
-        "body": null,
-        "method": "GET",
-        "mode": "cors",
-        "credentials": "omit"
-    });
-
-
-    let data = JSON.parse(atob(await response.text()))
-    let ret = eval("(".concat(data.c, ")(data.a)"));
-
-    botPreventionToken = btoa(JSON.stringify({
-        r: ret,
-        t: data.t
-    }))
-
-    console.log(botPreventionToken);
-
-})()
\ No newline at end of file
diff --git a/unfinished/vercelai/test.py b/unfinished/vercelai/test.py
deleted file mode 100644
index 318e71c3..00000000
--- a/unfinished/vercelai/test.py
+++ /dev/null
@@ -1,67 +0,0 @@
-import requests
-from base64 import b64decode, b64encode
-from json import loads
-from json import dumps
-
-headers = {
-    'Accept': '*/*',
-    'Accept-Language': 'en-GB,en-US;q=0.9,en;q=0.8',
-    'Connection': 'keep-alive',
-    'Referer': 'https://play.vercel.ai/',
-    'Sec-Fetch-Dest': 'empty',
-    'Sec-Fetch-Mode': 'cors',
-    'Sec-Fetch-Site': 'same-origin',
-    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36',
-    'sec-ch-ua': '"Chromium";v="110", "Google Chrome";v="110", "Not:A-Brand";v="99"',
-    'sec-ch-ua-mobile': '?0',
-    'sec-ch-ua-platform': '"macOS"',
-}
-
-response = requests.get('https://play.vercel.ai/openai.jpeg', headers=headers)
-
-token_data = loads(b64decode(response.text))
-print(token_data)
-
-raw_token = {
-    'a': token_data['a'] * .1 * .2,
-    't': token_data['t']
-}
-
-print(raw_token)
-
-new_token = b64encode(dumps(raw_token, separators=(',', ':')).encode()).decode()
-print(new_token)
-
-import requests
-
-headers = {
-    'authority': 'play.vercel.ai',
-    'accept': '*/*',
-    'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
-    'content-type': 'application/json',
-    'custom-encoding': new_token,
-    'origin': 'https://play.vercel.ai',
-    'referer': 'https://play.vercel.ai/',
-    'sec-ch-ua': '"Chromium";v="112", "Google Chrome";v="112", "Not:A-Brand";v="99"',
-    'sec-ch-ua-mobile': '?0',
-    'sec-ch-ua-platform': '"macOS"',
-    'sec-fetch-dest': 'empty',
-    'sec-fetch-mode': 'cors',
-    'sec-fetch-site': 'same-origin',
-    'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36',
-}
-
-json_data = {
-    'prompt': 'hello\n',
-    'model': 'openai:gpt-3.5-turbo',
-    'temperature': 0.7,
-    'maxTokens': 200,
-    'topK': 1,
-    'topP': 1,
-    'frequencyPenalty': 1,
-    'presencePenalty': 1,
-    'stopSequences': [],
-}
-
-response = requests.post('https://play.vercel.ai/api/generate', headers=headers, json=json_data)
-print(response.text)
\ No newline at end of file
diff --git a/unfinished/vercelai/token.py b/unfinished/vercelai/token.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/unfinished/vercelai/vercelai_test.py b/unfinished/vercelai/vercelai_test.py
deleted file mode 100644
index 24cbe0bc..00000000
--- a/unfinished/vercelai/vercelai_test.py
+++ /dev/null
@@ -1,5 +0,0 @@
-import vercelai
-
-for token in vercelai.Completion.create('summarize the gnu gpl 1.0'):
-    print(token, end='', flush=True)
-
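Note: after this series the package is published as `gpt4free` (per the pyproject.toml rename above) and the Poe-backed `quora` module keeps its entry points. A hedged usage sketch, using only the parameters visible in the diff above; the token value is a placeholder for a real poe.com `p-b` cookie:

```python
from gpt4free import quora

token = 'p-b-cookie-value'  # placeholder: substitute a real poe.com p-b cookie

# Model.create matches the signature shown in the quora diff above; handle
# defaults to None and is filled in by the `if not handle:` branch.
model = quora.Model.create(token=token, model='gpt-3.5-turbo')
print(model)
```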