Add unittests for async client (#1830)

* Add unittests for async client

* Add polyfill for anext

* Update integration tests
This commit is contained in:
H Lohaus 2024-04-12 18:01:54 +02:00 committed by GitHub
parent 35179fe843
commit 0b712c2bde
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
7 changed files with 82 additions and 15 deletions

View File

@ -4,6 +4,7 @@ from .backend import *
from .main import *
from .model import *
from .client import *
from .async_client import *
from .include import *
from .integration import *

View File

@ -0,0 +1,56 @@
import unittest
from g4f.client import AsyncClient, ChatCompletion, ChatCompletionChunk
from .mocks import AsyncGeneratorProviderMock, ModelProviderMock, YieldProviderMock
DEFAULT_MESSAGES = [{'role': 'user', 'content': 'Hello'}]
class AsyncTestPassModel(unittest.IsolatedAsyncioTestCase):
    """Exercise the asynchronous g4f client against in-process mock providers."""

    @staticmethod
    def _as_messages(chunks):
        # Wrap each text chunk in a single-turn user message.
        return [{'role': 'user', 'content': piece} for piece in chunks]

    async def test_response(self):
        """The async generator mock yields the literal text "Mock"."""
        client = AsyncClient(provider=AsyncGeneratorProviderMock)
        completion = await client.chat.completions.create(DEFAULT_MESSAGES, "")
        self.assertIsInstance(completion, ChatCompletion)
        self.assertEqual("Mock", completion.choices[0].message.content)

    async def test_pass_model(self):
        """The model-echo mock returns the requested model name as the reply."""
        client = AsyncClient(provider=ModelProviderMock)
        completion = await client.chat.completions.create(DEFAULT_MESSAGES, "Hello")
        self.assertIsInstance(completion, ChatCompletion)
        self.assertEqual("Hello", completion.choices[0].message.content)

    async def test_max_tokens(self):
        """max_tokens truncates the concatenation of the yielded chunks."""
        client = AsyncClient(provider=YieldProviderMock)
        history = self._as_messages(["How ", "are ", "you", "?"])
        for limit, expected in ((1, "How "), (2, "How are ")):
            completion = await client.chat.completions.create(history, "Hello", max_tokens=limit)
            self.assertIsInstance(completion, ChatCompletion)
            self.assertEqual(expected, completion.choices[0].message.content)

    async def test_max_stream(self):
        """Streaming yields ChatCompletionChunk objects and honours max_tokens."""
        client = AsyncClient(provider=YieldProviderMock)
        history = self._as_messages(["How ", "are ", "you", "?"])
        stream = client.chat.completions.create(history, "Hello", stream=True)
        async for part in stream:
            self.assertIsInstance(part, ChatCompletionChunk)
            if part.choices[0].delta.content is not None:
                self.assertIsInstance(part.choices[0].delta.content, str)
        history = self._as_messages(["You ", "You ", "Other", "?"])
        stream = client.chat.completions.create(history, "Hello", stream=True, max_tokens=2)
        # max_tokens=2 -> two content chunks plus the final terminating chunk.
        collected = [part async for part in stream]
        self.assertEqual(len(collected), 3)
        for part in collected:
            if part.choices[0].delta.content is not None:
                self.assertEqual(part.choices[0].delta.content, "You ")

    async def test_stop(self):
        """stop= is accepted; no stop word occurs, so the full text survives."""
        client = AsyncClient(provider=YieldProviderMock)
        history = self._as_messages(["How ", "are ", "you", "?"])
        completion = await client.chat.completions.create(history, "Hello", stop=["and"])
        self.assertIsInstance(completion, ChatCompletion)
        self.assertEqual("How are you?", completion.choices[0].message.content)
# Allow running this test module directly (e.g. `python test_async_client.py`).
if __name__ == '__main__':
    unittest.main()

View File

@ -8,7 +8,7 @@ except ImportError:
has_nest_asyncio = False
from g4f.client import Client, ChatCompletion
from g4f.Provider import Bing, OpenaiChat
from g4f.Provider import Bing, OpenaiChat, DuckDuckGo
DEFAULT_MESSAGES = [{"role": "system", "content": 'Response in json, Example: {"success: true"}'},
{"role": "user", "content": "Say success true in json"}]
@ -19,11 +19,19 @@ class TestProviderIntegration(unittest.TestCase):
self.skipTest("nest_asyncio is not installed")
def test_bing(self):
    """Integration smoke test for the Bing provider (currently disabled)."""
    # Skipped unconditionally: the provider is flaky; the code below is kept
    # so the test can be re-enabled by deleting this line.
    self.skipTest("Not stable")
    client = Client(provider=Bing)
    response = client.chat.completions.create(DEFAULT_MESSAGES, "", response_format={"type": "json_object"})
    self.assertIsInstance(response, ChatCompletion)
    # The system prompt asks for JSON containing "success"; verify both.
    self.assertIn("success", json.loads(response.choices[0].message.content))
def test_duckduckgo(self):
    """Integration smoke test for the DuckDuckGo provider (currently disabled)."""
    # Skipped unconditionally: provider marked broken; body retained for
    # easy re-enablement.
    self.skipTest("Not working")
    client = Client(provider=DuckDuckGo)
    response = client.chat.completions.create(DEFAULT_MESSAGES, "", response_format={"type": "json_object"})
    self.assertIsInstance(response, ChatCompletion)
    # The system prompt asks for JSON containing "success"; verify both.
    self.assertIn("success", json.loads(response.choices[0].message.content))
def test_openai(self):
client = Client(provider=OpenaiChat)
response = client.chat.completions.create(DEFAULT_MESSAGES, "", response_format={"type": "json_object"})

View File

@ -11,7 +11,7 @@ except ImportError:
from ..typing import Messages, CreateResult
from .base_provider import AbstractProvider
from ..requests import raise_for_status
from ..errors import MissingRequirementsError, RateLimitError, ResponseStatusError
from ..errors import MissingRequirementsError
class Vercel(AbstractProvider):
url = 'https://chat.vercel.ai'

View File

@ -16,6 +16,13 @@ from ..errors import NoImageResponseError
from ..image import ImageResponse as ImageProviderResponse
from ..providers.base_provider import AsyncGeneratorProvider
try:
    # Python >= 3.10 ships anext() as a builtin; prefer it when present.
    anext
except NameError:
    async def anext(aiter):
        """Polyfill for the Python 3.10+ ``anext`` builtin.

        Returns the next item produced by the async iterator *aiter*.

        Raises:
            StopAsyncIteration: if the iterator is exhausted, matching the
                builtin's behaviour (the previous polyfill silently returned
                ``None`` instead, and shadowed the ``iter`` builtin).
        """
        async for item in aiter:
            return item
        raise StopAsyncIteration
async def iter_response(
response: AsyncIterator[str],
stream: bool,

View File

@ -1262,7 +1262,7 @@ if (SpeechRecognition) {
function may_stop() {
if (microLabel.classList.contains("recognition")) {
//recognition.stop();
recognition.stop();
}
}
@ -1272,15 +1272,12 @@ if (SpeechRecognition) {
recognition.onstart = function() {
microLabel.classList.add("recognition");
startValue = messageInput.value;
messageInput.placeholder = "";
lastDebounceTranscript = "";
timeoutHandle = window.setTimeout(may_stop, 10000);
};
recognition.onend = function() {
microLabel.classList.remove("recognition");
messageInput.value = messageInput.placeholder;
messageInput.placeholder = "Ask a question";
//messageInput.focus();
messageInput.focus();
};
recognition.onresult = function(event) {
if (!event.results) {
@ -1298,9 +1295,9 @@ if (SpeechRecognition) {
lastDebounceTranscript = transcript;
}
if (transcript) {
messageInput.placeholder = `${startValue ? startValue+"\n" : ""}${transcript.trim()}`;
messageInput.value = `${startValue ? startValue+"\n" : ""}${transcript.trim()}`;
if (isFinal) {
startValue = messageInput.placeholder;
startValue = messageInput.value;
}
messageInput.style.height = messageInput.scrollHeight + "px";
messageInput.scrollTop = messageInput.scrollHeight;

View File

@ -34,15 +34,13 @@ class StreamResponse:
"""Asynchronously parse the JSON response content."""
return json.loads(await self.inner.acontent(), **kwargs)
async def iter_lines(self) -> AsyncGenerator[bytes, None]:
def iter_lines(self) -> AsyncGenerator[bytes, None]:
"""Asynchronously iterate over the lines of the response."""
async for line in self.inner.aiter_lines():
yield line
return self.inner.aiter_lines()
async def iter_content(self) -> AsyncGenerator[bytes, None]:
def iter_content(self) -> AsyncGenerator[bytes, None]:
"""Asynchronously iterate over the response content."""
async for chunk in self.inner.aiter_content():
yield chunk
return self.inner.aiter_content()
async def __aenter__(self):
"""Asynchronously enter the runtime context for the response object."""