Added Meta llama-3 support! (#1856)

* Added Meta llama-3 support!
Decided to change llama2.py to llama.py to hold all the llama family models.

* Updated HuggingChat provider

* Update FlowGpt.py
Authored by PD on 2024-04-19 12:57:33 +05:30; committed by GitHub
parent 718ea7c187
commit 5fd118f3c9
8 changed files with 54 additions and 32 deletions

File: README.md

@@ -345,29 +345,31 @@ While we wait for gpt-5, here is a list of new models that are at least better t
### Models
-| Model | Base Provider | Provider | Website |
-| ----- | ------------- | -------- | ------- |
-| gpt-3.5-turbo | OpenAI | 5+ Providers | [openai.com](https://openai.com/) |
-| gpt-4 | OpenAI | 2+ Providers | [openai.com](https://openai.com/) |
-| gpt-4-turbo | OpenAI | g4f.Provider.Bing | [openai.com](https://openai.com/) |
-| Llama-2-7b-chat-hf | Meta | 2+ Providers | [llama.meta.com](https://llama.meta.com/) |
-| Llama-2-13b-chat-hf | Meta | 2+ Providers | [llama.meta.com](https://llama.meta.com/) |
-| Llama-2-70b-chat-hf | Meta | 3+ Providers | [llama.meta.com](https://llama.meta.com/) |
-| CodeLlama-34b-Instruct-hf | Meta | 2+ Providers | [llama.meta.com](https://llama.meta.com/) |
-| CodeLlama-70b-Instruct-hf | Meta | 2+ Providers | [llama.meta.com](https://llama.meta.com/) |
-| Mixtral-8x7B-Instruct-v0.1 | Huggingface | 4+ Providers | [huggingface.co](https://huggingface.co/) |
-| Mistral-7B-Instruct-v0.1 | Huggingface | 4+ Providers | [huggingface.co](https://huggingface.co/) |
-| dolphin-2.6-mixtral-8x7b | Huggingface | g4f.Provider.DeepInfra | [huggingface.co](https://huggingface.co/) |
-| lzlv_70b_fp16_hf | Huggingface | g4f.Provider.DeepInfra | [huggingface.co](https://huggingface.co/) |
-| airoboros-70b | Huggingface | g4f.Provider.DeepInfra | [huggingface.co](https://huggingface.co/) |
+| Model | Base Provider | Provider | Website |
+|-----------------------------| ------------- | -------- | ------- |
+| gpt-3.5-turbo | OpenAI | 5+ Providers | [openai.com](https://openai.com/) |
+| gpt-4 | OpenAI | 2+ Providers | [openai.com](https://openai.com/) |
+| gpt-4-turbo | OpenAI | g4f.Provider.Bing | [openai.com](https://openai.com/) |
+| Llama-2-7b-chat-hf | Meta | 2+ Providers | [llama.meta.com](https://llama.meta.com/) |
+| Llama-2-13b-chat-hf | Meta | 2+ Providers | [llama.meta.com](https://llama.meta.com/) |
+| Llama-2-70b-chat-hf | Meta | 3+ Providers | [llama.meta.com](https://llama.meta.com/) |
+| Meta-Llama-3-8b | Meta | 3+ Providers | [llama.meta.com](https://llama.meta.com/) |
+| Meta-Llama-3-70b | Meta | 3+ Providers | [llama.meta.com](https://llama.meta.com/) |
+| CodeLlama-34b-Instruct-hf | Meta | 2+ Providers | [llama.meta.com](https://llama.meta.com/) |
+| CodeLlama-70b-Instruct-hf | Meta | 2+ Providers | [llama.meta.com](https://llama.meta.com/) |
+| Mixtral-8x7B-Instruct-v0.1 | Huggingface | 4+ Providers | [huggingface.co](https://huggingface.co/) |
+| Mistral-7B-Instruct-v0.1 | Huggingface | 4+ Providers | [huggingface.co](https://huggingface.co/) |
+| dolphin-2.6-mixtral-8x7b | Huggingface | g4f.Provider.DeepInfra | [huggingface.co](https://huggingface.co/) |
+| lzlv_70b_fp16_hf | Huggingface | g4f.Provider.DeepInfra | [huggingface.co](https://huggingface.co/) |
+| airoboros-70b | Huggingface | g4f.Provider.DeepInfra | [huggingface.co](https://huggingface.co/) |
+| airoboros-l2-70b-gpt4-1.4.1 | Huggingface | g4f.Provider.DeepInfra | [huggingface.co](https://huggingface.co/) |
| openchat_3.5 | Huggingface | 2+ Providers | [huggingface.co](https://huggingface.co/) |
| gemini | Google | g4f.Provider.Gemini | [gemini.google.com](https://gemini.google.com/) |
| gemini-pro | Google | 2+ Providers | [gemini.google.com](https://gemini.google.com/) |
| claude-v2 | Anthropic | 1+ Providers | [anthropic.com](https://www.anthropic.com/) |
| claude-3-opus | Anthropic | g4f.Provider.You | [anthropic.com](https://www.anthropic.com/) |
| claude-3-sonnet | Anthropic | g4f.Provider.You | [anthropic.com](https://www.anthropic.com/) |
| pi | Inflection | g4f.Provider.Pi | [inflection.ai](https://inflection.ai/) |
## 🔗 Powered by gpt4free
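Each model key in the table above can be passed straight to g4f. A minimal usage sketch for the newly listed Llama-3 entries, assuming the package's standard `ChatCompletion` interface:

```python
import g4f

# Ask the new 70B Llama-3 model a question; g4f picks a working provider
# from the "3+ Providers" registered for it in the table above.
response = g4f.ChatCompletion.create(
    model="meta-llama/Meta-Llama-3-70b",
    messages=[{"role": "user", "content": "Summarize Llama 3 in one sentence."}],
)
print(response)
```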

File: g4f/Provider/FlowGpt.py

@@ -99,4 +99,4 @@ class FlowGpt(AsyncGeneratorProvider, ProviderModelMixin):
                if "event" not in message:
                    continue
                if message["event"] == "text":
-                    yield message["data"]
\ No newline at end of file
+                    yield message["data"]
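This `yield` is what makes FlowGpt an async generator provider: each "text" event is surfaced as soon as it arrives. Consuming it through the public API looks roughly like this (standard g4f streaming call; the model and prompt are illustrative):

```python
import g4f

# Stream the response chunk by chunk instead of waiting for the full text.
for chunk in g4f.ChatCompletion.create(
    model="gpt-3.5-turbo",
    provider=g4f.Provider.FlowGpt,
    messages=[{"role": "user", "content": "Tell me a short joke."}],
    stream=True,
):
    print(chunk, end="", flush=True)
```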

File: g4f/Provider/HuggingChat.py

@@ -19,7 +19,8 @@ class HuggingChat(AsyncGeneratorProvider, ProviderModelMixin):
        'mistralai/Mixtral-8x7B-Instruct-v0.1',
        'google/gemma-1.1-7b-it',
        'NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO',
-        'mistralai/Mistral-7B-Instruct-v0.2'
+        'mistralai/Mistral-7B-Instruct-v0.2',
+        'meta-llama/Meta-Llama-3-70B-Instruct'
    ]
    model_aliases = {
        "openchat/openchat_3.5": "openchat/openchat-3.5-0106",

File: g4f/Provider/Llama.py (renamed from g4f/Provider/Llama2.py)

@@ -7,17 +7,21 @@ from ..requests.raise_for_status import raise_for_status
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin

-class Llama2(AsyncGeneratorProvider, ProviderModelMixin):
+class Llama(AsyncGeneratorProvider, ProviderModelMixin):
    url = "https://www.llama2.ai"
    working = True
    supports_message_history = True
-    default_model = "meta/llama-2-70b-chat"
+    default_model = "meta/llama-3-70b-chat"
    models = [
        "meta/llama-2-7b-chat",
        "meta/llama-2-13b-chat",
        "meta/llama-2-70b-chat",
+        "meta/llama-3-8b-chat",
+        "meta/llama-3-70b-chat",
    ]
    model_aliases = {
+        "meta-llama/Meta-Llama-3-8b": "meta/llama-3-8b-chat",
+        "meta-llama/Meta-Llama-3-70b": "meta/llama-3-70b-chat",
        "meta-llama/Llama-2-7b-chat-hf": "meta/llama-2-7b-chat",
        "meta-llama/Llama-2-13b-chat-hf": "meta/llama-2-13b-chat",
        "meta-llama/Llama-2-70b-chat-hf": "meta/llama-2-70b-chat",

File: g4f/Provider/__init__.py

@@ -40,7 +40,7 @@ from .HuggingChat import HuggingChat
from .HuggingFace import HuggingFace
from .Koala import Koala
from .Liaobots import Liaobots
-from .Llama2 import Llama2
+from .Llama import Llama
from .Local import Local
from .PerplexityLabs import PerplexityLabs
from .Pi import Pi
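Note that the rename is breaking for any downstream code doing `from g4f.Provider import Llama2`. If backward compatibility were desired, a one-line alias in this `__init__.py` could preserve the old name; this shim is hypothetical and not part of the commit:

```python
# Hypothetical compatibility shim (NOT in this commit): keeps the old
# import path working after the Llama2 -> Llama rename.
from .Llama import Llama

Llama2 = Llama
```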

File: index.html (g4f GUI model selector)

@@ -220,6 +220,7 @@
    <option value="gpt-4">gpt-4</option>
    <option value="gpt-3.5-turbo">gpt-3.5-turbo</option>
    <option value="llama2-70b">llama2-70b</option>
+    <option value="llama3-70b">llama3-70b</option>
    <option value="gemini-pro">gemini-pro</option>
    <option value="">----</option>
</select>
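The option's `value` attribute is the string the GUI sends to the backend, so it must match a key registered in `ModelUtils` (see the g4f/models.py hunk below). A quick consistency check, assuming the registry dict is named `convert` as in `g4f.models`:

```python
from g4f.models import ModelUtils

# Every dropdown value should resolve to a registered Model.
for key in ("llama2-70b", "llama3-70b", "gemini-pro"):
    assert key in ModelUtils.convert, f"GUI option {key!r} has no backend model"
```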

File: chat.v1.js (g4f GUI)

@@ -926,7 +926,7 @@ colorThemes.forEach((themeOption) => {
function count_tokens(model, text) {
    if (model) {
        if (window.llamaTokenizer)
-            if (model.startsWith("llama2") || model.startsWith("codellama")) {
+            if (model.startsWith("llama") || model.startsWith("codellama")) {
                return llamaTokenizer.encode(text).length;
            }
        if (window.mistralTokenizer)
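Widening the prefix from "llama2" to "llama" lets the same tokenizer cover both llama2-* and llama3-* GUI keys, with codellama-* caught by the second test. The same dispatch, sketched in Python with illustrative tokenizer names:

```python
def pick_tokenizer(model: str) -> str:
    # "llama" matches both llama2-* and llama3-* model keys.
    if model.startswith(("llama", "codellama")):
        return "llama-tokenizer"
    return "default-tokenizer"

assert pick_tokenizer("llama3-70b") == "llama-tokenizer"
assert pick_tokenizer("llama2-7b") == "llama-tokenizer"
assert pick_tokenizer("gpt-4") == "default-tokenizer"
```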

File: g4f/models.py

@@ -16,7 +16,7 @@ from .Provider import (
    GigaChat,
    Liaobots,
    FreeGpt,
-    Llama2,
+    Llama,
    Vercel,
    Gemini,
    Koala,
@@ -117,19 +117,31 @@ gigachat_pro = Model(
llama2_7b = Model(
    name = "meta-llama/Llama-2-7b-chat-hf",
    base_provider = 'meta',
-    best_provider = RetryProvider([Llama2, DeepInfra])
+    best_provider = RetryProvider([Llama, DeepInfra])
)

llama2_13b = Model(
    name = "meta-llama/Llama-2-13b-chat-hf",
    base_provider = 'meta',
-    best_provider = RetryProvider([Llama2, DeepInfra])
+    best_provider = RetryProvider([Llama, DeepInfra])
)

llama2_70b = Model(
    name = "meta-llama/Llama-2-70b-chat-hf",
    base_provider = "meta",
-    best_provider = RetryProvider([Llama2, DeepInfra, HuggingChat])
+    best_provider = RetryProvider([Llama, DeepInfra, HuggingChat])
)

+llama3_8b = Model(
+    name = "meta-llama/Meta-Llama-3-8b",
+    base_provider = "meta",
+    best_provider = RetryProvider([Llama])
+)
+
+llama3_70b = Model(
+    name = "meta-llama/Meta-Llama-3-70b",
+    base_provider = "meta",
+    best_provider = RetryProvider([Llama, HuggingChat])
+)
+
codellama_34b_instruct = Model(
@@ -306,6 +318,8 @@ class ModelUtils:
        'llama2-7b' : llama2_7b,
        'llama2-13b': llama2_13b,
        'llama2-70b': llama2_70b,
+        'llama3-8b' : llama3_8b,
+        'llama3-70b': llama3_70b,
        'codellama-34b-instruct': codellama_34b_instruct,
        'codellama-70b-instruct': codellama_70b_instruct,
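`RetryProvider` walks its provider list until one succeeds, so `llama3-70b` will try `Llama` first and fall back to `HuggingChat`. End to end, the new GUI keys resolve like this (standard g4f call; the output depends on whichever provider answers):

```python
import g4f

# 'llama3-8b' is looked up in ModelUtils, which routes it to RetryProvider([Llama]).
response = g4f.ChatCompletion.create(
    model="llama3-8b",
    messages=[{"role": "user", "content": "Hello, Llama 3!"}],
)
print(response)
```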