Add AsyncAuthedProvider in Copilot

Add original url to downloaded image
Support ssl argument in StreamSession
Report Provider and Errors in RetryProvider
Support ssl argument in OpenaiTemplate
Remove model duplication in OpenaiChat
Disable ChatGpt provider and remove it from models.py
Update slim requirements
Support provider names as model name in Image generation
Add model qwen-2.5-1m-demo to models.py
hlohaus 2025-01-28 20:33:50 +01:00
parent aef3d8dc66
commit 9524c3f327
20 changed files with 169 additions and 128 deletions

View file

@@ -76,7 +76,7 @@ def init_session(user_agent):
class ChatGpt(AbstractProvider, ProviderModelMixin):
label = "ChatGpt"
url = "https://chatgpt.com"
working = True
working = False
supports_message_history = True
supports_system_message = True
supports_stream = True

View file

@@ -92,7 +92,6 @@ class Copilot(AbstractProvider, ProviderModelMixin):
cls._access_token, cls._cookies = asyncio.run(get_access_token_and_cookies(cls.url, proxy))
else:
raise h
yield Parameters(**{"api_key": cls._access_token, "cookies": cls._cookies if isinstance(cls._cookies, dict) else {c.name: c.value for c in cls._cookies}})
websocket_url = f"{websocket_url}&accessToken={quote(cls._access_token)}"
headers = {"authorization": f"Bearer {cls._access_token}"}

View file

@@ -1,8 +1,16 @@
from __future__ import annotations
from ..Copilot import Copilot
import os
from typing import AsyncIterator
class CopilotAccount(Copilot):
from ..base_provider import AsyncAuthedProvider
from ..Copilot import Copilot, readHAR, has_nodriver, get_access_token_and_cookies
from ...providers.response import AuthResult, RequestLogin
from ...typing import AsyncResult, Messages
from ...errors import NoValidHarFileError
from ... import debug
class CopilotAccount(AsyncAuthedProvider, Copilot):
needs_auth = True
use_nodriver = True
parent = "Copilot"
@@ -13,3 +21,37 @@ class CopilotAccount(Copilot):
model_aliases = {
"dall-e-3": default_model
}
@classmethod
async def on_auth_async(cls, proxy: str = None, **kwargs) -> AsyncIterator:
if cls._access_token is None:
try:
cls._access_token, cls._cookies = readHAR(cls.url)
except NoValidHarFileError as h:
debug.log(f"Copilot: {h}")
if has_nodriver:
login_url = os.environ.get("G4F_LOGIN_URL")
if login_url:
yield RequestLogin(cls.label, login_url)
cls._access_token, cls._cookies = await get_access_token_and_cookies(cls.url, proxy)
else:
raise h
yield AuthResult(
api_key=cls._access_token,
cookies=cls._cookies,
)
@classmethod
async def create_authed(
cls,
model: str,
messages: Messages,
auth_result: AuthResult,
**kwargs
) -> AsyncResult:
Copilot._access_token = getattr(auth_result, "api_key")
Copilot._cookies = getattr(auth_result, "cookies")
Copilot.needs_auth = cls.needs_auth
for chunk in Copilot.create_completion(model, messages, **kwargs):
yield chunk
auth_result.cookies = Copilot._cookies if isinstance(Copilot._cookies, dict) else {c.name: c.value for c in Copilot._cookies}
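The new flow splits login from generation: on_auth_async yields a RequestLogin prompt and finally an AuthResult, which create_authed then consumes. A minimal sketch of driving the two steps by hand; the model name and the manual calls are assumptions for illustration, since AsyncAuthedProvider normally orchestrates this itself:

    import asyncio
    from g4f.Provider import CopilotAccount
    from g4f.providers.response import AuthResult, RequestLogin

    async def main():
        auth = None
        # Step 1: authenticate (HAR file first, nodriver browser login as fallback)
        async for chunk in CopilotAccount.on_auth_async():
            if isinstance(chunk, RequestLogin):
                print(chunk)  # prompts the user to open the login URL
            elif isinstance(chunk, AuthResult):
                auth = chunk
        # Step 2: generate with the captured token and cookies
        async for chunk in CopilotAccount.create_authed(
            "Copilot", [{"role": "user", "content": "Hello"}], auth_result=auth
        ):
            print(chunk, end="")

    asyncio.run(main())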

View file

@@ -5,8 +5,3 @@ from .OpenaiChat import OpenaiChat
class OpenaiAccount(OpenaiChat):
needs_auth = True
parent = "OpenaiChat"
default_model = "gpt-4o"
default_vision_model = default_model
default_image_model = OpenaiChat.default_image_model
image_models = [default_model, default_image_model, "gpt-4"]
fallback_models = [*OpenaiChat.fallback_models, default_image_model]

View file

@@ -98,8 +98,9 @@ class OpenaiChat(AsyncAuthedProvider, ProviderModelMixin):
default_model = "auto"
default_image_model = "dall-e-3"
image_models = [default_image_model]
fallback_models = [default_model, "gpt-4", "gpt-4o", "gpt-4o-mini", "gpt-4o-canmore", "o1", "o1-preview", "o1-mini"] + image_models
vision_models = fallback_models
text_models = [default_model, "gpt-4", "gpt-4o", "gpt-4o-mini", "gpt-4o-canmore", "o1", "o1-preview", "o1-mini"]
vision_models = text_models
models = text_models + image_models
synthesize_content_type = "audio/mpeg"
_api_key: str = None
@@ -120,33 +121,6 @@ class OpenaiChat(AsyncAuthedProvider, ProviderModelMixin):
turnstile_token=RequestConfig.turnstile_token
)
@classmethod
def get_models(cls, proxy: str = None, timeout: int = 180) -> List[str]:
if not cls.models:
# try:
# headers = {
# **(cls.get_default_headers() if cls._headers is None else cls._headers),
# "accept": "application/json",
# }
# with Session(
# proxy=proxy,
# impersonate="chrome",
# timeout=timeout,
# headers=headers
# ) as session:
# response = session.get(
# f"{cls.url}/backend-anon/models"
# if cls._api_key is None else
# f"{cls.url}/backend-api/models"
# )
# raise_for_status(response)
# data = response.json()
# cls.models = [model.get("slug") for model in data.get("models")]
# except Exception as e:
# debug.log(f"OpenaiChat: Failed to get models: {type(e).__name__}: {e}")
cls.models = cls.fallback_models
return cls.models
@classmethod
async def upload_images(
cls,
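With the remote fetch commented out, get_models() short-circuits on the statically populated models list and never performs a request. A quick sanity check, assuming the class attributes shown above:

    from g4f.Provider import OpenaiChat

    models = OpenaiChat.get_models()  # no network call anymore
    assert models == OpenaiChat.text_models + OpenaiChat.image_models
    assert "dall-e-3" in models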

View file

@@ -20,6 +20,7 @@ class OpenaiTemplate(AsyncGeneratorProvider, ProviderModelMixin, RaiseErrorMixin
default_model = ""
fallback_models = []
sort_models = True
ssl = None
@classmethod
def get_models(cls, api_key: str = None, api_base: str = None) -> list[str]:
@@ -30,7 +31,7 @@ class OpenaiTemplate(AsyncGeneratorProvider, ProviderModelMixin, RaiseErrorMixin
api_base = cls.api_base
if api_key is not None:
headers["authorization"] = f"Bearer {api_key}"
response = requests.get(f"{api_base}/models", headers=headers)
response = requests.get(f"{api_base}/models", headers=headers, verify=cls.ssl)
raise_for_status(response)
data = response.json()
data = data.get("data") if isinstance(data, dict) else data
@@ -79,12 +80,12 @@ class OpenaiTemplate(AsyncGeneratorProvider, ProviderModelMixin, RaiseErrorMixin
api_base = cls.api_base
# Proxy for image generation feature
if model in cls.image_models:
if model and model in cls.image_models:
data = {
"prompt": messages[-1]["content"] if prompt is None else prompt,
"model": model,
}
async with session.post(f"{api_base.rstrip('/')}/images/generations", json=data) as response:
async with session.post(f"{api_base.rstrip('/')}/images/generations", json=data, ssl=cls.ssl) as response:
data = await response.json()
cls.raise_error(data)
await raise_for_status(response)
@@ -119,7 +120,7 @@ class OpenaiTemplate(AsyncGeneratorProvider, ProviderModelMixin, RaiseErrorMixin
)
if api_endpoint is None:
api_endpoint = f"{api_base.rstrip('/')}/chat/completions"
async with session.post(api_endpoint, json=data) as response:
async with session.post(api_endpoint, json=data, ssl=cls.ssl) as response:
content_type = response.headers.get("content-type", "text/event-stream" if stream else "application/json")
if content_type.startswith("application/json"):
data = await response.json()
@@ -180,7 +181,7 @@ class OpenaiTemplate(AsyncGeneratorProvider, ProviderModelMixin, RaiseErrorMixin
"Content-Type": "application/json",
**(
{"Authorization": f"Bearer {api_key}"}
if api_key is not None else {}
if api_key else {}
),
**({} if headers is None else headers)
}
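The new ssl class attribute reaches every request in this template: verify=cls.ssl on the synchronous model listing and ssl=cls.ssl on the async generation calls. A hedged sketch of a subclass targeting a self-hosted endpoint with a self-signed certificate; the URL is a placeholder and the import path is assumed:

    from g4f.Provider.template import OpenaiTemplate  # import path assumed

    class LocalOpenai(OpenaiTemplate):
        label = "Local OpenAI-compatible server"
        url = "https://localhost:8080"          # placeholder
        api_base = "https://localhost:8080/v1"  # placeholder
        working = True
        ssl = False  # disable certificate verification for the self-signed cert

    print(LocalOpenai.get_models(api_key="sk-placeholder"))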

View file

@@ -70,6 +70,8 @@ def iter_response(
continue
elif isinstance(chunk, SynthesizeData) or not chunk:
continue
elif isinstance(chunk, Exception):
continue
chunk = str(chunk)
content += chunk
@@ -149,6 +151,8 @@ async def async_iter_response(
continue
elif isinstance(chunk, SynthesizeData) or not chunk:
continue
elif isinstance(chunk, Exception):
continue
chunk = str(chunk)
content += chunk

View file

@@ -1,14 +1,15 @@
from __future__ import annotations
from ..models import ModelUtils
from ..Provider import ProviderUtils
class ImageModels():
def __init__(self, client):
self.client = client
self.models = ModelUtils.convert
def get(self, name, default=None):
model = self.models.get(name)
if model and model.best_provider:
return model.best_provider
if name in ModelUtils.convert:
return ModelUtils.convert[name].best_provider
if name in ProviderUtils.convert:
return ProviderUtils.convert[name]
return default
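ImageModels.get() now falls through from model names to provider names, which is what makes "Support provider names as model name in Image generation" work in the client. Sketch; the provider choice is illustrative:

    from g4f.client import Client

    client = Client()
    # "PollinationsAI" is a provider, not a model; the new lookup resolves it anyway.
    response = client.images.generate(model="PollinationsAI", prompt="a watercolor fox")
    print(response.data[0].url)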

View file

@@ -722,12 +722,13 @@ async function add_message_chunk(message, message_id, provider, scroll, finish_m
`;
} else if (message.type == "message") {
console.error(message.message)
await api("log", {...message, provider: provider_storage[message_id]});
} else if (message.type == "error") {
content_map.update_timeouts.forEach((timeoutId)=>clearTimeout(timeoutId));
content_map.update_timeouts = [];
error_storage[message_id] = message.error
console.error(message.error);
content_map.inner.innerHTML += markdown_render(`**An error occurred:** ${message.error}`);
error_storage[message_id] = message.message
console.error(message.message);
content_map.inner.innerHTML += markdown_render(`**An error occurred:** ${message.message}`);
let p = document.createElement("p");
p.innerText = message.error;
log_storage.appendChild(p);
@@ -865,19 +866,23 @@ const ask_gpt = async (message_id, message_index = -1, regenerate = false, provi
}
if (message_storage[message_id]) {
const message_provider = message_id in provider_storage ? provider_storage[message_id] : null;
let usage;
let usage = {};
if (usage_storage[message_id]) {
usage = usage_storage[message_id];
delete usage_storage[message_id];
}
// Calculate usage if we have no usage result yet
if (document.getElementById("track_usage").checked && !usage && window.GPTTokenizer_cl100k_base) {
usage = {
model: message_provider?.model,
provider: message_provider?.name,
...usage
}
// Calculate usage if we don't have it yet
if (document.getElementById("track_usage").checked && !usage.prompt_tokens && window.GPTTokenizer_cl100k_base) {
const prompt_token_model = model?.startsWith("gpt-3") ? "gpt-3.5-turbo" : "gpt-4"
const prompt_tokens = GPTTokenizer_cl100k_base?.encodeChat(messages, prompt_token_model).length;
const completion_tokens = count_tokens(message_provider?.model, message_storage[message_id]);
usage = {
model: message_provider?.model,
provider: message_provider?.name,
...usage,
prompt_tokens: prompt_tokens,
completion_tokens: completion_tokens,
total_tokens: prompt_tokens + completion_tokens
@@ -1748,9 +1753,10 @@ function update_message(content_map, message_id, content = null, scroll = true)
content = content.substring(0, lastIndex) + '<span class="cursor"></span>' + lastElement;
}
}
content_map.inner.innerHTML = content;
if (error_storage[message_id]) {
content_map.inner.innerHTML += markdown_render(`**An error occurred:** ${error_storage[message_id]}`);
content_map.inner.innerHTML = content + markdown_render(`**An error occurred:** ${error_storage[message_id]}`);
} else {
content_map.inner.innerHTML = content;
}
content_map.count.innerText = count_words_and_tokens(message_storage[message_id], provider_storage[message_id]?.model);
highlight(content_map.inner);

View file

@@ -12,9 +12,9 @@ from ...image import ImagePreview, ImageResponse, copy_images, ensure_images_dir
from ...tools.run_tools import iter_run_tools
from ...Provider import ProviderUtils, __providers__
from ...providers.base_provider import ProviderModelMixin
from ...providers.retry_provider import IterListProvider
from ...providers.retry_provider import BaseRetryProvider
from ...providers.response import BaseConversation, JsonConversation, FinishReason, Usage, Reasoning
from ...providers.response import SynthesizeData, TitleGeneration, RequestLogin, Parameters
from ...providers.response import SynthesizeData, TitleGeneration, RequestLogin, Parameters, ProviderInfo
from ... import version, models
from ... import ChatCompletion, get_model_and_provider
from ... import debug
@@ -154,41 +154,32 @@ class Api:
)
except Exception as e:
logger.exception(e)
yield self._format_json('error', get_error_message(e))
yield self._format_json('error', type(e).__name__, message=get_error_message(e))
return
params = {
**(provider_handler.get_parameters(as_json=True) if hasattr(provider_handler, "get_parameters") else {}),
"model": model,
"messages": kwargs.get("messages"),
}
if isinstance(kwargs.get("conversation"), JsonConversation):
params["conversation"] = kwargs.get("conversation").get_dict()
else:
params["conversation_id"] = conversation_id
if kwargs.get("api_key") is not None:
params["api_key"] = kwargs["api_key"]
yield self._format_json("parameters", params)
first = True
if not isinstance(provider_handler, BaseRetryProvider):
yield self.handle_provider(provider_handler, model)
if hasattr(provider_handler, "get_parameters"):
yield self._format_json("parameters", provider_handler.get_parameters(as_json=True))
try:
result = iter_run_tools(ChatCompletion.create, **{**kwargs, "model": model, "provider": provider_handler})
for chunk in result:
if first:
first = False
yield self.handle_provider(provider_handler, model)
if isinstance(chunk, BaseConversation):
if isinstance(chunk, ProviderInfo):
yield self.handle_provider(chunk, model)
provider = chunk.name
elif isinstance(chunk, BaseConversation):
if provider is not None:
if provider not in conversations:
conversations[provider] = {}
conversations[provider][conversation_id] = chunk
if isinstance(chunk, JsonConversation):
yield self._format_json("conversation", {
provider.__name__ if isinstance(provider, type) else provider: chunk.get_dict()
provider: chunk.get_dict()
})
else:
yield self._format_json("conversation_id", conversation_id)
elif isinstance(chunk, Exception):
logger.exception(chunk)
yield self._format_json("message", get_error_message(chunk))
yield self._format_json('message', get_error_message(chunk), error=type(chunk).__name__)
elif isinstance(chunk, ImagePreview):
yield self._format_json("preview", chunk.to_string())
elif isinstance(chunk, ImageResponse):
@@ -219,9 +210,11 @@ class Api:
debug.logs = []
except Exception as e:
logger.exception(e)
yield self._format_json('error', get_error_message(e))
if first:
yield self.handle_provider(provider_handler, model)
if debug.logs:
for log in debug.logs:
yield self._format_json("log", str(log))
debug.logs = []
yield self._format_json('error', type(e).__name__, message=get_error_message(e))
def _format_json(self, response_type: str, content = None, **kwargs):
if content is not None:
@@ -235,11 +228,11 @@
}
def handle_provider(self, provider_handler, model):
if isinstance(provider_handler, IterListProvider) and provider_handler.last_provider is not None:
if isinstance(provider_handler, BaseRetryProvider) and provider_handler.last_provider is not None:
provider_handler = provider_handler.last_provider
if not model and hasattr(provider_handler, "last_model") and provider_handler.last_model is not None:
model = provider_handler.last_model
return self._format_json("provider", {**provider_handler.get_dict(), "model": model})
if model:
return self._format_json("provider", {**provider_handler.get_dict(), "model": model})
return self._format_json("provider", provider_handler.get_dict())
def get_error_message(exception: Exception) -> str:
return f"{type(exception).__name__}: {exception}"
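For reference, the payload shapes this rewrite emits, assuming _format_json() merges the extra kwargs into the object as defined above:

    # Fatal error:   {"type": "error", "error": "ValueError", "message": "ValueError: ..."}
    # Skipped error: {"type": "message", "message": "TimeoutError: ...", "error": "TimeoutError"}
    # Provider info: {"type": "provider", "provider": {**provider.get_dict(), "model": "gpt-4o"}}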

View file

@@ -277,7 +277,8 @@ class Backend_Api(Api):
return Response(filter_markdown(response, do_filter_markdown), mimetype='text/plain')
def cast_str():
for chunk in response:
yield str(chunk)
if not isinstance(chunk, Exception):
yield str(chunk)
return Response(cast_str(), mimetype='text/plain')
except Exception as e:
logger.exception(e)

View file

@@ -242,36 +242,48 @@ def ensure_images_dir():
async def copy_images(
images: list[str],
cookies: Optional[Cookies] = None,
proxy: Optional[str] = None
proxy: Optional[str] = None,
add_url: bool = True,
target: str = None,
ssl: bool = None
) -> list[str]:
if add_url:
add_url = not cookies
ensure_images_dir()
async with ClientSession(
connector=get_connector(proxy=proxy),
cookies=cookies
) as session:
async def copy_image(image: str) -> str:
target = os.path.join(images_dir, f"{int(time.time())}_{str(uuid.uuid4())}")
if image.startswith("data:"):
with open(target, "wb") as f:
f.write(extract_data_uri(image))
else:
try:
async with session.get(image) as response:
response.raise_for_status()
with open(target, "wb") as f:
async for chunk in response.content.iter_chunked(4096):
f.write(chunk)
except ClientError as e:
debug.log(f"copy_images failed: {e.__class__.__name__}: {e}")
return image
with open(target, "rb") as f:
extension = is_accepted_format(f.read(12)).split("/")[-1]
extension = "jpg" if extension == "jpeg" else extension
new_target = f"{target}.{extension}"
os.rename(target, new_target)
return f"/images/{os.path.basename(new_target)}"
async def copy_image(image: str, target: str = None) -> str:
if target is None or len(images) > 1:
target = os.path.join(images_dir, f"{int(time.time())}_{str(uuid.uuid4())}")
try:
if image.startswith("data:"):
with open(target, "wb") as f:
f.write(extract_data_uri(image))
else:
try:
async with session.get(image, ssl=ssl) as response:
response.raise_for_status()
with open(target, "wb") as f:
async for chunk in response.content.iter_chunked(4096):
f.write(chunk)
except ClientError as e:
debug.log(f"copy_images failed: {e.__class__.__name__}: {e}")
return image
if "." not in target:
with open(target, "rb") as f:
extension = is_accepted_format(f.read(12)).split("/")[-1]
extension = "jpg" if extension == "jpeg" else extension
new_target = f"{target}.{extension}"
os.rename(target, new_target)
target = new_target
finally:
if "." not in target and os.path.exists(target):
os.unlink(target)
return f"/images/{os.path.basename(target)}{'?url=' + image if add_url and not image.startswith('data:') else ''}"
return await asyncio.gather(*[copy_image(image) for image in images])
return await asyncio.gather(*[copy_image(image, target) for image in images])
class ImageDataResponse():
def __init__(
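Usage sketch for the extended signature (the source URL is a placeholder): with no cookies passed, add_url stays on and the original URL survives as a query parameter of the local path, while ssl is forwarded to the aiohttp request.

    import asyncio
    from g4f.image import copy_images

    paths = asyncio.run(copy_images(
        ["https://example.com/a.png"],  # placeholder source
        ssl=False,                      # forwarded to session.get(image, ssl=ssl)
    ))
    print(paths)  # e.g. ['/images/1738093630_<uuid>.png?url=https://example.com/a.png']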

View file

@@ -11,7 +11,6 @@ from .Provider import (
Blackbox,
CablyAI,
ChatGLM,
ChatGpt,
ChatGptEs,
ChatGptt,
Cloudflare,
@@ -120,7 +119,7 @@ default_vision = Model(
gpt_35_turbo = Model(
name = 'gpt-3.5-turbo',
base_provider = 'OpenAI',
best_provider = IterListProvider([DarkAI, ChatGpt])
best_provider = IterListProvider([DarkAI])
)
# gpt-4
@@ -134,13 +133,13 @@ gpt_4 = Model(
gpt_4o = VisionModel(
name = 'gpt-4o',
base_provider = 'OpenAI',
best_provider = IterListProvider([Blackbox, ChatGptt, Jmuz, ChatGptEs, PollinationsAI, DarkAI, Copilot, ChatGpt, Liaobots, OpenaiChat])
best_provider = IterListProvider([Blackbox, ChatGptt, Jmuz, ChatGptEs, PollinationsAI, DarkAI, Copilot, Liaobots, OpenaiChat])
)
gpt_4o_mini = Model(
name = 'gpt-4o-mini',
base_provider = 'OpenAI',
best_provider = IterListProvider([DDG, ChatGptEs, ChatGptt, Jmuz, PollinationsAI, OIVSCode, ChatGpt, Liaobots, OpenaiChat])
best_provider = IterListProvider([DDG, ChatGptEs, ChatGptt, Jmuz, PollinationsAI, OIVSCode, Liaobots, OpenaiChat])
)
# o1
@@ -440,14 +439,18 @@ qwen_2_5_coder_32b = Model(
base_provider = 'Qwen',
best_provider = IterListProvider([DeepInfraChat, PollinationsAI, AutonomousAI, Jmuz, HuggingChat])
)
qwen_2_5_1m = Model(
name = 'qwen-2.5-1m-demo',
base_provider = 'Qwen',
best_provider = HuggingSpace
)
# qwq/qvq
### qwq/qvq ###
qwq_32b = Model(
name = 'qwq-32b',
base_provider = 'Qwen',
best_provider = IterListProvider([Blackbox, DeepInfraChat, Jmuz, HuggingChat])
)
qvq_72b = VisionModel(
name = 'qvq-72b',
base_provider = 'Qwen',
@@ -467,7 +470,6 @@ deepseek_chat = Model(
base_provider = 'DeepSeek',
best_provider = IterListProvider([Blackbox, DeepInfraChat, Jmuz, PollinationsAI])
)
deepseek_r1 = Model(
name = 'deepseek-r1',
base_provider = 'DeepSeek',
@@ -721,6 +723,7 @@ class ModelUtils:
qwen_2_vl_7b.name: qwen_2_vl_7b,
qwen_2_5_72b.name: qwen_2_5_72b,
qwen_2_5_coder_32b.name: qwen_2_5_coder_32b,
qwen_2_5_1m.name: qwen_2_5_1m,
# qwq/qvq
qwq_32b.name: qwq_32b,
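The new entry is addressable by name through ModelUtils; a minimal sketch with the synchronous API:

    import g4f

    response = g4f.ChatCompletion.create(
        model="qwen-2.5-1m-demo",  # resolves to best_provider = HuggingSpace
        messages=[{"role": "user", "content": "Give me a one-line summary of attention."}],
    )
    print(response)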

View file

@@ -379,7 +379,7 @@ class RaiseErrorMixin():
raise ResponseError(data["error"]["message"])
else:
raise ResponseError(data["error"])
elif "choices" not in data or not data["choices"]:
elif ("choices" not in data or not data["choices"]) and "data" not in data:
raise ResponseError(f"Invalid response: {json.dumps(data)}")
class AsyncAuthedProvider(AsyncGeneratorProvider):
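The relaxed check matters because image generation endpoints answer with a top-level "data" list and no "choices", which the old condition rejected as invalid. Illustrative call, assuming raise_error can be invoked as a plain static check and that this import path is correct:

    from g4f.providers.base_provider import RaiseErrorMixin  # import path assumed

    RaiseErrorMixin.raise_error({"data": [{"url": "https://example.com/img.png"}]})  # passes now
    # RaiseErrorMixin.raise_error({"unexpected": True})  # would still raise ResponseError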

View file

@@ -200,3 +200,7 @@ class ImagePreview(ImageResponse):
class Parameters(ResponseType, JsonMixin):
def __str__(self):
return ""
class ProviderInfo(ResponseType, JsonMixin):
def __str__(self):
return ""

View file

@@ -4,7 +4,7 @@ import random
from ..typing import Type, List, CreateResult, Messages, AsyncResult
from .types import BaseProvider, BaseRetryProvider, ProviderType
from .response import ImageResponse
from .response import ImageResponse, ProviderInfo
from .. import debug
from ..errors import RetryProviderError, RetryNoProviderError
@@ -53,6 +53,7 @@ class IterListProvider(BaseRetryProvider):
for provider in self.get_providers(stream and not ignore_stream, ignored):
self.last_provider = provider
debug.log(f"Using {provider.__name__} provider")
yield ProviderInfo(**provider.get_dict(), model=model if model else getattr(provider, "default_model"))
try:
response = provider.get_create_function()(model, messages, stream=stream, **kwargs)
for chunk in response:
@@ -67,6 +68,7 @@
debug.log(f"{provider.__name__}: {e.__class__.__name__}: {e}")
if started:
raise e
yield e
raise_exceptions(exceptions)
@@ -85,6 +87,7 @@
for provider in self.get_providers(stream and not ignore_stream, ignored):
self.last_provider = provider
debug.log(f"Using {provider.__name__} provider")
yield ProviderInfo(**provider.get_dict(), model=model if model else getattr(provider, "default_model"))
try:
response = provider.get_async_create_function()(model, messages, stream=stream, **kwargs)
if hasattr(response, "__aiter__"):
@@ -105,6 +108,7 @@
debug.log(f"{provider.__name__}: {e.__class__.__name__}: {e}")
if started:
raise e
yield e
raise_exceptions(exceptions)
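Callers of the retry chain can now observe which provider each attempt uses and see failed attempts as yielded exceptions instead of silence until raise_exceptions(). A hedged consumption sketch; ProviderA and ProviderB are stand-in provider classes:

    from g4f.providers.retry_provider import IterListProvider
    from g4f.providers.response import ProviderInfo

    provider = IterListProvider([ProviderA, ProviderB])  # stand-ins
    for chunk in provider.create_completion("gpt-4o", [{"role": "user", "content": "Hi"}]):
        if isinstance(chunk, ProviderInfo):
            print("trying:", chunk.name)
        elif isinstance(chunk, Exception):
            print("attempt failed:", chunk)  # new: errors are reported, not swallowed
        else:
            print(chunk, end="")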

View file

@@ -71,12 +71,12 @@ class StreamSession(AsyncSession):
"""
def request(
self, method: str, url: str, **kwargs
self, method: str, url: str, ssl = None, **kwargs
) -> StreamResponse:
if isinstance(kwargs.get("data"), CurlMime):
kwargs["multipart"] = kwargs.pop("data")
"""Create and return a StreamResponse object for the given HTTP request."""
return StreamResponse(super().request(method, url, stream=True, **kwargs))
return StreamResponse(super().request(method, url, stream=True, verify=ssl, **kwargs))
def ws_connect(self, url, *args, **kwargs):
return WebSocket(self, url, **kwargs)
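The new per-request argument maps onto curl_cffi's verify flag. Sketch; the import path and URL are assumptions:

    import asyncio
    from g4f.requests import StreamSession  # import path assumed

    async def fetch(url: str) -> str:
        async with StreamSession() as session:
            async with session.get(url, ssl=False) as response:  # skip cert verification
                return await response.text()

    print(asyncio.run(fetch("https://self-signed.example.com")))  # placeholder URL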

View file

@@ -25,7 +25,8 @@ async def raise_for_status_async(response: Union[StreamResponse, ClientResponse]
return
text = await response.text()
if message is None:
message = "HTML content" if response.headers.get("content-type", "").startswith("text/html") else text
is_html = response.headers.get("content-type", "").startswith("text/html") or text.startswith("<!DOCTYPE")
message = "HTML content" if is_html else text
if message == "HTML content":
if response.status == 520:
message = "Unknown error (Cloudflare)"
@@ -46,7 +47,8 @@ def raise_for_status(response: Union[Response, StreamResponse, ClientResponse, R
if response.ok:
return
if message is None:
message = "HTML content" if response.headers.get("content-type", "").startswith("text/html") else response.text
is_html = response.headers.get("content-type", "").startswith("text/html") or response.text.startswith("<!DOCTYPE")
message = "HTML content" if is_html else response.text
if message == "HTML content":
if response.status_code == 520:
message = "Unknown error (Cloudflare)"

View file

@@ -15,3 +15,4 @@ beautifulsoup4
aiohttp_socks
cryptography
python-multipart
pypdf2

View file

@@ -58,7 +58,6 @@ EXTRA_REQUIRE = {
"uvicorn", # api
"python-multipart",
"pypdf2", # files
"docx",
],
"image": [
"pillow",