UrloMythus
2026-02-19 20:15:03 +01:00
parent 7785e8c604
commit cfc6bbabc9
181 changed files with 32141 additions and 4629 deletions


@@ -1,25 +1,33 @@
 import asyncio
 import logging
 import typing
 from dataclasses import dataclass
 from functools import partial
 from urllib import parse
-from urllib.parse import urlencode, urlparse
+from urllib.parse import urlencode
 
+import aiohttp
+from aiohttp import ClientSession, ClientTimeout, ClientResponse
 import anyio
-import h11
-import httpx
 import tenacity
 from fastapi import Response
 from starlette.background import BackgroundTask
 from starlette.concurrency import iterate_in_threadpool
 from starlette.requests import Request
 from starlette.types import Receive, Send, Scope
-from tenacity import retry, stop_after_attempt, wait_exponential, retry_if_exception_type
+from tenacity import retry, stop_after_attempt, wait_exponential
 from tqdm.asyncio import tqdm as tqdm_asyncio
 
 from mediaflow_proxy.configs import settings
 from mediaflow_proxy.const import SUPPORTED_REQUEST_HEADERS
 from mediaflow_proxy.utils.crypto_utils import EncryptionHandler
+from mediaflow_proxy.utils.stream_transformers import StreamTransformer
+from mediaflow_proxy.utils.http_client import (
+    create_aiohttp_session,
+    get_routing_config,
+    _ensure_routing_initialized,
+    _create_connector,
+)
logger = logging.getLogger(__name__)
@@ -31,218 +39,279 @@ class DownloadError(Exception):
         super().__init__(message)
 
-def create_httpx_client(follow_redirects: bool = True, **kwargs) -> httpx.AsyncClient:
-    """Creates an HTTPX client with configured proxy routing"""
-    mounts = settings.transport_config.get_mounts()
-    kwargs.setdefault("timeout", settings.transport_config.timeout)
-    client = httpx.AsyncClient(mounts=mounts, follow_redirects=follow_redirects, **kwargs)
-    return client
+def retry_if_download_error_not_404(retry_state):
+    """Retry on DownloadError except for 404 errors."""
+    if retry_state.outcome.failed:
+        exception = retry_state.outcome.exception()
+        if isinstance(exception, DownloadError):
+            return exception.status_code != 404
+    return False
 
 @retry(
     stop=stop_after_attempt(3),
     wait=wait_exponential(multiplier=1, min=4, max=10),
-    retry=retry_if_exception_type(DownloadError),
+    retry=retry_if_download_error_not_404,
 )
-async def fetch_with_retry(client, method, url, headers, follow_redirects=True, **kwargs):
+async def fetch_with_retry(
+    session: ClientSession,
+    method: str,
+    url: str,
+    headers: dict,
+    proxy: typing.Optional[str] = None,
+    **kwargs,
+) -> ClientResponse:
     """
-    Fetches a URL with retry logic.
+    Fetches a URL with retry logic using native aiohttp.
 
     Args:
-        client (httpx.AsyncClient): The HTTP client to use for the request.
-        method (str): The HTTP method to use (e.g., GET, POST).
-        url (str): The URL to fetch.
-        headers (dict): The headers to include in the request.
-        follow_redirects (bool, optional): Whether to follow redirects. Defaults to True.
+        session: The aiohttp ClientSession to use for the request.
+        method: The HTTP method to use (e.g., GET, POST).
+        url: The URL to fetch.
+        headers: The headers to include in the request.
+        proxy: Optional proxy URL for HTTP proxies.
        **kwargs: Additional arguments to pass to the request.
 
     Returns:
-        httpx.Response: The HTTP response.
+        ClientResponse: The HTTP response.
 
     Raises:
        DownloadError: If the request fails after retries.
    """
    try:
-        response = await client.request(method, url, headers=headers, follow_redirects=follow_redirects, **kwargs)
+        response = await session.request(method, url, headers=headers, proxy=proxy, **kwargs)
         response.raise_for_status()
         return response
-    except httpx.TimeoutException:
+    except asyncio.TimeoutError:
         logger.warning(f"Timeout while downloading {url}")
         raise DownloadError(409, f"Timeout while downloading {url}")
-    except httpx.HTTPStatusError as e:
-        logger.error(f"HTTP error {e.response.status_code} while downloading {url}")
-        if e.response.status_code == 404:
-            logger.error(f"Segment Resource not found: {url}")
-            raise e
-        raise DownloadError(e.response.status_code, f"HTTP error {e.response.status_code} while downloading {url}")
+    except aiohttp.ClientResponseError as e:
+        if e.status == 404:
+            logger.debug(f"Segment not found (404): {url}")
+            raise DownloadError(404, f"Not found (404): {url}")
+        logger.error(f"HTTP error {e.status} while downloading {url}")
+        raise DownloadError(e.status, f"HTTP error {e.status} while downloading {url}")
+    except aiohttp.ClientError as e:
+        logger.error(f"Client error downloading {url}: {e}")
+        raise DownloadError(502, f"Client error downloading {url}: {e}")
     except Exception as e:
         logger.error(f"Error downloading {url}: {e}")
         raise
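
# Illustrative sketch (hypothetical helper, not part of this diff): a minimal
# caller for the new fetch_with_retry(). It assumes create_aiohttp_session()
# yields (session, proxy_url), exactly as download_file_with_retry() uses it
# below; tenacity re-invokes the call on DownloadError (except 404) with
# exponential backoff.
async def example_fetch(url: str) -> bytes:
    async with create_aiohttp_session(url) as (session, proxy_url):
        response = await fetch_with_retry(session, "GET", url, {"user-agent": settings.user_agent}, proxy=proxy_url)
        return await response.read()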
 class Streamer:
-    # PNG signature and IEND marker for fake PNG header detection (StreamWish/FileMoon)
-    _PNG_SIGNATURE = b"\x89PNG\r\n\x1a\n"
-    _PNG_IEND_MARKER = b"\x49\x45\x4E\x44\xAE\x42\x60\x82"
+    """Handles streaming HTTP responses using aiohttp."""
 
-    def __init__(self, client):
+    def __init__(self, session: ClientSession, proxy_url: typing.Optional[str] = None):
         """
-        Initializes the Streamer with an HTTP client.
+        Initializes the Streamer with an aiohttp session.
 
         Args:
-            client (httpx.AsyncClient): The HTTP client to use for streaming.
+            session: The aiohttp ClientSession to use for streaming.
+            proxy_url: Optional proxy URL for HTTP proxies.
         """
-        self.client = client
-        self.response = None
+        self.session = session
+        self.proxy_url = proxy_url
+        self.response: typing.Optional[ClientResponse] = None
         self.progress_bar = None
         self.bytes_transferred = 0
         self.start_byte = 0
         self.end_byte = 0
         self.total_size = 0
+        # Store request details for a potential retry during streaming
+        self._current_url: typing.Optional[str] = None
+        self._current_headers: typing.Optional[dict] = None
 
     @retry(
         stop=stop_after_attempt(3),
         wait=wait_exponential(multiplier=1, min=4, max=10),
-        retry=retry_if_exception_type(DownloadError),
+        retry=retry_if_download_error_not_404,
     )
-    async def create_streaming_response(self, url: str, headers: dict):
+    async def create_streaming_response(self, url: str, headers: dict, method: str = "GET"):
         """
         Creates and sends a streaming request.
 
         Args:
-            url (str): The URL to stream from.
-            headers (dict): The headers to include in the request.
+            url: The URL to stream from.
+            headers: The headers to include in the request.
+            method: HTTP method to use (GET or HEAD). Defaults to GET.
+                HEAD requests fall back to GET if the server doesn't support HEAD.
         """
+        # Store request details for a potential retry during streaming
+        self._current_url = url
+        self._current_headers = headers.copy()
         try:
-            request = self.client.build_request("GET", url, headers=headers)
-            self.response = await self.client.send(request, stream=True, follow_redirects=True)
-            self.response.raise_for_status()
-        except httpx.TimeoutException:
+            if method.upper() == "HEAD":
+                # Try HEAD first, fall back to GET if the server doesn't support it
+                try:
+                    self.response = await self.session.head(url, headers=headers, proxy=self.proxy_url)
+                    self.response.raise_for_status()
+                except (aiohttp.ClientResponseError, aiohttp.ClientError) as head_error:
+                    # HEAD failed, fall back to GET (some servers don't support HEAD)
+                    logger.debug(f"HEAD request failed ({head_error}), falling back to GET")
+                    self.response = await self.session.get(url, headers=headers, proxy=self.proxy_url)
+                    self.response.raise_for_status()
+            else:
+                self.response = await self.session.get(url, headers=headers, proxy=self.proxy_url)
+                self.response.raise_for_status()
+        except asyncio.TimeoutError:
             logger.warning("Timeout while creating streaming response")
             raise DownloadError(409, "Timeout while creating streaming response")
-        except httpx.HTTPStatusError as e:
-            logger.error(f"HTTP error {e.response.status_code} while creating streaming response")
-            if e.response.status_code == 404:
-                logger.error(f"Segment Resource not found: {url}")
-                raise e
-            raise DownloadError(
-                e.response.status_code, f"HTTP error {e.response.status_code} while creating streaming response"
-            )
-        except httpx.RequestError as e:
+        except aiohttp.ClientResponseError as e:
+            if e.status == 404:
+                logger.debug(f"Segment not found (404): {url}")
+                raise DownloadError(404, f"Not found (404): {url}")
+            # Don't retry rate-limit errors (429, 509) - retrying while other connections
+            # are still active just wastes time. Let the player handle its own retry logic.
+            if e.status in (429, 509):
+                logger.warning(f"Rate limited ({e.status}) by upstream: {url}")
+                raise aiohttp.ClientResponseError(e.request_info, e.history, status=e.status, message=e.message)
+            logger.error(f"HTTP error {e.status} while creating streaming response")
+            raise DownloadError(e.status, f"HTTP error {e.status} while creating streaming response")
+        except aiohttp.ClientError as e:
             logger.error(f"Error creating streaming response: {e}")
             raise DownloadError(502, f"Error creating streaming response: {e}")
         except Exception as e:
             logger.error(f"Error creating streaming response: {e}")
             raise RuntimeError(f"Error creating streaming response: {e}")
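
# Illustrative sketch (hypothetical helper): probing upstream headers with the
# new method="HEAD" path, which falls back to GET automatically. Assumes a
# Streamer built via create_streamer(), defined further down in this file.
async def example_probe(url: str) -> typing.Optional[int]:
    streamer = await create_streamer(url)
    try:
        await streamer.create_streaming_response(url, {"user-agent": settings.user_agent}, method="HEAD")
        length = streamer.response.headers.get("Content-Length")
        return int(length) if length else None
    finally:
        await streamer.close()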
-    @staticmethod
-    def _strip_fake_png_wrapper(chunk: bytes) -> bytes:
-        """
-        Strip fake PNG wrapper from chunk data.
-
-        Some streaming services (StreamWish, FileMoon) prepend a fake PNG image
-        to video data to evade detection. This method detects and removes it.
-
-        Args:
-            chunk: The raw chunk data that may contain a fake PNG header.
-
-        Returns:
-            The chunk with fake PNG wrapper removed, or original chunk if not present.
-        """
-        if not chunk.startswith(Streamer._PNG_SIGNATURE):
-            return chunk
-
-        # Find the IEND marker that signals end of PNG data
-        iend_pos = chunk.find(Streamer._PNG_IEND_MARKER)
-        if iend_pos == -1:
-            # IEND not found in this chunk - return as-is to avoid data corruption
-            logger.debug("PNG signature detected but IEND marker not found in chunk")
-            return chunk
-
-        # Calculate position after IEND marker
-        content_start = iend_pos + len(Streamer._PNG_IEND_MARKER)
-
-        # Skip any padding bytes (null or 0xFF) between PNG and actual content
-        while content_start < len(chunk) and chunk[content_start] in (0x00, 0xFF):
-            content_start += 1
-
-        stripped_bytes = content_start
-        logger.debug(f"Stripped {stripped_bytes} bytes of fake PNG wrapper from stream")
-        return chunk[content_start:]
-
-    async def stream_content(self) -> typing.AsyncGenerator[bytes, None]:
+    async def _retry_connection(self, from_byte: int) -> bool:
+        """
+        Attempt to reconnect to the upstream using a Range header.
+
+        Args:
+            from_byte: The byte position to resume from.
+
+        Returns:
+            bool: True if reconnection was successful, False otherwise.
+        """
+        if not self._current_url or not self._current_headers:
+            return False
+
+        # Close the existing response, if any
+        if self.response:
+            self.response.close()
+            self.response = None
+
+        # Create new headers with a Range
+        retry_headers = self._current_headers.copy()
+        if self.total_size > 0:
+            retry_headers["Range"] = f"bytes={from_byte}-{self.total_size - 1}"
+        else:
+            retry_headers["Range"] = f"bytes={from_byte}-"
+
+        try:
+            self.response = await self.session.get(self._current_url, headers=retry_headers, proxy=self.proxy_url)
+            # Accept both 200 and 206 (Partial Content) as valid responses
+            if self.response.status in (200, 206):
+                logger.info(f"Successfully reconnected at byte {from_byte}")
+                return True
+            else:
+                logger.warning(f"Retry connection returned unexpected status: {self.response.status}")
+                return False
+        except Exception as e:
+            logger.warning(f"Failed to reconnect: {e}")
+            return False
+
+    async def stream_content(
+        self, transformer: typing.Optional[StreamTransformer] = None
+    ) -> typing.AsyncGenerator[bytes, None]:
+        """
+        Stream content from the response, optionally applying a transformer.
+
+        Includes automatic retry logic for when the upstream disconnects
+        mid-stream, using Range headers to resume from the last successful byte.
+
+        Args:
+            transformer: Optional StreamTransformer to apply host-specific
+                content manipulation (e.g., PNG stripping, TS detection).
+                If None, content is streamed directly without modification.
+
+        Yields:
+            Byte chunks from the upstream response.
+        """
         if not self.response:
             raise RuntimeError("No response available for streaming")
 
-        is_first_chunk = True
+        retry_count = 0
+        max_retries = settings.upstream_retry_attempts if settings.upstream_retry_on_disconnect else 0
         try:
-            self.parse_content_range()
-            if settings.enable_streaming_progress:
-                with tqdm_asyncio(
-                    total=self.total_size,
-                    initial=self.start_byte,
-                    unit="B",
-                    unit_scale=True,
-                    unit_divisor=1024,
-                    desc="Streaming",
-                    ncols=100,
-                    mininterval=1,
-                ) as self.progress_bar:
-                    async for chunk in self.response.aiter_bytes():
-                        if is_first_chunk:
-                            is_first_chunk = False
-                            chunk = self._strip_fake_png_wrapper(chunk)
-                        yield chunk
-                        self.bytes_transferred += len(chunk)
-                        self.progress_bar.update(len(chunk))
-            else:
-                async for chunk in self.response.aiter_bytes():
-                    if is_first_chunk:
-                        is_first_chunk = False
-                        chunk = self._strip_fake_png_wrapper(chunk)
-                    yield chunk
-                    self.bytes_transferred += len(chunk)
+            while True:
+                try:
+                    self.parse_content_range()
+
+                    # Create an async generator from the response content
+                    async def raw_chunks():
+                        async for chunk in self.response.content.iter_any():
+                            yield chunk
+
+                    # Choose the chunk source based on whether we have a transformer
+                    # Note: Transformer state may not survive reconnection properly for all transformers
+                    if transformer and retry_count == 0:
+                        chunk_source = transformer.transform(raw_chunks())
+                    else:
+                        chunk_source = raw_chunks()
+
+                    if settings.enable_streaming_progress:
+                        with tqdm_asyncio(
+                            total=self.total_size,
+                            initial=self.start_byte,
+                            unit="B",
+                            unit_scale=True,
+                            unit_divisor=1024,
+                            desc="Streaming",
+                            ncols=100,
+                            mininterval=1,
+                        ) as self.progress_bar:
+                            async for chunk in chunk_source:
+                                yield chunk
+                                self.bytes_transferred += len(chunk)
+                                self.progress_bar.update(len(chunk))
+                    else:
+                        async for chunk in chunk_source:
+                            yield chunk
+                            self.bytes_transferred += len(chunk)
+
+                    # Successfully completed streaming
+                    return
-        except httpx.TimeoutException:
-            logger.warning("Timeout while streaming")
-            raise DownloadError(409, "Timeout while streaming")
-        except httpx.RemoteProtocolError as e:
-            # Special handling for connection closed errors
-            if "peer closed connection without sending complete message body" in str(e):
-                logger.warning(f"Remote server closed connection prematurely: {e}")
-                # If we've received some data, just log the warning and return normally
-                if self.bytes_transferred > 0:
-                    logger.info(
-                        f"Partial content received ({self.bytes_transferred} bytes). Continuing with available data."
-                    )
-                    return
-                else:
-                    # If we haven't received any data, raise an error
-                    raise DownloadError(502, f"Remote server closed connection without sending any data: {e}")
-            else:
-                logger.error(f"Protocol error while streaming: {e}")
-                raise DownloadError(502, f"Protocol error while streaming: {e}")
-        except GeneratorExit:
-            logger.info("Streaming session stopped by the user")
-        except httpx.ReadError as e:
-            # Handle network read errors gracefully - these occur when upstream connection drops
-            logger.warning(f"ReadError while streaming: {e}")
-            if self.bytes_transferred > 0:
-                logger.info(f"Partial content received ({self.bytes_transferred} bytes) before ReadError. Graceful termination.")
-            else:
-                raise DownloadError(502, f"ReadError while streaming: {e}")
+                except asyncio.TimeoutError:
+                    logger.warning("Timeout while streaming")
+                    raise DownloadError(409, "Timeout while streaming")
+                except (aiohttp.ServerDisconnectedError, aiohttp.ClientPayloadError, aiohttp.ClientError) as e:
+                    # Handle connection errors with a potential retry
+                    error_type = type(e).__name__
+                    logger.warning(f"{error_type} while streaming after {self.bytes_transferred} bytes: {e}")
+
+                    # Check if we should retry
+                    if retry_count < max_retries and self.bytes_transferred > 0:
+                        retry_count += 1
+                        resume_from = self.start_byte + self.bytes_transferred
+                        logger.info(f"Attempting reconnection (retry {retry_count}/{max_retries}) from byte {resume_from}")
+                        # Wait before retrying
+                        await asyncio.sleep(settings.upstream_retry_delay)
+                        if await self._retry_connection(resume_from):
+                            # Successfully reconnected, continue the loop to resume streaming
+                            continue
+                        else:
+                            logger.warning(f"Reconnection failed on retry {retry_count}")
+
+                    # No more retries, or reconnection failed
+                    if self.bytes_transferred > 0:
+                        logger.info(
+                            f"Partial content received ({self.bytes_transferred} bytes). "
+                            f"Graceful termination after {retry_count} retry attempts."
+                        )
+                        return
+                    else:
+                        raise DownloadError(502, f"{error_type} while streaming: {e}")
+        except GeneratorExit:
+            logger.info("Streaming session stopped by the user")
+            return
         except Exception as e:
             logger.error(f"Error streaming content: {e}")
             raise
 
     @staticmethod
     def format_bytes(size) -> str:
         power = 2**10
@@ -263,41 +332,41 @@ class Streamer:
         self.total_size = int(self.response.headers.get("Content-Length", 0))
         self.end_byte = self.total_size - 1 if self.total_size > 0 else 0
 
-    async def get_text(self, url: str, headers: dict):
+    async def get_text(self, url: str, headers: dict) -> str:
         """
         Sends a GET request to a URL and returns the response text.
 
         Args:
-            url (str): The URL to send the GET request to.
-            headers (dict): The headers to include in the request.
+            url: The URL to send the GET request to.
+            headers: The headers to include in the request.
 
         Returns:
             str: The response text.
         """
         try:
-            self.response = await fetch_with_retry(self.client, "GET", url, headers)
+            self.response = await fetch_with_retry(self.session, "GET", url, headers, proxy=self.proxy_url)
+            return await self.response.text()
         except tenacity.RetryError as e:
             raise e.last_attempt.result()
-        return self.response.text
 
     async def close(self):
         """
-        Closes the HTTP client and response.
+        Closes the HTTP response and session.
         """
         if self.response:
-            await self.response.aclose()
+            self.response.close()
         if self.progress_bar:
             self.progress_bar.close()
-        await self.client.aclose()
+        await self.session.close()
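
# Illustrative end-to-end sketch of the new Streamer lifecycle (hypothetical
# helper; relies on create_streamer(), defined further down in this file):
async def example_stream(url: str) -> int:
    streamer = await create_streamer(url)
    total = 0
    try:
        await streamer.create_streaming_response(url, {"user-agent": settings.user_agent})
        async for chunk in streamer.stream_content():
            total += len(chunk)  # a real caller would forward each chunk to the client
    finally:
        await streamer.close()
    return total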
-async def download_file_with_retry(url: str, headers: dict):
+async def download_file_with_retry(url: str, headers: dict) -> bytes:
     """
     Downloads a file with retry logic.
 
     Args:
-        url (str): The URL of the file to download.
-        headers (dict): The headers to include in the request.
+        url: The URL of the file to download.
+        headers: The headers to include in the request.
 
     Returns:
         bytes: The downloaded file content.
@@ -305,10 +374,10 @@ async def download_file_with_retry(url: str, headers: dict):
     Raises:
         DownloadError: If the download fails after retries.
     """
-    async with create_httpx_client() as client:
+    async with create_aiohttp_session(url) as (session, proxy_url):
         try:
-            response = await fetch_with_retry(client, "GET", url, headers)
-            return response.content
+            response = await fetch_with_retry(session, "GET", url, headers, proxy=proxy_url)
+            return await response.read()
         except DownloadError as e:
             logger.error(f"Failed to download file: {e}")
             raise e
@@ -316,31 +385,85 @@ async def download_file_with_retry(url: str, headers: dict):
         raise DownloadError(502, f"Failed to download file: {e.last_attempt.result()}")
 
-async def request_with_retry(method: str, url: str, headers: dict, **kwargs) -> httpx.Response:
+async def request_with_retry(method: str, url: str, headers: dict, **kwargs) -> ClientResponse:
     """
     Sends an HTTP request with retry logic.
 
     Args:
-        method (str): The HTTP method to use (e.g., GET, POST).
-        url (str): The URL to send the request to.
-        headers (dict): The headers to include in the request.
+        method: The HTTP method to use (e.g., GET, POST).
+        url: The URL to send the request to.
+        headers: The headers to include in the request.
         **kwargs: Additional arguments to pass to the request.
 
     Returns:
-        httpx.Response: The HTTP response.
+        ClientResponse: The HTTP response.
 
     Raises:
         DownloadError: If the request fails after retries.
     """
-    async with create_httpx_client() as client:
+    async with create_aiohttp_session(url) as (session, proxy_url):
         try:
-            response = await fetch_with_retry(client, method, url, headers, **kwargs)
+            response = await fetch_with_retry(session, method, url, headers, proxy=proxy_url, **kwargs)
+            # Read the content so it's available after the session closes
+            await response.read()
             return response
         except DownloadError as e:
-            logger.error(f"Failed to download file: {e}")
+            logger.error(f"Failed to make request: {e}")
             raise
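
# Illustrative sketch (hypothetical helper): request_with_retry() pre-reads the
# body above, so the returned ClientResponse stays usable after its session has
# closed - aiohttp caches the body after the first read().
async def example_request(url: str) -> int:
    response = await request_with_retry("GET", url, {"user-agent": settings.user_agent})
    body = await response.read()  # served from the cached body, no live connection needed
    return len(body)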
+async def create_streamer(url: str = None) -> Streamer:
+    """
+    Create a Streamer configured for the given URL.
+
+    The Streamer manages its own session lifecycle. Call streamer.close()
+    when done to release resources.
+
+    Args:
+        url: Optional URL for routing configuration (SSL/proxy settings).
+
+    Returns:
+        Streamer: A configured Streamer instance.
+    """
+    _ensure_routing_initialized()
+    routing_config = get_routing_config()
+    route_match = routing_config.match_url(url)
+
+    # Use a sock_read timeout: no total timeout, but time out if no data is
+    # received for sock_read seconds. This correctly handles:
+    # - Live streams (indefinite duration)
+    # - Large file downloads (total time depends on file size)
+    # - Seek operations (the upstream may take time to seek)
+    # - Dead connection detection (times out if no data flows)
+    timeout_config = ClientTimeout(
+        total=None,
+        sock_read=settings.transport_config.timeout,
+    )
+    connector, proxy_url = _create_connector(route_match.proxy_url, route_match.verify_ssl)
+    session = ClientSession(connector=connector, timeout=timeout_config)
+    return Streamer(session, proxy_url)
+
+# Keep setup_streamer as an alias for backward compatibility during the transition
+async def setup_streamer(url: str = None) -> typing.Tuple[ClientSession, str, Streamer]:
+    """
+    Set up an aiohttp session and streamer.
+
+    DEPRECATED: Use create_streamer() instead, which returns only the Streamer.
+
+    Args:
+        url: Optional URL for routing configuration.
+
+    Returns:
+        Tuple of (session, proxy_url, streamer).
+    """
+    streamer = await create_streamer(url)
+    return streamer.session, streamer.proxy_url, streamer
 
 def encode_mediaflow_proxy_url(
     mediaflow_proxy_url: str,
     endpoint: typing.Optional[str] = None,
@@ -348,25 +471,31 @@ def encode_mediaflow_proxy_url(
     query_params: typing.Optional[dict] = None,
     request_headers: typing.Optional[dict] = None,
     response_headers: typing.Optional[dict] = None,
+    propagate_response_headers: typing.Optional[dict] = None,
+    remove_response_headers: typing.Optional[list[str]] = None,
     encryption_handler: EncryptionHandler = None,
     expiration: int = None,
     ip: str = None,
     filename: typing.Optional[str] = None,
+    stream_transformer: typing.Optional[str] = None,
 ) -> str:
     """
     Encodes and optionally encrypts a MediaFlow proxy URL with query parameters and headers.
 
     Args:
-        mediaflow_proxy_url (str): The base MediaFlow proxy URL.
-        endpoint (str, optional): The endpoint to append to the base URL. Defaults to None.
-        destination_url (str, optional): The destination URL to include in the query parameters. Defaults to None.
-        query_params (dict, optional): Additional query parameters to include. Defaults to None.
-        request_headers (dict, optional): Headers to include as query parameters. Defaults to None.
-        response_headers (dict, optional): Headers to include as query parameters. Defaults to None.
-        encryption_handler (EncryptionHandler, optional): The encryption handler to use. Defaults to None.
-        expiration (int, optional): The expiration time for the encrypted token. Defaults to None.
-        ip (str, optional): The public IP address to include in the query parameters. Defaults to None.
-        filename (str, optional): Filename to be preserved for media players like Infuse. Defaults to None.
+        mediaflow_proxy_url: The base MediaFlow proxy URL.
+        endpoint: The endpoint to append to the base URL. Defaults to None.
+        destination_url: The destination URL to include in the query parameters. Defaults to None.
+        query_params: Additional query parameters to include. Defaults to None.
+        request_headers: Headers to include as query parameters. Defaults to None.
+        response_headers: Headers to include as query parameters (r_ prefix). Defaults to None.
+        propagate_response_headers: Response headers that propagate to segments (rp_ prefix). Defaults to None.
+        remove_response_headers: List of response header names to remove. Defaults to None.
+        encryption_handler: The encryption handler to use. Defaults to None.
+        expiration: The expiration time for the encrypted token. Defaults to None.
+        ip: The public IP address to include in the query parameters. Defaults to None.
+        filename: Filename to be preserved for media players like Infuse. Defaults to None.
+        stream_transformer: ID of the stream transformer to apply. Defaults to None.
 
     Returns:
         str: The encoded MediaFlow proxy URL.
@@ -376,15 +505,45 @@ def encode_mediaflow_proxy_url(
     if destination_url is not None:
         query_params["d"] = destination_url
 
-    # Add headers if provided
+    # Add headers if provided (always use a lowercase prefix for consistency).
+    # Filter out empty values to avoid URLs like &h_if-range=&h_referer=...
+    # Also exclude dynamic per-request headers (range, if-range) that are already handled
+    # via SUPPORTED_REQUEST_HEADERS from the player's actual request. Encoding them as h_
+    # query params would bake in stale values that override the player's real headers on
+    # subsequent requests (e.g., when seeking to a different position).
     if request_headers:
         query_params.update(
-            {key if key.startswith("h_") else f"h_{key}": value for key, value in request_headers.items()}
+            {
+                key if key.lower().startswith("h_") else f"h_{key}": value
+                for key, value in request_headers.items()
+                if value and (key.lower().removeprefix("h_") not in SUPPORTED_REQUEST_HEADERS)
+            }
         )
 
     if response_headers:
         query_params.update(
-            {key if key.startswith("r_") else f"r_{key}": value for key, value in response_headers.items()}
+            {
+                key if key.lower().startswith("r_") else f"r_{key}": value
+                for key, value in response_headers.items()
+                if value  # Skip empty/None values
+            }
         )
 
+    # Add propagate response headers (rp_ prefix - these propagate to segments)
+    if propagate_response_headers:
+        query_params.update(
+            {
+                key if key.lower().startswith("rp_") else f"rp_{key}": value
+                for key, value in propagate_response_headers.items()
+                if value  # Skip empty/None values
+            }
+        )
+
+    # Add headers to remove, if provided (x_ prefix for "exclude")
+    if remove_response_headers:
+        query_params["x_headers"] = ",".join(remove_response_headers)
+
+    # Add the stream transformer if provided
+    if stream_transformer:
+        query_params["transformer"] = stream_transformer
+
     # Construct the base URL
     if endpoint is None:
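
# Illustrative call (hypothetical endpoint and values) showing how the
# h_/r_/rp_/x_headers prefixes above end up in the generated URL:
def example_encoded_url() -> str:
    return encode_mediaflow_proxy_url(
        "http://127.0.0.1:8888",
        endpoint="proxy/stream",
        destination_url="https://example.com/video.m3u8",
        request_headers={"referer": "https://example.com"},  # encoded as h_referer=...
        response_headers={"cache-control": "no-cache"},  # encoded as r_cache-control=...
        propagate_response_headers={"access-control-allow-origin": "*"},  # encoded as rp_...
        remove_response_headers=["x-upstream-secret"],  # encoded as x_headers=x-upstream-secret
    )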
@@ -441,10 +600,10 @@ def encode_stremio_proxy_url(
     Format: http://127.0.0.1:11470/proxy/d=<encoded_origin>&h=<headers>&r=<response_headers>/<path><query>
 
     Args:
-        stremio_proxy_url (str): The base Stremio proxy URL.
-        destination_url (str): The destination URL to proxy.
-        request_headers (dict, optional): Headers to include as query parameters. Defaults to None.
-        response_headers (dict, optional): Response headers to include as query parameters. Defaults to None.
+        stremio_proxy_url: The base Stremio proxy URL.
+        destination_url: The destination URL to proxy.
+        request_headers: Headers to include as query parameters. Defaults to None.
+        response_headers: Response headers to include as query parameters. Defaults to None.
 
     Returns:
         str: The encoded Stremio proxy URL.
@@ -498,7 +657,7 @@ def get_original_scheme(request: Request) -> str:
     Determines the original scheme (http or https) of the request.
 
     Args:
-        request (Request): The incoming HTTP request.
+        request: The incoming HTTP request.
 
     Returns:
         str: The original scheme ('http' or 'https').
@@ -528,6 +687,35 @@ def get_original_scheme(request: Request) -> str:
 class ProxyRequestHeaders:
     request: dict
     response: dict
+    remove: list  # headers to remove from the response
+    propagate: dict  # response headers to propagate to segments (rp_ prefix)
 
+def apply_header_manipulation(
+    base_headers: dict, proxy_headers: ProxyRequestHeaders, include_propagate: bool = True
+) -> dict:
+    """
+    Apply response header additions and removals.
+
+    This function filters out the headers listed in proxy_headers.remove, then
+    merges in proxy_headers.propagate (optionally) and proxy_headers.response.
+
+    Args:
+        base_headers: The base headers to start with.
+        proxy_headers: The proxy headers containing response additions and removals.
+        include_propagate: Whether to include propagate headers (rp_).
+            Set to False for manifests, True for segments. Defaults to True.
+
+    Returns:
+        dict: The manipulated headers.
+    """
+    remove_set = set(h.lower() for h in proxy_headers.remove)
+    result = {k: v for k, v in base_headers.items() if k.lower() not in remove_set}
+    # Apply propagate headers first (for segments), then response headers (response takes precedence)
+    if include_propagate:
+        result.update(proxy_headers.propagate)
+    result.update(proxy_headers.response)
+    return result
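
# Minimal sketch of the precedence above (removal first, then rp_, then r_);
# the example values are hypothetical:
def example_manipulated_headers() -> dict:
    proxy_headers = ProxyRequestHeaders(
        request={},
        response={"cache-control": "no-cache"},
        remove=["x-upstream-secret"],
        propagate={"access-control-allow-origin": "*"},
    )
    base = {"X-Upstream-Secret": "token", "content-type": "video/mp2t"}
    # -> {"content-type": "video/mp2t", "access-control-allow-origin": "*", "cache-control": "no-cache"}
    return apply_header_manipulation(base, proxy_headers, include_propagate=True)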
def get_proxy_headers(request: Request) -> ProxyRequestHeaders:
@@ -535,32 +723,42 @@ def get_proxy_headers(request: Request) -> ProxyRequestHeaders:
     Extracts proxy headers from the request query parameters.
 
     Args:
-        request (Request): The incoming HTTP request.
+        request: The incoming HTTP request.
 
     Returns:
-        ProxyRequest: A named tuple containing the request headers and response headers.
+        ProxyRequestHeaders: The request headers, response headers, headers to
+            remove, and response headers to propagate.
     """
-    request_headers = {k: v for k, v in request.headers.items() if k in SUPPORTED_REQUEST_HEADERS}
-    request_headers.update({k[2:].lower(): v for k, v in request.query_params.items() if k.startswith("h_")})
+    request_headers = {k: v for k, v in request.headers.items() if k in SUPPORTED_REQUEST_HEADERS and v}
+
+    # Extract h_-prefixed headers from the query params, filtering out empty values
+    for k, v in request.query_params.items():
+        if k.lower().startswith("h_") and v:  # Skip empty values
+            request_headers[k[2:].lower()] = v
+
     request_headers.setdefault("user-agent", settings.user_agent)
 
-    dest = request.query_params.get("d", "")
-    host = urlparse(dest).netloc.lower()
-    if "vidoza" in host or "videzz" in host:
-        # Remove ALL empty headers
-        for h in list(request_headers.keys()):
-            v = request_headers[h]
-            if v is None or v.strip() == "":
-                request_headers.pop(h, None)
-    response_headers = {k[2:].lower(): v for k, v in request.query_params.items() if k.startswith("r_")}
-    return ProxyRequestHeaders(request_headers, response_headers)
+    # Handle a common misspelling of referer
+    if "referrer" in request_headers:
+        if "referer" not in request_headers:
+            request_headers["referer"] = request_headers.pop("referrer")
+
+    # r_ prefix: response headers (manifest only, not propagated to segments)
+    # Filter out empty values
+    response_headers = {
+        k[2:].lower(): v
+        for k, v in request.query_params.items()
+        if k.lower().startswith("r_") and not k.lower().startswith("rp_") and v
+    }
+
+    # rp_ prefix: response headers that propagate to segments
+    # Filter out empty values
+    propagate_headers = {k[3:].lower(): v for k, v in request.query_params.items() if k.lower().startswith("rp_") and v}
+
+    # Parse headers to remove from the response (x_headers parameter)
+    x_headers_param = request.query_params.get("x_headers", "")
+    remove_headers = [h.strip().lower() for h in x_headers_param.split(",") if h.strip()] if x_headers_param else []
+
+    return ProxyRequestHeaders(request_headers, response_headers, remove_headers, propagate_headers)
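
# Illustrative mapping (hypothetical request) from query params to the
# resulting ProxyRequestHeaders fields:
#
#   GET /proxy/stream?d=https://example.com/v.mp4
#       &h_referer=https://example.com
#       &r_cache-control=no-cache
#       &rp_access-control-allow-origin=*
#       &x_headers=server,x-powered-by
#
#   request   -> {"referer": "https://example.com", "user-agent": settings.user_agent, ...}
#   response  -> {"cache-control": "no-cache"}
#   propagate -> {"access-control-allow-origin": "*"}
#   remove    -> ["server", "x-powered-by"]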
class EnhancedStreamingResponse(Response):
@@ -632,19 +830,19 @@ class EnhancedStreamingResponse(Response):
             # Successfully streamed all content
             await send({"type": "http.response.body", "body": b"", "more_body": False})
             finalization_sent = True
-        except (httpx.RemoteProtocolError, httpx.ReadError, h11._util.LocalProtocolError) as e:
+        except (aiohttp.ServerDisconnectedError, aiohttp.ClientPayloadError, aiohttp.ClientError) as e:
             # Handle connection closed / read errors gracefully
             if data_sent:
-                # We've sent some data to the client, so try to complete the response
-                logger.warning(f"Upstream connection error after partial streaming: {e}")
-                try:
-                    await send({"type": "http.response.body", "body": b"", "more_body": False})
-                    finalization_sent = True
-                    logger.info(
-                        f"Response finalized after partial content ({self.actual_content_length} bytes transferred)"
-                    )
-                except Exception as close_err:
-                    logger.warning(f"Could not finalize response after upstream error: {close_err}")
+                # We've sent some data to the client. With Content-Length set, we cannot
+                # gracefully finalize a partial response - h11 will raise LocalProtocolError
+                # if we try to send more_body: False without delivering all promised bytes.
+                # The best we can do is log and return silently, letting the client handle
+                # the incomplete response (most players will just stop or retry).
                logger.warning(
                    f"Upstream connection error after partial streaming ({self.actual_content_length} bytes transferred): {e}"
                )
+                # Don't try to finalize - just return and let the connection close naturally
+                return
             else:
                 # No data was sent, re-raise the error
                 logger.error(f"Upstream error before any data was streamed: {e}")
@@ -667,13 +865,16 @@ class EnhancedStreamingResponse(Response):
                 except Exception:
                     # If we can't send an error response, just log it
                     pass
-            elif response_started and not finalization_sent:
-                # Response already started but not finalized - gracefully close the stream
+            elif response_started and not finalization_sent and not data_sent:
+                # Response started but no data sent yet - we can safely finalize.
+                # (If data was sent with a Content-Length, we can't finalize without an h11 error.)
                 try:
                     await send({"type": "http.response.body", "body": b"", "more_body": False})
                     finalization_sent = True
                 except Exception:
                     pass
+            # If data was sent but streaming failed, just return silently.
+            # The client will see an incomplete response, which is unavoidable with Content-Length.
 
     async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:
         async with anyio.create_task_group() as task_group: