mirror of
https://github.com/UrloMythus/UnHided.git
synced 2026-04-11 11:50:51 +00:00
update
This commit is contained in:
# Route registry for mediaflow_proxy: each sub-module exposes one APIRouter.
from .proxy import proxy_router
from .extractor import extractor_router
from .speedtest import speedtest_router
from .playlist_builder import playlist_builder_router
from .xtream import xtream_root_router
from .acestream import acestream_router
from .telegram import telegram_router

# Public API of this package (consumed via `from mediaflow_proxy.routes import *`).
__all__ = [
    "proxy_router",
    "extractor_router",
    "speedtest_router",
    "playlist_builder_router",
    "xtream_root_router",
    "acestream_router",
    "telegram_router",
]
|
||||
BIN
mediaflow_proxy/routes/__pycache__/__init__.cpython-313.pyc
Normal file
BIN
mediaflow_proxy/routes/__pycache__/__init__.cpython-313.pyc
Normal file
Binary file not shown.
BIN
mediaflow_proxy/routes/__pycache__/acestream.cpython-313.pyc
Normal file
BIN
mediaflow_proxy/routes/__pycache__/acestream.cpython-313.pyc
Normal file
Binary file not shown.
BIN
mediaflow_proxy/routes/__pycache__/extractor.cpython-313.pyc
Normal file
BIN
mediaflow_proxy/routes/__pycache__/extractor.cpython-313.pyc
Normal file
Binary file not shown.
Binary file not shown.
BIN
mediaflow_proxy/routes/__pycache__/proxy.cpython-313.pyc
Normal file
BIN
mediaflow_proxy/routes/__pycache__/proxy.cpython-313.pyc
Normal file
Binary file not shown.
BIN
mediaflow_proxy/routes/__pycache__/speedtest.cpython-313.pyc
Normal file
BIN
mediaflow_proxy/routes/__pycache__/speedtest.cpython-313.pyc
Normal file
Binary file not shown.
BIN
mediaflow_proxy/routes/__pycache__/telegram.cpython-313.pyc
Normal file
BIN
mediaflow_proxy/routes/__pycache__/telegram.cpython-313.pyc
Normal file
Binary file not shown.
BIN
mediaflow_proxy/routes/__pycache__/xtream.cpython-313.pyc
Normal file
BIN
mediaflow_proxy/routes/__pycache__/xtream.cpython-313.pyc
Normal file
Binary file not shown.
540
mediaflow_proxy/routes/acestream.py
Normal file
540
mediaflow_proxy/routes/acestream.py
Normal file
@@ -0,0 +1,540 @@
|
||||
"""
|
||||
Acestream proxy routes.
|
||||
|
||||
Provides endpoints for proxying acestream content:
|
||||
- /proxy/acestream/manifest.m3u8 - HLS manifest proxy (primary, leverages existing HLS infrastructure)
|
||||
- /proxy/acestream/stream - MPEG-TS stream proxy with fan-out to multiple clients
|
||||
- /proxy/acestream/segment.ts - Segment proxy for HLS mode
|
||||
"""
|
||||
|
||||
import asyncio
|
||||
import logging
|
||||
from typing import Annotated
|
||||
from urllib.parse import urlencode, urljoin, urlparse
|
||||
|
||||
import aiohttp
|
||||
from fastapi import APIRouter, Query, Request, HTTPException, Response, Depends
|
||||
from starlette.background import BackgroundTask
|
||||
|
||||
from mediaflow_proxy.configs import settings
|
||||
from mediaflow_proxy.remuxer.transcode_pipeline import stream_transcode_universal
|
||||
from mediaflow_proxy.utils.acestream import acestream_manager, AcestreamSession
|
||||
from mediaflow_proxy.utils.http_client import create_aiohttp_session
|
||||
from mediaflow_proxy.utils.http_utils import (
|
||||
get_original_scheme,
|
||||
get_proxy_headers,
|
||||
ProxyRequestHeaders,
|
||||
EnhancedStreamingResponse,
|
||||
apply_header_manipulation,
|
||||
create_streamer,
|
||||
)
|
||||
from mediaflow_proxy.utils.m3u8_processor import M3U8Processor
|
||||
from mediaflow_proxy.utils.hls_prebuffer import hls_prebuffer
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
acestream_router = APIRouter()
|
||||
|
||||
|
||||
class AcestreamM3U8Processor(M3U8Processor):
    """
    M3U8 processor specialized for Acestream.

    Rewrites media-segment URLs so they are served through the acestream
    segment proxy endpoint (``/proxy/acestream/segment.{ext}``) while nested
    playlists keep using the standard playlist proxy. The acestream session
    is carried along so segment requests can be attributed to it.
    """

    def __init__(
        self,
        request: Request,
        session: AcestreamSession,
        key_url: str | None = None,  # fixed implicit-Optional annotation (was `str = None`)
        force_playlist_proxy: bool = True,
        key_only_proxy: bool = False,
        no_proxy: bool = False,
    ):
        """
        Args:
            request: Incoming FastAPI request, used to rebuild proxy URLs.
            session: Active acestream session; its infohash is embedded in
                segment URLs when the request carries no ``id`` parameter.
            key_url: Optional key URL, forwarded to the base processor.
            force_playlist_proxy: Forwarded to the base processor.
            key_only_proxy: Forwarded to the base processor.
            no_proxy: When True, segment URLs are returned unproxied.
        """
        super().__init__(
            request=request,
            key_url=key_url,
            force_playlist_proxy=force_playlist_proxy,
            key_only_proxy=key_only_proxy,
            no_proxy=no_proxy,
        )
        self.session = session

    async def proxy_content_url(self, url: str, base_url: str) -> str:
        """
        Route acestream segments through the acestream segment endpoint.

        This ensures segments use /proxy/acestream/segment.ts instead of
        /proxy/hls/segment.ts. Playlist URLs (.m3u/.m3u8/.m3u_plus) fall back
        to the standard playlist proxy via the base-class implementation.

        Args:
            url: The (possibly relative) URL found in the manifest.
            base_url: Base URL the manifest was fetched from.

        Returns:
            Absolute URL, either direct, standard-proxied (playlists) or
            rewritten to the acestream segment endpoint (segments).
        """
        full_url = urljoin(base_url, url)

        # If no_proxy is enabled, return the direct URL untouched.
        if self.no_proxy:
            return full_url

        # Playlists keep using the standard playlist proxy.
        parsed = urlparse(full_url)
        if parsed.path.endswith((".m3u", ".m3u8", ".m3u_plus")):
            return await super().proxy_content_url(url, base_url)

        # For segments, route through the acestream segment endpoint.
        query_params = {
            "d": full_url,
        }

        # Preserve the original id/infohash parameter from the request so the
        # segment endpoint can look up (and keep alive) the same session.
        if "id" in self.request.query_params:
            query_params["id"] = self.request.query_params["id"]
        else:
            query_params["infohash"] = self.session.infohash

        # Include api_password and h_* header-forwarding params from the
        # original request.
        for key, value in self.request.query_params.items():
            if key == "api_password" or key.startswith("h_"):
                query_params[key] = value

        # Determine the segment extension; unknown types default to "ts".
        path = parsed.path.lower()
        for candidate in ("ts", "m4s", "mp4"):
            if path.endswith(f".{candidate}"):
                ext = candidate
                break
        else:
            ext = "ts"

        # Build the acestream segment proxy URL, preserving the scheme the
        # client originally used (relevant behind TLS-terminating proxies).
        base_proxy_url = str(
            self.request.url_for("acestream_segment_proxy", ext=ext).replace(scheme=get_original_scheme(self.request))
        )
        return f"{base_proxy_url}?{urlencode(query_params)}"
|
||||
|
||||
|
||||
@acestream_router.head("/acestream/manifest.m3u8")
|
||||
@acestream_router.get("/acestream/manifest.m3u8")
|
||||
async def acestream_hls_manifest(
|
||||
request: Request,
|
||||
proxy_headers: Annotated[ProxyRequestHeaders, Depends(get_proxy_headers)],
|
||||
infohash: str = Query(None, description="Acestream infohash"),
|
||||
id: str = Query(None, description="Acestream content ID (alternative to infohash)"),
|
||||
):
|
||||
"""
|
||||
Proxy Acestream HLS manifest.
|
||||
|
||||
Creates or reuses an acestream session and proxies the HLS manifest,
|
||||
rewriting segment URLs to go through mediaflow.
|
||||
|
||||
Args:
|
||||
request: The incoming HTTP request.
|
||||
proxy_headers: Headers for proxy requests.
|
||||
infohash: The acestream infohash.
|
||||
id: Alternative content ID.
|
||||
|
||||
Returns:
|
||||
Processed HLS manifest with proxied segment URLs.
|
||||
"""
|
||||
if not settings.enable_acestream:
|
||||
raise HTTPException(status_code=503, detail="Acestream support is disabled")
|
||||
|
||||
if not infohash and not id:
|
||||
raise HTTPException(status_code=400, detail="Either 'infohash' or 'id' parameter is required")
|
||||
|
||||
content_id = id
|
||||
if not infohash:
|
||||
infohash = content_id # Use content_id as the key if no infohash
|
||||
|
||||
max_retries = 2
|
||||
last_error = None
|
||||
|
||||
for attempt in range(max_retries):
|
||||
try:
|
||||
# Get or create acestream session (don't increment client count for manifest requests)
|
||||
session = await acestream_manager.get_or_create_session(infohash, content_id, increment_client=False)
|
||||
|
||||
if not session.playback_url:
|
||||
raise HTTPException(status_code=502, detail="Failed to get playback URL from acestream")
|
||||
|
||||
logger.info(f"[acestream_hls_manifest] Using playback URL: {session.playback_url}")
|
||||
|
||||
# Fetch the manifest from acestream with extended timeout for buffering
|
||||
async with create_aiohttp_session(session.playback_url, timeout=120) as (http_session, proxy_url):
|
||||
response = await http_session.get(
|
||||
session.playback_url,
|
||||
headers=proxy_headers.request,
|
||||
proxy=proxy_url,
|
||||
)
|
||||
response.raise_for_status()
|
||||
manifest_content = await response.text()
|
||||
break # Success, exit retry loop
|
||||
|
||||
except asyncio.TimeoutError:
|
||||
last_error = "Timeout fetching manifest"
|
||||
if attempt < max_retries - 1:
|
||||
logger.warning(f"[acestream_hls_manifest] Timeout fetching manifest, retrying: {infohash[:16]}...")
|
||||
await asyncio.sleep(1) # Brief delay before retry
|
||||
continue
|
||||
logger.error(f"[acestream_hls_manifest] Timeout after {max_retries} attempts")
|
||||
raise HTTPException(status_code=504, detail="Timeout fetching manifest from acestream")
|
||||
|
||||
except aiohttp.ClientResponseError as e:
|
||||
last_error = e
|
||||
# If we get 403, the session is stale - invalidate and retry
|
||||
if e.status == 403 and attempt < max_retries - 1:
|
||||
logger.warning(
|
||||
f"[acestream_hls_manifest] Session stale (403), invalidating and retrying: {infohash[:16]}..."
|
||||
)
|
||||
await acestream_manager.invalidate_session(infohash)
|
||||
continue # Retry with fresh session
|
||||
logger.error(f"[acestream_hls_manifest] HTTP error fetching manifest: {e}")
|
||||
raise HTTPException(status_code=e.status, detail=f"Failed to fetch manifest: {e}")
|
||||
|
||||
except aiohttp.ClientError as e:
|
||||
last_error = e
|
||||
logger.error(f"[acestream_hls_manifest] Client error fetching manifest: {e}")
|
||||
raise HTTPException(status_code=502, detail=f"Failed to fetch manifest: {e}")
|
||||
|
||||
else:
|
||||
# Exhausted retries
|
||||
logger.error(f"[acestream_hls_manifest] Failed after {max_retries} attempts: {last_error}")
|
||||
raise HTTPException(status_code=502, detail=f"Failed to fetch manifest after retries: {last_error}")
|
||||
|
||||
try:
|
||||
# Process the manifest to rewrite URLs
|
||||
processor = AcestreamM3U8Processor(
|
||||
request=request,
|
||||
session=session,
|
||||
force_playlist_proxy=True,
|
||||
)
|
||||
|
||||
processed_manifest = await processor.process_m3u8(manifest_content, base_url=session.playback_url)
|
||||
|
||||
# Register with HLS prebuffer for segment caching
|
||||
if settings.enable_hls_prebuffer:
|
||||
segment_urls = processor._extract_segment_urls_from_content(manifest_content, session.playback_url)
|
||||
if segment_urls:
|
||||
await hls_prebuffer.register_playlist(
|
||||
playlist_url=session.playback_url,
|
||||
segment_urls=segment_urls,
|
||||
headers=proxy_headers.request,
|
||||
)
|
||||
|
||||
base_headers = {
|
||||
"content-type": "application/vnd.apple.mpegurl",
|
||||
"cache-control": "no-cache, no-store, must-revalidate",
|
||||
"access-control-allow-origin": "*",
|
||||
}
|
||||
response_headers = apply_header_manipulation(base_headers, proxy_headers, include_propagate=False)
|
||||
|
||||
return Response(
|
||||
content=processed_manifest, media_type="application/vnd.apple.mpegurl", headers=response_headers
|
||||
)
|
||||
|
||||
except HTTPException:
|
||||
raise
|
||||
except Exception as e:
|
||||
logger.exception(f"[acestream_hls_manifest] Error: {e}")
|
||||
raise HTTPException(status_code=500, detail=f"Internal error: {e}")
|
||||
|
||||
|
||||
# Maps a segment file extension to the MIME type used for the response's
# Content-Type header. Unknown extensions fall back to
# "application/octet-stream" at the lookup site.
SEGMENT_MIME_TYPES = dict(
    ts="video/mp2t",
    m4s="video/mp4",
    mp4="video/mp4",
    m4a="audio/mp4",
    aac="audio/aac",
)
|
||||
|
||||
|
||||
@acestream_router.get("/acestream/segment.{ext}")
|
||||
async def acestream_segment_proxy(
|
||||
request: Request,
|
||||
proxy_headers: Annotated[ProxyRequestHeaders, Depends(get_proxy_headers)],
|
||||
ext: str,
|
||||
d: str = Query(..., description="Segment URL"),
|
||||
infohash: str = Query(None, description="Acestream session infohash"),
|
||||
id: str = Query(None, description="Acestream content ID (alternative to infohash)"),
|
||||
):
|
||||
"""
|
||||
Proxy Acestream HLS segments.
|
||||
|
||||
Uses the HLS prebuffer for segment caching if enabled.
|
||||
|
||||
Args:
|
||||
request: The incoming HTTP request.
|
||||
proxy_headers: Headers for proxy requests.
|
||||
ext: Segment file extension.
|
||||
d: The segment URL to proxy.
|
||||
infohash: The acestream session infohash (for tracking).
|
||||
id: Alternative content ID.
|
||||
|
||||
Returns:
|
||||
Proxied segment content.
|
||||
"""
|
||||
if not settings.enable_acestream:
|
||||
raise HTTPException(status_code=503, detail="Acestream support is disabled")
|
||||
|
||||
# Use id or infohash for session lookup
|
||||
session_key = id or infohash
|
||||
if not session_key:
|
||||
raise HTTPException(status_code=400, detail="Either 'infohash' or 'id' parameter is required")
|
||||
|
||||
segment_url = d
|
||||
mime_type = SEGMENT_MIME_TYPES.get(ext.lower(), "application/octet-stream")
|
||||
|
||||
logger.debug(f"[acestream_segment_proxy] Request for: {segment_url}")
|
||||
|
||||
# Touch the session to keep it alive - use touch_segment() to indicate active playback
|
||||
session = acestream_manager.get_session(session_key)
|
||||
if session:
|
||||
session.touch_segment()
|
||||
logger.debug(f"[acestream_segment_proxy] Touched session: {session_key[:16]}...")
|
||||
|
||||
# Use HLS prebuffer if enabled
|
||||
if settings.enable_hls_prebuffer:
|
||||
await hls_prebuffer.request_segment(segment_url)
|
||||
segment_data = await hls_prebuffer.get_or_download(segment_url, proxy_headers.request)
|
||||
|
||||
if segment_data:
|
||||
logger.info(f"[acestream_segment_proxy] Serving from prebuffer ({len(segment_data)} bytes)")
|
||||
base_headers = {
|
||||
"content-type": mime_type,
|
||||
"cache-control": "public, max-age=3600",
|
||||
"access-control-allow-origin": "*",
|
||||
}
|
||||
response_headers = apply_header_manipulation(base_headers, proxy_headers)
|
||||
return Response(content=segment_data, media_type=mime_type, headers=response_headers)
|
||||
|
||||
logger.warning("[acestream_segment_proxy] Prebuffer miss, using direct streaming")
|
||||
|
||||
# Fallback to direct streaming
|
||||
streamer = await create_streamer(segment_url)
|
||||
try:
|
||||
await streamer.create_streaming_response(segment_url, proxy_headers.request)
|
||||
|
||||
base_headers = {
|
||||
"content-type": mime_type,
|
||||
"cache-control": "public, max-age=3600",
|
||||
"access-control-allow-origin": "*",
|
||||
}
|
||||
response_headers = apply_header_manipulation(base_headers, proxy_headers)
|
||||
|
||||
return EnhancedStreamingResponse(
|
||||
streamer.stream_content(),
|
||||
status_code=streamer.response.status if streamer.response else 200,
|
||||
headers=response_headers,
|
||||
background=BackgroundTask(streamer.close),
|
||||
)
|
||||
except Exception as e:
|
||||
await streamer.close()
|
||||
logger.error(f"[acestream_segment_proxy] Error streaming segment: {e}")
|
||||
raise HTTPException(status_code=502, detail=f"Failed to stream segment: {e}")
|
||||
|
||||
|
||||
@acestream_router.head("/acestream/stream")
|
||||
@acestream_router.get("/acestream/stream")
|
||||
async def acestream_ts_stream(
|
||||
request: Request,
|
||||
proxy_headers: Annotated[ProxyRequestHeaders, Depends(get_proxy_headers)],
|
||||
infohash: str = Query(None, description="Acestream infohash"),
|
||||
id: str = Query(None, description="Acestream content ID (alternative to infohash)"),
|
||||
transcode: bool = Query(False, description="Transcode to browser-compatible fMP4"),
|
||||
start: float | None = Query(None, description="Seek start time in seconds (transcode mode)"),
|
||||
):
|
||||
"""
|
||||
Proxy Acestream MPEG-TS stream with fan-out to multiple clients.
|
||||
|
||||
Creates or reuses an acestream session and streams MPEG-TS content.
|
||||
Multiple clients can share the same upstream connection.
|
||||
|
||||
When transcode=true, the MPEG-TS stream is transcoded on-the-fly to
|
||||
browser-compatible fMP4 (H.264 + AAC).
|
||||
|
||||
Args:
|
||||
request: The incoming HTTP request.
|
||||
proxy_headers: Headers for proxy requests.
|
||||
infohash: The acestream infohash.
|
||||
id: Alternative content ID.
|
||||
transcode: Transcode to browser-compatible format.
|
||||
start: Seek start time in seconds (transcode mode).
|
||||
|
||||
Returns:
|
||||
MPEG-TS stream (or fMP4 if transcode=true).
|
||||
"""
|
||||
if not settings.enable_acestream:
|
||||
raise HTTPException(status_code=503, detail="Acestream support is disabled")
|
||||
|
||||
if not infohash and not id:
|
||||
raise HTTPException(status_code=400, detail="Either 'infohash' or 'id' parameter is required")
|
||||
|
||||
content_id = id
|
||||
if not infohash:
|
||||
infohash = content_id
|
||||
|
||||
try:
|
||||
# Get or create acestream session
|
||||
# For MPEG-TS, we need to use getstream endpoint
|
||||
base_url = f"http://{settings.acestream_host}:{settings.acestream_port}"
|
||||
session = await acestream_manager.get_or_create_session(infohash, content_id)
|
||||
|
||||
if not session.playback_url:
|
||||
raise HTTPException(status_code=502, detail="Failed to get playback URL from acestream")
|
||||
|
||||
# For MPEG-TS streaming, we need to convert HLS playback URL to getstream
|
||||
# Acestream uses different parameter names:
|
||||
# - 'id' for content IDs
|
||||
# - 'infohash' for magnet link hashes (40-char hex)
|
||||
if content_id:
|
||||
ts_url = f"{base_url}/ace/getstream?id={content_id}&pid={session.pid}"
|
||||
else:
|
||||
ts_url = f"{base_url}/ace/getstream?infohash={infohash}&pid={session.pid}"
|
||||
|
||||
logger.info(f"[acestream_ts_stream] Streaming from: {ts_url}")
|
||||
|
||||
if transcode:
|
||||
if not settings.enable_transcode:
|
||||
await acestream_manager.release_session(infohash)
|
||||
raise HTTPException(status_code=503, detail="Transcoding support is disabled")
|
||||
# Acestream provides a live MPEG-TS stream that does NOT support
|
||||
# HTTP Range requests and is not seekable. Use an ffmpeg subprocess
|
||||
# to remux video (passthrough) and transcode audio (AC3→AAC) to
|
||||
# fragmented MP4. The subprocess approach isolates native FFmpeg
|
||||
# crashes from the Python server process.
|
||||
|
||||
if request.method == "HEAD":
|
||||
await acestream_manager.release_session(infohash)
|
||||
return Response(
|
||||
status_code=200,
|
||||
headers={
|
||||
"access-control-allow-origin": "*",
|
||||
"cache-control": "no-cache, no-store",
|
||||
"content-type": "video/mp4",
|
||||
"content-disposition": "inline",
|
||||
},
|
||||
)
|
||||
|
||||
async def _acestream_ts_source():
|
||||
"""Single-connection async byte generator for the live TS stream."""
|
||||
try:
|
||||
async with create_aiohttp_session(ts_url) as (session, proxy_url):
|
||||
async with session.get(
|
||||
ts_url,
|
||||
proxy=proxy_url,
|
||||
allow_redirects=True,
|
||||
) as resp:
|
||||
resp.raise_for_status()
|
||||
async for chunk in resp.content.iter_any():
|
||||
yield chunk
|
||||
except asyncio.CancelledError:
|
||||
logger.debug("[acestream_ts_stream] Transcode source cancelled")
|
||||
except GeneratorExit:
|
||||
logger.debug("[acestream_ts_stream] Transcode source closed")
|
||||
|
||||
# Use our custom PyAV pipeline with forced video re-encoding
|
||||
# (live MPEG-TS sources often have corrupt H.264 bitstreams
|
||||
# that browsers reject; re-encoding produces a clean stream).
|
||||
content = stream_transcode_universal(
|
||||
_acestream_ts_source(),
|
||||
force_video_reencode=True,
|
||||
)
|
||||
|
||||
async def release_transcode_session():
|
||||
await acestream_manager.release_session(infohash)
|
||||
|
||||
return EnhancedStreamingResponse(
|
||||
content=content,
|
||||
media_type="video/mp4",
|
||||
headers={
|
||||
"access-control-allow-origin": "*",
|
||||
"cache-control": "no-cache, no-store",
|
||||
"content-disposition": "inline",
|
||||
},
|
||||
background=BackgroundTask(release_transcode_session),
|
||||
)
|
||||
|
||||
streamer = await create_streamer(ts_url)
|
||||
try:
|
||||
await streamer.create_streaming_response(ts_url, proxy_headers.request)
|
||||
|
||||
base_headers = {
|
||||
"content-type": "video/mp2t",
|
||||
"transfer-encoding": "chunked",
|
||||
"cache-control": "no-cache, no-store, must-revalidate",
|
||||
"access-control-allow-origin": "*",
|
||||
}
|
||||
response_headers = apply_header_manipulation(base_headers, proxy_headers)
|
||||
|
||||
async def release_on_complete():
|
||||
"""Release session when streaming completes."""
|
||||
await streamer.close()
|
||||
await acestream_manager.release_session(infohash)
|
||||
|
||||
return EnhancedStreamingResponse(
|
||||
streamer.stream_content(),
|
||||
status_code=streamer.response.status if streamer.response else 200,
|
||||
headers=response_headers,
|
||||
background=BackgroundTask(release_on_complete),
|
||||
)
|
||||
|
||||
except Exception:
|
||||
await streamer.close()
|
||||
await acestream_manager.release_session(infohash)
|
||||
raise
|
||||
|
||||
except HTTPException:
|
||||
raise
|
||||
except Exception as e:
|
||||
logger.exception(f"[acestream_ts_stream] Error: {e}")
|
||||
await acestream_manager.release_session(infohash)
|
||||
raise HTTPException(status_code=500, detail=f"Internal error: {e}")
|
||||
|
||||
|
||||
@acestream_router.get("/acestream/status")
|
||||
async def acestream_status(
|
||||
infohash: str = Query(None, description="Acestream infohash to check"),
|
||||
):
|
||||
"""
|
||||
Get acestream session status.
|
||||
|
||||
Args:
|
||||
infohash: Optional infohash to check specific session.
|
||||
|
||||
Returns:
|
||||
Session status information.
|
||||
"""
|
||||
if not settings.enable_acestream:
|
||||
raise HTTPException(status_code=503, detail="Acestream support is disabled")
|
||||
|
||||
if infohash:
|
||||
session = acestream_manager.get_session(infohash)
|
||||
if session:
|
||||
return {
|
||||
"status": "active",
|
||||
"infohash": session.infohash,
|
||||
"client_count": session.client_count,
|
||||
"is_live": session.is_live,
|
||||
"created_at": session.created_at,
|
||||
"last_access": session.last_access,
|
||||
}
|
||||
else:
|
||||
return {"status": "not_found", "infohash": infohash}
|
||||
|
||||
# Return all active sessions
|
||||
sessions = acestream_manager.get_active_sessions()
|
||||
return {
|
||||
"enabled": settings.enable_acestream,
|
||||
"active_sessions": len(sessions),
|
||||
"sessions": [
|
||||
{
|
||||
"infohash": s.infohash,
|
||||
"client_count": s.client_count,
|
||||
"is_live": s.is_live,
|
||||
}
|
||||
for s in sessions.values()
|
||||
],
|
||||
}
|
||||
@@ -1,3 +1,4 @@
|
||||
import copy
|
||||
import logging
|
||||
from typing import Annotated
|
||||
|
||||
@@ -7,7 +8,10 @@ from fastapi.responses import RedirectResponse
|
||||
from mediaflow_proxy.extractors.base import ExtractorError
|
||||
from mediaflow_proxy.extractors.factory import ExtractorFactory
|
||||
from mediaflow_proxy.schemas import ExtractorURLParams
|
||||
from mediaflow_proxy.utils.cache_utils import get_cached_extractor_result, set_cache_extractor_result
|
||||
from mediaflow_proxy.utils.cache_utils import (
|
||||
get_cached_extractor_result,
|
||||
set_cache_extractor_result,
|
||||
)
|
||||
from mediaflow_proxy.utils.http_utils import (
|
||||
DownloadError,
|
||||
encode_mediaflow_proxy_url,
|
||||
@@ -16,11 +20,28 @@ from mediaflow_proxy.utils.http_utils import (
|
||||
get_proxy_headers,
|
||||
)
|
||||
from mediaflow_proxy.utils.base64_utils import process_potential_base64_url
|
||||
from mediaflow_proxy.utils import redis_utils
|
||||
|
||||
extractor_router = APIRouter()
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
async def refresh_extractor_cache(cache_key: str, extractor_params: ExtractorURLParams, proxy_headers: ProxyRequestHeaders):
|
||||
# Cooldown duration for background refresh (2 minutes)
_REFRESH_COOLDOWN = 120

# Hosts where background refresh should be DISABLED
# These hosts generate unique CDN URLs per extraction - refreshing invalidates existing streams!
# When a new URL is extracted, the old URL becomes invalid and causes 509 errors.
_NO_BACKGROUND_REFRESH_HOSTS = frozenset(
    (
        "Vidoza",
        # Add other hosts here that generate unique per-extraction URLs
    )
)
|
||||
|
||||
|
||||
async def refresh_extractor_cache(
|
||||
cache_key: str, extractor_params: ExtractorURLParams, proxy_headers: ProxyRequestHeaders
|
||||
):
|
||||
"""Asynchronously refreshes the extractor cache in the background."""
|
||||
try:
|
||||
logger.info(f"Background cache refresh started for key: {cache_key}")
|
||||
@@ -32,32 +53,114 @@ async def refresh_extractor_cache(cache_key: str, extractor_params: ExtractorURL
|
||||
logger.error(f"Background cache refresh failed for key {cache_key}: {e}")
|
||||
|
||||
|
||||
@extractor_router.head("/video")
|
||||
@extractor_router.get("/video")
|
||||
async def extract_url(
|
||||
extractor_params: Annotated[ExtractorURLParams, Query()],
|
||||
# Extension to content-type mapping for player compatibility
|
||||
# When a player requests /extractor/video.m3u8, it can detect HLS from the URL
|
||||
# Extension to content-type mapping for player compatibility.
# When a player requests /extractor/video.m3u8, it can detect HLS from the URL.
EXTRACTOR_EXT_CONTENT_TYPES = dict(
    m3u8="application/vnd.apple.mpegurl",
    m3u="application/vnd.apple.mpegurl",
    mp4="video/mp4",
    mkv="video/x-matroska",
    ts="video/mp2t",
    avi="video/x-msvideo",
    webm="video/webm",
)
|
||||
|
||||
|
||||
async def _extract_url_impl(
|
||||
extractor_params: ExtractorURLParams,
|
||||
request: Request,
|
||||
background_tasks: BackgroundTasks,
|
||||
proxy_headers: Annotated[ProxyRequestHeaders, Depends(get_proxy_headers)],
|
||||
proxy_headers: ProxyRequestHeaders,
|
||||
ext: str | None = None,
|
||||
):
|
||||
"""Extract clean links from various video hosting services."""
|
||||
"""
|
||||
Core extraction logic shared by all extractor endpoints.
|
||||
|
||||
Args:
|
||||
extractor_params: Extraction parameters from query string
|
||||
request: FastAPI request object
|
||||
background_tasks: Background task manager
|
||||
proxy_headers: Proxy headers from request
|
||||
ext: Optional file extension hint for player compatibility (e.g., "m3u8", "mp4")
|
||||
"""
|
||||
try:
|
||||
# Process potential base64 encoded destination URL
|
||||
processed_destination = process_potential_base64_url(extractor_params.destination)
|
||||
extractor_params.destination = processed_destination
|
||||
|
||||
|
||||
cache_key = f"{extractor_params.host}_{extractor_params.model_dump_json()}"
|
||||
response = await get_cached_extractor_result(cache_key)
|
||||
|
||||
|
||||
# Extractor results are resolved via the pod's outgoing IP and may not
|
||||
# be valid when served from a different pod. Namespace the cache and
|
||||
# all associated coordination keys so each pod operates on its own
|
||||
# partition of the shared Redis. On single-instance deployments (no
|
||||
# CACHE_NAMESPACE env var) make_instance_key() is a no-op.
|
||||
instance_cache_key = redis_utils.make_instance_key(cache_key)
|
||||
|
||||
response = await get_cached_extractor_result(instance_cache_key)
|
||||
|
||||
if response:
|
||||
logger.info(f"Serving from cache for key: {cache_key}")
|
||||
# Schedule a background task to refresh the cache without blocking the user
|
||||
background_tasks.add_task(refresh_extractor_cache, cache_key, extractor_params, proxy_headers)
|
||||
logger.info(f"Serving from cache for key: {instance_cache_key}")
|
||||
# Schedule a background refresh, but only if:
|
||||
# 1. The host is NOT in the no-refresh list (hosts with unique per-extraction URLs)
|
||||
# 2. The cooldown has elapsed (prevents flooding upstream)
|
||||
#
|
||||
# WARNING: For hosts like Vidoza, background refresh is DANGEROUS!
|
||||
# Each extraction generates a unique CDN URL. Refreshing invalidates the
|
||||
# old URL, causing 509 errors for clients still using it.
|
||||
if extractor_params.host not in _NO_BACKGROUND_REFRESH_HOSTS:
|
||||
cooldown_key = f"extractor_refresh:{instance_cache_key}"
|
||||
if await redis_utils.check_and_set_cooldown(cooldown_key, _REFRESH_COOLDOWN):
|
||||
background_tasks.add_task(
|
||||
refresh_extractor_cache, instance_cache_key, extractor_params, proxy_headers
|
||||
)
|
||||
else:
|
||||
logger.debug(f"Skipping background refresh for {extractor_params.host} (unique CDN URLs)")
|
||||
else:
|
||||
logger.info(f"Cache miss for key: {cache_key}. Fetching fresh data.")
|
||||
extractor = ExtractorFactory.get_extractor(extractor_params.host, proxy_headers.request)
|
||||
response = await extractor.extract(extractor_params.destination, **extractor_params.extra_params)
|
||||
await set_cache_extractor_result(cache_key, response)
|
||||
# Use Redis-based in-flight tracking for cross-worker deduplication.
|
||||
# If another worker is already extracting, wait for them to finish.
|
||||
inflight_key = f"extractor:{instance_cache_key}"
|
||||
|
||||
if not await redis_utils.mark_inflight(inflight_key, ttl=60):
|
||||
# Another worker is extracting - wait for them to finish and check cache
|
||||
logger.info(f"Waiting for in-flight extraction (cross-worker) for key: {instance_cache_key}")
|
||||
if await redis_utils.wait_for_completion(inflight_key, timeout=30.0):
|
||||
# Extraction completed, check cache
|
||||
response = await get_cached_extractor_result(instance_cache_key)
|
||||
if response:
|
||||
logger.info(f"Serving from cache (after wait) for key: {instance_cache_key}")
|
||||
|
||||
if response is None:
|
||||
# We either marked it as in-flight (first) or waited and still no cache hit.
|
||||
# Use Redis lock to ensure only one worker extracts at a time.
|
||||
if await redis_utils.acquire_lock(f"extractor_lock:{instance_cache_key}", ttl=30, timeout=30.0):
|
||||
try:
|
||||
# Re-check cache after acquiring lock - another worker may have populated it
|
||||
response = await get_cached_extractor_result(instance_cache_key)
|
||||
if response:
|
||||
logger.info(f"Serving from cache (after lock) for key: {instance_cache_key}")
|
||||
else:
|
||||
logger.info(f"Cache miss for key: {instance_cache_key}. Fetching fresh data.")
|
||||
try:
|
||||
extractor = ExtractorFactory.get_extractor(extractor_params.host, proxy_headers.request)
|
||||
response = await extractor.extract(
|
||||
extractor_params.destination, **extractor_params.extra_params
|
||||
)
|
||||
await set_cache_extractor_result(instance_cache_key, response)
|
||||
except Exception:
|
||||
raise
|
||||
finally:
|
||||
await redis_utils.release_lock(f"extractor_lock:{instance_cache_key}")
|
||||
await redis_utils.clear_inflight(inflight_key)
|
||||
else:
|
||||
# Lock timeout - try to serve from cache anyway
|
||||
response = await get_cached_extractor_result(instance_cache_key)
|
||||
if not response:
|
||||
raise HTTPException(status_code=503, detail="Extraction in progress, please retry")
|
||||
|
||||
# Deep copy so each concurrent request gets its own dict to mutate
|
||||
# (pop mediaflow_endpoint, update request_headers, etc.)
|
||||
response = copy.deepcopy(response)
|
||||
|
||||
# Ensure the latest request headers are used, even with cached data
|
||||
if "request_headers" not in response:
|
||||
@@ -94,3 +197,62 @@ async def extract_url(
|
||||
except Exception as e:
|
||||
logger.exception(f"Extraction failed: {str(e)}")
|
||||
raise HTTPException(status_code=500, detail=f"Extraction failed: {str(e)}")
|
||||
|
||||
|
||||
@extractor_router.head("/video")
|
||||
@extractor_router.get("/video")
|
||||
async def extract_url(
|
||||
extractor_params: Annotated[ExtractorURLParams, Query()],
|
||||
request: Request,
|
||||
background_tasks: BackgroundTasks,
|
||||
proxy_headers: Annotated[ProxyRequestHeaders, Depends(get_proxy_headers)],
|
||||
):
|
||||
"""
|
||||
Extract clean links from various video hosting services.
|
||||
|
||||
This is the base endpoint without extension. For better player compatibility
|
||||
(especially ExoPlayer), use the extension variants:
|
||||
- /extractor/video.m3u8 for HLS streams
|
||||
- /extractor/video.mp4 for MP4 streams
|
||||
"""
|
||||
return await _extract_url_impl(extractor_params, request, background_tasks, proxy_headers)
|
||||
|
||||
|
||||
@extractor_router.head("/video.{ext}")
|
||||
@extractor_router.get("/video.{ext}")
|
||||
async def extract_url_with_extension(
|
||||
ext: str,
|
||||
extractor_params: Annotated[ExtractorURLParams, Query()],
|
||||
request: Request,
|
||||
background_tasks: BackgroundTasks,
|
||||
proxy_headers: Annotated[ProxyRequestHeaders, Depends(get_proxy_headers)],
|
||||
):
|
||||
"""
|
||||
Extract clean links with file extension hint for player compatibility.
|
||||
|
||||
The extension in the URL helps players like ExoPlayer detect the content type
|
||||
without needing to follow redirects or inspect headers. This is especially
|
||||
important for HLS streams where ExoPlayer needs .m3u8 in the URL to use
|
||||
HlsMediaSource instead of ProgressiveMediaSource.
|
||||
|
||||
Supported extensions:
|
||||
- .m3u8, .m3u - HLS playlists (application/vnd.apple.mpegurl)
|
||||
- .mp4 - MP4 video (video/mp4)
|
||||
- .mkv - Matroska video (video/x-matroska)
|
||||
- .ts - MPEG-TS (video/mp2t)
|
||||
- .avi - AVI video (video/x-msvideo)
|
||||
- .webm - WebM video (video/webm)
|
||||
|
||||
Example:
|
||||
/extractor/video.m3u8?host=TurboVidPlay&d=...&redirect_stream=true
|
||||
|
||||
This URL clearly indicates HLS content, making ExoPlayer use the correct source.
|
||||
"""
|
||||
ext_lower = ext.lower()
|
||||
if ext_lower not in EXTRACTOR_EXT_CONTENT_TYPES:
|
||||
raise HTTPException(
|
||||
status_code=400,
|
||||
detail=f"Unsupported extension: .{ext}. Supported: {', '.join('.' + e for e in EXTRACTOR_EXT_CONTENT_TYPES.keys())}",
|
||||
)
|
||||
|
||||
return await _extract_url_impl(extractor_params, request, background_tasks, proxy_headers, ext=ext_lower)
|
||||
|
||||
@@ -5,167 +5,181 @@ from typing import Iterator, Dict, Optional
|
||||
from fastapi import APIRouter, Request, HTTPException, Query
|
||||
from fastapi.responses import StreamingResponse
|
||||
from starlette.responses import RedirectResponse
|
||||
import httpx
|
||||
|
||||
from mediaflow_proxy.configs import settings
|
||||
from mediaflow_proxy.utils.http_utils import get_original_scheme
|
||||
from mediaflow_proxy.utils.http_client import create_aiohttp_session
|
||||
import asyncio
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
playlist_builder_router = APIRouter()
|
||||
|
||||
|
||||
def rewrite_m3u_links_streaming(m3u_lines_iterator: Iterator[str], base_url: str, api_password: Optional[str]) -> Iterator[str]:
|
||||
def rewrite_m3u_links_streaming(
|
||||
m3u_lines_iterator: Iterator[str], base_url: str, api_password: Optional[str]
|
||||
) -> Iterator[str]:
|
||||
"""
|
||||
Riscrive i link da un iteratore di linee M3U secondo le regole specificate,
|
||||
includendo gli headers da #EXTVLCOPT e #EXTHTTP. Yields rewritten lines.
|
||||
Rewrites links from an M3U line iterator according to the specified rules,
|
||||
including headers from #EXTVLCOPT and #EXTHTTP. Yields rewritten lines.
|
||||
"""
|
||||
current_ext_headers: Dict[str, str] = {} # Dizionario per conservare gli headers dalle direttive
|
||||
current_kodi_props: Dict[str, str] = {} # Dizionario per conservare le proprietà KODI
|
||||
|
||||
current_ext_headers: Dict[str, str] = {} # Dictionary to store headers from directives
|
||||
current_kodi_props: Dict[str, str] = {} # Dictionary to store KODI properties
|
||||
|
||||
for line_with_newline in m3u_lines_iterator:
|
||||
line_content = line_with_newline.rstrip('\n')
|
||||
line_content = line_with_newline.rstrip("\n")
|
||||
logical_line = line_content.strip()
|
||||
|
||||
|
||||
is_header_tag = False
|
||||
if logical_line.startswith('#EXTVLCOPT:'):
|
||||
if logical_line.startswith("#EXTVLCOPT:"):
|
||||
# Yield the original line to preserve it
|
||||
yield line_with_newline
|
||||
|
||||
|
||||
is_header_tag = True
|
||||
try:
|
||||
option_str = logical_line.split(':', 1)[1]
|
||||
if '=' in option_str:
|
||||
key_vlc, value_vlc = option_str.split('=', 1)
|
||||
option_str = logical_line.split(":", 1)[1]
|
||||
if "=" in option_str:
|
||||
key_vlc, value_vlc = option_str.split("=", 1)
|
||||
key_vlc = key_vlc.strip()
|
||||
value_vlc = value_vlc.strip()
|
||||
|
||||
|
||||
# Gestione speciale per http-header che contiene "Key: Value"
|
||||
if key_vlc == 'http-header' and ':' in value_vlc:
|
||||
header_key, header_value = value_vlc.split(':', 1)
|
||||
if key_vlc == "http-header" and ":" in value_vlc:
|
||||
header_key, header_value = value_vlc.split(":", 1)
|
||||
header_key = header_key.strip()
|
||||
header_value = header_value.strip()
|
||||
current_ext_headers[header_key] = header_value
|
||||
elif key_vlc.startswith('http-'):
|
||||
# Gestisce http-user-agent, http-referer etc.
|
||||
header_key = key_vlc[len('http-'):]
|
||||
elif key_vlc.startswith("http-"):
|
||||
# Handle http-user-agent, http-referer, etc.
|
||||
header_key = key_vlc[len("http-") :]
|
||||
current_ext_headers[header_key] = value_vlc
|
||||
except Exception as e:
|
||||
logger.error(f"⚠️ Error parsing #EXTVLCOPT '{logical_line}': {e}")
|
||||
|
||||
elif logical_line.startswith('#EXTHTTP:'):
|
||||
logger.error(f"Error parsing #EXTVLCOPT '{logical_line}': {e}")
|
||||
|
||||
elif logical_line.startswith("#EXTHTTP:"):
|
||||
# Yield the original line to preserve it
|
||||
yield line_with_newline
|
||||
|
||||
|
||||
is_header_tag = True
|
||||
try:
|
||||
json_str = logical_line.split(':', 1)[1]
|
||||
# Sostituisce tutti gli header correnti con quelli del JSON
|
||||
json_str = logical_line.split(":", 1)[1]
|
||||
# Replace all current headers with those from the JSON
|
||||
current_ext_headers = json.loads(json_str)
|
||||
except Exception as e:
|
||||
logger.error(f"⚠️ Error parsing #EXTHTTP '{logical_line}': {e}")
|
||||
current_ext_headers = {} # Resetta in caso di errore
|
||||
|
||||
elif logical_line.startswith('#KODIPROP:'):
|
||||
logger.error(f"Error parsing #EXTHTTP '{logical_line}': {e}")
|
||||
current_ext_headers = {} # Reset on error
|
||||
|
||||
elif logical_line.startswith("#KODIPROP:"):
|
||||
# Yield the original line to preserve it
|
||||
yield line_with_newline
|
||||
|
||||
|
||||
is_header_tag = True
|
||||
try:
|
||||
prop_str = logical_line.split(':', 1)[1]
|
||||
if '=' in prop_str:
|
||||
key_kodi, value_kodi = prop_str.split('=', 1)
|
||||
prop_str = logical_line.split(":", 1)[1]
|
||||
if "=" in prop_str:
|
||||
key_kodi, value_kodi = prop_str.split("=", 1)
|
||||
current_kodi_props[key_kodi.strip()] = value_kodi.strip()
|
||||
except Exception as e:
|
||||
logger.error(f"⚠️ Error parsing #KODIPROP '{logical_line}': {e}")
|
||||
|
||||
logger.error(f"Error parsing #KODIPROP '{logical_line}': {e}")
|
||||
|
||||
if is_header_tag:
|
||||
continue
|
||||
|
||||
if logical_line and not logical_line.startswith('#') and \
|
||||
('http://' in logical_line or 'https://' in logical_line):
|
||||
|
||||
|
||||
if (
|
||||
logical_line
|
||||
and not logical_line.startswith("#")
|
||||
and ("http://" in logical_line or "https://" in logical_line)
|
||||
):
|
||||
processed_url_content = logical_line
|
||||
|
||||
|
||||
# Non modificare link pluto.tv
|
||||
if 'pluto.tv' in logical_line:
|
||||
if "pluto.tv" in logical_line:
|
||||
processed_url_content = logical_line
|
||||
elif 'vavoo.to' in logical_line:
|
||||
encoded_url = urllib.parse.quote(logical_line, safe='')
|
||||
elif "vavoo.to" in logical_line:
|
||||
encoded_url = urllib.parse.quote(logical_line, safe="")
|
||||
processed_url_content = f"{base_url}/proxy/hls/manifest.m3u8?d={encoded_url}"
|
||||
elif 'vixsrc.to' in logical_line:
|
||||
encoded_url = urllib.parse.quote(logical_line, safe='')
|
||||
elif "vixsrc.to" in logical_line:
|
||||
encoded_url = urllib.parse.quote(logical_line, safe="")
|
||||
processed_url_content = f"{base_url}/extractor/video?host=VixCloud&redirect_stream=true&d={encoded_url}&max_res=true&no_proxy=true"
|
||||
elif '.m3u8' in logical_line:
|
||||
encoded_url = urllib.parse.quote(logical_line, safe='')
|
||||
elif ".m3u8" in logical_line:
|
||||
encoded_url = urllib.parse.quote(logical_line, safe="")
|
||||
processed_url_content = f"{base_url}/proxy/hls/manifest.m3u8?d={encoded_url}"
|
||||
elif '.mpd' in logical_line:
|
||||
elif ".mpd" in logical_line:
|
||||
# Estrai parametri DRM dall'URL MPD se presenti (es. &key_id=...&key=...)
|
||||
from urllib.parse import urlparse, parse_qs, urlencode, urlunparse
|
||||
|
||||
|
||||
# Parse dell'URL per estrarre parametri
|
||||
parsed_url = urlparse(logical_line)
|
||||
query_params = parse_qs(parsed_url.query)
|
||||
|
||||
|
||||
# Estrai key_id e key se presenti nei parametri della query
|
||||
key_id = query_params.get('key_id', [None])[0]
|
||||
key = query_params.get('key', [None])[0]
|
||||
|
||||
key_id = query_params.get("key_id", [None])[0]
|
||||
key = query_params.get("key", [None])[0]
|
||||
|
||||
# Rimuovi key_id e key dai parametri originali
|
||||
clean_query_params = {k: v for k, v in query_params.items() if k not in ['key_id', 'key']}
|
||||
|
||||
clean_query_params = {k: v for k, v in query_params.items() if k not in ["key_id", "key"]}
|
||||
|
||||
# Ricostruisci l'URL senza i parametri DRM
|
||||
clean_query = urlencode(clean_query_params, doseq=True)
|
||||
clean_url = urlunparse((
|
||||
parsed_url.scheme,
|
||||
parsed_url.netloc,
|
||||
parsed_url.path,
|
||||
parsed_url.params,
|
||||
clean_query,
|
||||
'' # Rimuovi il frammento per evitare problemi
|
||||
))
|
||||
|
||||
clean_url = urlunparse(
|
||||
(
|
||||
parsed_url.scheme,
|
||||
parsed_url.netloc,
|
||||
parsed_url.path,
|
||||
parsed_url.params,
|
||||
clean_query,
|
||||
"", # Rimuovi il frammento per evitare problemi
|
||||
)
|
||||
)
|
||||
|
||||
# Codifica l'URL pulito per il parametro 'd'
|
||||
encoded_clean_url = urllib.parse.quote(clean_url, safe='')
|
||||
|
||||
encoded_clean_url = urllib.parse.quote(clean_url, safe="")
|
||||
|
||||
# Costruisci l'URL MediaFlow con parametri DRM separati
|
||||
processed_url_content = f"{base_url}/proxy/mpd/manifest.m3u8?d={encoded_clean_url}"
|
||||
|
||||
|
||||
# Aggiungi i parametri DRM all'URL di MediaFlow se sono stati trovati
|
||||
if key_id:
|
||||
processed_url_content += f"&key_id={key_id}"
|
||||
if key:
|
||||
processed_url_content += f"&key={key}"
|
||||
|
||||
# Aggiungi chiavi da #KODIPROP se presenti
|
||||
license_key = current_kodi_props.get('inputstream.adaptive.license_key')
|
||||
if license_key and ':' in license_key:
|
||||
key_id_kodi, key_kodi = license_key.split(':', 1)
|
||||
processed_url_content += f"&key_id={key_id_kodi}"
|
||||
processed_url_content += f"&key={key_kodi}"
|
||||
|
||||
elif '.php' in logical_line:
|
||||
encoded_url = urllib.parse.quote(logical_line, safe='')
|
||||
elif ".php" in logical_line:
|
||||
encoded_url = urllib.parse.quote(logical_line, safe="")
|
||||
processed_url_content = f"{base_url}/proxy/hls/manifest.m3u8?d={encoded_url}"
|
||||
else:
|
||||
# Per tutti gli altri link senza estensioni specifiche, trattali come .m3u8 con codifica
|
||||
encoded_url = urllib.parse.quote(logical_line, safe='')
|
||||
encoded_url = urllib.parse.quote(logical_line, safe="")
|
||||
processed_url_content = f"{base_url}/proxy/hls/manifest.m3u8?d={encoded_url}"
|
||||
|
||||
|
||||
# Aggiungi chiavi da #KODIPROP se presenti
|
||||
license_key = current_kodi_props.get("inputstream.adaptive.license_key")
|
||||
if license_key and ":" in license_key:
|
||||
key_id_kodi, key_kodi = license_key.split(":", 1)
|
||||
# Aggiungi key_id e key solo se non sono già stati aggiunti (es. dall'URL MPD)
|
||||
if "&key_id=" not in processed_url_content:
|
||||
processed_url_content += f"&key_id={key_id_kodi}"
|
||||
if "&key=" not in processed_url_content:
|
||||
processed_url_content += f"&key={key_kodi}"
|
||||
|
||||
# Applica gli header raccolti prima di api_password
|
||||
if current_ext_headers:
|
||||
header_params_str = "".join([f"&h_{urllib.parse.quote(key)}={urllib.parse.quote(value)}" for key, value in current_ext_headers.items()])
|
||||
header_params_str = "".join(
|
||||
[
|
||||
f"&h_{urllib.parse.quote(key)}={urllib.parse.quote(value)}"
|
||||
for key, value in current_ext_headers.items()
|
||||
]
|
||||
)
|
||||
processed_url_content += header_params_str
|
||||
current_ext_headers = {}
|
||||
|
||||
# Resetta le proprietà KODI dopo averle usate
|
||||
|
||||
# Reset KODI properties after using them
|
||||
current_kodi_props = {}
|
||||
|
||||
# Aggiungi api_password sempre alla fine
|
||||
|
||||
# Always append api_password at the end
|
||||
if api_password:
|
||||
processed_url_content += f"&api_password={api_password}"
|
||||
|
||||
yield processed_url_content + '\n'
|
||||
|
||||
yield processed_url_content + "\n"
|
||||
else:
|
||||
yield line_with_newline
|
||||
|
||||
@@ -173,45 +187,46 @@ def rewrite_m3u_links_streaming(m3u_lines_iterator: Iterator[str], base_url: str
|
||||
async def async_download_m3u_playlist(url: str) -> list[str]:
|
||||
"""Scarica una playlist M3U in modo asincrono e restituisce le righe."""
|
||||
headers = {
|
||||
'User-Agent': settings.user_agent,
|
||||
'Accept': '*/*',
|
||||
'Accept-Language': 'en-US,en;q=0.9',
|
||||
'Accept-Encoding': 'gzip, deflate',
|
||||
'Connection': 'keep-alive'
|
||||
"User-Agent": settings.user_agent,
|
||||
"Accept": "*/*",
|
||||
"Accept-Language": "en-US,en;q=0.9",
|
||||
"Accept-Encoding": "gzip, deflate",
|
||||
"Connection": "keep-alive",
|
||||
}
|
||||
lines = []
|
||||
try:
|
||||
async with httpx.AsyncClient(verify=True, timeout=30, follow_redirects=True) as client:
|
||||
async with client.stream('GET', url, headers=headers) as response:
|
||||
response.raise_for_status()
|
||||
async for line_bytes in response.aiter_lines():
|
||||
if isinstance(line_bytes, bytes):
|
||||
decoded_line = line_bytes.decode('utf-8', errors='replace')
|
||||
else:
|
||||
decoded_line = str(line_bytes)
|
||||
lines.append(decoded_line + '\n' if decoded_line else '')
|
||||
async with create_aiohttp_session(url, timeout=30) as (session, proxy_url):
|
||||
response = await session.get(url, headers=headers, proxy=proxy_url)
|
||||
response.raise_for_status()
|
||||
content = await response.text()
|
||||
# Split content into lines
|
||||
for line in content.splitlines():
|
||||
lines.append(line + "\n" if line else "")
|
||||
except Exception as e:
|
||||
logger.error(f"Error downloading playlist (async): {str(e)}")
|
||||
raise
|
||||
return lines
|
||||
|
||||
|
||||
def parse_channel_entries(lines: list[str]) -> list[list[str]]:
|
||||
"""
|
||||
Analizza le linee di una playlist M3U e le raggruppa in entry di canali.
|
||||
Ogni entry è una lista di linee che compongono un singolo canale
|
||||
(da #EXTINF fino all'URL, incluse le righe intermedie).
|
||||
Parse the lines of an M3U playlist and group them into channel entries.
|
||||
Each entry is a list of lines that make up a single channel
|
||||
(from #EXTINF to the URL, including intermediate lines).
|
||||
"""
|
||||
entries = []
|
||||
current_entry = []
|
||||
for line in lines:
|
||||
stripped_line = line.strip()
|
||||
if stripped_line.startswith('#EXTINF:'):
|
||||
if current_entry: # In caso di #EXTINF senza URL precedente
|
||||
logger.warning(f"Found a new #EXTINF tag before a URL was found for the previous entry. Discarding: {current_entry}")
|
||||
if stripped_line.startswith("#EXTINF:"):
|
||||
if current_entry: # In case of #EXTINF without a preceding URL
|
||||
logger.warning(
|
||||
f"Found a new #EXTINF tag before a URL was found for the previous entry. Discarding: {current_entry}"
|
||||
)
|
||||
current_entry = [line]
|
||||
elif current_entry:
|
||||
current_entry.append(line)
|
||||
if stripped_line and not stripped_line.startswith('#'):
|
||||
if stripped_line and not stripped_line.startswith("#"):
|
||||
entries.append(current_entry)
|
||||
current_entry = []
|
||||
return entries
|
||||
@@ -226,48 +241,52 @@ async def async_generate_combined_playlist(playlist_definitions: list[str], base
|
||||
playlist_url_str = definition
|
||||
should_sort = False
|
||||
|
||||
if definition.startswith('sort:'):
|
||||
if definition.startswith("sort:"):
|
||||
should_sort = True
|
||||
definition = definition[len('sort:'):]
|
||||
definition = definition[len("sort:") :]
|
||||
|
||||
if definition.startswith('no_proxy:'): # Può essere combinato con sort:
|
||||
if definition.startswith("no_proxy:"): # Can be combined with sort:
|
||||
should_proxy = False
|
||||
playlist_url_str = definition[len('no_proxy:'):]
|
||||
playlist_url_str = definition[len("no_proxy:") :]
|
||||
else:
|
||||
playlist_url_str = definition
|
||||
|
||||
download_tasks.append({
|
||||
"url": playlist_url_str,
|
||||
"proxy": should_proxy,
|
||||
"sort": should_sort
|
||||
})
|
||||
download_tasks.append({"url": playlist_url_str, "proxy": should_proxy, "sort": should_sort})
|
||||
|
||||
# Download all playlists in parallel
|
||||
results = await asyncio.gather(
|
||||
*[async_download_m3u_playlist(task["url"]) for task in download_tasks], return_exceptions=True
|
||||
)
|
||||
|
||||
# Scarica tutte le playlist in parallelo
|
||||
results = await asyncio.gather(*[async_download_m3u_playlist(task["url"]) for task in download_tasks], return_exceptions=True)
|
||||
|
||||
# Raggruppa le playlist da ordinare e quelle da non ordinare
|
||||
sorted_playlist_lines = []
|
||||
channel_entries_to_sort = []
|
||||
unsorted_playlists_data = []
|
||||
|
||||
|
||||
for idx, result in enumerate(results):
|
||||
task_info = download_tasks[idx]
|
||||
if isinstance(result, Exception):
|
||||
# Aggiungi errore come playlist non ordinata
|
||||
unsorted_playlists_data.append({'lines': [f"# ERROR processing playlist {task_info['url']}: {str(result)}\n"], 'proxy': False})
|
||||
unsorted_playlists_data.append(
|
||||
{"lines": [f"# ERROR processing playlist {task_info['url']}: {str(result)}\n"], "proxy": False}
|
||||
)
|
||||
continue
|
||||
|
||||
|
||||
if task_info.get("sort", False):
|
||||
sorted_playlist_lines.extend(result)
|
||||
# Se la playlist deve essere ordinata, estraiamo i canali e li mettiamo nel pool globale da ordinare
|
||||
entries = parse_channel_entries(result)
|
||||
for entry_lines in entries:
|
||||
channel_entries_to_sort.append((entry_lines, task_info["proxy"]))
|
||||
else:
|
||||
unsorted_playlists_data.append({'lines': result, 'proxy': task_info['proxy']})
|
||||
unsorted_playlists_data.append({"lines": result, "proxy": task_info["proxy"]})
|
||||
|
||||
# Gestione dell'header #EXTM3U
|
||||
first_playlist_header_handled = False
|
||||
|
||||
def yield_header_once(lines_iter):
|
||||
nonlocal first_playlist_header_handled
|
||||
has_header = False
|
||||
for line in lines_iter:
|
||||
is_extm3u = line.strip().startswith('#EXTM3U')
|
||||
is_extm3u = line.strip().startswith("#EXTM3U")
|
||||
if is_extm3u:
|
||||
has_header = True
|
||||
if not first_playlist_header_handled:
|
||||
@@ -276,52 +295,40 @@ async def async_generate_combined_playlist(playlist_definitions: list[str], base
|
||||
else:
|
||||
yield line
|
||||
if has_header and not first_playlist_header_handled:
|
||||
first_playlist_header_handled = True
|
||||
first_playlist_header_handled = True
|
||||
|
||||
# 1. Processa e ordina le playlist marcate con 'sort'
|
||||
if sorted_playlist_lines:
|
||||
# Estrai le entry dei canali
|
||||
# Modifica: Estrai le entry e mantieni l'informazione sul proxy
|
||||
channel_entries_with_proxy_info = []
|
||||
for idx, result in enumerate(results):
|
||||
task_info = download_tasks[idx]
|
||||
if task_info.get("sort") and isinstance(result, list):
|
||||
entries = parse_channel_entries(result) # result è la lista di linee della playlist
|
||||
for entry_lines in entries:
|
||||
# L'opzione proxy si applica a tutto il blocco del canale
|
||||
channel_entries_with_proxy_info.append((entry_lines, task_info["proxy"]))
|
||||
if channel_entries_to_sort:
|
||||
# Sort all entries from ALL sorted playlists together by channel name (from #EXTINF)
|
||||
# The first line of each entry is always #EXTINF
|
||||
channel_entries_to_sort.sort(key=lambda x: x[0][0].split(",")[-1].strip().lower())
|
||||
|
||||
# Ordina le entry in base al nome del canale (da #EXTINF)
|
||||
# La prima riga di ogni entry è sempre #EXTINF
|
||||
channel_entries_with_proxy_info.sort(key=lambda x: x[0][0].split(',')[-1].strip())
|
||||
|
||||
# Gestisci l'header una sola volta per il blocco ordinato
|
||||
# Handle the header only once for the sorted block
|
||||
if not first_playlist_header_handled:
|
||||
yield "#EXTM3U\n"
|
||||
first_playlist_header_handled = True
|
||||
|
||||
# Applica la riscrittura dei link in modo selettivo
|
||||
for entry_lines, should_proxy in channel_entries_with_proxy_info:
|
||||
# L'URL è l'ultima riga dell'entry
|
||||
|
||||
# Apply link rewriting selectively
|
||||
for entry_lines, should_proxy in channel_entries_to_sort:
|
||||
# The URL is the last line of the entry
|
||||
url = entry_lines[-1]
|
||||
# Yield tutte le righe prima dell'URL
|
||||
for line in entry_lines[:-1]:
|
||||
yield line
|
||||
|
||||
|
||||
if should_proxy:
|
||||
# Usa un iteratore fittizio per processare una sola linea
|
||||
rewritten_url_iter = rewrite_m3u_links_streaming(iter([url]), base_url, api_password)
|
||||
yield next(rewritten_url_iter, url) # Prende l'URL riscritto, con fallback all'originale
|
||||
yield next(rewritten_url_iter, url) # Prende l'URL riscritto, con fallback all'originale
|
||||
else:
|
||||
yield url # Lascia l'URL invariato
|
||||
|
||||
yield url # Lascia l'URL invariato
|
||||
|
||||
# 2. Accoda le playlist non ordinate
|
||||
for playlist_data in unsorted_playlists_data:
|
||||
lines_iterator = iter(playlist_data['lines'])
|
||||
if playlist_data['proxy']:
|
||||
lines_iterator = iter(playlist_data["lines"])
|
||||
if playlist_data["proxy"]:
|
||||
lines_iterator = rewrite_m3u_links_streaming(lines_iterator, base_url, api_password)
|
||||
|
||||
|
||||
for line in yield_header_once(lines_iterator):
|
||||
yield line
|
||||
|
||||
@@ -334,7 +341,7 @@ async def proxy_handler(
|
||||
):
|
||||
"""
|
||||
Endpoint per il proxy delle playlist M3U con supporto MFP.
|
||||
|
||||
|
||||
Formato query string: playlist1&url1;playlist2&url2
|
||||
Esempio: https://mfp.com:pass123&http://provider.com/playlist.m3u
|
||||
"""
|
||||
@@ -346,21 +353,21 @@ async def proxy_handler(
|
||||
raise HTTPException(status_code=400, detail="Query string cannot be empty")
|
||||
|
||||
# Validate that we have at least one valid definition
|
||||
playlist_definitions = [def_.strip() for def_ in d.split(';') if def_.strip()]
|
||||
playlist_definitions = [def_.strip() for def_ in d.split(";") if def_.strip()]
|
||||
if not playlist_definitions:
|
||||
raise HTTPException(status_code=400, detail="No valid playlist definitions found")
|
||||
|
||||
|
||||
# Costruisci base_url con lo schema corretto
|
||||
original_scheme = get_original_scheme(request)
|
||||
base_url = f"{original_scheme}://{request.url.netloc}"
|
||||
|
||||
|
||||
# Estrai base_url dalla prima definizione se presente
|
||||
if playlist_definitions and '&' in playlist_definitions[0]:
|
||||
parts = playlist_definitions[0].split('&', 1)
|
||||
if ':' in parts[0] and not parts[0].startswith('http'):
|
||||
if playlist_definitions and "&" in playlist_definitions[0]:
|
||||
parts = playlist_definitions[0].split("&", 1)
|
||||
if ":" in parts[0] and not parts[0].startswith("http"):
|
||||
# Estrai base_url dalla prima parte se contiene password
|
||||
base_url_part = parts[0].rsplit(':', 1)[0]
|
||||
if base_url_part.startswith('http'):
|
||||
base_url_part = parts[0].rsplit(":", 1)[0]
|
||||
if base_url_part.startswith("http"):
|
||||
base_url = base_url_part
|
||||
|
||||
async def generate_response():
|
||||
@@ -369,13 +376,10 @@ async def proxy_handler(
|
||||
|
||||
return StreamingResponse(
|
||||
generate_response(),
|
||||
media_type='application/vnd.apple.mpegurl',
|
||||
headers={
|
||||
'Content-Disposition': 'attachment; filename="playlist.m3u"',
|
||||
'Access-Control-Allow-Origin': '*'
|
||||
}
|
||||
media_type="application/vnd.apple.mpegurl",
|
||||
headers={"Content-Disposition": 'attachment; filename="playlist.m3u"', "Access-Control-Allow-Origin": "*"},
|
||||
)
|
||||
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"General error in playlist handler: {str(e)}")
|
||||
raise HTTPException(status_code=500, detail=f"Error: {str(e)}") from e
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
975
mediaflow_proxy/routes/telegram.py
Normal file
975
mediaflow_proxy/routes/telegram.py
Normal file
@@ -0,0 +1,975 @@
|
||||
"""
|
||||
Telegram MTProto proxy routes.
|
||||
|
||||
Provides endpoints for streaming Telegram media:
|
||||
- /proxy/telegram/stream - Stream media from t.me links or file_id (&transcode=true for fMP4 audio transcode)
|
||||
- /proxy/telegram/info - Get media metadata
|
||||
- /proxy/telegram/status - Check session status
|
||||
- /proxy/telegram/session/* - Session string generation
|
||||
"""
|
||||
|
||||
import asyncio
|
||||
import logging
|
||||
import re
|
||||
import secrets
|
||||
from typing import Annotated, Optional
|
||||
|
||||
from fastapi import APIRouter, Depends, HTTPException, Query, Request, Response
|
||||
from pydantic import BaseModel
|
||||
|
||||
from telethon import TelegramClient
|
||||
from telethon.sessions import StringSession
|
||||
|
||||
from mediaflow_proxy.configs import settings
|
||||
from mediaflow_proxy.remuxer.media_source import TelegramMediaSource
|
||||
from mediaflow_proxy.remuxer.transcode_handler import (
|
||||
handle_transcode,
|
||||
handle_transcode_hls_init,
|
||||
handle_transcode_hls_playlist,
|
||||
handle_transcode_hls_segment,
|
||||
)
|
||||
from mediaflow_proxy.utils.http_utils import (
|
||||
EnhancedStreamingResponse,
|
||||
ProxyRequestHeaders,
|
||||
apply_header_manipulation,
|
||||
get_proxy_headers,
|
||||
)
|
||||
from mediaflow_proxy.utils.telegram import (
|
||||
TelegramMediaRef,
|
||||
parse_telegram_url,
|
||||
telegram_manager,
|
||||
)
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
telegram_router = APIRouter()
|
||||
|
||||
|
||||
def get_content_type(mime_type: str, file_name: Optional[str] = None) -> str:
|
||||
"""Determine content type from mime type or filename."""
|
||||
if mime_type:
|
||||
return mime_type
|
||||
|
||||
if file_name:
|
||||
ext = file_name.rsplit(".", 1)[-1].lower() if "." in file_name else ""
|
||||
mime_map = {
|
||||
"mp4": "video/mp4",
|
||||
"mkv": "video/x-matroska",
|
||||
"avi": "video/x-msvideo",
|
||||
"webm": "video/webm",
|
||||
"mov": "video/quicktime",
|
||||
"mp3": "audio/mpeg",
|
||||
"m4a": "audio/mp4",
|
||||
"flac": "audio/flac",
|
||||
"ogg": "audio/ogg",
|
||||
"jpg": "image/jpeg",
|
||||
"jpeg": "image/jpeg",
|
||||
"png": "image/png",
|
||||
"gif": "image/gif",
|
||||
"webp": "image/webp",
|
||||
}
|
||||
return mime_map.get(ext, "application/octet-stream")
|
||||
|
||||
return "application/octet-stream"
|
||||
|
||||
|
||||
def parse_range_header(range_header: Optional[str], file_size: int) -> tuple[int, int]:
|
||||
"""
|
||||
Parse HTTP Range header.
|
||||
|
||||
Args:
|
||||
range_header: The Range header value (e.g., "bytes=0-999")
|
||||
file_size: Total file size
|
||||
|
||||
Returns:
|
||||
Tuple of (start, end) byte positions
|
||||
"""
|
||||
if not range_header:
|
||||
return 0, file_size - 1
|
||||
|
||||
# Parse "bytes=start-end" format
|
||||
match = re.match(r"bytes=(\d*)-(\d*)", range_header)
|
||||
if not match:
|
||||
return 0, file_size - 1
|
||||
|
||||
start_str, end_str = match.groups()
|
||||
|
||||
if start_str and end_str:
|
||||
start = int(start_str)
|
||||
end = min(int(end_str), file_size - 1)
|
||||
elif start_str:
|
||||
start = int(start_str)
|
||||
end = file_size - 1
|
||||
elif end_str:
|
||||
# Suffix range: last N bytes
|
||||
suffix_length = int(end_str)
|
||||
start = max(0, file_size - suffix_length)
|
||||
end = file_size - 1
|
||||
else:
|
||||
start = 0
|
||||
end = file_size - 1
|
||||
|
||||
# Validate start <= end (handle malformed ranges like "bytes=999-0")
|
||||
if start > end:
|
||||
return 0, file_size - 1
|
||||
|
||||
return start, end
|
||||
|
||||
|
||||
@telegram_router.head("/telegram/stream")
@telegram_router.get("/telegram/stream")
@telegram_router.head("/telegram/stream/{filename:path}")
@telegram_router.get("/telegram/stream/{filename:path}")
async def telegram_stream(
    request: Request,
    proxy_headers: Annotated[ProxyRequestHeaders, Depends(get_proxy_headers)],
    d: Optional[str] = Query(None, description="t.me link or Telegram URL"),
    url: Optional[str] = Query(None, description="Alias for 'd' parameter"),
    chat_id: Optional[str] = Query(None, description="Chat/Channel ID (use with message_id)"),
    message_id: Optional[int] = Query(None, description="Message ID (use with chat_id)"),
    file_id: Optional[str] = Query(None, description="Bot API file_id (requires file_size parameter)"),
    file_size: Optional[int] = Query(None, description="File size in bytes (required for file_id streaming)"),
    transcode: bool = Query(False, description="Transcode to browser-compatible fMP4 (EAC3/AC3->AAC)"),
    start: Optional[float] = Query(None, description="Seek start time in seconds (used with transcode=true)"),
    filename: Optional[str] = None,
):
    """
    Stream Telegram media with range request support and parallel downloads.

    Supports:
    - t.me links: https://t.me/channel/123, https://t.me/c/123456789/456
    - chat_id + message_id: Direct reference by IDs (e.g., chat_id=-100123456&message_id=789)
    - file_id + file_size: Direct streaming by Bot API file_id (requires file_size)

    When transcode=true, the media is remuxed to fragmented MP4 with
    browser-compatible codecs. Audio is transcoded to AAC. Video is passed
    through when the source codec is already browser-compatible (H.264);
    otherwise it is re-encoded to H.264. Seeking is supported via standard
    HTTP Range requests (byte offsets are converted to time positions using
    an estimated fMP4 size). The 'start' query parameter can also be used
    for explicit time-based seeking.

    Args:
        request: The incoming HTTP request
        proxy_headers: Headers for proxy requests
        d: t.me link or Telegram URL
        url: Alias for 'd' parameter
        chat_id: Chat/Channel ID (numeric or username)
        message_id: Message ID within the chat
        file_id: Bot API file_id (requires file_size parameter)
        file_size: File size in bytes (required for file_id streaming)
        transcode: Transcode to browser-compatible format (EAC3/AC3->AAC)
        filename: Optional filename for Content-Disposition

    Returns:
        Streaming response with media content, or redirect to HLS manifest when transcoding
    """
    if not settings.enable_telegram:
        raise HTTPException(status_code=503, detail="Telegram proxy support is disabled")

    # Get the URL from either parameter ('url' is a documented alias of 'd')
    telegram_url = d or url

    # Exactly one input method must be provided: URL, chat_id+message_id, or file_id
    if not telegram_url and not file_id and not (chat_id and message_id):
        raise HTTPException(
            status_code=400,
            detail="Provide either 'd' (t.me URL), 'chat_id' + 'message_id', or 'file_id' + 'file_size' parameters",
        )

    try:
        # Parse the reference based on input type
        if telegram_url:
            ref = parse_telegram_url(telegram_url)
        elif chat_id and message_id:
            # Direct chat_id + message_id.
            # Try to parse chat_id as int, otherwise treat as username.
            try:
                parsed_chat_id: int | str = int(chat_id)
            except ValueError:
                parsed_chat_id = chat_id  # Username
            ref = TelegramMediaRef(chat_id=parsed_chat_id, message_id=message_id)
        else:
            # file_id mode: the size cannot be derived from the file_id itself,
            # so it must be supplied explicitly for range arithmetic.
            if not file_size:
                raise HTTPException(
                    status_code=400,
                    detail="file_size parameter is required when using file_id. "
                    "The file_id doesn't contain size information needed for range requests.",
                )
            ref = TelegramMediaRef(file_id=file_id)

        # Get media info (pass file_size for file_id mode)
        media_info = await telegram_manager.get_media_info(ref, file_size=file_size)
        actual_file_size = media_info.file_size
        mime_type = media_info.mime_type
        media_filename = filename or media_info.file_name

        # For file_id mode, validate access before starting the stream.
        # This catches FileReferenceExpiredError early, before headers are sent.
        if ref.file_id and not ref.message_id:
            await telegram_manager.validate_file_access(ref, file_size=file_size)

        # Handle transcode mode: stream as fMP4 with transcoded audio
        if transcode:
            if not settings.enable_transcode:
                raise HTTPException(status_code=503, detail="Transcoding support is disabled")
            return await _handle_transcode(
                request,
                ref,
                actual_file_size,
                start_time=start,
                file_name=media_filename or "",
            )

        # Parse range header. NOTE: this rebinds 'start' (the float query param)
        # to the range start byte; safe because the transcode branch -- the only
        # consumer of the query-param value -- has already returned above.
        range_header = request.headers.get("range")
        start, end = parse_range_header(range_header, actual_file_size)
        content_length = end - start + 1

        # Handle HEAD requests: advertise full size and range support, no body
        if request.method == "HEAD":
            headers = {
                "content-type": get_content_type(mime_type, media_filename),
                "content-length": str(actual_file_size),
                "accept-ranges": "bytes",
                "access-control-allow-origin": "*",
            }
            if media_filename:
                headers["content-disposition"] = f'inline; filename="{media_filename}"'
            return Response(headers=headers)

        # Build response headers: 206 for range requests, 200 otherwise
        is_range_request = range_header is not None
        status_code = 206 if is_range_request else 200

        base_headers = {
            "content-type": get_content_type(mime_type, media_filename),
            "content-length": str(content_length),
            "accept-ranges": "bytes",
            "access-control-allow-origin": "*",
        }

        if is_range_request:
            base_headers["content-range"] = f"bytes {start}-{end}/{actual_file_size}"

        if media_filename:
            base_headers["content-disposition"] = f'inline; filename="{media_filename}"'

        response_headers = apply_header_manipulation(base_headers, proxy_headers)

        # Stream the content (pass file_size for file_id mode). Errors raised
        # after headers are sent cannot become HTTP errors, so the generator
        # logs and ends the stream instead of re-raising.
        async def stream_content():
            try:
                async for chunk in telegram_manager.stream_media(
                    ref, offset=start, limit=content_length, file_size=actual_file_size
                ):
                    yield chunk
            except asyncio.CancelledError:
                # Client disconnected (e.g., seeking in video player) - this is normal
                logger.debug("[telegram_stream] Stream cancelled by client")
            except GeneratorExit:
                # Generator closed - this is normal during cleanup
                logger.debug("[telegram_stream] Stream generator closed")
            except Exception as e:
                error_name = type(e).__name__
                # Handle errors that occur mid-stream (after headers sent)
                if error_name == "FileReferenceExpiredError":
                    logger.error(
                        "[telegram_stream] File reference expired mid-stream. "
                        "This file_id belongs to a different session or the reference is stale."
                    )
                    # Don't re-raise - just end the stream to avoid protocol errors
                    return
                elif error_name in ("ChannelPrivateError", "ChatAdminRequiredError", "UserNotParticipantError"):
                    logger.error(f"[telegram_stream] Access denied mid-stream: {error_name}")
                    return
                else:
                    logger.error(f"[telegram_stream] Error streaming: {e}")
                    # For unknown errors, also don't re-raise to avoid protocol errors
                    return

        return EnhancedStreamingResponse(
            stream_content(),
            status_code=status_code,
            headers=response_headers,
            media_type=get_content_type(mime_type, media_filename),
        )

    except HTTPException:
        # BUGFIX: re-raise HTTPExceptions raised inside the try block (e.g. the
        # 400 for missing file_size, or the 503 when transcoding is disabled).
        # Without this they fell through to the generic Exception handler below
        # and were converted into opaque 500 "Internal error: HTTPException"
        # responses, losing the accurate status code and detail.
        raise
    except ValueError as e:
        raise HTTPException(status_code=400, detail=str(e))
    except Exception as e:
        # Map specific Telegram errors (matched by class name to avoid a hard
        # dependency on telethon error classes here) to meaningful HTTP codes.
        error_name = type(e).__name__

        if error_name == "FloodWaitError":
            wait_seconds = getattr(e, "seconds", 60)
            logger.warning(f"[telegram_stream] Flood wait: {wait_seconds}s")
            raise HTTPException(
                status_code=429,
                detail=f"Rate limited by Telegram. Please wait {wait_seconds} seconds.",
                headers={"Retry-After": str(wait_seconds)},
            )
        elif error_name == "ChannelPrivateError":
            raise HTTPException(
                status_code=403,
                detail="Cannot access private channel. The session user is not a member of this channel/group.",
            )
        elif error_name == "ChatAdminRequiredError":
            raise HTTPException(
                status_code=403,
                detail="Admin privileges required to access this chat.",
            )
        elif error_name == "UserNotParticipantError":
            raise HTTPException(
                status_code=403,
                detail="The session user is not a participant of this chat.",
            )
        elif error_name == "MessageIdInvalidError":
            raise HTTPException(status_code=404, detail="Message not found in the specified chat.")
        elif error_name == "AuthKeyError":
            raise HTTPException(
                status_code=401, detail="Telegram session is invalid. Please regenerate the session string."
            )
        elif error_name == "FileReferenceExpiredError":
            raise HTTPException(
                status_code=410,
                detail="File reference expired or inaccessible. "
                "This file_id belongs to a different bot/user session. "
                "Use chat_id + message_id instead, or ensure the session has access to this file.",
            )
        elif error_name == "UserBannedInChannelError":
            raise HTTPException(
                status_code=403,
                detail="The session user is banned from this channel.",
            )
        elif error_name == "ChannelInvalidError":
            raise HTTPException(
                status_code=404,
                detail="Invalid channel. The channel may not exist or the ID is incorrect.",
            )
        elif error_name == "PeerIdInvalidError":
            raise HTTPException(
                status_code=404,
                detail="Invalid chat ID. The chat/channel/user ID is incorrect or inaccessible.",
            )

        logger.exception(f"[telegram_stream] Unexpected error: {e}")
        raise HTTPException(status_code=500, detail=f"Internal error: {error_name}")
|
||||
|
||||
except ValueError as e:
|
||||
raise HTTPException(status_code=400, detail=str(e))
|
||||
except Exception as e:
|
||||
# Handle specific Telegram errors
|
||||
error_name = type(e).__name__
|
||||
|
||||
if error_name == "FloodWaitError":
|
||||
wait_seconds = getattr(e, "seconds", 60)
|
||||
logger.warning(f"[telegram_stream] Flood wait: {wait_seconds}s")
|
||||
raise HTTPException(
|
||||
status_code=429,
|
||||
detail=f"Rate limited by Telegram. Please wait {wait_seconds} seconds.",
|
||||
headers={"Retry-After": str(wait_seconds)},
|
||||
)
|
||||
elif error_name == "ChannelPrivateError":
|
||||
raise HTTPException(
|
||||
status_code=403,
|
||||
detail="Cannot access private channel. The session user is not a member of this channel/group.",
|
||||
)
|
||||
elif error_name == "ChatAdminRequiredError":
|
||||
raise HTTPException(
|
||||
status_code=403,
|
||||
detail="Admin privileges required to access this chat.",
|
||||
)
|
||||
elif error_name == "UserNotParticipantError":
|
||||
raise HTTPException(
|
||||
status_code=403,
|
||||
detail="The session user is not a participant of this chat.",
|
||||
)
|
||||
elif error_name == "MessageIdInvalidError":
|
||||
raise HTTPException(status_code=404, detail="Message not found in the specified chat.")
|
||||
elif error_name == "AuthKeyError":
|
||||
raise HTTPException(
|
||||
status_code=401, detail="Telegram session is invalid. Please regenerate the session string."
|
||||
)
|
||||
elif error_name == "FileReferenceExpiredError":
|
||||
raise HTTPException(
|
||||
status_code=410,
|
||||
detail="File reference expired or inaccessible. "
|
||||
"This file_id belongs to a different bot/user session. "
|
||||
"Use chat_id + message_id instead, or ensure the session has access to this file.",
|
||||
)
|
||||
elif error_name == "UserBannedInChannelError":
|
||||
raise HTTPException(
|
||||
status_code=403,
|
||||
detail="The session user is banned from this channel.",
|
||||
)
|
||||
elif error_name == "ChannelInvalidError":
|
||||
raise HTTPException(
|
||||
status_code=404,
|
||||
detail="Invalid channel. The channel may not exist or the ID is incorrect.",
|
||||
)
|
||||
elif error_name == "PeerIdInvalidError":
|
||||
raise HTTPException(
|
||||
status_code=404,
|
||||
detail="Invalid chat ID. The chat/channel/user ID is incorrect or inaccessible.",
|
||||
)
|
||||
|
||||
logger.exception(f"[telegram_stream] Unexpected error: {e}")
|
||||
raise HTTPException(status_code=500, detail=f"Internal error: {error_name}")
|
||||
|
||||
|
||||
async def _handle_transcode(
    request: Request,
    ref: TelegramMediaRef,
    file_size: int,
    start_time: float | None = None,
    file_name: str = "",
) -> Response:
    """
    Handle transcode mode by delegating to the shared transcode handler.

    The Telegram media reference is wrapped in a ``TelegramMediaSource`` and
    handed to the source-agnostic transcode handler, which takes care of cue
    probing, seeking, and pipeline selection.
    """
    media_source = TelegramMediaSource(ref, file_size, file_name=file_name)
    return await handle_transcode(request, media_source, start_time=start_time)
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# HLS transcode endpoints for Telegram sources
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
async def _resolve_telegram_source(
    d: str | None = None,
    url: str | None = None,
    chat_id: str | None = None,
    message_id: int | None = None,
    file_id: str | None = None,
    file_size: int | None = None,
    filename: str | None = None,
    *,
    use_single_client: bool = False,
) -> TelegramMediaSource:
    """
    Resolve input parameters to a ``TelegramMediaSource``.

    Accepts the same three input methods as the streaming endpoint: a t.me
    URL ('d'/'url'), a chat_id + message_id pair, or a Bot API file_id
    (which requires an explicit file_size).

    Args:
        use_single_client: When ``True``, the returned source will use
            Telethon's built-in single-connection downloader instead of
            the parallel ``ParallelTransferrer``. Should be ``True``
            for HLS requests (playlist, init, segments) where each
            request fetches a small byte range and spinning up multiple
            DC connections per request is wasteful.

    Raises:
        HTTPException: 503 when Telegram support is disabled; 400 when no
            valid input combination is supplied.
    """
    # NOTE: HTTPException is already imported at module level (it is raised
    # bare throughout this file), so the previous redundant function-local
    # `from fastapi import HTTPException` re-imports have been removed.
    if not settings.enable_telegram:
        raise HTTPException(status_code=503, detail="Telegram proxy support is disabled")

    telegram_url = d or url

    if not telegram_url and not file_id and not (chat_id and message_id):
        raise HTTPException(
            status_code=400,
            detail="Provide either 'd' (t.me URL), 'chat_id' + 'message_id', or 'file_id' + 'file_size'",
        )

    if telegram_url:
        ref = parse_telegram_url(telegram_url)
    elif chat_id and message_id:
        # Numeric IDs become ints; anything else is treated as a username.
        try:
            parsed_chat_id: int | str = int(chat_id)
        except ValueError:
            parsed_chat_id = chat_id
        ref = TelegramMediaRef(chat_id=parsed_chat_id, message_id=message_id)
    else:
        # file_id mode: the size cannot be derived from the file_id itself.
        if not file_size:
            raise HTTPException(
                status_code=400,
                detail="file_size is required when using file_id",
            )
        ref = TelegramMediaRef(file_id=file_id)

    media_info = await telegram_manager.get_media_info(ref, file_size=file_size)
    actual_file_size = media_info.file_size
    media_filename = filename or media_info.file_name

    return TelegramMediaSource(
        ref,
        actual_file_size,
        file_name=media_filename or "",
        use_single_client=use_single_client,
    )
|
||||
|
||||
|
||||
@telegram_router.head("/telegram/transcode/playlist.m3u8")
@telegram_router.get("/telegram/transcode/playlist.m3u8")
async def telegram_transcode_hls_playlist(
    request: Request,
    d: Optional[str] = Query(None, description="t.me link or Telegram URL"),
    url: Optional[str] = Query(None, description="Alias for 'd'"),
    chat_id: Optional[str] = Query(None, description="Chat/Channel ID"),
    message_id: Optional[int] = Query(None, description="Message ID"),
    file_id: Optional[str] = Query(None, description="Bot API file_id"),
    file_size: Optional[int] = Query(None, description="File size in bytes"),
    filename: Optional[str] = Query(None, description="Optional filename"),
):
    """Generate an HLS VOD M3U8 playlist for a Telegram media file.

    Accepts the same input combinations as the streaming endpoint
    (t.me URL, chat_id + message_id, or file_id + file_size) and returns
    a playlist whose init/segment URLs point back at the sibling
    /telegram/transcode endpoints in this router.
    """
    if not settings.enable_transcode:
        raise HTTPException(status_code=503, detail="Transcoding support is disabled")
    # use_single_client=True: HLS sub-requests fetch small byte ranges, so
    # per-request parallel DC connections would be wasteful.
    source = await _resolve_telegram_source(
        d,
        url,
        chat_id,
        message_id,
        file_id,
        file_size,
        filename,
        use_single_client=True,
    )

    # Build sub-request params using the *resolved* file_id + file_size so
    # that init/segment requests skip the Telegram API call for get_message.
    base_params = _build_telegram_hls_resolved_params(request, source)
    init_url = f"/proxy/telegram/transcode/init.mp4?{base_params}"
    # {seg}/{start_ms}/{end_ms} are filled in per segment by the playlist builder.
    segment_url_template = (
        f"/proxy/telegram/transcode/segment.m4s?{base_params}&seg={{seg}}&start_ms={{start_ms}}&end_ms={{end_ms}}"
    )

    return await handle_transcode_hls_playlist(
        request,
        source,
        init_url=init_url,
        segment_url_template=segment_url_template,
    )
|
||||
|
||||
|
||||
@telegram_router.head("/telegram/transcode/init.mp4")
@telegram_router.get("/telegram/transcode/init.mp4")
async def telegram_transcode_hls_init(
    request: Request,
    d: Optional[str] = Query(None, description="t.me link or Telegram URL"),
    url: Optional[str] = Query(None, description="Alias for 'd'"),
    chat_id: Optional[str] = Query(None, description="Chat/Channel ID"),
    message_id: Optional[int] = Query(None, description="Message ID"),
    file_id: Optional[str] = Query(None, description="Bot API file_id"),
    file_size: Optional[int] = Query(None, description="File size in bytes"),
    filename: Optional[str] = Query(None, description="Optional filename"),
):
    """Serve the fMP4 init segment for a Telegram media file."""
    if not settings.enable_transcode:
        raise HTTPException(status_code=503, detail="Transcoding support is disabled")
    # Single-client mode: init requests cover a small byte range, so parallel
    # DC connections are unnecessary overhead.
    media_source = await _resolve_telegram_source(
        d=d,
        url=url,
        chat_id=chat_id,
        message_id=message_id,
        file_id=file_id,
        file_size=file_size,
        filename=filename,
        use_single_client=True,
    )
    return await handle_transcode_hls_init(request, media_source)
|
||||
|
||||
|
||||
@telegram_router.get("/telegram/transcode/segment.m4s")
async def telegram_transcode_hls_segment(
    request: Request,
    start_ms: float = Query(..., description="Segment start time in milliseconds"),
    end_ms: float = Query(..., description="Segment end time in milliseconds"),
    seg: int | None = Query(None, description="Segment number (informational, for logging)"),
    d: Optional[str] = Query(None, description="t.me link or Telegram URL"),
    url: Optional[str] = Query(None, description="Alias for 'd'"),
    chat_id: Optional[str] = Query(None, description="Chat/Channel ID"),
    message_id: Optional[int] = Query(None, description="Message ID"),
    file_id: Optional[str] = Query(None, description="Bot API file_id"),
    file_size: Optional[int] = Query(None, description="File size in bytes"),
    filename: Optional[str] = Query(None, description="Optional filename"),
):
    """Serve a single HLS fMP4 media segment for a Telegram media file."""
    if not settings.enable_transcode:
        raise HTTPException(status_code=503, detail="Transcoding support is disabled")
    # Single-client mode: each segment request covers a small byte range, so
    # parallel DC connections are unnecessary overhead.
    media_source = await _resolve_telegram_source(
        d=d,
        url=url,
        chat_id=chat_id,
        message_id=message_id,
        file_id=file_id,
        file_size=file_size,
        filename=filename,
        use_single_client=True,
    )
    return await handle_transcode_hls_segment(
        request,
        media_source,
        start_time_ms=start_ms,
        end_time_ms=end_ms,
        segment_number=seg,
    )
|
||||
|
||||
|
||||
def _build_telegram_hls_params(request: Request) -> str:
    """Build query string for Telegram HLS sub-requests, preserving all input params."""
    from urllib.parse import quote

    # Segment-specific params are excluded here; the caller appends them
    # per-segment with concrete values.
    excluded = {"seg", "start_ms", "end_ms"}
    query = request.query_params
    return "&".join(
        f"{name}={quote(query[name], safe='')}" for name in query if name not in excluded
    )
|
||||
|
||||
|
||||
def _build_telegram_hls_resolved_params(
    request: Request,
    source: "TelegramMediaSource",
) -> str:
    """
    Build query string for HLS sub-request URLs using the *resolved* source.

    Unlike ``_build_telegram_hls_params`` which blindly copies the original
    query params, this version replaces chat_id/message_id/d/url with the
    resolved file reference so that init and segment requests can skip the
    expensive ``get_message()`` Telegram API call.

    The original query params are used as a fallback for any extra parameters
    (api_password, filename, etc.).
    """
    from urllib.parse import quote

    # NOTE(review): reaches into the source's private ``_ref`` attribute;
    # acceptable here since both live in the same package, but a public
    # accessor on TelegramMediaSource would be cleaner.
    ref = source._ref
    params: dict[str, str] = {}

    # Carry over non-identifying params from the original request
    # (api_password, filename, etc.)
    _skip_keys = {"d", "url", "chat_id", "message_id", "file_id", "file_size", "seg", "start_ms", "end_ms"}
    for key in request.query_params:
        if key not in _skip_keys:
            params[key] = request.query_params[key]

    # Use the resolved reference -- prefer chat_id + message_id (most reliable
    # for streaming), but also include file_size from the resolved source.
    if ref.chat_id is not None and ref.message_id is not None:
        params["chat_id"] = str(ref.chat_id)
        params["message_id"] = str(ref.message_id)
    elif ref.file_id:
        params["file_id"] = ref.file_id
    # Always include file_size -- it prevents unnecessary lookups
    params["file_size"] = str(source.file_size)

    # Values are percent-encoded with safe='' so '&', '=' and '/' survive.
    return "&".join(f"{k}={quote(v, safe='')}" for k, v in params.items())
|
||||
|
||||
|
||||
@telegram_router.get("/telegram/info")
async def telegram_info(
    d: Optional[str] = Query(None, description="t.me link or Telegram URL"),
    url: Optional[str] = Query(None, description="Alias for 'd' parameter"),
    chat_id: Optional[str] = Query(None, description="Chat/Channel ID (use with message_id)"),
    message_id: Optional[int] = Query(None, description="Message ID (use with chat_id)"),
    file_id: Optional[str] = Query(None, description="Bot API file_id"),
    file_size: Optional[int] = Query(None, description="File size in bytes (optional for file_id)"),
):
    """
    Get metadata about a Telegram media file.

    Args:
        d: t.me link or Telegram URL
        url: Alias for 'd' parameter
        chat_id: Chat/Channel ID (numeric or username)
        message_id: Message ID within the chat
        file_id: Bot API file_id
        file_size: File size in bytes (optional, will be 0 if not provided for file_id)

    Returns:
        JSON with media information (size, mime_type, filename, dimensions, duration)

    Raises:
        HTTPException: 503 when Telegram support is disabled; 400 for missing
            or invalid input; 403/404/410 for mapped Telegram errors; 500 otherwise.
    """
    if not settings.enable_telegram:
        raise HTTPException(status_code=503, detail="Telegram proxy support is disabled")

    # 'url' is a documented alias of 'd'
    telegram_url = d or url

    # Unlike the streaming endpoint, file_size is optional here
    if not telegram_url and not file_id and not (chat_id and message_id):
        raise HTTPException(
            status_code=400,
            detail="Provide either 'd' (t.me URL), 'chat_id' + 'message_id', or 'file_id' parameter",
        )

    try:
        # Build the media reference from whichever input method was used
        if telegram_url:
            ref = parse_telegram_url(telegram_url)
        elif chat_id and message_id:
            # Numeric IDs become ints; anything else is treated as a username
            try:
                parsed_chat_id: int | str = int(chat_id)
            except ValueError:
                parsed_chat_id = chat_id
            ref = TelegramMediaRef(chat_id=parsed_chat_id, message_id=message_id)
        else:
            ref = TelegramMediaRef(file_id=file_id)

        media_info = await telegram_manager.get_media_info(ref, file_size=file_size)

        return {
            "file_id": media_info.file_id,
            "file_size": media_info.file_size,
            "mime_type": media_info.mime_type,
            "file_name": media_info.file_name,
            "duration": media_info.duration,
            "width": media_info.width,
            "height": media_info.height,
            "dc_id": media_info.dc_id,
        }

    except ValueError as e:
        # e.g. parse_telegram_url rejecting a malformed link
        raise HTTPException(status_code=400, detail=str(e))
    except Exception as e:
        # Map known Telegram errors (matched by class name) to HTTP codes
        error_name = type(e).__name__
        if error_name == "ChannelPrivateError":
            raise HTTPException(
                status_code=403,
                detail="Cannot access private channel. The session user is not a member.",
            )
        elif error_name == "MessageIdInvalidError":
            raise HTTPException(status_code=404, detail="Message not found in the specified chat.")
        elif error_name == "FileReferenceExpiredError":
            raise HTTPException(
                status_code=410,
                detail="File reference expired or inaccessible. This file_id belongs to a different session.",
            )
        elif error_name == "PeerIdInvalidError":
            raise HTTPException(
                status_code=404,
                detail="Invalid chat ID. The chat/channel/user ID is incorrect or inaccessible.",
            )
        logger.exception(f"[telegram_info] Error: {e}")
        raise HTTPException(status_code=500, detail=f"Internal error: {error_name}")
|
||||
|
||||
|
||||
@telegram_router.get("/telegram/status")
async def telegram_status():
    """
    Get Telegram session status.

    Reports one of: disabled, not_configured, no_session, connected, or ready.
    Never triggers a connection itself.

    Returns:
        JSON with session status information
    """
    if not settings.enable_telegram:
        return {
            "enabled": False,
            "status": "disabled",
            "message": "Telegram proxy support is disabled in configuration",
        }

    # API credentials must be present before a session can mean anything.
    credentials_ok = bool(settings.telegram_api_id) and bool(settings.telegram_api_hash)
    if not credentials_ok:
        return {
            "enabled": True,
            "status": "not_configured",
            "message": "Telegram API credentials not configured (telegram_api_id, telegram_api_hash)",
        }

    if not settings.telegram_session_string:
        return {
            "enabled": True,
            "status": "no_session",
            "message": "Session string not configured. Generate one using the web UI.",
        }

    # Already connected? Report it without touching the client.
    if telegram_manager.is_initialized:
        return {
            "enabled": True,
            "status": "connected",
            "message": "Telegram client is connected and ready",
            "max_connections": settings.telegram_max_connections,
        }

    # Configured but not yet connected; the connection is established lazily
    # on the first actual media request, so just report readiness here.
    return {
        "enabled": True,
        "status": "ready",
        "message": "Telegram client is configured and ready. Will connect on first request.",
        "max_connections": settings.telegram_max_connections,
    }
|
||||
|
||||
|
||||
# =============================================================================
# Session String Generation Endpoints
# =============================================================================

# In-memory storage for pending session generation (simple approach for single-instance)
# Maps session_id -> { client, api_id, api_hash, phone_code_hash, step }
# NOTE(review): entries are removed on verify/2fa completion or explicit cancel,
# but abandoned flows are never expired -- presumably acceptable for a
# single-instance deployment; confirm if this service runs long-lived.
_pending_sessions: dict[str, dict] = {}
|
||||
|
||||
|
||||
class SessionStartRequest(BaseModel):
    """Request to start session generation.

    Depending on ``auth_type``, either ``phone`` (for user login) or
    ``bot_token`` (for bot login) must be supplied.
    """

    api_id: int  # Telegram API ID
    api_hash: str  # Telegram API hash paired with api_id
    auth_type: str  # "phone" or "bot"
    phone: Optional[str] = None  # international-format phone number; used when auth_type == "phone"
    bot_token: Optional[str] = None  # Bot API token; used when auth_type == "bot"
|
||||
|
||||
|
||||
class SessionCodeRequest(BaseModel):
    """Request to submit verification code."""

    session_id: str  # id returned by /telegram/session/start
    code: str  # verification code received in the Telegram app
|
||||
|
||||
|
||||
class Session2FARequest(BaseModel):
    """Request to submit 2FA password."""

    session_id: str  # id returned by /telegram/session/start
    password: str  # two-step verification (2FA) password
|
||||
|
||||
|
||||
@telegram_router.post("/telegram/session/start")
async def session_start(request: SessionStartRequest):
    """
    Start the session generation process.

    For phone auth: sends verification code to user's Telegram
    For bot auth: validates the bot token immediately

    Returns:
        session_id for subsequent requests, or session_string if bot auth succeeds

    Raises:
        HTTPException: 400 for invalid input or failed authentication,
            429 on Telegram flood limits, 500 for unexpected errors.
    """
    # Opaque token identifying this pending auth flow in _pending_sessions
    session_id = secrets.token_urlsafe(16)

    try:
        # Fresh in-memory session; its string form is returned to the caller on success
        client = TelegramClient(StringSession(), request.api_id, request.api_hash)
        await client.connect()

        if request.auth_type == "bot":
            # Bot authentication - complete immediately
            if not request.bot_token:
                await client.disconnect()
                raise HTTPException(status_code=400, detail="Bot token is required for bot authentication")

            try:
                await client.sign_in(bot_token=request.bot_token)
                session_string = client.session.save()
                await client.disconnect()

                # Bot flow needs no further steps, so no _pending_sessions entry is created
                return {
                    "success": True,
                    "step": "complete",
                    "session_string": session_string,
                    "api_id": request.api_id,
                    "api_hash": request.api_hash,
                }
            except Exception as e:
                # Ensure the client is released before surfacing the failure
                await client.disconnect()
                raise HTTPException(status_code=400, detail=f"Bot authentication failed: {str(e)}")

        else:
            # Phone authentication - send code
            phone = request.phone.strip() if request.phone else None
            if not phone:
                await client.disconnect()
                raise HTTPException(status_code=400, detail="Phone number is required for phone authentication")

            # Only a prefix of the phone number is logged
            logger.info(f"[session_start] Sending code to phone: {phone[:4]}***")

            try:
                result = await client.send_code_request(phone)

                # Store pending session; the connected client is kept alive
                # until /verify, /2fa, or /cancel completes the flow
                _pending_sessions[session_id] = {
                    "client": client,
                    "api_id": request.api_id,
                    "api_hash": request.api_hash,
                    "phone": phone,
                    "phone_code_hash": result.phone_code_hash,
                    "step": "code_sent",
                }

                return {
                    "success": True,
                    "session_id": session_id,
                    "step": "code_sent",
                    "message": "Verification code sent to your Telegram app",
                }
            except Exception as e:
                await client.disconnect()
                # Map well-known Telegram error strings to specific HTTP responses
                error_msg = str(e)
                if "PHONE_NUMBER_INVALID" in error_msg:
                    raise HTTPException(
                        status_code=400,
                        detail="Invalid phone number format. Use international format (e.g., +1234567890)",
                    )
                elif "PHONE_NUMBER_BANNED" in error_msg:
                    raise HTTPException(status_code=400, detail="This phone number is banned from Telegram")
                elif "FLOOD" in error_msg.upper():
                    raise HTTPException(status_code=429, detail="Too many attempts. Please wait before trying again.")
                raise HTTPException(status_code=400, detail=f"Failed to send code: {error_msg}")

    except HTTPException:
        # Re-raise the specific responses built above instead of wrapping them in a 500
        raise
    except Exception as e:
        logger.exception(f"[session_start] Error: {e}")
        raise HTTPException(status_code=500, detail=f"Failed to start session: {type(e).__name__}: {str(e)}")
|
||||
|
||||
|
||||
@telegram_router.post("/telegram/session/verify")
async def session_verify(request: SessionCodeRequest):
    """
    Verify the code sent to user's Telegram.

    Returns:
        session_string if successful, or indicates 2FA is required

    Raises:
        HTTPException: 404 for an unknown/expired session_id, 400 for an
            invalid code or other verification failure.
    """
    session_data = _pending_sessions.get(request.session_id)
    if not session_data:
        raise HTTPException(status_code=404, detail="Session not found or expired. Please start again.")

    # The still-connected client created by /telegram/session/start
    client = session_data["client"]
    phone = session_data["phone"]

    try:
        await client.sign_in(phone, request.code, phone_code_hash=session_data["phone_code_hash"])

        # Success - get session string, then tear down the pending flow
        session_string = client.session.save()
        await client.disconnect()
        del _pending_sessions[request.session_id]

        return {
            "success": True,
            "step": "complete",
            "session_string": session_string,
            "api_id": session_data["api_id"],
            "api_hash": session_data["api_hash"],
        }

    except Exception as e:
        error_msg = str(e)

        # Check for 2FA requirement (matched loosely on the message/class name
        # to avoid importing telethon's error classes here); the pending
        # session is kept alive so /telegram/session/2fa can finish the flow
        if (
            "Two-step verification" in error_msg
            or "password" in error_msg.lower()
            or "SessionPasswordNeededError" in type(e).__name__
        ):
            session_data["step"] = "2fa_required"
            return {
                "success": True,
                "session_id": request.session_id,
                "step": "2fa_required",
                "message": "Two-factor authentication is enabled. Please enter your 2FA password.",
            }

        # Check for invalid code -- the pending session is deliberately kept
        # so the user can retry with a new code
        if "PHONE_CODE_INVALID" in error_msg or "PHONE_CODE_EXPIRED" in error_msg:
            raise HTTPException(status_code=400, detail="Invalid or expired verification code. Please try again.")

        # Other error - cleanup (flow cannot be resumed)
        await client.disconnect()
        del _pending_sessions[request.session_id]
        raise HTTPException(status_code=400, detail=f"Verification failed: {error_msg}")
|
||||
|
||||
|
||||
@telegram_router.post("/telegram/session/2fa")
async def session_2fa(request: Session2FARequest):
    """
    Complete 2FA authentication.

    Returns:
        session_string on success

    Raises:
        HTTPException: 404 for an unknown/expired session_id, 400 when 2FA
            is not pending for this session or the password is rejected.
    """
    session_data = _pending_sessions.get(request.session_id)
    if not session_data:
        raise HTTPException(status_code=404, detail="Session not found or expired. Please start again.")

    # Only flows that /verify marked as needing a password may proceed
    if session_data.get("step") != "2fa_required":
        raise HTTPException(status_code=400, detail="2FA not required for this session")

    # The still-connected client created by /telegram/session/start
    client = session_data["client"]

    try:
        await client.sign_in(password=request.password)

        # Success - get session string, then tear down the pending flow
        session_string = client.session.save()
        await client.disconnect()
        del _pending_sessions[request.session_id]

        return {
            "success": True,
            "step": "complete",
            "session_string": session_string,
            "api_id": session_data["api_id"],
            "api_hash": session_data["api_hash"],
        }

    except Exception as e:
        error_msg = str(e)

        # Wrong password: keep the pending session so the user can retry
        if "PASSWORD_HASH_INVALID" in error_msg:
            raise HTTPException(status_code=400, detail="Incorrect 2FA password")

        # Other error - cleanup (flow cannot be resumed)
        await client.disconnect()
        del _pending_sessions[request.session_id]
        raise HTTPException(status_code=400, detail=f"2FA verification failed: {error_msg}")
|
||||
|
||||
|
||||
@telegram_router.post("/telegram/session/cancel")
async def session_cancel(session_id: str = Query(..., description="Session ID to cancel")):
    """
    Cancel a pending session generation.

    Removes the pending entry (if any) and disconnects its client on a
    best-effort basis; always reports success, even for unknown ids.
    """
    pending = _pending_sessions.pop(session_id, None)
    if pending:
        try:
            await pending["client"].disconnect()
        except Exception:
            # Disconnect failures are irrelevant once the entry is dropped.
            pass
    return {"success": True, "message": "Session cancelled"}
1146
mediaflow_proxy/routes/xtream.py
Normal file
1146
mediaflow_proxy/routes/xtream.py
Normal file
File diff suppressed because it is too large
Load Diff
Reference in New Issue
Block a user