mirror of
https://github.com/UrloMythus/UnHided.git
synced 2026-04-11 03:40:54 +00:00
updated to latest version
This commit is contained in:
@@ -29,7 +29,7 @@ class BaseExtractor(ABC):
|
||||
"""Make HTTP request with error handling."""
|
||||
try:
|
||||
async with create_httpx_client() as client:
|
||||
request_headers = self.base_headers
|
||||
request_headers = self.base_headers.copy()
|
||||
request_headers.update(headers or {})
|
||||
response = await client.request(
|
||||
method,
|
||||
@@ -40,9 +40,9 @@ class BaseExtractor(ABC):
|
||||
response.raise_for_status()
|
||||
return response
|
||||
except httpx.HTTPError as e:
|
||||
raise ExtractorError(f"HTTP request failed: {str(e)}")
|
||||
raise ExtractorError(f"HTTP request failed for URL {url}: {str(e)}")
|
||||
except Exception as e:
|
||||
raise ExtractorError(f"Request failed: {str(e)}")
|
||||
raise ExtractorError(f"Request failed for URL {url}: {str(e)}")
|
||||
|
||||
@abstractmethod
|
||||
async def extract(self, url: str, **kwargs) -> Dict[str, Any]:
|
||||
|
||||
@@ -1,373 +1,501 @@
|
||||
import re
|
||||
from typing import Dict, Any, Optional
|
||||
from urllib.parse import urlparse, quote
|
||||
|
||||
from mediaflow_proxy.extractors.base import BaseExtractor, ExtractorError
|
||||
|
||||
|
||||
class DLHDExtractor(BaseExtractor):
|
||||
"""DLHD (DaddyLive) URL extractor for M3U8 streams."""
|
||||
|
||||
def __init__(self, request_headers: dict):
    """Initialize the extractor with the caller's request headers.

    Args:
        request_headers: Headers of the incoming proxy request; forwarded
            to ``BaseExtractor`` which derives ``self.base_headers`` from them.
    """
    super().__init__(request_headers)
    # Streams resolved by this extractor are HLS, so route results through
    # the HLS manifest proxy endpoint by default.
    self.mediaflow_endpoint = "hls_manifest_proxy"
|
||||
|
||||
async def extract(self, url: str, **kwargs) -> Dict[str, Any]:
    """Extract DLHD stream URL and required headers.

    Resolution strategy, in order:
      1. Resolve the player iframe URL (from the channel page unless given).
      2. Try the vecloud API path directly.
      3. If the player is not a ``/stream/`` URL, try resolving via the
         "playnow" page and retry vecloud.
      4. Fall back to the standard channelKey/auth.php authentication flow.

    Args:
        url: The DaddyLive channel URL (required)

    Keyword Args:
        player_url: Direct player URL (optional)
        stream_url: The stream URL (optional)
        auth_url_base: Base URL for auth requests (optional)

    Returns:
        Dict containing stream URL and required headers

    Raises:
        ExtractorError: Any failure is wrapped in ExtractorError.
    """
    try:
        # Channel URL is required and serves as the referer
        channel_url = url
        channel_origin = self._get_origin(channel_url)  # Channel page origin

        # Check for direct parameters
        player_url_from_arg = kwargs.get("player_url")
        stream_url_from_arg = kwargs.get("stream_url")
        auth_url_base_from_arg = kwargs.get("auth_url_base")

        current_player_url_for_processing: str

        # If player URL not provided, extract it from channel page
        if not player_url_from_arg:
            # Get the channel page to extract the player iframe URL
            channel_headers = {
                "referer": channel_origin + "/",
                "origin": channel_origin,
                "user-agent": self.base_headers["user-agent"],
            }

            channel_response = await self._make_request(channel_url, headers=channel_headers)
            extracted_iframe_url = self._extract_player_url(channel_response.text)

            if not extracted_iframe_url:
                raise ExtractorError("Could not extract player URL from channel page")
            current_player_url_for_processing = extracted_iframe_url
        else:
            current_player_url_for_processing = player_url_from_arg

        # Attempt 1: _handle_vecloud with current_player_url_for_processing.
        # The referer for _handle_vecloud is the origin of the channel page
        # (channel_origin), or the origin of the player itself if it is a
        # /stream/ URL.
        try:
            referer_for_vecloud = channel_origin + "/"
            if re.search(r"/stream/([a-zA-Z0-9-]+)", current_player_url_for_processing):
                referer_for_vecloud = self._get_origin(current_player_url_for_processing) + "/"
            return await self._handle_vecloud(current_player_url_for_processing, referer_for_vecloud)
        except Exception:
            pass  # Deliberate best-effort: fall through to the next strategy

        # Attempt 2: if _handle_vecloud failed and the URL is not /stream/,
        # try _handle_playnow and then _handle_vecloud again with the URL
        # resulting from playnow.
        if not re.search(r"/stream/([a-zA-Z0-9-]+)", current_player_url_for_processing):
            try:
                playnow_derived_player_url = await self._handle_playnow(current_player_url_for_processing, channel_origin + "/")
                if re.search(r"/stream/([a-zA-Z0-9-]+)", playnow_derived_player_url):
                    try:
                        referer_for_vecloud_after_playnow = self._get_origin(playnow_derived_player_url) + "/"
                        return await self._handle_vecloud(playnow_derived_player_url, referer_for_vecloud_after_playnow)
                    except Exception:
                        pass
            except Exception:
                pass

        # If all previous attempts have failed, proceed with standard authentication.
        player_url_for_auth = current_player_url_for_processing
        player_origin_for_auth = self._get_origin(player_url_for_auth)

        # Get player page to extract authentication information
        player_headers = {
            "referer": player_origin_for_auth + "/",
            "origin": player_origin_for_auth,
            "user-agent": self.base_headers["user-agent"],
        }

        player_response = await self._make_request(player_url_for_auth, headers=player_headers)
        player_content = player_response.text

        # Extract authentication details from script tag
        auth_data = self._extract_auth_data(player_content)
        if not auth_data:
            raise ExtractorError("Failed to extract authentication data from player")

        # Extract auth URL base if not provided
        final_auth_url_base = auth_url_base_from_arg
        if not final_auth_url_base:
            final_auth_url_base = self._extract_auth_url_base(player_content)

        # If still no auth URL base, try to derive from stream URL or player URL
        if not final_auth_url_base:
            if stream_url_from_arg:
                final_auth_url_base = self._get_origin(stream_url_from_arg)
            else:
                # Try to extract from player URL structure
                player_domain_for_auth_derive = self._get_origin(player_url_for_auth)
                # Attempt to construct a standard auth domain
                final_auth_url_base = self._derive_auth_url_base(player_domain_for_auth_derive)

        if not final_auth_url_base:
            raise ExtractorError("Could not determine auth URL base")

        # Construct auth URL (auth_sig is URL-quoted; the other values are
        # used verbatim as extracted from the player page)
        auth_url = (
            f"{final_auth_url_base}/auth.php?channel_id={auth_data['channel_key']}"
            f"&ts={auth_data['auth_ts']}&rnd={auth_data['auth_rnd']}"
            f"&sig={quote(auth_data['auth_sig'])}"
        )

        # Make auth request
        auth_req_headers = {
            "referer": player_origin_for_auth + "/",
            "origin": player_origin_for_auth,
            "user-agent": self.base_headers["user-agent"],
        }

        auth_response = await self._make_request(auth_url, headers=auth_req_headers)

        # Check if authentication succeeded
        if auth_response.json().get("status") != "ok":
            raise ExtractorError("Authentication failed")

        # If no stream URL provided, look up the server and generate the stream URL
        final_stream_url = stream_url_from_arg
        if not final_stream_url:
            final_stream_url = await self._lookup_server(
                lookup_url_base=player_origin_for_auth,
                auth_url_base=final_auth_url_base,
                auth_data=auth_data,
                headers=auth_req_headers,
            )

        # Set up the final stream headers
        stream_headers = {
            "referer": player_url_for_auth,
            "origin": player_origin_for_auth,
            "user-agent": self.base_headers["user-agent"],
        }

        # Return the stream URL with headers
        return {
            "destination_url": final_stream_url,
            "request_headers": stream_headers,
            "mediaflow_endpoint": self.mediaflow_endpoint,
        }

    except Exception as e:
        raise ExtractorError(f"Extraction failed: {str(e)}")
|
||||
|
||||
async def _handle_vecloud(self, player_url: str, channel_referer: str) -> Dict[str, Any]:
    """Handle vecloud URLs with their specific API.

    Flow: follow the player URL (capturing any redirect target), then POST
    to ``/api/source/<stream_id>?type=live`` on the final player origin and
    read the stream URL from the JSON response.

    Args:
        player_url: The vecloud player URL
        channel_referer: The referer of the channel page

    Returns:
        Dict containing stream URL and required headers

    Raises:
        ExtractorError: If the stream ID, API call, or stream URL fails.
    """
    try:
        # Extract stream ID from vecloud URL
        stream_id_match = re.search(r"/stream/([a-zA-Z0-9-]+)", player_url)
        if not stream_id_match:
            raise ExtractorError("Could not extract stream ID from vecloud URL")

        stream_id = stream_id_match.group(1)

        response = await self._make_request(
            player_url, headers={"referer": channel_referer, "user-agent": self.base_headers["user-agent"]}
        )
        # Rebind to the final URL after redirects — the API origin must match it.
        player_url = str(response.url)

        # Construct API URL
        player_parsed = urlparse(player_url)
        player_domain = player_parsed.netloc
        player_origin = f"{player_parsed.scheme}://{player_parsed.netloc}"
        api_url = f"{player_origin}/api/source/{stream_id}?type=live"

        # Set up headers for API request
        api_headers = {
            "referer": player_url,
            "origin": player_origin,
            "user-agent": self.base_headers["user-agent"],
            "content-type": "application/json",
        }

        # API body: "r" = referer of the channel page, "d" = player domain
        api_data = {"r": channel_referer, "d": player_domain}

        # Make API request
        api_response = await self._make_request(api_url, method="POST", headers=api_headers, json=api_data)
        api_data = api_response.json()

        # Check if request was successful
        if not api_data.get("success"):
            raise ExtractorError("Vecloud API request failed")

        # Extract stream URL from response
        stream_url = api_data.get("player", {}).get("source_file")

        if not stream_url:
            raise ExtractorError("Could not find stream URL in vecloud response")

        # Set up stream headers
        stream_headers = {
            "referer": player_origin + "/",
            "origin": player_origin,
            "user-agent": self.base_headers["user-agent"],
        }

        # Return the stream URL with headers
        return {
            "destination_url": stream_url,
            "request_headers": stream_headers,
            "mediaflow_endpoint": self.mediaflow_endpoint,
        }

    except Exception as e:
        raise ExtractorError(f"Vecloud extraction failed: {str(e)}")
|
||||
|
||||
async def _handle_playnow(self, player_iframe: str, channel_origin: str) -> str:
    """Resolve a "playnow" page down to the player URL embedded inside it."""
    request_headers = {"referer": channel_origin + "/", "user-agent": self.base_headers["user-agent"]}

    # Fetch the playnow page and look for the embedded player iframe.
    response = await self._make_request(player_iframe, headers=request_headers)
    resolved_player_url = self._extract_player_url(response.text)

    if resolved_player_url:
        return resolved_player_url
    raise ExtractorError("Could not extract player URL from playnow response")
|
||||
|
||||
def _extract_player_url(self, html_content: str) -> Optional[str]:
    """Find the player iframe's ``src`` attribute in channel-page HTML.

    Patterns are tried in priority order; the first hit wins. Returns None
    when nothing matches or on any unexpected failure.
    """
    candidate_patterns = (
        # Preferred: an iframe carrying an allowfullscreen attribute after src.
        r'<iframe[^>]*src=["\']([^"\']+)["\'][^>]*allowfullscreen',
        # Fallback: any iframe whose src points at a known player host.
        r'<iframe[^>]*src=["\']([^"\']+(?:premiumtv|daddylivehd|vecloud)[^"\']*)["\']',
    )
    try:
        for pattern in candidate_patterns:
            hit = re.search(pattern, html_content, re.IGNORECASE)
            if hit:
                return hit.group(1).strip()
        return None
    except Exception:
        return None
|
||||
|
||||
async def _lookup_server(
    self, lookup_url_base: str, auth_url_base: str, auth_data: Dict[str, str], headers: Dict[str, str]
) -> str:
    """Query ``server_lookup.php`` and build the final ``mono.m3u8`` URL.

    The stream host is assembled from the returned ``server_key`` plus the
    auth domain's suffix (everything after its first label).
    """
    try:
        channel_key = auth_data["channel_key"]
        lookup_url = f"{lookup_url_base}/server_lookup.php?channel_id={quote(channel_key)}"

        lookup_response = await self._make_request(lookup_url, headers=headers)
        server_key = lookup_response.json().get("server_key")
        if not server_key:
            raise ExtractorError("Failed to get server key")

        # Domain suffix = auth host minus its first label (or the whole
        # host when it has a single label).
        netloc_labels = urlparse(auth_url_base).netloc.split(".")
        if len(netloc_labels) > 1:
            domain_suffix = ".".join(netloc_labels[1:])
        else:
            domain_suffix = netloc_labels[0]

        if "/" not in server_key:
            # Normal case: server key doubles as subdomain prefix.
            return f"https://{server_key}new.{domain_suffix}/{server_key}/{channel_key}/mono.m3u8"

        # Special case like "top1/cdn": subdomain is the part before the slash.
        subdomain = server_key.split("/")[0]
        return f"https://{subdomain}.{domain_suffix}/{server_key}/{channel_key}/mono.m3u8"

    except Exception as e:
        raise ExtractorError(f"Server lookup failed: {str(e)}")
|
||||
|
||||
def _extract_auth_data(self, html_content: str) -> Dict[str, str]:
    """Pull the channelKey/authTs/authRnd/authSig JS variables from the player page.

    Returns a dict with all four values, or an empty dict if any one of
    them is missing (all-or-nothing contract).
    """
    # Map of result key -> JavaScript variable name to scrape.
    js_variables = {
        "channel_key": "channelKey",
        "auth_ts": "authTs",
        "auth_rnd": "authRnd",
        "auth_sig": "authSig",
    }
    try:
        extracted: Dict[str, str] = {}
        for field, js_name in js_variables.items():
            match = re.search(rf'var\s+{js_name}\s*=\s*["\']([^"\']+)["\']', html_content)
            if not match:
                # One missing variable invalidates the whole set.
                return {}
            extracted[field] = match.group(1)
        return extracted
    except Exception:
        return {}
|
||||
|
||||
def _extract_auth_url_base(self, html_content: str) -> Optional[str]:
    """Locate the auth endpoint's base URL inside player-page JavaScript."""
    try:
        # Preferred: the URL handed to fetchWithRetry(... '/auth.php').
        fetch_call = re.search(r'fetchWithRetry\([\'"]([^\'"]*/auth\.php)', html_content)
        if fetch_call:
            # Keep everything before the /auth.php path component.
            return fetch_call.group(1).split("/auth.php")[0]

        # Fallback: any quoted https URL whose path ends in auth.php.
        direct_domain = re.search(r'[\'"]https://([^/\'\"]+)(?:/[^\'\"]*)?/auth\.php', html_content)
        return f"https://{direct_domain.group(1)}" if direct_domain else None
    except Exception:
        return None
|
||||
|
||||
def _get_origin(self, url: str) -> str:
    """Return the origin (scheme://host) portion of *url*."""
    parts = urlparse(url)
    return parts.scheme + "://" + parts.netloc
|
||||
|
||||
def _derive_auth_url_base(self, player_domain: str) -> Optional[str]:
    """Attempt to derive the auth URL base from the player origin.

    Defect fixed: the previous ``for prefix in ["auth", "api", "cdn"]`` loop
    returned unconditionally on its first iteration, so only the "auth"
    subdomain was ever produced and the other candidates were dead code.
    The dead loop is removed here; observable behavior is unchanged.

    Args:
        player_domain: Player origin URL (e.g. "https://player.example.com").

    Returns:
        "https://auth.<base-domain>" built from the last two host labels,
        or None when the host has fewer than two labels or parsing fails.
    """
    try:
        parsed = urlparse(player_domain)
        domain_parts = parsed.netloc.split(".")

        # Need at least second-level + top-level labels to build a subdomain.
        if len(domain_parts) < 2:
            return None

        base_domain = ".".join(domain_parts[-2:])
        # Conventionally the auth endpoint lives on an "auth." subdomain.
        return f"https://auth.{base_domain}"
    except Exception:
        return None
|
||||
import re
|
||||
import base64
|
||||
import logging
|
||||
from typing import Any, Dict, Optional
|
||||
from urllib.parse import urlparse, quote, urlunparse
|
||||
|
||||
from mediaflow_proxy.extractors.base import BaseExtractor, ExtractorError
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class DLHDExtractor(BaseExtractor):
|
||||
"""DLHD (DaddyLive) URL extractor for M3U8 streams."""
|
||||
|
||||
def __init__(self, request_headers: dict):
    """Initialize the extractor with the caller's request headers.

    Args:
        request_headers: Headers of the incoming proxy request; forwarded
            to ``BaseExtractor``.
    """
    super().__init__(request_headers)
    # Default to HLS proxy endpoint
    self.mediaflow_endpoint = "hls_manifest_proxy"
    # Cache for the resolved base URL to avoid repeated network calls
    self._cached_base_url: Optional[str] = None
    # Store iframe context (last resolved player iframe URL) for newkso.ru requests
    self._iframe_context: Optional[str] = None
|
||||
|
||||
def _get_headers_for_url(self, url: str, base_headers: dict) -> dict:
    """Build the headers for *url*, overlaying newkso.ru-specific ones when needed.

    Non-newkso URLs get a plain copy of *base_headers*; newkso.ru URLs get
    a fixed Chrome User-Agent plus Referer/Origin taken from the stored
    iframe context (or from the newkso origin itself as a fallback).
    """
    merged = base_headers.copy()

    target = urlparse(url)
    if "newkso.ru" not in target.netloc:
        return merged

    chrome_ua = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/136.0.0.0 Safari/537.36'

    if self._iframe_context:
        # Prefer the previously resolved player iframe as referer/origin.
        iframe_origin = f"https://{urlparse(self._iframe_context).netloc}"
        newkso_headers = {
            'User-Agent': chrome_ua,
            'Referer': self._iframe_context,
            'Origin': iframe_origin
        }
        logger.info(f"Applied newkso.ru specific headers with iframe context for URL: {url}")
    else:
        # No iframe context known: point referer/origin at the newkso host itself.
        newkso_origin = f"{target.scheme}://{target.netloc}"
        newkso_headers = {
            'User-Agent': chrome_ua,
            'Referer': newkso_origin,
            'Origin': newkso_origin
        }
        logger.info(f"Applied newkso.ru specific headers (fallback) for URL: {url}")

    logger.debug(f"Headers applied: {newkso_headers}")
    merged.update(newkso_headers)
    return merged
|
||||
|
||||
async def _make_request(self, url: str, method: str = "GET", headers: dict = None, **kwargs):
    """Delegate to ``BaseExtractor._make_request`` after injecting newkso.ru headers.

    The URL-aware header helper decides whether the newkso.ru overrides
    apply; everything else passes through unchanged.
    """
    effective_headers = self._get_headers_for_url(url, headers or {})
    return await super()._make_request(url, method, effective_headers, **kwargs)
|
||||
|
||||
async def extract(self, url: str, **kwargs) -> Dict[str, Any]:
    """Extract DLHD stream URL and required headers (tvproxy logic adapted to async, with fallback across alternative endpoints)."""
    from urllib.parse import urlparse, quote_plus

    async def get_daddylive_base_url():
        # Resolve the current DaddyLive base URL by following redirects from
        # the canonical domain; cached on the instance after the first call.
        if self._cached_base_url:
            return self._cached_base_url
        try:
            resp = await self._make_request("https://daddylive.sx/")
            # resp.url is the final URL after redirects
            base_url = str(resp.url)
            if not base_url.endswith('/'):
                base_url += '/'
            self._cached_base_url = base_url
            return base_url
        except Exception:
            # Fallback to default if request fails
            return "https://daddylive.sx/"

    def extract_channel_id(url):
        # Try the known URL shapes in order of specificity.
        match_premium = re.search(r'/premium(\d+)/mono\.m3u8$', url)
        if match_premium:
            return match_premium.group(1)
        # Handle both normal and URL-encoded patterns
        match_player = re.search(r'/(?:watch|stream|cast|player)/stream-(\d+)\.php', url)
        if match_player:
            return match_player.group(1)
        # Handle URL-encoded patterns like %2Fstream%2Fstream-123.php or just stream-123.php
        match_encoded = re.search(r'(?:%2F|/)stream-(\d+)\.php', url, re.IGNORECASE)
        if match_encoded:
            return match_encoded.group(1)
        # Handle direct stream- pattern without path
        match_direct = re.search(r'stream-(\d+)\.php', url)
        if match_direct:
            return match_direct.group(1)
        return None

    async def try_endpoint(baseurl, endpoint, channel_id):
        # Full resolution pipeline for one candidate endpoint
        # (stream/, cast/, player/, watch/); raises on any failure so the
        # caller can try the next endpoint.
        stream_url = f"{baseurl}{endpoint}stream-{channel_id}.php"
        daddy_origin = urlparse(baseurl).scheme + "://" + urlparse(baseurl).netloc
        daddylive_headers = {
            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/136.0.0.0 Safari/537.36',
            'Referer': baseurl,
            'Origin': daddy_origin
        }
        # 1. Request the stream/cast/player/watch page
        resp1 = await self._make_request(stream_url, headers=daddylive_headers)
        # 2. Extract the "Player 2" link
        iframes = re.findall(r'<a[^>]*href="([^"]+)"[^>]*>\s*<button[^>]*>\s*Player\s*2\s*</button>', resp1.text)
        if not iframes:
            raise ExtractorError("No Player 2 link found")
        url2 = iframes[0]
        url2 = baseurl + url2
        url2 = url2.replace('//cast', '/cast')
        daddylive_headers['Referer'] = url2
        # NOTE(review): Origin is set to the full page URL, not just
        # scheme://host — presumably intentional for this site; confirm.
        daddylive_headers['Origin'] = url2
        # 3. Request the Player 2 page
        resp2 = await self._make_request(url2, headers=daddylive_headers)
        # 4. Extract the embedded iframe
        iframes2 = re.findall(r'iframe src="([^"]*)', resp2.text)
        if not iframes2:
            raise ExtractorError("No iframe found in Player 2 page")
        iframe_url = iframes2[0]
        # Store iframe context for newkso.ru requests
        self._iframe_context = iframe_url
        resp3 = await self._make_request(iframe_url, headers=daddylive_headers)
        iframe_content = resp3.text

        # 5. Extract auth parameters (robust) - handle both old and new formats
        def extract_var_old_format(js, name):
            # Try multiple patterns for variable extraction (old format):
            # the value is base64-wrapped in an atob(...) call.
            patterns = [
                rf'var (?:__)?{name}\s*=\s*atob\("([^"]+)"\)',
                rf'var (?:__)?{name}\s*=\s*atob\(\'([^\']+)\'\)',
                rf'(?:var\s+)?(?:__)?{name}\s*=\s*atob\s*\(\s*["\']([^"\']+)["\']\s*\)',
                rf'(?:let|const)\s+(?:__)?{name}\s*=\s*atob\s*\(\s*["\']([^"\']+)["\']\s*\)'
            ]
            for pattern in patterns:
                m = re.search(pattern, js)
                if m:
                    try:
                        return base64.b64decode(m.group(1)).decode('utf-8')
                    except Exception as decode_error:
                        logger.warning(f"Failed to decode base64 for variable {name}: {decode_error}")
                        continue
            return None

        def extract_bundle_format(js):
            """Extract parameters from new BUNDLE format"""
            try:
                # Look for BUNDLE variable
                bundle_patterns = [
                    r'const\s+BUNDLE\s*=\s*["\']([^"\']+)["\']',
                    r'var\s+BUNDLE\s*=\s*["\']([^"\']+)["\']',
                    r'let\s+BUNDLE\s*=\s*["\']([^"\']+)["\']'
                ]

                bundle_data = None
                for pattern in bundle_patterns:
                    match = re.search(pattern, js)
                    if match:
                        bundle_data = match.group(1)
                        break

                if not bundle_data:
                    return None

                # Decode the bundle (base64 -> JSON -> decode each field)
                import json
                bundle_json = base64.b64decode(bundle_data).decode('utf-8')
                bundle_obj = json.loads(bundle_json)

                # Decode each base64 field; keep the raw value when decoding fails.
                decoded_bundle = {}
                for key, value in bundle_obj.items():
                    try:
                        decoded_bundle[key] = base64.b64decode(value).decode('utf-8')
                    except Exception as e:
                        logger.warning(f"Failed to decode bundle field {key}: {e}")
                        decoded_bundle[key] = value

                return decoded_bundle

            except Exception as e:
                logger.warning(f"Failed to extract bundle format: {e}")
                return None

        # Try multiple patterns for channel key extraction
        channel_key = None
        channel_key_patterns = [
            r'const\s+CHANNEL_KEY\s*=\s*["\']([^"\']+)["\']',
            r'var\s+CHANNEL_KEY\s*=\s*["\']([^"\']+)["\']',
            r'let\s+CHANNEL_KEY\s*=\s*["\']([^"\']+)["\']',
            r'channelKey\s*=\s*["\']([^"\']+)["\']',
            r'var\s+channelKey\s*=\s*["\']([^"\']+)["\']',
            r'(?:let|const)\s+channelKey\s*=\s*["\']([^"\']+)["\']'
        ]
        for pattern in channel_key_patterns:
            match = re.search(pattern, iframe_content)
            if match:
                channel_key = match.group(1)
                break

        # Try new bundle format first
        bundle_data = extract_bundle_format(iframe_content)
        if bundle_data:
            logger.info("Using new BUNDLE format for parameter extraction")
            auth_host = bundle_data.get('b_host')
            auth_php = bundle_data.get('b_script')
            auth_ts = bundle_data.get('b_ts')
            auth_rnd = bundle_data.get('b_rnd')
            auth_sig = bundle_data.get('b_sig')
            logger.debug(f"Bundle data extracted: {bundle_data}")
        else:
            logger.info("Falling back to old format for parameter extraction")
            # Fall back to old format
            auth_ts = extract_var_old_format(iframe_content, 'c')
            auth_rnd = extract_var_old_format(iframe_content, 'd')
            auth_sig = extract_var_old_format(iframe_content, 'e')
            auth_host = extract_var_old_format(iframe_content, 'a')
            auth_php = extract_var_old_format(iframe_content, 'b')

        # Log what we found for debugging
        logger.debug(f"Extracted parameters: channel_key={channel_key}, auth_ts={auth_ts}, auth_rnd={auth_rnd}, auth_sig={auth_sig}, auth_host={auth_host}, auth_php={auth_php}")

        # Check which parameters are missing
        missing_params = []
        if not channel_key:
            missing_params.append('channel_key/CHANNEL_KEY')
        if not auth_ts:
            missing_params.append('auth_ts (var c / b_ts)')
        if not auth_rnd:
            missing_params.append('auth_rnd (var d / b_rnd)')
        if not auth_sig:
            missing_params.append('auth_sig (var e / b_sig)')
        if not auth_host:
            missing_params.append('auth_host (var a / b_host)')
        if not auth_php:
            missing_params.append('auth_php (var b / b_script)')

        if missing_params:
            logger.error(f"Missing parameters: {', '.join(missing_params)}")
            # Log a portion of the iframe content for debugging (first 2000 chars)
            logger.debug(f"Iframe content sample: {iframe_content[:2000]}")
            raise ExtractorError(f"Error extracting parameters: missing {', '.join(missing_params)}")
        auth_sig = quote_plus(auth_sig)
        # 6. Auth request (response body is not inspected; the call itself
        # registers the session server-side)
        auth_url = f'{auth_host}{auth_php}?channel_id={channel_key}&ts={auth_ts}&rnd={auth_rnd}&sig={auth_sig}'
        auth_resp = await self._make_request(auth_url, headers=daddylive_headers)
        # 7. Server lookup - extract the host parameter
        host = None
        host_patterns = [
            r'(?s)m3u8 =.*?:.*?:.*?".*?".*?"([^"]*)',  # Original pattern
            r'm3u8\s*=.*?"([^"]*)"',  # Simplified m3u8 pattern
            r'host["\']?\s*[:=]\s*["\']([^"\']*)',  # host: or host= pattern
            r'["\']([^"\']*\.newkso\.ru[^"\']*)',  # Direct newkso.ru pattern
            r'["\']([^"\']*\/premium\d+[^"\']*)',  # premium path pattern
            r'url.*?["\']([^"\']*newkso[^"\']*)',  # URL with newkso
        ]

        for pattern in host_patterns:
            matches = re.findall(pattern, iframe_content)
            if matches:
                host = matches[0]
                logger.debug(f"Found host with pattern '{pattern}': {host}")
                break

        if not host:
            logger.error("Failed to extract host from iframe content")
            logger.debug(f"Iframe content for host extraction: {iframe_content[:2000]}")
            # Try to find any newkso.ru related URLs
            potential_hosts = re.findall(r'["\']([^"\']*newkso[^"\']*)', iframe_content)
            if potential_hosts:
                logger.debug(f"Potential host URLs found: {potential_hosts}")
            raise ExtractorError("Failed to extract host parameter")

        # Extract server lookup URL from fetchWithRetry call (dynamic extraction)
        server_lookup = None

        # Look for the server_lookup.php pattern in JavaScript
        if "fetchWithRetry('/server_lookup.php?channel_id='" in iframe_content:
            server_lookup = '/server_lookup.php?channel_id='
            logger.debug('Found server lookup URL: /server_lookup.php?channel_id=')
        elif '/server_lookup.php' in iframe_content:
            # Try to extract the full path from the fetchWithRetry call,
            # scanning line by line for the first quoted URL.
            js_lines = iframe_content.split('\n')
            for js_line in js_lines:
                if 'server_lookup.php' in js_line and 'fetchWithRetry' in js_line:
                    # Extract the URL from the fetchWithRetry call
                    start = js_line.find("'")
                    if start != -1:
                        end = js_line.find("'", start + 1)
                        if end != -1:
                            potential_url = js_line[start+1:end]
                            if 'server_lookup' in potential_url:
                                server_lookup = potential_url
                                logger.debug(f'Extracted server lookup URL: {server_lookup}')
                                break

        if not server_lookup:
            logger.error('Failed to extract server lookup URL from iframe content')
            logger.debug(f'Iframe content sample: {iframe_content[:2000]}')
            raise ExtractorError('Failed to extract server lookup URL')

        server_lookup_url = f"https://{urlparse(iframe_url).netloc}{server_lookup}{channel_key}"
        logger.debug(f"Server lookup URL: {server_lookup_url}")

        try:
            lookup_resp = await self._make_request(server_lookup_url, headers=daddylive_headers)
            server_data = lookup_resp.json()
            server_key = server_data.get('server_key')
            if not server_key:
                logger.error(f"No server_key in response: {server_data}")
                raise ExtractorError("Failed to get server key from lookup response")

            logger.info(f"Server lookup successful - Server key: {server_key}")
        except Exception as lookup_error:
            logger.error(f"Server lookup request failed: {lookup_error}")
            raise ExtractorError(f"Server lookup failed: {str(lookup_error)}")

        referer_raw = f'https://{urlparse(iframe_url).netloc}'

        # Extract URL construction logic dynamically from JavaScript
        # Simple approach: look for newkso.ru URLs and construct based on server_key

        # Check if we have the special case server_key
        if server_key == 'top1/cdn':
            clean_m3u8_url = f'https://top1.newkso.ru/top1/cdn/{channel_key}/mono.m3u8'
            logger.info(f'Using special case URL for server_key \'top1/cdn\': {clean_m3u8_url}')
        else:
            clean_m3u8_url = f'https://{server_key}new.newkso.ru/{server_key}/{channel_key}/mono.m3u8'
            logger.info(f'Using general case URL for server_key \'{server_key}\': {clean_m3u8_url}')

        logger.info(f'Generated stream URL: {clean_m3u8_url}')
        logger.debug(f'Server key: {server_key}, Channel key: {channel_key}')

        # Check if the final stream URL is on newkso.ru domain
        if "newkso.ru" in clean_m3u8_url:
            # For newkso.ru streams, use iframe URL as referer
            stream_headers = {
                'User-Agent': daddylive_headers['User-Agent'],
                'Referer': iframe_url,
                'Origin': referer_raw
            }
            logger.info(f"Applied iframe-specific headers for newkso.ru stream URL: {clean_m3u8_url}")
            logger.debug(f"Stream headers for newkso.ru: {stream_headers}")
        else:
            # For other domains, use the original logic
            stream_headers = {
                'User-Agent': daddylive_headers['User-Agent'],
                'Referer': referer_raw,
                'Origin': referer_raw
            }
        return {
            "destination_url": clean_m3u8_url,
            "request_headers": stream_headers,
            "mediaflow_endpoint": self.mediaflow_endpoint,
        }

    try:
        clean_url = url
        channel_id = extract_channel_id(clean_url)
        if not channel_id:
            raise ExtractorError(f"Unable to extract channel ID from {clean_url}")

        baseurl = await get_daddylive_base_url()
        # Try each known endpoint until one yields a stream; remember the
        # last failure so it can be reported if all of them fail.
        endpoints = ["stream/", "cast/", "player/", "watch/"]
        last_exc = None
        for endpoint in endpoints:
            try:
                return await try_endpoint(baseurl, endpoint, channel_id)
            except Exception as exc:
                last_exc = exc
                continue
        raise ExtractorError(f"Extraction failed: {str(last_exc)}")
    except Exception as e:
        raise ExtractorError(f"Extraction failed: {str(e)}")
|
||||
|
||||
async def _lookup_server(
    self, lookup_url_base: str, auth_url_base: str, auth_data: Dict[str, str], headers: Dict[str, str]
) -> str:
    """Resolve the CDN server for a channel and build its mono.m3u8 stream URL.

    Args:
        lookup_url_base: Base URL hosting server_lookup.php.
        auth_url_base: Auth base URL; its host supplies the CDN domain suffix.
        auth_data: Must contain "channel_key" identifying the channel.
        headers: Headers to send with the lookup request.

    Returns:
        The generated m3u8 stream URL.

    Raises:
        ExtractorError: If the lookup fails or no server key is returned.
    """
    try:
        channel_key = auth_data["channel_key"]
        lookup_endpoint = f"{lookup_url_base}/server_lookup.php?channel_id={quote(channel_key)}"

        # Ask the lookup service which CDN server serves this channel.
        response = await self._make_request(lookup_endpoint, headers=headers)
        server_key = response.json().get("server_key")
        if not server_key:
            raise ExtractorError("Failed to get server key")

        # Drop the first label of the auth host to get the shared domain suffix
        # (e.g. "auth.example.com" -> "example.com").
        netloc_labels = urlparse(auth_url_base).netloc.split(".")
        domain_suffix = ".".join(netloc_labels[1:]) if len(netloc_labels) > 1 else netloc_labels[0]

        if "/" in server_key:
            # Compound keys like "top1/cdn": the first segment is the subdomain.
            subdomain = server_key.split("/")[0]
            return f"https://{subdomain}.{domain_suffix}/{server_key}/{channel_key}/mono.m3u8"
        # Plain keys map onto a "<key>new" subdomain.
        return f"https://{server_key}new.{domain_suffix}/{server_key}/{channel_key}/mono.m3u8"
    except Exception as e:
        raise ExtractorError(f"Server lookup failed: {str(e)}")
|
||||
|
||||
def _extract_auth_data(self, html_content: str) -> Dict[str, str]:
|
||||
"""Extract authentication data from player page."""
|
||||
try:
|
||||
channel_key_match = re.search(r'var\s+channelKey\s*=\s*["\']([^"\']+)["\']', html_content)
|
||||
if not channel_key_match:
|
||||
return {}
|
||||
channel_key = channel_key_match.group(1)
|
||||
|
||||
# New pattern with atob
|
||||
auth_ts_match = re.search(r'var\s+__c\s*=\s*atob\([\'"]([^\'"]+)[\'"]\)', html_content)
|
||||
auth_rnd_match = re.search(r'var\s+__d\s*=\s*atob\([\'"]([^\'"]+)[\'"]\)', html_content)
|
||||
auth_sig_match = re.search(r'var\s+__e\s*=\s*atob\([\'"]([^\'"]+)[\'"]\)', html_content)
|
||||
|
||||
if auth_ts_match and auth_rnd_match and auth_sig_match:
|
||||
return {
|
||||
"channel_key": channel_key,
|
||||
"auth_ts": base64.b64decode(auth_ts_match.group(1)).decode("utf-8"),
|
||||
"auth_rnd": base64.b64decode(auth_rnd_match.group(1)).decode("utf-8"),
|
||||
"auth_sig": base64.b64decode(auth_sig_match.group(1)).decode("utf-8"),
|
||||
}
|
||||
|
||||
# Original pattern
|
||||
auth_ts_match = re.search(r'var\s+authTs\s*=\s*["\']([^"\']+)["\']', html_content)
|
||||
auth_rnd_match = re.search(r'var\s+authRnd\s*=\s*["\']([^"\']+)["\']', html_content)
|
||||
auth_sig_match = re.search(r'var\s+authSig\s*=\s*["\']([^"\']+)["\']', html_content)
|
||||
|
||||
if auth_ts_match and auth_rnd_match and auth_sig_match:
|
||||
return {
|
||||
"channel_key": channel_key,
|
||||
"auth_ts": auth_ts_match.group(1),
|
||||
"auth_rnd": auth_rnd_match.group(1),
|
||||
"auth_sig": auth_sig_match.group(1),
|
||||
}
|
||||
return {}
|
||||
except Exception:
|
||||
return {}
|
||||
|
||||
def _extract_auth_url_base(self, html_content: str) -> Optional[str]:
|
||||
"""Extract auth URL base from player page script content."""
|
||||
try:
|
||||
# New atob pattern for auth base URL
|
||||
auth_url_base_match = re.search(r'var\s+__a\s*=\s*atob\([\'"]([^\'"]+)[\'"]\)', html_content)
|
||||
if auth_url_base_match:
|
||||
decoded_url = base64.b64decode(auth_url_base_match.group(1)).decode("utf-8")
|
||||
return decoded_url.strip().rstrip("/")
|
||||
|
||||
# Look for auth URL or domain in fetchWithRetry call or similar patterns
|
||||
auth_url_match = re.search(r'fetchWithRetry\([\'"]([^\'"]*/auth\.php)', html_content)
|
||||
|
||||
if auth_url_match:
|
||||
auth_url = auth_url_match.group(1)
|
||||
# Extract base URL up to the auth.php part
|
||||
return auth_url.split("/auth.php")[0]
|
||||
|
||||
# Try finding domain directly
|
||||
domain_match = re.search(r'[\'"]https://([^/\'\"]+)(?:/[^\'\"]*)?/auth\.php', html_content)
|
||||
|
||||
if domain_match:
|
||||
return f"https://{domain_match.group(1)}"
|
||||
|
||||
return None
|
||||
except Exception:
|
||||
return None
|
||||
|
||||
def _get_origin(self, url: str) -> str:
|
||||
"""Extract origin from URL."""
|
||||
parsed = urlparse(url)
|
||||
return f"{parsed.scheme}://{parsed.netloc}"
|
||||
|
||||
def _derive_auth_url_base(self, player_domain: str) -> Optional[str]:
|
||||
"""Attempt to derive auth URL base from player domain."""
|
||||
try:
|
||||
# Typical pattern is to use a subdomain for auth domain
|
||||
parsed = urlparse(player_domain)
|
||||
domain_parts = parsed.netloc.split(".")
|
||||
|
||||
# Get the top-level domain and second-level domain
|
||||
if len(domain_parts) >= 2:
|
||||
base_domain = ".".join(domain_parts[-2:])
|
||||
# Try common subdomains for auth
|
||||
for prefix in ["auth", "api", "cdn"]:
|
||||
potential_auth_domain = f"https://{prefix}.{base_domain}"
|
||||
return potential_auth_domain
|
||||
|
||||
return None
|
||||
except Exception:
|
||||
return None
|
||||
|
||||
@@ -10,6 +10,7 @@ from mediaflow_proxy.extractors.okru import OkruExtractor
|
||||
from mediaflow_proxy.extractors.streamtape import StreamtapeExtractor
|
||||
from mediaflow_proxy.extractors.supervideo import SupervideoExtractor
|
||||
from mediaflow_proxy.extractors.uqload import UqloadExtractor
|
||||
from mediaflow_proxy.extractors.vavoo import VavooExtractor
|
||||
from mediaflow_proxy.extractors.vixcloud import VixCloudExtractor
|
||||
from mediaflow_proxy.extractors.fastream import FastreamExtractor
|
||||
|
||||
@@ -27,6 +28,7 @@ class ExtractorFactory:
|
||||
"Maxstream": MaxstreamExtractor,
|
||||
"LiveTV": LiveTVExtractor,
|
||||
"DLHD": DLHDExtractor,
|
||||
"Vavoo": VavooExtractor,
|
||||
"Fastream": FastreamExtractor
|
||||
}
|
||||
|
||||
|
||||
@@ -13,10 +13,10 @@ class FastreamExtractor(BaseExtractor):
|
||||
self.mediaflow_endpoint = "hls_manifest_proxy"
|
||||
|
||||
async def extract(self, url: str, **kwargs) -> Dict[str, Any]:
|
||||
#Init headers needed for the request.
|
||||
headers = {'Accept': '*/*', 'Connection': 'keep-alive','Accept-Language': 'en-US,en;q=0.5','Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8', 'user-agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:138.0) Gecko/20100101 Firefox/138.0'}
|
||||
"""Extract Fastream URL."""
|
||||
final_url = await eval_solver(self,url,headers, r'file:"(.*?)"')
|
||||
pattern = r'file:"(.*?)"'
|
||||
|
||||
final_url = await eval_solver(self, url, headers, pattern)
|
||||
|
||||
self.base_headers["referer"] = f'https://{url.replace("https://","").split("/")[0]}/'
|
||||
self.base_headers["origin"] = f'https://{url.replace("https://","").split("/")[0]}'
|
||||
|
||||
@@ -13,8 +13,9 @@ class MixdropExtractor(BaseExtractor):
|
||||
url = url.replace("club", "ps").split("/2")[0]
|
||||
|
||||
headers = {"accept-language": "en-US,en;q=0.5"}
|
||||
pattern = r'MDCore.wurl ?= ?"(.*?)"'
|
||||
|
||||
final_url = f"https:{await eval_solver(self, url, headers, r'MDCore.wurl ?= ?"(.*?)"')}"
|
||||
final_url = f"https:{await eval_solver(self, url, headers, pattern)}"
|
||||
|
||||
self.base_headers["referer"] = url
|
||||
return {
|
||||
|
||||
@@ -14,12 +14,10 @@ class SupervideoExtractor(BaseExtractor):
|
||||
self.mediaflow_endpoint = "hls_manifest_proxy"
|
||||
|
||||
async def extract(self, url: str, **kwargs) -> Dict[str, Any]:
|
||||
#Init headers needed for the request.
|
||||
headers = {'Accept': '*/*', 'Connection': 'keep-alive', 'User-Agent': 'Mozilla/5.0 (Linux; Android 12) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/103.0.5060.71 Mobile Safari/537.36', 'user-agent': 'Mozilla/5.0 (Linux; Android 12) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/103.0.5060.71 Mobile Safari/537.36'}
|
||||
pattern = r'file:"(.*?)"'
|
||||
|
||||
|
||||
"""Extract Supervideo URL."""
|
||||
final_url = await eval_solver(self,url,headers, r'file:"(.*?)"')
|
||||
final_url = await eval_solver(self, url, headers, pattern)
|
||||
|
||||
self.base_headers["referer"] = url
|
||||
return {
|
||||
|
||||
169
mediaflow_proxy/extractors/vavoo.py
Normal file
169
mediaflow_proxy/extractors/vavoo.py
Normal file
@@ -0,0 +1,169 @@
|
||||
import logging
|
||||
from typing import Any, Dict, Optional
|
||||
from mediaflow_proxy.extractors.base import BaseExtractor, ExtractorError
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
class VavooExtractor(BaseExtractor):
    """Vavoo URL extractor for resolving vavoo.to links (solo httpx, async).

    Flow: obtain an app signature from the vavoo.tv ping API, then exchange
    the vavoo.to link for a direct stream URL via the MediaHubMX resolve API.
    """

    def __init__(self, request_headers: dict):
        """Initialize with caller headers and select the raw proxy endpoint.

        Args:
            request_headers: Headers forwarded from the incoming request.
        """
        super().__init__(request_headers)
        # Resolved Vavoo links are served through the plain stream proxy,
        # not the HLS manifest proxy.
        self.mediaflow_endpoint = "proxy_stream_endpoint"

    async def get_auth_signature(self) -> Optional[str]:
        """Get authentication signature for Vavoo API (async, httpx).

        POSTs an Android-app-shaped "ping" payload to the vavoo.tv API and
        returns the ``addonSig`` value from the JSON response.

        Returns:
            The signature string, or None on any failure (logged, not raised).
        """
        headers = {
            "user-agent": "okhttp/4.11.0",
            "accept": "application/json",
            "content-type": "application/json; charset=utf-8",
            "accept-encoding": "gzip"
        }
        import time
        # Millisecond epoch timestamp, used as app start times in the payload.
        current_time = int(time.time() * 1000)

        # Payload mimicking the official Vavoo Android client.
        # NOTE(review): the "token" below is a hardcoded app credential baked
        # into the source — confirm it is intended to be shipped in cleartext.
        data = {
            "token": "tosFwQCJMS8qrW_AjLoHPQ41646J5dRNha6ZWHnijoYQQQoADQoXYSo7ki7O5-CsgN4CH0uRk6EEoJ0728ar9scCRQW3ZkbfrPfeCXW2VgopSW2FWDqPOoVYIuVPAOnXCZ5g",
            "reason": "app-blur",
            "locale": "de",
            "theme": "dark",
            "metadata": {
                "device": {
                    "type": "Handset",
                    "brand": "google",
                    "model": "Pixel",
                    "name": "sdk_gphone64_arm64",
                    "uniqueId": "d10e5d99ab665233"
                },
                "os": {
                    "name": "android",
                    "version": "13",
                    "abis": ["arm64-v8a", "armeabi-v7a", "armeabi"],
                    "host": "android"
                },
                "app": {
                    "platform": "android",
                    "version": "3.1.21",
                    "buildId": "289515000",
                    "engine": "hbc85",
                    "signatures": ["6e8a975e3cbf07d5de823a760d4c2547f86c1403105020adee5de67ac510999e"],
                    "installer": "app.revanced.manager.flutter"
                },
                "version": {
                    "package": "tv.vavoo.app",
                    "binary": "3.1.21",
                    "js": "3.1.21"
                }
            },
            "appFocusTime": 0,
            "playerActive": False,
            "playDuration": 0,
            "devMode": False,
            "hasAddon": True,
            "castConnected": False,
            "package": "tv.vavoo.app",
            "version": "3.1.21",
            "process": "app",
            "firstAppStart": current_time,
            "lastAppStart": current_time,
            "ipLocation": "",
            "adblockEnabled": True,
            "proxy": {
                "supported": ["ss", "openvpn"],
                "engine": "ss",
                "ssVersion": 1,
                "enabled": True,
                "autoServer": True,
                "id": "de-fra"
            },
            "iap": {
                "supported": False
            }
        }

        try:
            resp = await self._make_request(
                "https://www.vavoo.tv/api/app/ping",
                method="POST",
                json=data,
                headers=headers
            )
            result = resp.json()
            addon_sig = result.get("addonSig")
            if addon_sig:
                logger.info("Successfully obtained Vavoo authentication signature")
                return addon_sig
            else:
                logger.warning("No addonSig in Vavoo API response")
                return None
        except Exception as e:
            # Deliberately best-effort: callers treat None as "no signature".
            logger.exception(f"Failed to get Vavoo authentication signature: {str(e)}")
            return None

    async def extract(self, url: str, **kwargs) -> Dict[str, Any]:
        """Extract Vavoo stream URL (async, httpx).

        Args:
            url: A vavoo.to link to resolve.

        Returns:
            Dict with "destination_url", "request_headers" and
            "mediaflow_endpoint" for the proxy layer.

        Raises:
            ExtractorError: On invalid URL, missing signature, or failed
                resolution.
        """
        if "vavoo.to" not in url:
            raise ExtractorError("Not a valid Vavoo URL")

        # Get authentication signature first; resolution requires it.
        signature = await self.get_auth_signature()
        if not signature:
            raise ExtractorError("Failed to get Vavoo authentication signature")

        # Resolve the URL via the MediaHubMX API.
        resolved_url = await self._resolve_vavoo_link(url, signature)
        if not resolved_url:
            raise ExtractorError("Failed to resolve Vavoo URL")

        # Set up headers for the resolved stream; the CDN expects a
        # vavoo.to referer.
        stream_headers = {
            "user-agent": self.base_headers["user-agent"],
            "referer": "https://vavoo.to/",
        }

        return {
            "destination_url": resolved_url,
            "request_headers": stream_headers,
            "mediaflow_endpoint": self.mediaflow_endpoint,
        }

    async def _resolve_vavoo_link(self, link: str, signature: str) -> Optional[str]:
        """Resolve a Vavoo link using the MediaHubMX API (async, httpx).

        Args:
            link: The vavoo.to URL to resolve.
            signature: Signature obtained from :meth:`get_auth_signature`.

        Returns:
            The resolved stream URL, or None when the API response carries
            no URL.

        Raises:
            ExtractorError: If the API request itself fails.
        """
        headers = {
            "user-agent": "MediaHubMX/2",
            "accept": "application/json",
            "content-type": "application/json; charset=utf-8",
            "accept-encoding": "gzip",
            "mediahubmx-signature": signature
        }
        data = {
            "language": "de",
            "region": "AT",
            "url": link,
            "clientVersion": "3.1.21"
        }
        try:
            logger.info(f"Attempting to resolve Vavoo URL: {link}")
            resp = await self._make_request(
                "https://vavoo.to/mediahubmx-resolve.json",
                method="POST",
                json=data,
                headers=headers
            )
            result = resp.json()
            logger.info(f"Vavoo API response: {result}")

            # The API may answer with either a list of results or a single
            # object; accept the first entry carrying a "url".
            if isinstance(result, list) and result and result[0].get("url"):
                resolved_url = result[0]["url"]
                logger.info(f"Successfully resolved Vavoo URL to: {resolved_url}")
                return resolved_url
            elif isinstance(result, dict) and result.get("url"):
                resolved_url = result["url"]
                logger.info(f"Successfully resolved Vavoo URL to: {resolved_url}")
                return resolved_url
            else:
                logger.warning(f"No URL found in Vavoo API response: {result}")
                return None
        except Exception as e:
            logger.exception(f"Vavoo resolution failed for URL {link}: {str(e)}")
            raise ExtractorError(f"Vavoo resolution failed: {str(e)}") from e
|
||||
Reference in New Issue
Block a user