Version 2.2.1

This commit is contained in:
UrloMythus
2025-09-10 12:02:54 +02:00
parent 8f8c3b195e
commit 29a9c01418
58 changed files with 258 additions and 49 deletions

View File

@@ -148,32 +148,51 @@ class DLHDExtractor(BaseExtractor):
continue
return None
def extract_bundle_format(js):
"""Extract parameters from new BUNDLE format"""
def extract_xjz_format(js):
    """Extract parameters from the new XJZ base64-encoded JSON format.

    The player page embeds a declaration such as
    ``const XJZ = "<base64>"`` whose payload is a base64-encoded JSON
    object; every value inside that object is itself base64-encoded.

    Args:
        js: Raw JavaScript/HTML source of the iframe page.

    Returns:
        dict mapping each JSON key to its base64-decoded string value
        (fields that fail to decode are kept verbatim), or None when no
        XJZ variable is present or the payload cannot be parsed.
    """
    try:
        # Accept const/var/let declarations for robustness, mirroring the
        # pattern list already used by the BUNDLE fallback extractor.
        xjz_patterns = [
            r'const\s+XJZ\s*=\s*["\']([^"\']+)["\']',
            r'var\s+XJZ\s*=\s*["\']([^"\']+)["\']',
            r'let\s+XJZ\s*=\s*["\']([^"\']+)["\']',
        ]
        xjz_b64 = None
        for pattern in xjz_patterns:
            match = re.search(pattern, js)
            if match:
                xjz_b64 = match.group(1)
                break
        if not xjz_b64:
            return None
        import json
        # Decode the outer base64 layer: it yields a JSON document.
        xjz_json = base64.b64decode(xjz_b64).decode('utf-8')
        xjz_obj = json.loads(xjz_json)
        # Each value is base64-encoded as well; decode them individually,
        # falling back to the raw value when a field is not valid base64.
        decoded = {}
        for k, v in xjz_obj.items():
            try:
                decoded[k] = base64.b64decode(v).decode('utf-8')
            except Exception as e:
                logger.warning(f"Failed to decode XJZ field {k}: {e}")
                decoded[k] = v
        return decoded
    except Exception as e:
        logger.warning(f"Failed to extract XJZ format: {e}")
        return None
def extract_bundle_format(js):
"""Extract parameters from new BUNDLE format (legacy fallback)."""
try:
# Look for BUNDLE variable
bundle_patterns = [
r'const\s+BUNDLE\s*=\s*["\']([^"\']+)["\']',
r'var\s+BUNDLE\s*=\s*["\']([^"\']+)["\']',
r'let\s+BUNDLE\s*=\s*["\']([^"\']+)["\']'
]
bundle_data = None
for pattern in bundle_patterns:
match = re.search(pattern, js)
if match:
bundle_data = match.group(1)
break
if not bundle_data:
return None
# Decode the bundle (base64 -> JSON -> decode each field)
import json
bundle_json = base64.b64decode(bundle_data).decode('utf-8')
bundle_obj = json.loads(bundle_json)
# Decode each base64 field
decoded_bundle = {}
for key, value in bundle_obj.items():
try:
@@ -181,9 +200,7 @@ class DLHDExtractor(BaseExtractor):
except Exception as e:
logger.warning(f"Failed to decode bundle field {key}: {e}")
decoded_bundle[key] = value
return decoded_bundle
except Exception as e:
logger.warning(f"Failed to extract bundle format: {e}")
return None
@@ -204,28 +221,39 @@ class DLHDExtractor(BaseExtractor):
channel_key = match.group(1)
break
# Try new bundle format first
bundle_data = extract_bundle_format(iframe_content)
if bundle_data:
logger.info("Using new BUNDLE format for parameter extraction")
auth_host = bundle_data.get('b_host')
auth_php = bundle_data.get('b_script')
auth_ts = bundle_data.get('b_ts')
auth_rnd = bundle_data.get('b_rnd')
auth_sig = bundle_data.get('b_sig')
logger.debug(f"Bundle data extracted: {bundle_data}")
# Try new XJZ format first
xjz_data = extract_xjz_format(iframe_content)
if xjz_data:
logger.info("Using new XJZ format for parameter extraction")
auth_host = xjz_data.get('b_host')
auth_php = xjz_data.get('b_script')
auth_ts = xjz_data.get('b_ts')
auth_rnd = xjz_data.get('b_rnd')
auth_sig = xjz_data.get('b_sig')
logger.debug(f"XJZ data extracted: {xjz_data}")
else:
logger.info("Falling back to old format for parameter extraction")
# Fall back to old format
auth_ts = extract_var_old_format(iframe_content, 'c')
auth_rnd = extract_var_old_format(iframe_content, 'd')
auth_sig = extract_var_old_format(iframe_content, 'e')
auth_host = extract_var_old_format(iframe_content, 'a')
auth_php = extract_var_old_format(iframe_content, 'b')
# Try bundle format (legacy fallback)
bundle_data = extract_bundle_format(iframe_content)
if bundle_data:
logger.info("Using BUNDLE format for parameter extraction")
auth_host = bundle_data.get('b_host')
auth_php = bundle_data.get('b_script')
auth_ts = bundle_data.get('b_ts')
auth_rnd = bundle_data.get('b_rnd')
auth_sig = bundle_data.get('b_sig')
logger.debug(f"Bundle data extracted: {bundle_data}")
else:
logger.info("Falling back to old format for parameter extraction")
# Fall back to old format
auth_ts = extract_var_old_format(iframe_content, 'c')
auth_rnd = extract_var_old_format(iframe_content, 'd')
auth_sig = extract_var_old_format(iframe_content, 'e')
auth_host = extract_var_old_format(iframe_content, 'a')
auth_php = extract_var_old_format(iframe_content, 'b')
# Log what we found for debugging
logger.debug(f"Extracted parameters: channel_key={channel_key}, auth_ts={auth_ts}, auth_rnd={auth_rnd}, auth_sig={auth_sig}, auth_host={auth_host}, auth_php={auth_php}")
# Check which parameters are missing
missing_params = []
if not channel_key:
@@ -240,7 +268,7 @@ class DLHDExtractor(BaseExtractor):
missing_params.append('auth_host (var a / b_host)')
if not auth_php:
missing_params.append('auth_php (var b / b_script)')
if missing_params:
logger.error(f"Missing parameters: {', '.join(missing_params)}")
# Log a portion of the iframe content for debugging (first 2000 chars)
@@ -248,7 +276,21 @@ class DLHDExtractor(BaseExtractor):
raise ExtractorError(f"Error extracting parameters: missing {', '.join(missing_params)}")
auth_sig = quote_plus(auth_sig)
# 6. Richiesta auth
auth_url = f'{auth_host}{auth_php}?channel_id={channel_key}&ts={auth_ts}&rnd={auth_rnd}&sig={auth_sig}'
# Se il sito fornisce ancora /a.php ma ora serve /auth.php, sostituisci
# Normalize and robustly replace any variant of a.php with /auth.php
if auth_php:
normalized_auth_php = auth_php.strip().lstrip('/')
if normalized_auth_php == 'a.php':
logger.info("Sostituisco qualunque variante di a.php con /auth.php per compatibilità.")
auth_php = '/auth.php'
# Unisci host e script senza doppio slash
if auth_host.endswith('/') and auth_php.startswith('/'):
auth_url = f'{auth_host[:-1]}{auth_php}'
elif not auth_host.endswith('/') and not auth_php.startswith('/'):
auth_url = f'{auth_host}/{auth_php}'
else:
auth_url = f'{auth_host}{auth_php}'
auth_url = f'{auth_url}?channel_id={channel_key}&ts={auth_ts}&rnd={auth_rnd}&sig={auth_sig}'
auth_resp = await self._make_request(auth_url, headers=daddylive_headers)
# 7. Lookup server - Extract host parameter
host = None

View File

@@ -3,6 +3,7 @@ from typing import Dict, Type
from mediaflow_proxy.extractors.base import BaseExtractor, ExtractorError
from mediaflow_proxy.extractors.dlhd import DLHDExtractor
from mediaflow_proxy.extractors.doodstream import DoodStreamExtractor
from mediaflow_proxy.extractors.filelions import FileLionsExtractor
from mediaflow_proxy.extractors.livetv import LiveTVExtractor
from mediaflow_proxy.extractors.maxstream import MaxstreamExtractor
from mediaflow_proxy.extractors.mixdrop import MixdropExtractor
@@ -19,6 +20,7 @@ class ExtractorFactory:
_extractors: Dict[str, Type[BaseExtractor]] = {
"Doodstream": DoodStreamExtractor,
"FileLions": FileLionsExtractor,
"Uqload": UqloadExtractor,
"Mixdrop": MixdropExtractor,
"Streamtape": StreamtapeExtractor,

View File

@@ -14,9 +14,9 @@ class FastreamExtractor(BaseExtractor):
async def extract(self, url: str, **kwargs) -> Dict[str, Any]:
headers = {'Accept': '*/*', 'Connection': 'keep-alive','Accept-Language': 'en-US,en;q=0.5','Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8', 'user-agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:138.0) Gecko/20100101 Firefox/138.0'}
pattern = r'file:"(.*?)"'
patterns = [r'file:"(.*?)"']
final_url = await eval_solver(self, url, headers, pattern)
final_url = await eval_solver(self, url, headers, patterns)
self.base_headers["referer"] = f'https://{url.replace("https://","").split("/")[0]}/'
self.base_headers["origin"] = f'https://{url.replace("https://","").split("/")[0]}'

View File

@@ -0,0 +1,25 @@
from typing import Dict, Any
from mediaflow_proxy.extractors.base import BaseExtractor
from mediaflow_proxy.utils.packed import eval_solver
class FileLionsExtractor(BaseExtractor):
    """Extractor that resolves FileLions pages to their HLS manifest URL."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # FileLions serves HLS streams, so route through the manifest proxy.
        self.mediaflow_endpoint = "hls_manifest_proxy"

    async def extract(self, url: str, **kwargs) -> Dict[str, Any]:
        """Resolve *url* via the packed-JS solver and return proxy parameters."""
        # Source-URL patterns borrowed from ResolveURL's filelions plugin:
        # https://github.com/Gujal00/ResolveURL/blob/master/script.module.resolveurl/lib/resolveurl/plugins/filelions.py
        source_patterns = [
            r'''sources:\s*\[{file:\s*["'](?P<url>[^"']+)''',
            r'''["']hls[24]["']:\s*["'](?P<url>[^"']+)''',
        ]
        resolved_url = await eval_solver(self, url, {}, source_patterns)
        self.base_headers["referer"] = url
        return {
            "destination_url": resolved_url,
            "request_headers": self.base_headers,
            "mediaflow_endpoint": self.mediaflow_endpoint,
        }

View File

@@ -13,9 +13,9 @@ class MixdropExtractor(BaseExtractor):
url = url.replace("club", "ps").split("/2")[0]
headers = {"accept-language": "en-US,en;q=0.5"}
pattern = r'MDCore.wurl ?= ?"(.*?)"'
patterns = [r'MDCore.wurl ?= ?"(.*?)"']
final_url = f"https:{await eval_solver(self, url, headers, pattern)}"
final_url = await eval_solver(self, url, headers, patterns)
self.base_headers["referer"] = url
return {

View File

@@ -22,8 +22,7 @@ class OkruExtractor(BaseExtractor):
data_options = div.get("data-options")
data = json.loads(data_options)
metadata = json.loads(data["flashvars"]["metadata"])
final_url = metadata["hlsMasterPlaylistUrl"]
final_url = metadata.get("hlsMasterPlaylistUrl") or metadata.get("hlsManifestUrl")
self.base_headers["referer"] = url
return {
"destination_url": final_url,

View File

@@ -15,9 +15,9 @@ class SupervideoExtractor(BaseExtractor):
async def extract(self, url: str, **kwargs) -> Dict[str, Any]:
headers = {'Accept': '*/*', 'Connection': 'keep-alive', 'User-Agent': 'Mozilla/5.0 (Linux; Android 12) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/103.0.5060.71 Mobile Safari/537.36', 'user-agent': 'Mozilla/5.0 (Linux; Android 12) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/103.0.5060.71 Mobile Safari/537.36'}
pattern = r'file:"(.*?)"'
patterns = [r'file:"(.*?)"']
final_url = await eval_solver(self, url, headers, pattern)
final_url = await eval_solver(self, url, headers, patterns)
self.base_headers["referer"] = url
return {