Mirror of https://github.com/UrloMythus/UnHided.git, synced 2026-04-09 02:40:47 +00:00.
Commit: "Add files via upload".
This commit is contained in:
@@ -3,12 +3,13 @@ from typing import Dict, Type
|
||||
from mediaflow_proxy.extractors.base import BaseExtractor, ExtractorError
|
||||
from mediaflow_proxy.extractors.doodstream import DoodStreamExtractor
|
||||
from mediaflow_proxy.extractors.livetv import LiveTVExtractor
|
||||
from mediaflow_proxy.extractors.maxstream import MaxstreamExtractor
|
||||
from mediaflow_proxy.extractors.mixdrop import MixdropExtractor
|
||||
from mediaflow_proxy.extractors.uqload import UqloadExtractor
|
||||
from mediaflow_proxy.extractors.okru import OkruExtractor
|
||||
from mediaflow_proxy.extractors.streamtape import StreamtapeExtractor
|
||||
from mediaflow_proxy.extractors.supervideo import SupervideoExtractor
|
||||
|
||||
|
||||
from mediaflow_proxy.extractors.uqload import UqloadExtractor
|
||||
from mediaflow_proxy.extractors.vixcloud import VixCloudExtractor
|
||||
|
||||
|
||||
class ExtractorFactory:
|
||||
@@ -20,6 +21,9 @@ class ExtractorFactory:
|
||||
"Mixdrop": MixdropExtractor,
|
||||
"Streamtape": StreamtapeExtractor,
|
||||
"Supervideo": SupervideoExtractor,
|
||||
"VixCloud": VixCloudExtractor,
|
||||
"Okru": OkruExtractor,
|
||||
"Maxstream": MaxstreamExtractor,
|
||||
"LiveTV": LiveTVExtractor,
|
||||
}
|
||||
|
||||
|
||||
71
mediaflow_proxy/extractors/maxstream.py
Normal file
71
mediaflow_proxy/extractors/maxstream.py
Normal file
@@ -0,0 +1,71 @@
|
||||
import re
|
||||
from typing import Dict, Any
|
||||
|
||||
from bs4 import BeautifulSoup
|
||||
|
||||
from mediaflow_proxy.extractors.base import BaseExtractor, ExtractorError
|
||||
|
||||
|
||||
class MaxstreamExtractor(BaseExtractor):
    """Maxstream URL extractor."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Extracted streams are HLS playlists, so route them through the HLS proxy.
        self.mediaflow_endpoint = "hls_manifest_proxy"

    async def get_uprot(self, link: str) -> str:
        """Resolve an intermediate "uprot" shortlink to the real MaxStream page URL.

        Fetches the intermediate page and returns the href of its first anchor.

        Raises:
            ExtractorError: If the page contains no anchor to follow.
        """
        # "msf" hosts redirect; "mse" is the variant that serves the anchor page.
        if "msf" in link:
            link = link.replace("msf", "mse")
        response = await self._make_request(link)
        soup = BeautifulSoup(response.text, "lxml")
        anchor = soup.find("a")
        # Previously a missing anchor crashed with AttributeError (or passed
        # None downstream); fail with a descriptive error instead.
        if anchor is None or not anchor.get("href"):
            raise ExtractorError("Failed to extract Maxstream URL")
        return anchor.get("href")

    async def extract(self, url: str, **kwargs) -> Dict[str, Any]:
        """Extract the final Maxstream HLS playlist URL.

        The player page embeds a packed (p,a,c,k,e,d) script; the playlist URL
        is reconstructed from the script's pipe-separated term dictionary.

        Raises:
            ExtractorError: If the packed script cannot be located.
        """
        maxstream_url = await self.get_uprot(url)
        response = await self._make_request(maxstream_url, headers={"accept-language": "en-US,en;q=0.5"})

        # Locate the packed-JS payload; group(2) is the pipe-separated dictionary.
        match = re.search(r"\}\('(.+)',.+,'(.+)'\.split", response.text)
        if not match:
            raise ExtractorError("Failed to extract URL components")

        terms = match.group(2).split("|")
        urlset_index = terms.index("urlset")
        hls_index = terms.index("hls")
        sources_index = terms.index("sources")

        # Path components sit between "urlset" and "hls", stored reversed.
        path_parts = terms[urlset_index + 1 : hls_index][::-1]
        # Host components sit between "hls" and "sources", also reversed.
        host_parts = terms[hls_index + 1 : sources_index][::-1]

        # Rebuild the CDN hostname: parts are joined with "-", except parts
        # containing a "0", which are concatenated without a separator.
        # (The original loop variable shadowed the list name; renamed.)
        host = "".join(part if "0" in part else part + "-" for part in host_parts)

        base_url = f"https://{host}.host-cdn.net/hls/"
        if len(path_parts) == 1:
            # Single path component: leading comma, no trailing comma.
            final_url = f"{base_url},{path_parts[0]}.urlset/master.m3u8"
        else:
            # Multiple components: comma-joined with a trailing comma before
            # the ".urlset" suffix (matches the original counter-based loop).
            final_url = base_url + ",".join(path_parts) + ",.urlset/master.m3u8"

        self.base_headers["referer"] = url
        return {
            "destination_url": final_url,
            "request_headers": self.base_headers,
            "mediaflow_endpoint": self.mediaflow_endpoint,
        }
|
||||
@@ -10,6 +10,8 @@ class MixdropExtractor(BaseExtractor):
|
||||
|
||||
async def extract(self, url: str, **kwargs) -> Dict[str, Any]:
|
||||
"""Extract Mixdrop URL."""
|
||||
if "club" in url:
|
||||
url = url.replace("club", "ps").split("/2")[0]
|
||||
response = await self._make_request(url, headers={"accept-language": "en-US,en;q=0.5"})
|
||||
|
||||
# Extract and decode URL
|
||||
|
||||
32
mediaflow_proxy/extractors/okru.py
Normal file
32
mediaflow_proxy/extractors/okru.py
Normal file
@@ -0,0 +1,32 @@
|
||||
import json
|
||||
from typing import Dict, Any
|
||||
|
||||
from bs4 import BeautifulSoup, SoupStrainer
|
||||
|
||||
from mediaflow_proxy.extractors.base import BaseExtractor
|
||||
|
||||
|
||||
class OkruExtractor(BaseExtractor):
    """Okru (ok.ru) URL extractor."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Ok.ru exposes an HLS master playlist, so route through the HLS proxy.
        self.mediaflow_endpoint = "hls_manifest_proxy"

    async def extract(self, url: str, **kwargs) -> Dict[str, Any]:
        """Extract the Okru HLS master playlist URL.

        Parses the player page for the div carrying data-module="OKVideo";
        its "data-options" attribute is JSON whose flashvars.metadata (itself
        JSON) holds the playlist location.

        Raises:
            ExtractorError: If the player data cannot be located or parsed.
        """
        # Local import: this module's header imports only BaseExtractor.
        from mediaflow_proxy.extractors.base import ExtractorError

        response = await self._make_request(url)
        # Restrict parsing to <div> elements; the metadata lives on one of them.
        soup = BeautifulSoup(response.text, "lxml", parse_only=SoupStrainer("div"))
        div = soup.find("div", {"data-module": "OKVideo"})
        if div is None:
            # Previously a missing div raised AttributeError (or left final_url
            # unbound -> NameError); fail with a descriptive error instead.
            raise ExtractorError("Failed to locate OKVideo player data")
        try:
            data = json.loads(div.get("data-options"))
            metadata = json.loads(data["flashvars"]["metadata"])
            final_url = metadata["hlsMasterPlaylistUrl"]
        except (TypeError, KeyError, json.JSONDecodeError) as e:
            raise ExtractorError(f"Failed to parse Okru metadata: {e}")

        self.base_headers["referer"] = url
        return {
            "destination_url": final_url,
            "request_headers": self.base_headers,
            "mediaflow_endpoint": self.mediaflow_endpoint,
        }
|
||||
@@ -1,7 +1,7 @@
|
||||
import re
|
||||
from typing import Dict, Any
|
||||
|
||||
from mediaflow_proxy.extractors.base import BaseExtractor, ExtractorError
|
||||
from mediaflow_proxy.extractors.base import BaseExtractor
|
||||
|
||||
|
||||
class SupervideoExtractor(BaseExtractor):
|
||||
|
||||
72
mediaflow_proxy/extractors/vixcloud.py
Normal file
72
mediaflow_proxy/extractors/vixcloud.py
Normal file
@@ -0,0 +1,72 @@
|
||||
import json
|
||||
import re
|
||||
from typing import Dict, Any
|
||||
from urllib.parse import urlparse, parse_qs
|
||||
|
||||
from bs4 import BeautifulSoup, SoupStrainer
|
||||
|
||||
from mediaflow_proxy.extractors.base import BaseExtractor, ExtractorError
|
||||
|
||||
|
||||
class VixCloudExtractor(BaseExtractor):
    """VixCloud URL extractor."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # VixCloud serves HLS playlists, so route through the HLS proxy.
        self.mediaflow_endpoint = "hls_manifest_proxy"

    async def version(self, domain: str) -> str:
        """Get the Inertia version string of the VixCloud parent site.

        The version is required in the "x-inertia-version" header of
        subsequent requests.

        Raises:
            ExtractorError: If the domain is outdated or the version cannot
                be parsed from the page.
        """
        base_url = f"https://streamingcommunity.{domain}/richiedi-un-titolo"
        response = await self._make_request(
            base_url,
            headers={
                "Referer": f"https://streamingcommunity.{domain}/",
                "Origin": f"https://streamingcommunity.{domain}",
            },
        )
        if response.status_code != 200:
            raise ExtractorError("Outdated Domain")
        # The version lives in the JSON "data-page" attribute of <div id="app">.
        soup = BeautifulSoup(response.text, "lxml", parse_only=SoupStrainer("div", {"id": "app"}))
        try:
            data = json.loads(soup.find("div", {"id": "app"}).get("data-page"))
            return data["version"]
        except (KeyError, TypeError, json.JSONDecodeError, AttributeError) as e:
            # Previously an empty soup fell through and returned None silently;
            # always fail loudly so callers never send a None version header.
            raise ExtractorError(f"Failed to parse version: {e}")

    async def extract(self, url: str, **kwargs) -> Dict[str, Any]:
        """Extract the VixCloud HLS playlist URL.

        Follows the page's player iframe, pulls token/expires from the
        embedded script, and assembles the signed playlist URL.

        Raises:
            ExtractorError: If the iframe, token, or expiry cannot be found,
                or the embed request fails.
        """
        # e.g. "https://streamingcommunity.<domain>/..." -> "<domain>"
        domain = url.split("://")[1].split("/")[0].split(".")[1]
        version = await self.version(domain)
        response = await self._make_request(url, headers={"x-inertia": "true", "x-inertia-version": version})
        soup = BeautifulSoup(response.text, "lxml", parse_only=SoupStrainer("iframe"))
        iframe_tag = soup.find("iframe")
        if iframe_tag is None:
            # Previously a missing iframe crashed with AttributeError.
            raise ExtractorError("Failed to locate VixCloud iframe")
        iframe = iframe_tag.get("src")
        query_params = parse_qs(urlparse(iframe).query)
        response = await self._make_request(iframe, headers={"x-inertia": "true", "x-inertia-version": version})

        if response.status_code != 200:
            raise ExtractorError("Failed to extract URL components, Invalid Request")
        soup = BeautifulSoup(response.text, "lxml", parse_only=SoupStrainer("body"))
        try:
            script = soup.find("body").find("script").text
            token = re.search(r"'token':\s*'(\w+)'", script).group(1)
            expires = re.search(r"'expires':\s*'(\d+)'", script).group(1)
        except AttributeError:
            # Missing <script> or non-matching regex (None.group) both land here;
            # previously these variables could be unbound -> NameError below.
            raise ExtractorError("Failed to extract token/expires from player script")

        vixid = iframe.split("/embed/")[1].split("?")[0]
        base_url = iframe.split("://")[1].split("/")[0]
        final_url = f"https://{base_url}/playlist/{vixid}.m3u8?token={token}&expires={expires}"
        # "h=1" requests FHD; "b" is forwarded when present on the embed URL.
        if "canPlayFHD" in query_params:
            final_url += "&h=1"
        if "b" in query_params:
            final_url += "&b=1"

        self.base_headers["referer"] = url
        return {
            "destination_url": final_url,
            "request_headers": self.base_headers,
            "mediaflow_endpoint": self.mediaflow_endpoint,
        }
|
||||
@@ -63,7 +63,7 @@ class MPDSegmentParams(GenericParams):
|
||||
|
||||
|
||||
class ExtractorURLParams(GenericParams):
|
||||
host: Literal["Doodstream", "Mixdrop", "Uqload", "Streamtape", "Supervideo", "LiveTV"] = Field(
|
||||
host: Literal["Doodstream", "Mixdrop", "Uqload", "Streamtape", "Supervideo", "VixCloud", "Okru", "Maxstream", "LiveTV"] = Field(
|
||||
..., description="The host to extract the URL from."
|
||||
)
|
||||
destination: str = Field(..., description="The URL of the stream.", alias="d")
|
||||
|
||||
Reference in New Issue
Block a user