From fb60e99822785c0c270855458175d8178b7b242c Mon Sep 17 00:00:00 2001 From: Urlo30 <91965455+UrloMythus@users.noreply.github.com> Date: Sun, 29 Dec 2024 23:18:53 +0100 Subject: [PATCH] Add files via upload --- mediaflow_proxy/__init__.py | 0 mediaflow_proxy/configs.py | 68 ++ mediaflow_proxy/const.py | 17 + mediaflow_proxy/drm/__init__.py | 11 + mediaflow_proxy/drm/decrypter.py | 778 ++++++++++++++++++ mediaflow_proxy/extractors/__init__.py | 0 mediaflow_proxy/extractors/base.py | 50 ++ mediaflow_proxy/extractors/doodstream.py | 39 + mediaflow_proxy/extractors/factory.py | 32 + mediaflow_proxy/extractors/livetv.py | 251 ++++++ mediaflow_proxy/extractors/mixdrop.py | 36 + mediaflow_proxy/extractors/streamtape.py | 32 + mediaflow_proxy/extractors/supervideo.py | 27 + mediaflow_proxy/extractors/uqload.py | 24 + mediaflow_proxy/handlers.py | 358 ++++++++ mediaflow_proxy/main.py | 99 +++ mediaflow_proxy/mpd_processor.py | 214 +++++ mediaflow_proxy/routes.py | 164 ++++ mediaflow_proxy/routes/__init__.py | 5 + mediaflow_proxy/routes/extractor.py | 61 ++ mediaflow_proxy/routes/proxy.py | 138 ++++ mediaflow_proxy/routes/speedtest.py | 43 + mediaflow_proxy/schemas.py | 74 ++ mediaflow_proxy/speedtest/__init__.py | 0 mediaflow_proxy/speedtest/models.py | 46 ++ .../speedtest/providers/all_debrid.py | 50 ++ mediaflow_proxy/speedtest/providers/base.py | 24 + .../speedtest/providers/real_debrid.py | 32 + mediaflow_proxy/speedtest/service.py | 129 +++ mediaflow_proxy/static/index.html | 76 ++ mediaflow_proxy/static/logo.png | Bin 0 -> 87421 bytes mediaflow_proxy/static/speedtest.html | 697 ++++++++++++++++ mediaflow_proxy/utils/__init__.py | 0 mediaflow_proxy/utils/cache_utils.py | 376 +++++++++ mediaflow_proxy/utils/crypto_utils.py | 110 +++ mediaflow_proxy/utils/http_utils.py | 430 ++++++++++ mediaflow_proxy/utils/m3u8_processor.py | 87 ++ mediaflow_proxy/utils/mpd_utils.py | 555 +++++++++++++ requirements.txt | 19 + run.py | 18 + 40 files changed, 5170 insertions(+) create 
mode 100644 mediaflow_proxy/__init__.py create mode 100644 mediaflow_proxy/configs.py create mode 100644 mediaflow_proxy/const.py create mode 100644 mediaflow_proxy/drm/__init__.py create mode 100644 mediaflow_proxy/drm/decrypter.py create mode 100644 mediaflow_proxy/extractors/__init__.py create mode 100644 mediaflow_proxy/extractors/base.py create mode 100644 mediaflow_proxy/extractors/doodstream.py create mode 100644 mediaflow_proxy/extractors/factory.py create mode 100644 mediaflow_proxy/extractors/livetv.py create mode 100644 mediaflow_proxy/extractors/mixdrop.py create mode 100644 mediaflow_proxy/extractors/streamtape.py create mode 100644 mediaflow_proxy/extractors/supervideo.py create mode 100644 mediaflow_proxy/extractors/uqload.py create mode 100644 mediaflow_proxy/handlers.py create mode 100644 mediaflow_proxy/main.py create mode 100644 mediaflow_proxy/mpd_processor.py create mode 100644 mediaflow_proxy/routes.py create mode 100644 mediaflow_proxy/routes/__init__.py create mode 100644 mediaflow_proxy/routes/extractor.py create mode 100644 mediaflow_proxy/routes/proxy.py create mode 100644 mediaflow_proxy/routes/speedtest.py create mode 100644 mediaflow_proxy/schemas.py create mode 100644 mediaflow_proxy/speedtest/__init__.py create mode 100644 mediaflow_proxy/speedtest/models.py create mode 100644 mediaflow_proxy/speedtest/providers/all_debrid.py create mode 100644 mediaflow_proxy/speedtest/providers/base.py create mode 100644 mediaflow_proxy/speedtest/providers/real_debrid.py create mode 100644 mediaflow_proxy/speedtest/service.py create mode 100644 mediaflow_proxy/static/index.html create mode 100644 mediaflow_proxy/static/logo.png create mode 100644 mediaflow_proxy/static/speedtest.html create mode 100644 mediaflow_proxy/utils/__init__.py create mode 100644 mediaflow_proxy/utils/cache_utils.py create mode 100644 mediaflow_proxy/utils/crypto_utils.py create mode 100644 mediaflow_proxy/utils/http_utils.py create mode 100644 
# --- mediaflow_proxy/configs.py ---
from typing import Dict, Optional, Union

import httpx
from pydantic import BaseModel, Field
from pydantic_settings import BaseSettings


class RouteConfig(BaseModel):
    """Transport options for one httpx mount pattern."""

    proxy: bool = True  # route traffic through a proxy at all
    proxy_url: Optional[str] = None  # route-specific proxy; falls back to TransportConfig.proxy_url
    verify_ssl: bool = True


class TransportConfig(BaseSettings):
    """Main proxy configuration, loaded from the environment / .env file."""

    proxy_url: Optional[str] = Field(
        None, description="Primary proxy URL. Example: socks5://user:pass@proxy:1080 or http://proxy:8080"
    )
    all_proxy: bool = Field(False, description="Enable proxy for all routes by default")
    transport_routes: Dict[str, RouteConfig] = Field(
        default_factory=dict, description="Pattern-based route configuration"
    )

    def get_mounts(
        self, async_http: bool = True
    ) -> Dict[str, Optional[Union[httpx.HTTPTransport, httpx.AsyncHTTPTransport]]]:
        """
        Build the httpx ``mounts=`` mapping of URL pattern -> transport instance.

        Args:
            async_http: use AsyncHTTPTransport when True, HTTPTransport otherwise.
        """
        transport_cls = httpx.AsyncHTTPTransport if async_http else httpx.HTTPTransport

        # One transport per configured route; explicit parentheses document the
        # intended precedence (ternary binds looser than `or`).
        mounts = {
            pattern: transport_cls(
                verify=route.verify_ssl,
                proxy=(route.proxy_url or self.proxy_url) if route.proxy else None,
            )
            for pattern, route in self.transport_routes.items()
        }

        # Catch-all proxy mount, applied after the specific patterns.
        if self.all_proxy:
            mounts["all://"] = transport_cls(proxy=self.proxy_url)

        return mounts

    class Config:
        env_file = ".env"
        extra = "ignore"


class Settings(BaseSettings):
    # Password protecting the API endpoints; None disables protection.
    api_password: str | None = None
    # Logging level name used at startup.
    log_level: str = "INFO"
    # httpx transport/proxy configuration.
    transport_config: TransportConfig = Field(default_factory=TransportConfig)
    # Whether streaming progress tracking is enabled.
    enable_streaming_progress: bool = False

    # Default user agent sent on outbound HTTP requests.
    user_agent: str = (
        "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3"
    )

    class Config:
        env_file = ".env"
        extra = "ignore"


settings = Settings()


# --- mediaflow_proxy/const.py ---
# Response headers forwarded back to the client by the proxy.
SUPPORTED_RESPONSE_HEADERS = [
    "accept-ranges",
    "content-type",
    "content-length",
    "content-range",
    "connection",
    "transfer-encoding",
    "last-modified",
    "etag",
    "cache-control",
    "expires",
]

# Request headers forwarded upstream (range requests for seeking).
SUPPORTED_REQUEST_HEADERS = [
    "range",
    "if-range",
]


# --- mediaflow_proxy/drm/__init__.py ---
import os
import tempfile


async def create_temp_file(suffix: str, content: bytes = None, prefix: str = None) -> tempfile.NamedTemporaryFile:
    """
    Create a named temporary file that survives close (delete=False) and attach a
    `delete_file()` callback for explicit cleanup by the caller.

    Args:
        suffix: filename suffix (e.g. ".mp4").
        content: optional bytes written to the file; when given the file is closed.
        prefix: optional filename prefix.

    Returns:
        The NamedTemporaryFile object (closed iff `content` was provided).
    """
    handle = tempfile.NamedTemporaryFile(delete=False, suffix=suffix, prefix=prefix)
    handle.delete_file = lambda: os.unlink(handle.name)
    # NOTE(review): when `content` is falsy the handle is returned still open —
    # presumably callers write to it themselves; confirm they also close it.
    if content:
        handle.write(content)
        handle.close()
    return handle
class MP4Atom:
    """
    One MP4 box: an 8-byte header (32-bit big-endian size + 4-byte type)
    followed by the payload.

    Attributes:
        atom_type (bytes): 4-byte box type (e.g. b"moov").
        size (int): total box size in bytes, header included.
        data (Union[memoryview, bytearray]): payload (size - 8 bytes).
    """

    __slots__ = ("atom_type", "size", "data")

    def __init__(self, atom_type: bytes, size: int, data: Union[memoryview, bytearray]):
        """
        Args:
            atom_type (bytes): The type of the atom.
            size (int): The size of the atom.
            data (Union[memoryview, bytearray]): The data contained in the atom.
        """
        self.atom_type = atom_type
        self.size = size
        self.data = data

    def __repr__(self):
        # Fix: the original returned an empty f-string (angle-bracket text lost
        # in transit); emit a useful debugging representation instead.
        return f"MP4Atom(type={self.atom_type!r}, size={self.size})"

    def pack(self) -> bytes:
        """
        Serialize the atom back to wire format (size + type + payload).

        Returns:
            bytes: packed binary data.
        """
        # Fix: parsed atoms hold a memoryview payload, and `bytes + memoryview`
        # raises TypeError; bytes(self.data) accepts memoryview and bytearray alike.
        return struct.pack(">I", self.size) + self.atom_type + bytes(self.data)


class MP4Parser:
    """
    Sequential parser over raw MP4 data, yielding MP4Atom objects.
    """

    def __init__(self, data: memoryview):
        """
        Args:
            data (memoryview): The binary data of the MP4 file/fragment.
        """
        self.data = data
        self.position = 0  # cursor for read_atom()

    def read_atom(self) -> Optional[MP4Atom]:
        """
        Read the next atom at the current cursor and advance past it.

        Returns:
            Optional[MP4Atom]: the atom, or None at end of data / on a malformed header.
        """
        pos = self.position
        if pos + 8 > len(self.data):
            return None

        size, atom_type = struct.unpack_from(">I4s", self.data, pos)
        pos += 8

        if size == 1:
            # 64-bit extended size follows the 8-byte header.
            if pos + 8 > len(self.data):
                return None
            size = struct.unpack_from(">Q", self.data, pos)[0]
            pos += 8
            # NOTE(review): for extended-size atoms the header is 16 bytes, yet the
            # payload slice below still uses size - 8 — looks 8 bytes too long; confirm.

        if size < 8 or pos + size - 8 > len(self.data):
            return None

        atom_data = self.data[pos : pos + size - 8]
        self.position = pos + size - 8
        return MP4Atom(atom_type, size, atom_data)

    def list_atoms(self) -> list[MP4Atom]:
        """
        List all top-level atoms without disturbing the caller's cursor.

        Returns:
            list[MP4Atom]: all atoms parsed from position 0.
        """
        atoms = []
        original_position = self.position
        self.position = 0
        while self.position + 8 <= len(self.data):
            atom = self.read_atom()
            if not atom:
                break
            atoms.append(atom)
        self.position = original_position  # restore cursor
        return atoms

    def _read_atom_at(self, pos: int, end: int) -> Optional[MP4Atom]:
        # Cursor-free variant of read_atom, bounded by `end`.
        if pos + 8 > end:
            return None

        size, atom_type = struct.unpack_from(">I4s", self.data, pos)
        pos += 8

        if size == 1:
            if pos + 8 > end:
                return None
            size = struct.unpack_from(">Q", self.data, pos)[0]
            pos += 8

        if size < 8 or pos + size - 8 > end:
            return None

        atom_data = self.data[pos : pos + size - 8]
        return MP4Atom(atom_type, size, atom_data)

    def print_atoms_structure(self, indent: int = 0):
        """
        Print the atom tree for debugging.

        Args:
            indent (int): The indentation level for printing.
        """
        pos = 0
        end = len(self.data)
        while pos + 8 <= end:
            atom = self._read_atom_at(pos, end)
            if not atom:
                break
            self.print_single_atom_structure(atom, pos, indent)
            pos += atom.size

    def print_single_atom_structure(self, atom: MP4Atom, parent_position: int, indent: int):
        """
        Print one atom and recurse into its children.

        Args:
            atom (MP4Atom): The atom to print.
            parent_position (int): Absolute offset of the atom in self.data.
            indent (int): Indentation level.
        """
        try:
            atom_type = atom.atom_type.decode("utf-8")
        except UnicodeDecodeError:
            atom_type = repr(atom.atom_type)
        print(" " * indent + f"Type: {atom_type}, Size: {atom.size}")

        child_pos = 0
        child_end = len(atom.data)
        while child_pos + 8 <= child_end:
            # NOTE(review): the recursion passes the grandparent's position down
            # unchanged, so grandchild offsets may be wrong — confirm against a
            # nested sample before relying on this debug output.
            child_atom = self._read_atom_at(parent_position + 8 + child_pos, parent_position + 8 + child_end)
            if not child_atom:
                break
            self.print_single_atom_structure(child_atom, parent_position, indent + 2)
            child_pos += child_atom.size
# Per-sample CENC auxiliary data: encryption flag, IV, and (clear, encrypted)
# sub-sample byte-count pairs.
CENCSampleAuxiliaryDataFormat = namedtuple("CENCSampleAuxiliaryDataFormat", ["is_encrypted", "iv", "sub_samples"])


class MP4Decrypter:
    """
    Decrypts CENC (AES-CTR) encrypted MP4 segments.

    Attributes:
        key_map (dict[bytes, bytes]): Mapping of 4-byte big-endian track IDs to
            decryption keys. (With a single entry the key is used for all tracks,
            so callers may also key by KID — see decrypt_segment() below.)
        current_key (Optional[bytes]): Key selected for the track fragment being processed.
        trun_sample_sizes (array.array): Sample sizes collected from the 'trun' box.
        current_sample_info (list): Per-sample encryption info parsed from the 'senc' box.
        encryption_overhead (int): Total size of encryption-related boxes removed from output.
    """

    def __init__(self, key_map: dict[bytes, bytes]):
        """
        Args:
            key_map (dict[bytes, bytes]): Mapping of track IDs to decryption keys.
        """
        self.key_map = key_map
        self.current_key = None
        self.trun_sample_sizes = array.array("I")
        self.current_sample_info = []
        self.encryption_overhead = 0

    def decrypt_segment(self, combined_segment: bytes) -> bytes:
        """
        Decrypt a combined (init + media) MP4 segment.

        Args:
            combined_segment (bytes): Concatenated initialization and media segment.

        Returns:
            bytes: Decrypted segment content with encryption boxes stripped.
        """
        data = memoryview(combined_segment)
        parser = MP4Parser(data)
        atoms = parser.list_atoms()

        # moov/moof must be processed before mdat so keys and sample info are
        # in place when the media data is decrypted.
        atom_process_order = [b"moov", b"moof", b"sidx", b"mdat"]

        processed_atoms = {}
        for atom_type in atom_process_order:
            if atom := next((a for a in atoms if a.atom_type == atom_type), None):
                processed_atoms[atom_type] = self._process_atom(atom_type, atom)

        # Re-emit atoms in their original order, substituting processed ones.
        result = bytearray()
        for atom in atoms:
            if atom.atom_type in processed_atoms:
                result.extend(processed_atoms[atom.atom_type].pack())
            else:
                result.extend(atom.pack())

        return bytes(result)

    def _process_atom(self, atom_type: bytes, atom: MP4Atom) -> MP4Atom:
        """
        Dispatch an atom to its type-specific handler.

        Args:
            atom_type (bytes): Type of the atom.
            atom (MP4Atom): The atom to process.

        Returns:
            MP4Atom: Processed atom (unchanged for unhandled types).
        """
        if atom_type == b"moov":
            return self._process_moov(atom)
        elif atom_type == b"moof":
            return self._process_moof(atom)
        elif atom_type == b"sidx":
            return self._process_sidx(atom)
        elif atom_type == b"mdat":
            return self._decrypt_mdat(atom)
        else:
            return atom

    def _process_moov(self, moov: MP4Atom) -> MP4Atom:
        """
        Process the 'moov' (Movie) atom: rewrite contained 'trak' boxes and drop
        'pssh' boxes, which are not needed in decrypted output.

        Args:
            moov (MP4Atom): The 'moov' atom to process.

        Returns:
            MP4Atom: Processed 'moov' atom.
        """
        parser = MP4Parser(moov.data)
        new_moov_data = bytearray()

        for atom in iter(parser.read_atom, None):
            if atom.atom_type == b"trak":
                new_trak = self._process_trak(atom)
                new_moov_data.extend(new_trak.pack())
            elif atom.atom_type != b"pssh":
                # Skip PSSH boxes as they are not needed in the decrypted output
                new_moov_data.extend(atom.pack())

        return MP4Atom(b"moov", len(new_moov_data) + 8, new_moov_data)

    def _process_moof(self, moof: MP4Atom) -> MP4Atom:
        """
        Process the 'moof' (Movie Fragment) atom by rewriting its 'traf' children.
        (Fix: the original docstring was copy-pasted from _process_moov.)

        Args:
            moof (MP4Atom): The 'moof' atom to process.

        Returns:
            MP4Atom: Processed 'moof' atom.
        """
        parser = MP4Parser(moof.data)
        new_moof_data = bytearray()

        for atom in iter(parser.read_atom, None):
            if atom.atom_type == b"traf":
                new_traf = self._process_traf(atom)
                new_moof_data.extend(new_traf.pack())
            else:
                new_moof_data.extend(atom.pack())

        return MP4Atom(b"moof", len(new_moof_data) + 8, new_moof_data)

    def _process_traf(self, traf: MP4Atom) -> MP4Atom:
        """
        Process the 'traf' (Track Fragment) atom: collect sample sizes and
        encryption info, strip 'senc'/'saiz'/'saio', and select the track key.

        Args:
            traf (MP4Atom): The 'traf' atom to process.

        Returns:
            MP4Atom: Processed 'traf' atom with encryption boxes removed.
        """
        parser = MP4Parser(traf.data)
        new_traf_data = bytearray()
        tfhd = None
        sample_count = 0
        sample_info = []

        atoms = parser.list_atoms()

        # calculate encryption_overhead earlier to avoid dependency on trun
        self.encryption_overhead = sum(a.size for a in atoms if a.atom_type in {b"senc", b"saiz", b"saio"})

        for atom in atoms:
            if atom.atom_type == b"tfhd":
                tfhd = atom
                new_traf_data.extend(atom.pack())
            elif atom.atom_type == b"trun":
                sample_count = self._process_trun(atom)
                new_trun = self._modify_trun(atom)
                new_traf_data.extend(new_trun.pack())
            elif atom.atom_type == b"senc":
                # Parse senc but don't include it in the decrypted traf; saiz/saio are dropped too.
                sample_info = self._parse_senc(atom, sample_count)
            elif atom.atom_type not in {b"saiz", b"saio"}:
                new_traf_data.extend(atom.pack())

        if tfhd:
            # Track ID sits after the 4-byte version/flags field of tfhd.
            tfhd_track_id = struct.unpack_from(">I", tfhd.data, 4)[0]
            self.current_key = self._get_key_for_track(tfhd_track_id)
            self.current_sample_info = sample_info

        return MP4Atom(b"traf", len(new_traf_data) + 8, new_traf_data)

    def _decrypt_mdat(self, mdat: MP4Atom) -> MP4Atom:
        """
        Decrypt the 'mdat' (Media Data) atom sample-by-sample using the current
        key and 'senc' sample info.

        Args:
            mdat (MP4Atom): The 'mdat' atom to decrypt.

        Returns:
            MP4Atom: Decrypted 'mdat' atom (original atom if no decryption info).
        """
        if not self.current_key or not self.current_sample_info:
            return mdat  # Return original mdat if we don't have decryption info

        decrypted_samples = bytearray()
        mdat_data = mdat.data
        position = 0

        for i, info in enumerate(self.current_sample_info):
            if position >= len(mdat_data):
                break  # No more data to process

            # Fall back to "rest of mdat" when trun sizes are exhausted.
            sample_size = self.trun_sample_sizes[i] if i < len(self.trun_sample_sizes) else len(mdat_data) - position
            sample = mdat_data[position : position + sample_size]
            position += sample_size
            decrypted_sample = self._process_sample(sample, info, self.current_key)
            decrypted_samples.extend(decrypted_sample)

        return MP4Atom(b"mdat", len(decrypted_samples) + 8, decrypted_samples)

    def _parse_senc(self, senc: MP4Atom, sample_count: int) -> list[CENCSampleAuxiliaryDataFormat]:
        """
        Parse the 'senc' (Sample Encryption) atom: per-sample IVs and optional
        sub-sample (clear, encrypted) pairs.

        Args:
            senc (MP4Atom): The 'senc' atom to parse.
            sample_count (int): Sample count to use when the box omits it (version != 0).

        Returns:
            list[CENCSampleAuxiliaryDataFormat]: per-sample encryption info.
        """
        data = memoryview(senc.data)
        version_flags = struct.unpack_from(">I", data, 0)[0]
        version, flags = version_flags >> 24, version_flags & 0xFFFFFF
        position = 4

        if version == 0:
            sample_count = struct.unpack_from(">I", data, position)[0]
            position += 4

        sample_info = []
        for _ in range(sample_count):
            if position + 8 > len(data):
                break

            # NOTE(review): assumes 8-byte IVs; 16-byte IVs exist in CENC — confirm inputs.
            iv = data[position : position + 8].tobytes()
            position += 8

            sub_samples = []
            if flags & 0x000002 and position + 2 <= len(data):  # subsample info present
                subsample_count = struct.unpack_from(">H", data, position)[0]
                position += 2

                for _ in range(subsample_count):
                    if position + 6 <= len(data):
                        clear_bytes, encrypted_bytes = struct.unpack_from(">HI", data, position)
                        position += 6
                        sub_samples.append((clear_bytes, encrypted_bytes))
                    else:
                        break

            sample_info.append(CENCSampleAuxiliaryDataFormat(True, iv, sub_samples))

        return sample_info

    def _get_key_for_track(self, track_id: int) -> bytes:
        """
        Look up the decryption key for a track.

        Args:
            track_id (int): The track ID from the 'tfhd' box.

        Returns:
            bytes: The decryption key.

        Raises:
            ValueError: if no key exists for the track ID.
        """
        if len(self.key_map) == 1:
            # Single-key map: use it regardless of how it is keyed (track ID or KID).
            return next(iter(self.key_map.values()))
        # Fix: int has no .pack() method — the original `track_id.pack(4, "big")`
        # raised AttributeError whenever the map held more than one key.
        key = self.key_map.get(track_id.to_bytes(4, "big"))
        if not key:
            raise ValueError(f"No key found for track ID {track_id}")
        return key

    @staticmethod
    def _process_sample(
        sample: memoryview, sample_info: CENCSampleAuxiliaryDataFormat, key: bytes
    ) -> Union[memoryview, bytearray, bytes]:
        """
        Decrypt one sample with AES-CTR, honouring sub-sample clear/encrypted runs.

        Args:
            sample (memoryview): The sample data.
            sample_info (CENCSampleAuxiliaryDataFormat): Encryption info for the sample.
            key (bytes): The AES key.

        Returns:
            Union[memoryview, bytearray, bytes]: The decrypted sample.
        """
        if not sample_info.is_encrypted:
            return sample

        # pad IV to 16 bytes for the AES-CTR counter block
        iv = sample_info.iv + b"\x00" * (16 - len(sample_info.iv))
        cipher = AES.new(key, AES.MODE_CTR, initial_value=iv, nonce=b"")

        if not sample_info.sub_samples:
            # If there are no sub_samples, decrypt the entire sample
            return cipher.decrypt(sample)

        result = bytearray()
        offset = 0
        for clear_bytes, encrypted_bytes in sample_info.sub_samples:
            result.extend(sample[offset : offset + clear_bytes])
            offset += clear_bytes
            result.extend(cipher.decrypt(sample[offset : offset + encrypted_bytes]))
            offset += encrypted_bytes

        # If there's any remaining data, treat it as encrypted
        if offset < len(sample):
            result.extend(cipher.decrypt(sample[offset:]))

        return result

    def _process_trun(self, trun: MP4Atom) -> int:
        """
        Walk the 'trun' (Track Fragment Run) atom and record per-sample sizes
        into self.trun_sample_sizes.

        Args:
            trun (MP4Atom): The 'trun' atom to process.

        Returns:
            int: The number of samples in the 'trun' atom.
        """
        trun_flags, sample_count = struct.unpack_from(">II", trun.data, 0)
        data_offset = 8

        if trun_flags & 0x000001:  # data-offset present
            data_offset += 4
        if trun_flags & 0x000004:  # first-sample-flags present
            data_offset += 4

        self.trun_sample_sizes = array.array("I")

        for _ in range(sample_count):
            if trun_flags & 0x000100:  # sample-duration-present flag
                data_offset += 4
            if trun_flags & 0x000200:  # sample-size-present flag
                sample_size = struct.unpack_from(">I", trun.data, data_offset)[0]
                self.trun_sample_sizes.append(sample_size)
                data_offset += 4
            else:
                self.trun_sample_sizes.append(0)  # Using 0 instead of None for uniformity in the array
            if trun_flags & 0x000400:  # sample-flags-present flag
                data_offset += 4
            if trun_flags & 0x000800:  # sample-composition-time-offsets-present flag
                data_offset += 4

        return sample_count

    def _modify_trun(self, trun: MP4Atom) -> MP4Atom:
        """
        Rewrite the 'trun' data offset to account for removed encryption boxes.

        Args:
            trun (MP4Atom): The 'trun' atom to modify.

        Returns:
            MP4Atom: Modified 'trun' atom.
        """
        trun_data = bytearray(trun.data)
        current_flags = struct.unpack_from(">I", trun_data, 0)[0] & 0xFFFFFF

        # If the data-offset-present flag is set, shift it back by the stripped overhead.
        if current_flags & 0x000001:
            current_data_offset = struct.unpack_from(">i", trun_data, 8)[0]
            struct.pack_into(">i", trun_data, 8, current_data_offset - self.encryption_overhead)

        return MP4Atom(b"trun", len(trun_data) + 8, trun_data)

    def _process_sidx(self, sidx: MP4Atom) -> MP4Atom:
        """
        Adjust the first reference size in the 'sidx' (Segment Index) atom to
        account for removed encryption boxes.

        Args:
            sidx (MP4Atom): The 'sidx' atom to process.

        Returns:
            MP4Atom: Processed 'sidx' atom.
        """
        sidx_data = bytearray(sidx.data)

        # Offset 32: first reference entry (1-bit type + 31-bit referenced_size).
        current_size = struct.unpack_from(">I", sidx_data, 32)[0]
        reference_type = current_size >> 31
        current_referenced_size = current_size & 0x7FFFFFFF

        # Remove encryption overhead from referenced size
        new_referenced_size = current_referenced_size - self.encryption_overhead
        new_size = (reference_type << 31) | new_referenced_size
        struct.pack_into(">I", sidx_data, 32, new_size)

        return MP4Atom(b"sidx", len(sidx_data) + 8, sidx_data)

    def _process_trak(self, trak: MP4Atom) -> MP4Atom:
        """
        Process the 'trak' (Track) atom by rewriting its 'mdia' child.

        Args:
            trak (MP4Atom): The 'trak' atom to process.

        Returns:
            MP4Atom: Processed 'trak' atom.
        """
        parser = MP4Parser(trak.data)
        new_trak_data = bytearray()

        for atom in iter(parser.read_atom, None):
            if atom.atom_type == b"mdia":
                new_mdia = self._process_mdia(atom)
                new_trak_data.extend(new_mdia.pack())
            else:
                new_trak_data.extend(atom.pack())

        return MP4Atom(b"trak", len(new_trak_data) + 8, new_trak_data)

    def _process_mdia(self, mdia: MP4Atom) -> MP4Atom:
        """
        Process the 'mdia' (Media) atom by rewriting its 'minf' child.

        Args:
            mdia (MP4Atom): The 'mdia' atom to process.

        Returns:
            MP4Atom: Processed 'mdia' atom.
        """
        parser = MP4Parser(mdia.data)
        new_mdia_data = bytearray()

        for atom in iter(parser.read_atom, None):
            if atom.atom_type == b"minf":
                new_minf = self._process_minf(atom)
                new_mdia_data.extend(new_minf.pack())
            else:
                new_mdia_data.extend(atom.pack())

        return MP4Atom(b"mdia", len(new_mdia_data) + 8, new_mdia_data)

    def _process_minf(self, minf: MP4Atom) -> MP4Atom:
        """
        Process the 'minf' (Media Information) atom by rewriting its 'stbl' child.

        Args:
            minf (MP4Atom): The 'minf' atom to process.

        Returns:
            MP4Atom: Processed 'minf' atom.
        """
        parser = MP4Parser(minf.data)
        new_minf_data = bytearray()

        for atom in iter(parser.read_atom, None):
            if atom.atom_type == b"stbl":
                new_stbl = self._process_stbl(atom)
                new_minf_data.extend(new_stbl.pack())
            else:
                new_minf_data.extend(atom.pack())

        return MP4Atom(b"minf", len(new_minf_data) + 8, new_minf_data)

    def _process_stbl(self, stbl: MP4Atom) -> MP4Atom:
        """
        Process the 'stbl' (Sample Table) atom by rewriting its 'stsd' child.

        Args:
            stbl (MP4Atom): The 'stbl' atom to process.

        Returns:
            MP4Atom: Processed 'stbl' atom.
        """
        parser = MP4Parser(stbl.data)
        new_stbl_data = bytearray()

        for atom in iter(parser.read_atom, None):
            if atom.atom_type == b"stsd":
                new_stsd = self._process_stsd(atom)
                new_stbl_data.extend(new_stsd.pack())
            else:
                new_stbl_data.extend(atom.pack())

        return MP4Atom(b"stbl", len(new_stbl_data) + 8, new_stbl_data)

    def _process_stsd(self, stsd: MP4Atom) -> MP4Atom:
        """
        Process the 'stsd' (Sample Description) atom by rewriting each sample entry.

        Args:
            stsd (MP4Atom): The 'stsd' atom to process.

        Returns:
            MP4Atom: Processed 'stsd' atom.
        """
        parser = MP4Parser(stsd.data)
        entry_count = struct.unpack_from(">I", parser.data, 4)[0]
        new_stsd_data = bytearray(stsd.data[:8])  # keep version/flags + entry_count

        parser.position = 8  # Move past version_flags and entry_count

        for _ in range(entry_count):
            sample_entry = parser.read_atom()
            if not sample_entry:
                break

            processed_entry = self._process_sample_entry(sample_entry)
            new_stsd_data.extend(processed_entry.pack())

        return MP4Atom(b"stsd", len(new_stsd_data) + 8, new_stsd_data)

    def _process_sample_entry(self, entry: MP4Atom) -> MP4Atom:
        """
        Strip encryption boxes from a sample entry and restore its original
        codec fourcc (e.g. encv -> avc1) from the 'sinf'/'frma' box.

        Args:
            entry (MP4Atom): The sample entry atom to process.

        Returns:
            MP4Atom: Processed sample entry atom.
        """
        # Determine the size of fixed fields based on sample entry type
        if entry.atom_type in {b"mp4a", b"enca"}:
            fixed_size = 28  # 8 bytes size/type + 20 bytes fixed fields (Audio Sample Entry)
        elif entry.atom_type in {b"mp4v", b"encv", b"avc1", b"hev1", b"hvc1"}:
            fixed_size = 78  # 8 bytes size/type + 70 bytes fixed fields (Video Sample Entry)
        else:
            fixed_size = 16  # 8 bytes size/type + 8 bytes fixed fields (other Sample Entries)

        new_entry_data = bytearray(entry.data[:fixed_size])
        parser = MP4Parser(entry.data[fixed_size:])
        codec_format = None

        for atom in iter(parser.read_atom, None):
            if atom.atom_type in {b"sinf", b"schi", b"tenc", b"schm"}:
                if atom.atom_type == b"sinf":
                    codec_format = self._extract_codec_format(atom)
                continue  # Skip encryption-related atoms
            new_entry_data.extend(atom.pack())

        # Replace the atom type with the extracted codec format
        new_type = codec_format if codec_format else entry.atom_type
        return MP4Atom(new_type, len(new_entry_data) + 8, new_entry_data)

    def _extract_codec_format(self, sinf: MP4Atom) -> Optional[bytes]:
        """
        Extract the original codec fourcc from the 'frma' box inside 'sinf'.

        Args:
            sinf (MP4Atom): The 'sinf' atom to extract from.

        Returns:
            Optional[bytes]: The codec format or None if not found.
        """
        parser = MP4Parser(sinf.data)
        for atom in iter(parser.read_atom, None):
            if atom.atom_type == b"frma":
                return atom.data
        return None


def decrypt_segment(init_segment: bytes, segment_content: bytes, key_id: str, key: str) -> bytes:
    """
    Decrypt a CENC encrypted MP4 segment.

    Args:
        init_segment (bytes): Initialization segment data.
        segment_content (bytes): Encrypted segment content.
        key_id (str): Key ID in hexadecimal format.
        key (str): Key in hexadecimal format.

    Returns:
        bytes: Decrypted segment content.
    """
    # NOTE(review): the map is keyed by KID, not track ID; this relies on the
    # single-key shortcut in MP4Decrypter._get_key_for_track — confirm for multi-key use.
    key_map = {bytes.fromhex(key_id): bytes.fromhex(key)}
    decrypter = MP4Decrypter(key_map)
    return decrypter.decrypt_segment(init_segment + segment_content)
+ """ + init_segment = b"" + + if args.init and args.segment: + with open(args.init, "rb") as f: + init_segment = f.read() + with open(args.segment, "rb") as f: + segment_content = f.read() + elif args.combined_segment: + with open(args.combined_segment, "rb") as f: + segment_content = f.read() + else: + print("Usage: python mp4decrypt.py --help") + sys.exit(1) + + try: + decrypted_segment = decrypt_segment(init_segment, segment_content, args.key_id, args.key) + print(f"Decrypted content size is {len(decrypted_segment)} bytes") + with open(args.output, "wb") as f: + f.write(decrypted_segment) + print(f"Decrypted segment written to {args.output}") + except Exception as e: + print(f"Error: {e}") + sys.exit(1) + + +if __name__ == "__main__": + arg_parser = argparse.ArgumentParser(description="Decrypts a MP4 init and media segment using CENC encryption.") + arg_parser.add_argument("--init", help="Path to the init segment file", required=False) + arg_parser.add_argument("--segment", help="Path to the media segment file", required=False) + arg_parser.add_argument( + "--combined_segment", help="Path to the combined init and media segment file", required=False + ) + arg_parser.add_argument("--key_id", help="Key ID in hexadecimal format", required=True) + arg_parser.add_argument("--key", help="Key in hexadecimal format", required=True) + arg_parser.add_argument("--output", help="Path to the output file", required=True) + args = arg_parser.parse_args() + cli() diff --git a/mediaflow_proxy/extractors/__init__.py b/mediaflow_proxy/extractors/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/mediaflow_proxy/extractors/base.py b/mediaflow_proxy/extractors/base.py new file mode 100644 index 0000000..bf8a15b --- /dev/null +++ b/mediaflow_proxy/extractors/base.py @@ -0,0 +1,50 @@ +from abc import ABC, abstractmethod +from typing import Dict, Optional, Any + +import httpx + +from mediaflow_proxy.configs import settings +from mediaflow_proxy.utils.http_utils import 
create_httpx_client + + +class ExtractorError(Exception): + """Base exception for all extractors.""" + + pass + + +class BaseExtractor(ABC): + """Base class for all URL extractors.""" + + def __init__(self, request_headers: dict): + self.base_headers = { + "user-agent": settings.user_agent, + } + self.mediaflow_endpoint = "proxy_stream_endpoint" + self.base_headers.update(request_headers) + + async def _make_request( + self, url: str, method: str = "GET", headers: Optional[Dict] = None, **kwargs + ) -> httpx.Response: + """Make HTTP request with error handling.""" + try: + async with create_httpx_client() as client: + request_headers = self.base_headers + request_headers.update(headers or {}) + response = await client.request( + method, + url, + headers=request_headers, + **kwargs, + ) + response.raise_for_status() + return response + except httpx.HTTPError as e: + raise ExtractorError(f"HTTP request failed: {str(e)}") + except Exception as e: + raise ExtractorError(f"Request failed: {str(e)}") + + @abstractmethod + async def extract(self, url: str, **kwargs) -> Dict[str, Any]: + """Extract final URL and required headers.""" + pass diff --git a/mediaflow_proxy/extractors/doodstream.py b/mediaflow_proxy/extractors/doodstream.py new file mode 100644 index 0000000..a8f851d --- /dev/null +++ b/mediaflow_proxy/extractors/doodstream.py @@ -0,0 +1,39 @@ +import re +import time +from typing import Dict + +from mediaflow_proxy.extractors.base import BaseExtractor, ExtractorError + + +class DoodStreamExtractor(BaseExtractor): + """DoodStream URL extractor.""" + + def __init__(self, request_headers: dict): + super().__init__(request_headers) + self.base_url = "https://d000d.com" + + async def extract(self, url: str, **kwargs) -> Dict[str, str]: + """Extract DoodStream URL.""" + response = await self._make_request(url) + + # Extract URL pattern + pattern = r"(\/pass_md5\/.*?)'.*(\?token=.*?expiry=)" + match = re.search(pattern, response.text, re.DOTALL) + if not match: + raise 
ExtractorError("Failed to extract URL pattern") + + # Build final URL + pass_url = f"{self.base_url}{match[1]}" + referer = f"{self.base_url}/" + headers = {"range": "bytes=0-", "referer": referer} + + response = await self._make_request(pass_url, headers=headers) + timestamp = str(int(time.time())) + final_url = f"{response.text}123456789{match[2]}{timestamp}" + + self.base_headers["referer"] = referer + return { + "destination_url": final_url, + "request_headers": self.base_headers, + "mediaflow_endpoint": self.mediaflow_endpoint, + } diff --git a/mediaflow_proxy/extractors/factory.py b/mediaflow_proxy/extractors/factory.py new file mode 100644 index 0000000..c61d408 --- /dev/null +++ b/mediaflow_proxy/extractors/factory.py @@ -0,0 +1,32 @@ +from typing import Dict, Type + +from mediaflow_proxy.extractors.base import BaseExtractor, ExtractorError +from mediaflow_proxy.extractors.doodstream import DoodStreamExtractor +from mediaflow_proxy.extractors.livetv import LiveTVExtractor +from mediaflow_proxy.extractors.mixdrop import MixdropExtractor +from mediaflow_proxy.extractors.uqload import UqloadExtractor +from mediaflow_proxy.extractors.streamtape import StreamtapeExtractor +from mediaflow_proxy.extractors.supervideo import SupervideoExtractor + + + + +class ExtractorFactory: + """Factory for creating URL extractors.""" + + _extractors: Dict[str, Type[BaseExtractor]] = { + "Doodstream": DoodStreamExtractor, + "Uqload": UqloadExtractor, + "Mixdrop": MixdropExtractor, + "Streamtape": StreamtapeExtractor, + "Supervideo": SupervideoExtractor, + "LiveTV": LiveTVExtractor, + } + + @classmethod + def get_extractor(cls, host: str, request_headers: dict) -> BaseExtractor: + """Get appropriate extractor instance for the given host.""" + extractor_class = cls._extractors.get(host) + if not extractor_class: + raise ExtractorError(f"Unsupported host: {host}") + return extractor_class(request_headers) diff --git a/mediaflow_proxy/extractors/livetv.py 
# --- mediaflow_proxy/extractors/livetv.py ---
import re
from typing import Dict, Tuple, Optional
from urllib.parse import urljoin, urlparse, unquote

from httpx import Response

from mediaflow_proxy.extractors.base import BaseExtractor, ExtractorError


class LiveTVExtractor(BaseExtractor):
    """LiveTV URL extractor for both M3U8 and MPD streams."""

    def __init__(self, request_headers: dict):
        super().__init__(request_headers)
        # Default to HLS proxy endpoint, will be updated based on stream type
        self.mediaflow_endpoint = "hls_manifest_proxy"

        # Patterns for stream URL extraction
        self.fallback_pattern = re.compile(
            r"source: [\'\"](.*?)[\'\"]\s*,\s*[\s\S]*?mimeType: [\'\"](application/x-mpegURL|application/vnd\.apple\.mpegURL|application/dash\+xml)[\'\"]",
            re.IGNORECASE,
        )
        self.any_m3u8_pattern = re.compile(
            r'["\']?(https?://.*?\.m3u8(?:\?[^"\']*)?)["\']?',
            re.IGNORECASE,
        )

    async def extract(self, url: str, stream_title: str = None, **kwargs) -> Dict[str, str]:
        """Extract LiveTV URL and required headers.

        Args:
            url: The channel page URL.
            stream_title: Optional stream title to filter a specific stream.

        Returns:
            Dict[str, str]: destination_url, request_headers and the
            mediaflow endpoint (plus DRM query_params for protected MPD).

        Raises:
            ExtractorError: If no valid stream could be resolved.
        """
        try:
            # Get the channel page
            response = await self._make_request(url)
            self.base_headers["referer"] = urljoin(url, "/")

            # Extract player API details
            player_api_base, method = await self._extract_player_api_base(response.text)
            if not player_api_base:
                raise ExtractorError("Failed to extract player API URL")

            # Get player options
            options_data = await self._get_player_options(response.text)
            if not options_data:
                raise ExtractorError("No player options found")

            # Process player options to find matching stream
            for option in options_data:
                current_title = option.get("title")
                if stream_title and current_title != stream_title:
                    continue

                # Get stream URL based on player option
                stream_data = await self._process_player_option(
                    player_api_base, method, option.get("post"), option.get("nume"), option.get("type")
                )

                if stream_data:
                    stream_url = stream_data.get("url")
                    if not stream_url:
                        continue

                    response = {
                        "destination_url": stream_url,
                        "request_headers": self.base_headers,
                        "mediaflow_endpoint": self.mediaflow_endpoint,
                    }

                    # DRM-protected MPD is routed through the MPD proxy with
                    # the clear key attached as query parameters.
                    if stream_data.get("type") == "mpd":
                        if stream_data.get("drm_key_id") and stream_data.get("drm_key"):
                            response.update(
                                {
                                    "query_params": {
                                        "key_id": stream_data["drm_key_id"],
                                        "key": stream_data["drm_key"],
                                    },
                                    "mediaflow_endpoint": "mpd_manifest_proxy",
                                }
                            )

                    return response

            raise ExtractorError("No valid stream found")

        except Exception as e:
            raise ExtractorError(f"Extraction failed: {str(e)}")

    async def _extract_player_api_base(self, html_content: str) -> Tuple[Optional[str], Optional[str]]:
        """Extract player API base URL and method (wp_json or admin-ajax)."""
        admin_ajax_pattern = r'"player_api"\s*:\s*"([^"]+)".*?"play_method"\s*:\s*"([^"]+)"'
        match = re.search(admin_ajax_pattern, html_content)
        if not match:
            return None, None
        url = match.group(1).replace("\\/", "/")
        method = match.group(2)
        if method == "wp_json":
            return url, method
        url = urljoin(url, "/wp-admin/admin-ajax.php")
        return url, method

    async def _get_player_options(self, html_content: str) -> list:
        """Extract player options from HTML content."""
        # NOTE(review): this pattern appears truncated by the patch encoding
        # (it starts with ']*' -- presumably an HTML tag prefix was lost);
        # kept verbatim, confirm against the upstream source.
        pattern = r']*class=["\']dooplay_player_option["\'][^>]*data-type=["\']([^"\']*)["\'][^>]*data-post=["\']([^"\']*)["\'][^>]*data-nume=["\']([^"\']*)["\'][^>]*>.*?([^<]*)'
        matches = re.finditer(pattern, html_content, re.DOTALL)
        return [
            {"type": match.group(1), "post": match.group(2), "nume": match.group(3), "title": match.group(4).strip()}
            for match in matches
        ]

    async def _process_player_option(self, api_base: str, method: str, post: str, nume: str, type_: str) -> Dict:
        """Process player option to get stream URL."""
        if method == "wp_json":
            api_url = f"{api_base}{post}/{type_}/{nume}"
            response = await self._make_request(api_url)
        else:
            form_data = {"action": "doo_player_ajax", "post": post, "nume": nume, "type": type_}
            response = await self._make_request(api_base, method="POST", data=form_data)

        # Get iframe URL from API response
        try:
            data = response.json()
            iframe_url = urljoin(api_base, data.get("embed_url", "").replace("\\/", "/"))

            # Get stream URL from iframe
            iframe_response = await self._make_request(iframe_url)
            stream_data = await self._extract_stream_url(iframe_response, iframe_url)
            return stream_data

        except Exception as e:
            raise ExtractorError(f"Failed to process player option: {str(e)}")

    async def _extract_stream_url(self, iframe_response: Response, iframe_url: str) -> Dict:
        """
        Extract final stream URL from iframe content.
        """
        try:
            # Parse URL components
            parsed_url = urlparse(iframe_url)
            query_params = dict(param.split("=") for param in parsed_url.query.split("&") if "=" in param)

            # Check if content is already a direct M3U8 stream.
            content_types = ["application/x-mpegurl", "application/vnd.apple.mpegurl"]

            # BUG FIX: use .get() with a default (the header may be absent)
            # and lower-case it so "application/x-mpegURL" matches the
            # lowercase constants -- same normalization as the HLS handler.
            if any(ext in iframe_response.headers.get("content-type", "").lower() for ext in content_types):
                return {"url": iframe_url, "type": "m3u8"}

            stream_data = {}

            # Check for source parameter in URL
            if "source" in query_params:
                stream_data = {
                    "url": urljoin(iframe_url, unquote(query_params["source"])),
                    "type": "m3u8",
                }

            # Check for MPD stream with DRM ("url``key_id:key" packed format)
            elif "zy" in query_params and ".mpd``" in query_params["zy"]:
                data = query_params["zy"].split("``")
                url = data[0]
                key_id, key = data[1].split(":")
                stream_data = {"url": url, "type": "mpd", "drm_key_id": key_id, "drm_key": key}

            # Check for tamilultra specific format
            elif "tamilultra" in iframe_url:
                stream_data = {"url": urljoin(iframe_url, parsed_url.query), "type": "m3u8"}

            # Try pattern matching for stream URLs
            else:
                # BUG FIX: the default was the *list* [""], which is truthy
                # and blows up inside re.escape() below; use a string.
                channel_id = query_params.get("id", "")
                stream_url = None

                html_content = iframe_response.text

                if channel_id:
                    # Try channel ID specific pattern
                    pattern = rf'{re.escape(channel_id)}["\']:\s*{{\s*["\']?url["\']?\s*:\s*["\']([^"\']+)["\']'
                    match = re.search(pattern, html_content)
                    if match:
                        stream_url = match.group(1)

                # Try fallback patterns if channel ID pattern fails
                if not stream_url:
                    for pattern in [self.fallback_pattern, self.any_m3u8_pattern]:
                        match = pattern.search(html_content)
                        if match:
                            stream_url = match.group(1)
                            break

                if stream_url:
                    stream_data = {"url": stream_url, "type": "m3u8"}  # Default to m3u8, will be updated

                    # Check for MPD stream and extract DRM keys
                    if stream_url.endswith(".mpd"):
                        stream_data["type"] = "mpd"
                        drm_data = await self._extract_drm_keys(html_content, channel_id)
                        if drm_data:
                            stream_data.update(drm_data)

            # If no stream data found, raise error
            if not stream_data:
                raise ExtractorError("No valid stream URL found")

            # Update stream type based on URL if not already set
            if stream_data.get("type") == "m3u8":
                if stream_data["url"].endswith(".mpd"):
                    stream_data["type"] = "mpd"
                elif not any(ext in stream_data["url"] for ext in [".m3u8", ".m3u"]):
                    stream_data["type"] = "m3u8"  # Default to m3u8 if no extension found

            return stream_data

        except Exception as e:
            raise ExtractorError(f"Failed to extract stream URL: {str(e)}")

    async def _extract_drm_keys(self, html_content: str, channel_id: str) -> Dict:
        """
        Extract DRM clear keys for MPD streams; returns {} when not found.
        """
        try:
            # Pattern for channel entry
            channel_pattern = rf'"{re.escape(channel_id)}":\s*{{[^}}]+}}'
            channel_match = re.search(channel_pattern, html_content)

            if channel_match:
                channel_data = channel_match.group(0)

                # Try clearkeys pattern first
                clearkey_pattern = r'["\']?clearkeys["\']?\s*:\s*{\s*["\'](.+?)["\']:\s*["\'](.+?)["\']'
                clearkey_match = re.search(clearkey_pattern, channel_data)

                # Try k1/k2 pattern if clearkeys not found
                if not clearkey_match:
                    k1k2_pattern = r'["\']?k1["\']?\s*:\s*["\'](.+?)["\'],\s*["\']?k2["\']?\s*:\s*["\'](.+?)["\']'
                    k1k2_match = re.search(k1k2_pattern, channel_data)

                    if k1k2_match:
                        return {"drm_key_id": k1k2_match.group(1), "drm_key": k1k2_match.group(2)}
                else:
                    return {"drm_key_id": clearkey_match.group(1), "drm_key": clearkey_match.group(2)}

            return {}

        except Exception:
            return {}


# --- mediaflow_proxy/extractors/mixdrop.py ---
import re
import string
from typing import Dict, Any

from mediaflow_proxy.extractors.base import BaseExtractor, ExtractorError


class MixdropExtractor(BaseExtractor):
    """Mixdrop URL extractor."""

    async def extract(self, url: str, **kwargs) -> Dict[str, Any]:
        """Extract Mixdrop URL."""
        response = await self._make_request(url, headers={"accept-language": "en-US,en;q=0.5"})

        # The page ships a packed-JS blob: a schema string plus a '|'-joined
        # dictionary of replacement terms.
        match = re.search(r"}\('(.+)',.+,'(.+)'\.split", response.text)
        if not match:
            raise ExtractorError("Failed to extract URL components")

        s1, s2 = match.group(1, 2)
        schema = s1.split(";")[2][5:-1]
        terms = s2.split("|")

        # Build character mapping (base-62 digit -> dictionary term)
        charset = string.digits + string.ascii_letters
        char_map = {charset[i]: terms[i] or charset[i] for i in range(len(terms))}

        # Construct final URL
        final_url = "https:" + "".join(char_map.get(c, c) for c in schema)

        self.base_headers["referer"] = url
        return {
            "destination_url": final_url,
            "request_headers": self.base_headers,
            "mediaflow_endpoint": self.mediaflow_endpoint,
        }


# --- mediaflow_proxy/extractors/streamtape.py ---
import re
from typing import Dict, Any

from mediaflow_proxy.extractors.base import BaseExtractor, ExtractorError


class StreamtapeExtractor(BaseExtractor):
    """Streamtape URL extractor."""

    async def extract(self, url: str, **kwargs) -> Dict[str, Any]:
        """Extract Streamtape URL."""
        response = await self._make_request(url)

        # The real token is the 'id=...' fragment that appears twice in a row.
        matches = re.findall(r"id=.*?(?=')", response.text)
        if not matches:
            raise ExtractorError("Failed to extract URL components")
        final_url = next(
            (
                f"https://streamtape.com/get_video?{matches[i + 1]}"
                for i in range(len(matches) - 1)
                if matches[i] == matches[i + 1]
            ),
            None,
        )
        # BUG FIX: the original silently returned destination_url=None when
        # no duplicated id was found; fail loudly like the other extractors.
        if final_url is None:
            raise ExtractorError("Failed to extract URL components")

        self.base_headers["referer"] = url
        return {
            "destination_url": final_url,
            "request_headers": self.base_headers,
            "mediaflow_endpoint": self.mediaflow_endpoint,
        }
b/mediaflow_proxy/extractors/supervideo.py new file mode 100644 index 0000000..ba0fc6b --- /dev/null +++ b/mediaflow_proxy/extractors/supervideo.py @@ -0,0 +1,27 @@ +import re +from typing import Dict, Any + +from mediaflow_proxy.extractors.base import BaseExtractor, ExtractorError + + +class SupervideoExtractor(BaseExtractor): + """Supervideo URL extractor.""" + + async def extract(self, url: str, **kwargs) -> Dict[str, Any]: + """Extract Supervideo URL.""" + response = await self._make_request(url) + # Extract and decode URL + s2 = re.search(r"\}\('(.+)',.+,'(.+)'\.split", response.text).group(2) + terms = s2.split("|") + hfs = next(terms[i] for i in range(terms.index("file"), len(terms)) if "hfs" in terms[i]) + result = terms[terms.index("urlset") + 1 : terms.index("hls")] + + base_url = f"https://{hfs}.serversicuro.cc/hls/" + final_url = base_url + ",".join(reversed(result)) + (".urlset/master.m3u8" if result else "") + + self.base_headers["referer"] = url + return { + "destination_url": final_url, + "request_headers": self.base_headers, + "mediaflow_endpoint": self.mediaflow_endpoint, + } diff --git a/mediaflow_proxy/extractors/uqload.py b/mediaflow_proxy/extractors/uqload.py new file mode 100644 index 0000000..19cdbd2 --- /dev/null +++ b/mediaflow_proxy/extractors/uqload.py @@ -0,0 +1,24 @@ +import re +from typing import Dict +from urllib.parse import urljoin + +from mediaflow_proxy.extractors.base import BaseExtractor, ExtractorError + + +class UqloadExtractor(BaseExtractor): + """Uqload URL extractor.""" + + async def extract(self, url: str, **kwargs) -> Dict[str, str]: + """Extract Uqload URL.""" + response = await self._make_request(url) + + video_url_match = re.search(r'sources: \["(.*?)"]', response.text) + if not video_url_match: + raise ExtractorError("Failed to extract video URL") + + self.base_headers["referer"] = urljoin(url, "/") + return { + "destination_url": video_url_match.group(1), + "request_headers": self.base_headers, + 
"mediaflow_endpoint": self.mediaflow_endpoint, + } diff --git a/mediaflow_proxy/handlers.py b/mediaflow_proxy/handlers.py new file mode 100644 index 0000000..957eb51 --- /dev/null +++ b/mediaflow_proxy/handlers.py @@ -0,0 +1,358 @@ +import base64 +import logging +from urllib.parse import urlparse + +import httpx +from fastapi import Request, Response, HTTPException +from starlette.background import BackgroundTask + +from .const import SUPPORTED_RESPONSE_HEADERS +from .mpd_processor import process_manifest, process_playlist, process_segment +from .schemas import HLSManifestParams, ProxyStreamParams, MPDManifestParams, MPDPlaylistParams, MPDSegmentParams +from .utils.cache_utils import get_cached_mpd, get_cached_init_segment +from .utils.http_utils import ( + Streamer, + DownloadError, + download_file_with_retry, + request_with_retry, + EnhancedStreamingResponse, + ProxyRequestHeaders, + create_httpx_client, +) +from .utils.m3u8_processor import M3U8Processor +from .utils.mpd_utils import pad_base64 + +logger = logging.getLogger(__name__) + + +async def setup_client_and_streamer() -> tuple[httpx.AsyncClient, Streamer]: + """ + Set up an HTTP client and a streamer. + + Returns: + tuple: An httpx.AsyncClient instance and a Streamer instance. + """ + client = create_httpx_client() + return client, Streamer(client) + + +def handle_exceptions(exception: Exception) -> Response: + """ + Handle exceptions and return appropriate HTTP responses. + + Args: + exception (Exception): The exception that was raised. + + Returns: + Response: An HTTP response corresponding to the exception type. 
+ """ + if isinstance(exception, httpx.HTTPStatusError): + logger.error(f"Upstream service error while handling request: {exception}") + return Response(status_code=exception.response.status_code, content=f"Upstream service error: {exception}") + elif isinstance(exception, DownloadError): + logger.error(f"Error downloading content: {exception}") + return Response(status_code=exception.status_code, content=str(exception)) + else: + logger.exception(f"Internal server error while handling request: {exception}") + return Response(status_code=502, content=f"Internal server error: {exception}") + + +async def handle_hls_stream_proxy( + request: Request, hls_params: HLSManifestParams, proxy_headers: ProxyRequestHeaders +) -> Response: + """ + Handle HLS stream proxy requests. + + This function processes HLS manifest files and streams content based on the request parameters. + + Args: + request (Request): The incoming FastAPI request object. + hls_params (HLSManifestParams): Parameters for the HLS manifest. + proxy_headers (ProxyRequestHeaders): Headers to be used in the proxy request. + + Returns: + Union[Response, EnhancedStreamingResponse]: Either a processed m3u8 playlist or a streaming response. 
+ """ + client, streamer = await setup_client_and_streamer() + + try: + if urlparse(hls_params.destination).path.endswith((".m3u", ".m3u8")): + return await fetch_and_process_m3u8( + streamer, hls_params.destination, proxy_headers, request, hls_params.key_url + ) + + # Create initial streaming response to check content type + await streamer.create_streaming_response(hls_params.destination, proxy_headers.request) + + if "mpegurl" in streamer.response.headers.get("content-type", "").lower(): + return await fetch_and_process_m3u8( + streamer, hls_params.destination, proxy_headers, request, hls_params.key_url + ) + + # Handle range requests + content_range = proxy_headers.request.get("range", "bytes=0-") + if "NaN" in content_range: + # Handle invalid range requests "bytes=NaN-NaN" + raise HTTPException(status_code=416, detail="Invalid Range Header") + proxy_headers.request.update({"range": content_range}) + + # Create new streaming response with updated headers + await streamer.create_streaming_response(hls_params.destination, proxy_headers.request) + response_headers = prepare_response_headers(streamer.response.headers, proxy_headers.response) + + return EnhancedStreamingResponse( + streamer.stream_content(), + status_code=streamer.response.status_code, + headers=response_headers, + background=BackgroundTask(streamer.close), + ) + except Exception as e: + await streamer.close() + return handle_exceptions(e) + + +async def handle_stream_request( + method: str, + video_url: str, + proxy_headers: ProxyRequestHeaders, +) -> Response: + """ + Handle general stream requests. + + This function processes both HEAD and GET requests for video streams. + + Args: + method (str): The HTTP method (e.g., 'GET' or 'HEAD'). + video_url (str): The URL of the video to stream. + proxy_headers (ProxyRequestHeaders): Headers to be used in the proxy request. + + Returns: + Union[Response, EnhancedStreamingResponse]: Either a HEAD response with headers or a streaming response. 
+ """ + client, streamer = await setup_client_and_streamer() + + try: + await streamer.create_streaming_response(video_url, proxy_headers.request) + response_headers = prepare_response_headers(streamer.response.headers, proxy_headers.response) + + if method == "HEAD": + # For HEAD requests, just return the headers without streaming content + await streamer.close() + return Response(headers=response_headers, status_code=streamer.response.status_code) + else: + # For GET requests, return the streaming response + return EnhancedStreamingResponse( + streamer.stream_content(), + headers=response_headers, + status_code=streamer.response.status_code, + background=BackgroundTask(streamer.close), + ) + except Exception as e: + await streamer.close() + return handle_exceptions(e) + + +def prepare_response_headers(original_headers, proxy_response_headers) -> dict: + """ + Prepare response headers for the proxy response. + + This function filters the original headers, ensures proper transfer encoding, + and merges them with the proxy response headers. + + Args: + original_headers (httpx.Headers): The original headers from the upstream response. + proxy_response_headers (dict): Additional headers to be included in the proxy response. + + Returns: + dict: The prepared headers for the proxy response. + """ + response_headers = {k: v for k, v in original_headers.multi_items() if k in SUPPORTED_RESPONSE_HEADERS} + response_headers.update(proxy_response_headers) + return response_headers + + +async def proxy_stream(method: str, stream_params: ProxyStreamParams, proxy_headers: ProxyRequestHeaders): + """ + Proxies the stream request to the given video URL. + + Args: + method (str): The HTTP method (e.g., GET, HEAD). + stream_params (ProxyStreamParams): The parameters for the stream request. + proxy_headers (ProxyRequestHeaders): The headers to include in the request. + + Returns: + Response: The HTTP response with the streamed content. 
+ """ + return await handle_stream_request(method, stream_params.destination, proxy_headers) + + +async def fetch_and_process_m3u8( + streamer: Streamer, url: str, proxy_headers: ProxyRequestHeaders, request: Request, key_url: str = None +): + """ + Fetches and processes the m3u8 playlist, converting it to an HLS playlist. + + Args: + streamer (Streamer): The HTTP client to use for streaming. + url (str): The URL of the m3u8 playlist. + proxy_headers (ProxyRequestHeaders): The headers to include in the request. + request (Request): The incoming HTTP request. + key_url (str, optional): The HLS Key URL to replace the original key URL. Defaults to None. + + Returns: + Response: The HTTP response with the processed m3u8 playlist. + """ + try: + content = await streamer.get_text(url, proxy_headers.request) + processor = M3U8Processor(request, key_url) + processed_content = await processor.process_m3u8(content, str(streamer.response.url)) + response_headers = {"Content-Disposition": "inline", "Accept-Ranges": "none"} + response_headers.update(proxy_headers.response) + return Response( + content=processed_content, + media_type="application/vnd.apple.mpegurl", + headers=response_headers, + ) + except Exception as e: + return handle_exceptions(e) + finally: + await streamer.close() + + +async def handle_drm_key_data(key_id, key, drm_info): + """ + Handles the DRM key data, retrieving the key ID and key from the DRM info if not provided. + + Args: + key_id (str): The DRM key ID. + key (str): The DRM key. + drm_info (dict): The DRM information from the MPD manifest. + + Returns: + tuple: The key ID and key. 
+ """ + if drm_info and not drm_info.get("isDrmProtected"): + return None, None + + if not key_id or not key: + if "keyId" in drm_info and "key" in drm_info: + key_id = drm_info["keyId"] + key = drm_info["key"] + elif "laUrl" in drm_info and "keyId" in drm_info: + raise HTTPException(status_code=400, detail="LA URL is not supported yet") + else: + raise HTTPException( + status_code=400, detail="Unable to determine key_id and key, and they were not provided" + ) + + return key_id, key + + +async def get_manifest( + request: Request, + manifest_params: MPDManifestParams, + proxy_headers: ProxyRequestHeaders, +): + """ + Retrieves and processes the MPD manifest, converting it to an HLS manifest. + + Args: + request (Request): The incoming HTTP request. + manifest_params (MPDManifestParams): The parameters for the manifest request. + proxy_headers (ProxyRequestHeaders): The headers to include in the request. + + Returns: + Response: The HTTP response with the HLS manifest. + """ + try: + mpd_dict = await get_cached_mpd( + manifest_params.destination, + headers=proxy_headers.request, + parse_drm=not manifest_params.key_id and not manifest_params.key, + ) + except DownloadError as e: + raise HTTPException(status_code=e.status_code, detail=f"Failed to download MPD: {e.message}") + drm_info = mpd_dict.get("drmInfo", {}) + + if drm_info and not drm_info.get("isDrmProtected"): + # For non-DRM protected MPD, we still create an HLS manifest + return await process_manifest(request, mpd_dict, proxy_headers, None, None) + + key_id, key = await handle_drm_key_data(manifest_params.key_id, manifest_params.key, drm_info) + + # check if the provided key_id and key are valid + if key_id and len(key_id) != 32: + key_id = base64.urlsafe_b64decode(pad_base64(key_id)).hex() + if key and len(key) != 32: + key = base64.urlsafe_b64decode(pad_base64(key)).hex() + + return await process_manifest(request, mpd_dict, proxy_headers, key_id, key) + + +async def get_playlist( + request: Request, + 
playlist_params: MPDPlaylistParams, + proxy_headers: ProxyRequestHeaders, +): + """ + Retrieves and processes the MPD manifest, converting it to an HLS playlist for a specific profile. + + Args: + request (Request): The incoming HTTP request. + playlist_params (MPDPlaylistParams): The parameters for the playlist request. + proxy_headers (ProxyRequestHeaders): The headers to include in the request. + + Returns: + Response: The HTTP response with the HLS playlist. + """ + try: + mpd_dict = await get_cached_mpd( + playlist_params.destination, + headers=proxy_headers.request, + parse_drm=not playlist_params.key_id and not playlist_params.key, + parse_segment_profile_id=playlist_params.profile_id, + ) + except DownloadError as e: + raise HTTPException(status_code=e.status_code, detail=f"Failed to download MPD: {e.message}") + return await process_playlist(request, mpd_dict, playlist_params.profile_id, proxy_headers) + + +async def get_segment( + segment_params: MPDSegmentParams, + proxy_headers: ProxyRequestHeaders, +): + """ + Retrieves and processes a media segment, decrypting it if necessary. + + Args: + segment_params (MPDSegmentParams): The parameters for the segment request. + proxy_headers (ProxyRequestHeaders): The headers to include in the request. + + Returns: + Response: The HTTP response with the processed segment. + """ + try: + init_content = await get_cached_init_segment(segment_params.init_url, proxy_headers.request) + segment_content = await download_file_with_retry(segment_params.segment_url, proxy_headers.request) + except Exception as e: + return handle_exceptions(e) + + return await process_segment( + init_content, + segment_content, + segment_params.mime_type, + proxy_headers, + segment_params.key_id, + segment_params.key, + ) + + +async def get_public_ip(): + """ + Retrieves the public IP address of the MediaFlow proxy. + + Returns: + Response: The HTTP response with the public IP address. 
+ """ + ip_address_data = await request_with_retry("GET", "https://api.ipify.org?format=json", {}) + return ip_address_data.json() diff --git a/mediaflow_proxy/main.py b/mediaflow_proxy/main.py new file mode 100644 index 0000000..9b45b81 --- /dev/null +++ b/mediaflow_proxy/main.py @@ -0,0 +1,99 @@ +import logging +from importlib import resources + +from fastapi import FastAPI, Depends, Security, HTTPException +from fastapi.security import APIKeyQuery, APIKeyHeader +from starlette.middleware.cors import CORSMiddleware +from starlette.responses import RedirectResponse +from starlette.staticfiles import StaticFiles + +from mediaflow_proxy.configs import settings +from mediaflow_proxy.routes import proxy_router, extractor_router, speedtest_router +from mediaflow_proxy.schemas import GenerateUrlRequest +from mediaflow_proxy.utils.crypto_utils import EncryptionHandler, EncryptionMiddleware +from mediaflow_proxy.utils.http_utils import encode_mediaflow_proxy_url + +logging.basicConfig(level=settings.log_level, format="%(asctime)s - %(name)s - %(levelname)s - %(message)s") +app = FastAPI() +api_password_query = APIKeyQuery(name="api_password", auto_error=False) +api_password_header = APIKeyHeader(name="api_password", auto_error=False) +app.add_middleware( + CORSMiddleware, + allow_origins=["*"], + allow_credentials=True, + allow_methods=["*"], + allow_headers=["*"], +) +app.add_middleware(EncryptionMiddleware) + + +async def verify_api_key(api_key: str = Security(api_password_query), api_key_alt: str = Security(api_password_header)): + """ + Verifies the API key for the request. + + Args: + api_key (str): The API key to validate. + api_key_alt (str): The alternative API key to validate. + + Raises: + HTTPException: If the API key is invalid. 
+ """ + if not settings.api_password: + return + + if api_key == settings.api_password or api_key_alt == settings.api_password: + return + + raise HTTPException(status_code=403, detail="Could not validate credentials") + + +@app.get("/health") +async def health_check(): + return {"status": "healthy"} + + +@app.get("/favicon.ico") +async def get_favicon(): + return RedirectResponse(url="/logo.png") + + +@app.get("/speedtest") +async def show_speedtest_page(): + return RedirectResponse(url="/speedtest.html") + + +@app.post("/generate_encrypted_or_encoded_url") +async def generate_encrypted_or_encoded_url(request: GenerateUrlRequest): + if "api_password" not in request.query_params: + request.query_params["api_password"] = request.api_password + + encoded_url = encode_mediaflow_proxy_url( + request.mediaflow_proxy_url, + request.endpoint, + request.destination_url, + request.query_params, + request.request_headers, + request.response_headers, + EncryptionHandler(request.api_password) if request.api_password else None, + request.expiration, + str(request.ip) if request.ip else None, + ) + return {"encoded_url": encoded_url} + + +app.include_router(proxy_router, prefix="/proxy", tags=["proxy"], dependencies=[Depends(verify_api_key)]) +app.include_router(extractor_router, prefix="/extractor", tags=["extractors"], dependencies=[Depends(verify_api_key)]) +app.include_router(speedtest_router, prefix="/speedtest", tags=["speedtest"], dependencies=[Depends(verify_api_key)]) + +static_path = resources.files("mediaflow_proxy").joinpath("static") +app.mount("/", StaticFiles(directory=str(static_path), html=True), name="static") + + +def run(): + import uvicorn + + uvicorn.run(app, host="0.0.0.0", port=8888, log_level="info", workers=3) + + +if __name__ == "__main__": + run() diff --git a/mediaflow_proxy/mpd_processor.py b/mediaflow_proxy/mpd_processor.py new file mode 100644 index 0000000..76fba69 --- /dev/null +++ b/mediaflow_proxy/mpd_processor.py @@ -0,0 +1,214 @@ +import 
import logging
import math
import time

from fastapi import Request, Response, HTTPException

from mediaflow_proxy.drm.decrypter import decrypt_segment
from mediaflow_proxy.utils.crypto_utils import encryption_handler
from mediaflow_proxy.utils.http_utils import encode_mediaflow_proxy_url, get_original_scheme, ProxyRequestHeaders

logger = logging.getLogger(__name__)


def _truthy_query_flag(value) -> bool:
    """
    Interpret a boolean flag taken from a query string.

    Query parameter values arrive as strings, so a plain truthiness test would
    treat "false"/"0" as True. Accepts an already-boolean value unchanged.
    """
    if isinstance(value, bool):
        return value
    return str(value).strip().lower() in ("1", "true", "yes", "on")


async def process_manifest(
    request: Request, mpd_dict: dict, proxy_headers: ProxyRequestHeaders, key_id: str = None, key: str = None
) -> Response:
    """
    Process the MPD manifest and convert it to an HLS manifest.

    Args:
        request (Request): The incoming HTTP request.
        mpd_dict (dict): The parsed MPD manifest data.
        proxy_headers (ProxyRequestHeaders): Headers to attach to the response.
        key_id (str, optional): The DRM key ID. Defaults to None.
        key (str, optional): The DRM key. Defaults to None.

    Returns:
        Response: The HLS manifest as an HTTP response.
    """
    hls_content = build_hls(mpd_dict, request, key_id, key)
    return Response(content=hls_content, media_type="application/vnd.apple.mpegurl", headers=proxy_headers.response)


async def process_playlist(
    request: Request, mpd_dict: dict, profile_id: str, proxy_headers: ProxyRequestHeaders
) -> Response:
    """
    Convert the MPD manifest to an HLS media playlist for one profile.

    Args:
        request (Request): The incoming HTTP request.
        mpd_dict (dict): The parsed MPD manifest data.
        profile_id (str): The profile ID to generate the playlist for.
        proxy_headers (ProxyRequestHeaders): Headers to attach to the response.

    Returns:
        Response: The HLS playlist as an HTTP response.

    Raises:
        HTTPException: 404 if the profile is not present in the manifest.
    """
    matching_profiles = [p for p in mpd_dict["profiles"] if p["id"] == profile_id]
    if not matching_profiles:
        raise HTTPException(status_code=404, detail="Profile not found")

    hls_content = build_hls_playlist(mpd_dict, matching_profiles, request)
    return Response(content=hls_content, media_type="application/vnd.apple.mpegurl", headers=proxy_headers.response)


async def process_segment(
    init_content: bytes,
    segment_content: bytes,
    mimetype: str,
    proxy_headers: ProxyRequestHeaders,
    key_id: str = None,
    key: str = None,
) -> Response:
    """
    Process (and decrypt, when DRM keys are supplied) a media segment.

    Args:
        init_content (bytes): The initialization segment content.
        segment_content (bytes): The media segment content.
        mimetype (str): The MIME type of the segment.
        proxy_headers (ProxyRequestHeaders): Headers to attach to the response.
        key_id (str, optional): The DRM key ID. Defaults to None.
        key (str, optional): The DRM key. Defaults to None.

    Returns:
        Response: The (decrypted) segment as an HTTP response.
    """
    if key_id and key:
        # DRM-protected content: decrypt init+segment together.
        now = time.time()
        decrypted_content = decrypt_segment(init_content, segment_content, key_id, key)
        logger.info(f"Decryption of {mimetype} segment took {time.time() - now:.4f} seconds")
    else:
        # Clear content: simply concatenate the init and media segments.
        decrypted_content = init_content + segment_content

    return Response(content=decrypted_content, media_type=mimetype, headers=proxy_headers.response)


def build_hls(mpd_dict: dict, request: Request, key_id: str = None, key: str = None) -> str:
    """
    Build an HLS master manifest from the MPD manifest.

    Args:
        mpd_dict (dict): The parsed MPD manifest data.
        request (Request): The incoming HTTP request.
        key_id (str, optional): The DRM key ID. Defaults to None.
        key (str, optional): The DRM key. Defaults to None.

    Returns:
        str: The HLS master manifest.
    """
    hls = ["#EXTM3U", "#EXT-X-VERSION:6"]
    query_params = dict(request.query_params)
    # FIX: query values are strings; "false"/"0" must not enable encryption.
    has_encrypted = _truthy_query_flag(query_params.pop("has_encrypted", False))

    video_profiles = {}
    audio_profiles = {}

    # Base URL of the playlist endpoint, rewritten to the original scheme
    # (the app may sit behind a TLS-terminating reverse proxy).
    proxy_url = request.url_for("playlist_endpoint")
    proxy_url = str(proxy_url.replace(scheme=get_original_scheme(request)))

    for profile in mpd_dict["profiles"]:
        query_params.update({"profile_id": profile["id"], "key_id": key_id or "", "key": key or ""})
        playlist_url = encode_mediaflow_proxy_url(
            proxy_url,
            query_params=query_params,
            encryption_handler=encryption_handler if has_encrypted else None,
        )

        if "video" in profile["mimeType"]:
            video_profiles[profile["id"]] = (profile, playlist_url)
        elif "audio" in profile["mimeType"]:
            audio_profiles[profile["id"]] = (profile, playlist_url)

    # Audio renditions: the first track is marked as the default.
    for i, (profile, playlist_url) in enumerate(audio_profiles.values()):
        is_default = "YES" if i == 0 else "NO"
        hls.append(
            f'#EXT-X-MEDIA:TYPE=AUDIO,GROUP-ID="audio",NAME="{profile["id"]}",DEFAULT={is_default},AUTOSELECT={is_default},LANGUAGE="{profile.get("lang", "und")}",URI="{playlist_url}"'
        )

    # Video variant streams.
    for profile, playlist_url in video_profiles.values():
        hls.append(
            f'#EXT-X-STREAM-INF:BANDWIDTH={profile["bandwidth"]},RESOLUTION={profile["width"]}x{profile["height"]},CODECS="{profile["codecs"]}",FRAME-RATE={profile["frameRate"]},AUDIO="audio"'
        )
        hls.append(playlist_url)

    return "\n".join(hls)


def build_hls_playlist(mpd_dict: dict, profiles: list[dict], request: Request) -> str:
    """
    Build an HLS media playlist from the MPD manifest for specific profiles.

    Args:
        mpd_dict (dict): The parsed MPD manifest data.
        profiles (list[dict]): The profiles to include in the playlist.
        request (Request): The incoming HTTP request.

    Returns:
        str: The HLS media playlist.
    """
    hls = ["#EXTM3U", "#EXT-X-VERSION:6"]

    added_segments = 0

    proxy_url = request.url_for("segment_endpoint")
    proxy_url = str(proxy_url.replace(scheme=get_original_scheme(request)))

    for index, profile in enumerate(profiles):
        segments = profile["segments"]
        if not segments:
            logger.warning(f"No segments found for profile {profile['id']}")
            continue

        # Playlist-level headers are emitted once, for the first profile only.
        if index == 0:
            sequence = segments[0]["number"]
            extinf_values = [f["extinf"] for f in segments if "extinf" in f]
            target_duration = math.ceil(max(extinf_values)) if extinf_values else 3
            hls.extend(
                [
                    f"#EXT-X-TARGETDURATION:{target_duration}",
                    f"#EXT-X-MEDIA-SEQUENCE:{sequence}",
                ]
            )
            if mpd_dict["isLive"]:
                hls.append("#EXT-X-PLAYLIST-TYPE:EVENT")
            else:
                hls.append("#EXT-X-PLAYLIST-TYPE:VOD")

        init_url = profile["initUrl"]

        query_params = dict(request.query_params)
        query_params.pop("profile_id", None)
        query_params.pop("d", None)
        # FIX: query values are strings; "false"/"0" must not enable encryption.
        has_encrypted = _truthy_query_flag(query_params.pop("has_encrypted", False))

        for segment in segments:
            hls.append(f'#EXTINF:{segment["extinf"]:.3f},')
            query_params.update(
                {"init_url": init_url, "segment_url": segment["media"], "mime_type": profile["mimeType"]}
            )
            hls.append(
                encode_mediaflow_proxy_url(
                    proxy_url,
                    query_params=query_params,
                    encryption_handler=encryption_handler if has_encrypted else None,
                )
            )
            added_segments += 1

    if not mpd_dict["isLive"]:
        hls.append("#EXT-X-ENDLIST")

    logger.info(f"Added {added_segments} segments to HLS playlist")
    return "\n".join(hls)
get_proxy_headers, ProxyRequestHeaders

proxy_router = APIRouter()


@proxy_router.head("/hls")
@proxy_router.get("/hls")
async def hls_stream_proxy(
    request: Request,
    d: HttpUrl,
    proxy_headers: ProxyRequestHeaders = Depends(get_proxy_headers),
    key_url: HttpUrl | None = None,
    verify_ssl: bool = False,
    use_request_proxy: bool = True,
):
    """
    Proxy an HLS stream request: fetch and process the m3u8 playlist or stream the content.

    Args:
        request (Request): The incoming HTTP request.
        d (HttpUrl): The destination URL to fetch the content from.
        key_url (HttpUrl, optional): Replacement HLS key URL (useful for bypassing some sneaky protection).
        proxy_headers (ProxyRequestHeaders): The headers to include in the request.
        verify_ssl (bool, optional): Whether to verify the destination's SSL certificate. Defaults to False.
        use_request_proxy (bool, optional): Whether to use the MediaFlow proxy configuration. Defaults to True.

    Returns:
        Response: The processed m3u8 playlist or the streamed content.
    """
    destination = str(d)
    return await handle_hls_stream_proxy(request, destination, proxy_headers, key_url, verify_ssl, use_request_proxy)


@proxy_router.head("/stream")
@proxy_router.get("/stream")
async def proxy_stream_endpoint(
    request: Request,
    d: HttpUrl,
    proxy_headers: ProxyRequestHeaders = Depends(get_proxy_headers),
    verify_ssl: bool = False,
    use_request_proxy: bool = True,
):
    """
    Proxy a stream request to the given video URL.

    Args:
        request (Request): The incoming HTTP request.
        d (HttpUrl): The URL of the video to stream.
        proxy_headers (ProxyRequestHeaders): The headers to include in the request.
        verify_ssl (bool, optional): Whether to verify the destination's SSL certificate. Defaults to False.
        use_request_proxy (bool, optional): Whether to use the MediaFlow proxy configuration. Defaults to True.

    Returns:
        Response: The HTTP response with the streamed content.

    Raises:
        HTTPException: 416 for malformed "bytes=NaN-NaN" range headers.
    """
    from fastapi import HTTPException

    content_range = proxy_headers.request.get("range", "bytes=0-")
    # FIX: mirror the guard in routes/proxy.py — some broken players send
    # "bytes=NaN-NaN", which must be rejected instead of forwarded upstream.
    if "nan" in content_range.casefold():
        raise HTTPException(status_code=416, detail="Invalid Range Header")
    proxy_headers.request.update({"range": content_range})
    return await proxy_stream(request.method, str(d), proxy_headers, verify_ssl, use_request_proxy)


@proxy_router.get("/mpd/manifest")
async def manifest_endpoint(
    request: Request,
    d: HttpUrl,
    proxy_headers: ProxyRequestHeaders = Depends(get_proxy_headers),
    key_id: str = None,
    key: str = None,
    verify_ssl: bool = False,
    use_request_proxy: bool = True,
):
    """
    Retrieve and process the MPD manifest, converting it to an HLS manifest.

    Args:
        request (Request): The incoming HTTP request.
        d (HttpUrl): The URL of the MPD manifest.
        proxy_headers (ProxyRequestHeaders): The headers to include in the request.
        key_id (str, optional): The DRM key ID. Defaults to None.
        key (str, optional): The DRM key. Defaults to None.
        verify_ssl (bool, optional): Whether to verify the destination's SSL certificate. Defaults to False.
        use_request_proxy (bool, optional): Whether to use the MediaFlow proxy configuration. Defaults to True.

    Returns:
        Response: The HTTP response with the HLS manifest.
    """
    return await get_manifest(request, str(d), proxy_headers, key_id, key, verify_ssl, use_request_proxy)


@proxy_router.get("/mpd/playlist")
async def playlist_endpoint(
    request: Request,
    d: HttpUrl,
    profile_id: str,
    proxy_headers: ProxyRequestHeaders = Depends(get_proxy_headers),
    key_id: str = None,
    key: str = None,
    verify_ssl: bool = False,
    use_request_proxy: bool = True,
):
    """
    Retrieve and process the MPD manifest, converting it to an HLS playlist for a specific profile.

    Args:
        request (Request): The incoming HTTP request.
        d (HttpUrl): The URL of the MPD manifest.
        profile_id (str): The profile ID to generate the playlist for.
        proxy_headers (ProxyRequestHeaders): The headers to include in the request.
        key_id (str, optional): The DRM key ID. Defaults to None.
        key (str, optional): The DRM key. Defaults to None.
        verify_ssl (bool, optional): Whether to verify the destination's SSL certificate. Defaults to False.
        use_request_proxy (bool, optional): Whether to use the MediaFlow proxy configuration. Defaults to True.

    Returns:
        Response: The HTTP response with the HLS playlist.
    """
    return await get_playlist(request, str(d), profile_id, proxy_headers, key_id, key, verify_ssl, use_request_proxy)


@proxy_router.get("/mpd/segment")
async def segment_endpoint(
    init_url: HttpUrl,
    segment_url: HttpUrl,
    mime_type: str,
    proxy_headers: ProxyRequestHeaders = Depends(get_proxy_headers),
    key_id: str = None,
    key: str = None,
    verify_ssl: bool = False,
    use_request_proxy: bool = True,
):
    """
    Retrieve and process a media segment, decrypting it if necessary.

    Args:
        init_url (HttpUrl): The URL of the initialization segment.
        segment_url (HttpUrl): The URL of the media segment.
        mime_type (str): The MIME type of the segment.
        proxy_headers (ProxyRequestHeaders): The headers to include in the request.
        key_id (str, optional): The DRM key ID. Defaults to None.
        key (str, optional): The DRM key. Defaults to None.
        verify_ssl (bool, optional): Whether to verify the destination's SSL certificate. Defaults to False.
        use_request_proxy (bool, optional): Whether to use the MediaFlow proxy configuration. Defaults to True.

    Returns:
        Response: The HTTP response with the processed segment.
    """
    return await get_segment(
        str(init_url), str(segment_url), mime_type, proxy_headers, key_id, key, verify_ssl, use_request_proxy
    )


@proxy_router.get("/ip")
async def get_mediaflow_proxy_public_ip(
    use_request_proxy: bool = True,
):
    """
    Retrieve the public IP address of the MediaFlow proxy server.

    Returns:
        Response: JSON of the form {"ip": "xxx.xxx.xxx.xxx"}.
    """
    return await get_public_ip(use_request_proxy)
{"ip": "xxx.xxx.xxx.xxx"} + """ + return await get_public_ip(use_request_proxy) diff --git a/mediaflow_proxy/routes/__init__.py b/mediaflow_proxy/routes/__init__.py new file mode 100644 index 0000000..4a8a05c --- /dev/null +++ b/mediaflow_proxy/routes/__init__.py @@ -0,0 +1,5 @@ +from .proxy import proxy_router +from .extractor import extractor_router +from .speedtest import speedtest_router + +__all__ = ["proxy_router", "extractor_router", "speedtest_router"] diff --git a/mediaflow_proxy/routes/extractor.py b/mediaflow_proxy/routes/extractor.py new file mode 100644 index 0000000..09981f6 --- /dev/null +++ b/mediaflow_proxy/routes/extractor.py @@ -0,0 +1,61 @@ +import logging +from typing import Annotated + +from fastapi import APIRouter, Query, HTTPException, Request, Depends +from fastapi.responses import RedirectResponse + +from mediaflow_proxy.extractors.base import ExtractorError +from mediaflow_proxy.extractors.factory import ExtractorFactory +from mediaflow_proxy.schemas import ExtractorURLParams +from mediaflow_proxy.utils.cache_utils import get_cached_extractor_result, set_cache_extractor_result +from mediaflow_proxy.utils.http_utils import ( + encode_mediaflow_proxy_url, + get_original_scheme, + ProxyRequestHeaders, + get_proxy_headers, +) + +extractor_router = APIRouter() +logger = logging.getLogger(__name__) + + +@extractor_router.head("/video") +@extractor_router.get("/video") +async def extract_url( + extractor_params: Annotated[ExtractorURLParams, Query()], + request: Request, + proxy_headers: Annotated[ProxyRequestHeaders, Depends(get_proxy_headers)], +): + """Extract clean links from various video hosting services.""" + try: + cache_key = f"{extractor_params.host}_{extractor_params.model_dump_json()}" + response = await get_cached_extractor_result(cache_key) + if not response: + extractor = ExtractorFactory.get_extractor(extractor_params.host, proxy_headers.request) + response = await extractor.extract(extractor_params.destination, 
**extractor_params.extra_params) + await set_cache_extractor_result(cache_key, response) + else: + response["request_headers"].update(proxy_headers.request) + + response["mediaflow_proxy_url"] = str( + request.url_for(response.pop("mediaflow_endpoint")).replace(scheme=get_original_scheme(request)) + ) + response["query_params"] = response.get("query_params", {}) + # Add API password to query params + response["query_params"]["api_password"] = request.query_params.get("api_password") + + if extractor_params.redirect_stream: + stream_url = encode_mediaflow_proxy_url( + **response, + response_headers=proxy_headers.response, + ) + return RedirectResponse(url=stream_url, status_code=302) + + return response + + except ExtractorError as e: + logger.error(f"Extraction failed: {str(e)}") + raise HTTPException(status_code=400, detail=str(e)) + except Exception as e: + logger.exception(f"Extraction failed: {str(e)}") + raise HTTPException(status_code=500, detail=f"Extraction failed: {str(e)}") diff --git a/mediaflow_proxy/routes/proxy.py b/mediaflow_proxy/routes/proxy.py new file mode 100644 index 0000000..fb936fa --- /dev/null +++ b/mediaflow_proxy/routes/proxy.py @@ -0,0 +1,138 @@ +from typing import Annotated + +from fastapi import Request, Depends, APIRouter, Query, HTTPException + +from mediaflow_proxy.handlers import ( + handle_hls_stream_proxy, + proxy_stream, + get_manifest, + get_playlist, + get_segment, + get_public_ip, +) +from mediaflow_proxy.schemas import ( + MPDSegmentParams, + MPDPlaylistParams, + HLSManifestParams, + ProxyStreamParams, + MPDManifestParams, +) +from mediaflow_proxy.utils.http_utils import get_proxy_headers, ProxyRequestHeaders + +proxy_router = APIRouter() + + +@proxy_router.head("/hls/manifest.m3u8") +@proxy_router.get("/hls/manifest.m3u8") +async def hls_manifest_proxy( + request: Request, + hls_params: Annotated[HLSManifestParams, Query()], + proxy_headers: Annotated[ProxyRequestHeaders, Depends(get_proxy_headers)], +): + """ + Proxify HLS 
@proxy_router.head("/hls/manifest.m3u8")
@proxy_router.get("/hls/manifest.m3u8")
async def hls_manifest_proxy(
    request: Request,
    hls_params: Annotated[HLSManifestParams, Query()],
    proxy_headers: Annotated[ProxyRequestHeaders, Depends(get_proxy_headers)],
):
    """
    Proxy an HLS stream request, fetching and processing the m3u8 playlist
    or streaming the content.

    Args:
        request (Request): The incoming HTTP request.
        hls_params (HLSManifestParams): Parameters for the HLS stream request.
        proxy_headers (ProxyRequestHeaders): Headers to forward upstream.

    Returns:
        Response: The processed m3u8 playlist or the streamed content.
    """
    return await handle_hls_stream_proxy(request, hls_params, proxy_headers)


@proxy_router.head("/stream")
@proxy_router.get("/stream")
async def proxy_stream_endpoint(
    request: Request,
    stream_params: Annotated[ProxyStreamParams, Query()],
    proxy_headers: Annotated[ProxyRequestHeaders, Depends(get_proxy_headers)],
):
    """
    Proxy a stream request to the given video URL.

    Args:
        request (Request): The incoming HTTP request.
        stream_params (ProxyStreamParams): Parameters for the stream request.
        proxy_headers (ProxyRequestHeaders): Headers to forward upstream.

    Returns:
        Response: The HTTP response with the streamed content.

    Raises:
        HTTPException: 416 for malformed "bytes=NaN-NaN" range headers.
    """
    range_header = proxy_headers.request.get("range", "bytes=0-")
    # Some broken players send "bytes=NaN-NaN"; reject it outright.
    if "nan" in range_header.casefold():
        raise HTTPException(status_code=416, detail="Invalid Range Header")
    proxy_headers.request["range"] = range_header
    return await proxy_stream(request.method, stream_params, proxy_headers)


@proxy_router.get("/mpd/manifest.m3u8")
async def mpd_manifest_proxy(
    request: Request,
    manifest_params: Annotated[MPDManifestParams, Query()],
    proxy_headers: Annotated[ProxyRequestHeaders, Depends(get_proxy_headers)],
):
    """
    Retrieve the MPD manifest and convert it to an HLS manifest.

    Args:
        request (Request): The incoming HTTP request.
        manifest_params (MPDManifestParams): Parameters for the manifest request.
        proxy_headers (ProxyRequestHeaders): Headers to forward upstream.

    Returns:
        Response: The HTTP response with the HLS manifest.
    """
    return await get_manifest(request, manifest_params, proxy_headers)


@proxy_router.get("/mpd/playlist.m3u8")
async def playlist_endpoint(
    request: Request,
    playlist_params: Annotated[MPDPlaylistParams, Query()],
    proxy_headers: Annotated[ProxyRequestHeaders, Depends(get_proxy_headers)],
):
    """
    Retrieve the MPD manifest and convert it to an HLS playlist for one profile.

    Args:
        request (Request): The incoming HTTP request.
        playlist_params (MPDPlaylistParams): Parameters for the playlist request.
        proxy_headers (ProxyRequestHeaders): Headers to forward upstream.

    Returns:
        Response: The HTTP response with the HLS playlist.
    """
    return await get_playlist(request, playlist_params, proxy_headers)


@proxy_router.get("/mpd/segment.mp4")
async def segment_endpoint(
    segment_params: Annotated[MPDSegmentParams, Query()],
    proxy_headers: Annotated[ProxyRequestHeaders, Depends(get_proxy_headers)],
):
    """
    Retrieve and process a media segment, decrypting it if necessary.

    Args:
        segment_params (MPDSegmentParams): Parameters for the segment request.
        proxy_headers (ProxyRequestHeaders): Headers to forward upstream.

    Returns:
        Response: The HTTP response with the processed segment.
    """
    return await get_segment(segment_params, proxy_headers)


@proxy_router.get("/ip")
async def get_mediaflow_proxy_public_ip():
    """
    Retrieve the public IP address of the MediaFlow proxy server.

    Returns:
        Response: JSON of the form {"ip": "xxx.xxx.xxx.xxx"}.
    """
    return await get_public_ip()
{"ip": "xxx.xxx.xxx.xxx"} + """ + return await get_public_ip() diff --git a/mediaflow_proxy/routes/speedtest.py b/mediaflow_proxy/routes/speedtest.py new file mode 100644 index 0000000..ec33bfd --- /dev/null +++ b/mediaflow_proxy/routes/speedtest.py @@ -0,0 +1,43 @@ +import uuid + +from fastapi import APIRouter, BackgroundTasks, HTTPException, Request +from fastapi.responses import RedirectResponse + +from mediaflow_proxy.speedtest.service import SpeedTestService, SpeedTestProvider + +speedtest_router = APIRouter() + +# Initialize service +speedtest_service = SpeedTestService() + + +@speedtest_router.get("/", summary="Show speed test interface") +async def show_speedtest_page(): + """Return the speed test HTML interface.""" + return RedirectResponse(url="/speedtest.html") + + +@speedtest_router.post("/start", summary="Start a new speed test", response_model=dict) +async def start_speedtest(background_tasks: BackgroundTasks, provider: SpeedTestProvider, request: Request): + """Start a new speed test for the specified provider.""" + task_id = str(uuid.uuid4()) + api_key = request.headers.get("api_key") + + # Create and initialize the task + await speedtest_service.create_test(task_id, provider, api_key) + + # Schedule the speed test + background_tasks.add_task(speedtest_service.run_speedtest, task_id, provider, api_key) + + return {"task_id": task_id} + + +@speedtest_router.get("/results/{task_id}", summary="Get speed test results") +async def get_speedtest_results(task_id: str): + """Get the results or current status of a speed test.""" + task = await speedtest_service.get_test_results(task_id) + + if not task: + raise HTTPException(status_code=404, detail="Speed test task not found or expired") + + return task.dict() diff --git a/mediaflow_proxy/schemas.py b/mediaflow_proxy/schemas.py new file mode 100644 index 0000000..9a8fa2b --- /dev/null +++ b/mediaflow_proxy/schemas.py @@ -0,0 +1,74 @@ +from typing import Literal, Dict, Any, Optional + +from pydantic import 
BaseModel, Field, IPvAnyAddress, ConfigDict


class GenerateUrlRequest(BaseModel):
    """Payload for /generate_encrypted_or_encoded_url: everything needed to build a proxied URL."""

    mediaflow_proxy_url: str = Field(..., description="The base URL for the mediaflow proxy.")
    endpoint: Optional[str] = Field(None, description="The specific endpoint to be appended to the base URL.")
    destination_url: Optional[str] = Field(
        None, description="The destination URL to which the request will be proxied."
    )
    query_params: Optional[dict] = Field(
        default_factory=dict, description="Query parameters to be included in the request."
    )
    request_headers: Optional[dict] = Field(default_factory=dict, description="Headers to be included in the request.")
    response_headers: Optional[dict] = Field(
        default_factory=dict, description="Headers to be included in the response."
    )
    expiration: Optional[int] = Field(
        None, description="Expiration time for the URL in seconds. If not provided, the URL will not expire."
    )
    api_password: Optional[str] = Field(
        None, description="API password for encryption. If not provided, the URL will only be encoded."
    )
    ip: Optional[IPvAnyAddress] = Field(None, description="The IP address to restrict the URL to.")


class GenericParams(BaseModel):
    """Base class for endpoint query-parameter models; accepts field names as well as aliases."""

    model_config = ConfigDict(populate_by_name=True)


class HLSManifestParams(GenericParams):
    """Query parameters for the HLS manifest proxy endpoint."""

    destination: str = Field(..., description="The URL of the HLS manifest.", alias="d")
    key_url: Optional[str] = Field(
        None,
        description="The HLS Key URL to replace the original key URL. Defaults to None. (Useful for bypassing some sneaky protection)",
    )


class ProxyStreamParams(GenericParams):
    """Query parameters for the plain stream proxy endpoint."""

    destination: str = Field(..., description="The URL of the stream.", alias="d")


class MPDManifestParams(GenericParams):
    """Query parameters for the MPD manifest endpoint."""

    destination: str = Field(..., description="The URL of the MPD manifest.", alias="d")
    key_id: Optional[str] = Field(None, description="The DRM key ID (optional).")
    key: Optional[str] = Field(None, description="The DRM key (optional).")


class MPDPlaylistParams(GenericParams):
    """Query parameters for the MPD playlist endpoint."""

    destination: str = Field(..., description="The URL of the MPD manifest.", alias="d")
    profile_id: str = Field(..., description="The profile ID to generate the playlist for.")
    key_id: Optional[str] = Field(None, description="The DRM key ID (optional).")
    key: Optional[str] = Field(None, description="The DRM key (optional).")


class MPDSegmentParams(GenericParams):
    """Query parameters for the MPD segment endpoint."""

    init_url: str = Field(..., description="The URL of the initialization segment.")
    segment_url: str = Field(..., description="The URL of the media segment.")
    mime_type: str = Field(..., description="The MIME type of the segment.")
    key_id: Optional[str] = Field(None, description="The DRM key ID (optional).")
    key: Optional[str] = Field(None, description="The DRM key (optional).")


class ExtractorURLParams(GenericParams):
    """Query parameters for the extractor endpoint."""

    host: Literal["Doodstream", "Mixdrop", "Uqload", "Streamtape", "Supervideo", "LiveTV"] = Field(
        ..., description="The host to extract the URL from."
    )
    destination: str = Field(..., description="The URL of the stream.", alias="d")
    redirect_stream: bool = Field(False, description="Whether to redirect to the stream endpoint automatically.")
    extra_params: Dict[str, Any] = Field(
        default_factory=dict,
        description="Additional parameters required for specific extractors (e.g., stream_title for LiveTV)",
    )
from datetime import datetime, timezone
from enum import Enum
from typing import Dict, Optional

from pydantic import BaseModel, Field


class SpeedTestProvider(str, Enum):
    """Debrid providers that support speed testing."""

    REAL_DEBRID = "real_debrid"
    ALL_DEBRID = "all_debrid"


class ServerInfo(BaseModel):
    """A speed test server (download URL plus display name)."""

    url: str
    name: str


class UserInfo(BaseModel):
    """Caller details reported by the provider, when available."""

    ip: Optional[str] = None
    isp: Optional[str] = None
    country: Optional[str] = None


class SpeedTestResult(BaseModel):
    """Outcome of a single location's speed test."""

    speed_mbps: float = Field(..., description="Speed in Mbps")
    duration: float = Field(..., description="Test duration in seconds")
    data_transferred: int = Field(..., description="Data transferred in bytes")
    # FIX: datetime.utcnow is deprecated and returns a naive datetime; use an
    # aware UTC timestamp, consistent with service.py's started_at/completed_at.
    timestamp: datetime = Field(default_factory=lambda: datetime.now(timezone.utc))


class LocationResult(BaseModel):
    """Per-location result wrapper: either a result or an error message."""

    result: Optional[SpeedTestResult] = None
    error: Optional[str] = None
    server_name: str
    server_url: str


class SpeedTestTask(BaseModel):
    """State of one speed test run across all of a provider's locations."""

    task_id: str
    provider: SpeedTestProvider
    # default_factory avoids sharing intent ambiguity of a literal {} default.
    results: Dict[str, LocationResult] = Field(default_factory=dict)
    started_at: datetime
    completed_at: Optional[datetime] = None
    status: str = "running"
    user_info: Optional[UserInfo] = None
    current_location: Optional[str] = None
import random
from typing import Dict, Tuple, Optional

from mediaflow_proxy.configs import settings
from mediaflow_proxy.speedtest.models import ServerInfo, UserInfo
from mediaflow_proxy.speedtest.providers.base import BaseSpeedTestProvider, SpeedTestProviderConfig
from mediaflow_proxy.utils.http_utils import request_with_retry


class SpeedTestError(Exception):
    """Raised when the AllDebrid speed test API cannot be queried."""

    pass


class AllDebridSpeedTest(BaseSpeedTestProvider):
    """AllDebrid speed test provider implementation."""

    def __init__(self, api_key: str):
        self.api_key = api_key
        # Populated by get_test_urls(); maps server name -> ServerInfo.
        self.servers: Dict[str, ServerInfo] = {}

    async def get_test_urls(self) -> Tuple[Dict[str, str], Optional[UserInfo]]:
        """Fetch AllDebrid's speed test servers and the caller's user info."""
        response = await request_with_retry(
            "GET",
            "https://alldebrid.com/internalapi/v4/speedtest",
            headers={"User-Agent": settings.user_agent},
            params={"agent": "service", "version": "1.0-363869a7", "apikey": self.api_key},
        )

        if response.status_code != 200:
            raise SpeedTestError("Failed to fetch AllDebrid servers")

        payload = response.json()
        if payload["status"] != "success":
            raise SpeedTestError("AllDebrid API returned error")

        details = payload["data"]
        user_info = UserInfo(ip=details["ip"], isp=details["isp"], country=details["country"])

        self.servers = {entry["name"]: ServerInfo(**entry) for entry in details["servers"]}

        # Random token appended to every URL to defeat caching.
        cache_buster = f"{random.uniform(1, 2):.24f}".replace(".", "")
        urls = {name: f"{info.url}/speedtest/{cache_buster}" for name, info in self.servers.items()}

        return urls, user_info

    async def get_config(self) -> SpeedTestProviderConfig:
        """Return the provider configuration (10-second test window)."""
        urls, _ = await self.get_test_urls()
        return SpeedTestProviderConfig(test_duration=10, test_urls=urls)
from abc import ABC, abstractmethod
from typing import Dict, Tuple, Optional
from pydantic import BaseModel

from mediaflow_proxy.speedtest.models import UserInfo


class SpeedTestProviderConfig(BaseModel):
    """Provider configuration: test window length and per-location URLs."""

    test_duration: int = 10  # seconds
    test_urls: Dict[str, str]


class BaseSpeedTestProvider(ABC):
    """Base class for speed test providers."""

    @abstractmethod
    async def get_test_urls(self) -> Tuple[Dict[str, str], Optional[UserInfo]]:
        """Get the provider's test URLs and optional user info."""
        pass

    @abstractmethod
    async def get_config(self) -> SpeedTestProviderConfig:
        """Get provider-specific configuration."""
        pass


class RealDebridSpeedTest(BaseSpeedTestProvider):
    """RealDebrid speed test provider implementation."""

    async def get_test_urls(self) -> Tuple[Dict[str, str], Optional[UserInfo]]:
        """Return RealDebrid's fixed test-file URLs, one per location."""
        base_urls = {
            "AMS": "https://45.download.real-debrid.com/speedtest/testDefault.rar/",
            "RBX": "https://rbx.download.real-debrid.com/speedtest/test.rar/",
            "LON1": "https://lon1.download.real-debrid.com/speedtest/test.rar/",
            "HKG1": "https://hkg1.download.real-debrid.com/speedtest/test.rar/",
            "SGP1": "https://sgp1.download.real-debrid.com/speedtest/test.rar/",
            "SGPO1": "https://sgpo1.download.real-debrid.com/speedtest/test.rar/",
            "TYO1": "https://tyo1.download.real-debrid.com/speedtest/test.rar/",
            "LAX1": "https://lax1.download.real-debrid.com/speedtest/test.rar/",
            "TLV1": "https://tlv1.download.real-debrid.com/speedtest/test.rar/",
            "MUM1": "https://mum1.download.real-debrid.com/speedtest/test.rar/",
            "JKT1": "https://jkt1.download.real-debrid.com/speedtest/test.rar/",
            "Cloudflare": "https://45.download.real-debrid.cloud/speedtest/testCloudflare.rar/",
        }
        # Append a random suffix to each URL to prevent cached responses.
        return {name: f"{url}{random.uniform(0, 1):.16f}" for name, url in base_urls.items()}, None

    async def get_config(self) -> SpeedTestProviderConfig:
        """Return the provider configuration (10-second test window)."""
        urls, _ = await self.get_test_urls()
        return SpeedTestProviderConfig(test_duration=10, test_urls=urls)
class SpeedTestService:
    """Service for managing speed tests across different providers.

    Progress is published by writing the task object to the speedtest cache
    after every state change, so pollers calling get_test_results() see
    per-location updates in real time.
    """

    def __init__(self):
        # Provider mapping: enum value -> concrete provider class.
        self._providers: Dict[SpeedTestProvider, Type[BaseSpeedTestProvider]] = {
            SpeedTestProvider.REAL_DEBRID: RealDebridSpeedTest,
            SpeedTestProvider.ALL_DEBRID: AllDebridSpeedTest,
        }

    def _get_provider(self, provider: SpeedTestProvider, api_key: Optional[str] = None) -> BaseSpeedTestProvider:
        """Get the appropriate provider implementation.

        Raises:
            ValueError: if the provider enum is unknown, or AllDebrid is
                requested without an API key.
        """
        provider_class = self._providers.get(provider)
        if not provider_class:
            raise ValueError(f"Unsupported provider: {provider}")

        if provider == SpeedTestProvider.ALL_DEBRID and not api_key:
            raise ValueError("API key required for AllDebrid")

        # Only the AllDebrid constructor takes an API key.
        return provider_class(api_key) if provider == SpeedTestProvider.ALL_DEBRID else provider_class()

    async def create_test(
        self, task_id: str, provider: SpeedTestProvider, api_key: Optional[str] = None
    ) -> SpeedTestTask:
        """Create a new speed test task and persist it to the cache.

        NOTE(review): the URLs returned here are discarded — run_speedtest()
        re-fetches them via get_config(); only user_info is kept from this call.
        """
        provider_impl = self._get_provider(provider, api_key)

        # Get initial URLs and user info
        urls, user_info = await provider_impl.get_test_urls()

        task = SpeedTestTask(
            task_id=task_id, provider=provider, started_at=datetime.now(tz=timezone.utc), user_info=user_info
        )

        await set_cache_speedtest(task_id, task)
        return task

    @staticmethod
    async def get_test_results(task_id: str) -> Optional[SpeedTestTask]:
        """Get results for a specific task, or None if the task is unknown."""
        return await get_cached_speedtest(task_id)

    async def run_speedtest(self, task_id: str, provider: SpeedTestProvider, api_key: Optional[str] = None) -> None:
        """Run the speed test with real-time updates.

        Each location is tested in turn; a per-location failure is recorded as
        an error result and does not abort the remaining locations. Any other
        failure marks the whole task as "failed".
        """
        try:
            task = await get_cached_speedtest(task_id)
            if not task:
                raise ValueError(f"Task {task_id} not found")

            provider_impl = self._get_provider(provider, api_key)
            config = await provider_impl.get_config()

            async with create_httpx_client() as client:
                streamer = Streamer(client)

                for location, url in config.test_urls.items():
                    try:
                        # Publish "currently testing <location>" before starting.
                        task.current_location = location
                        await set_cache_speedtest(task_id, task)
                        result = await self._test_location(location, url, streamer, config.test_duration, provider_impl)
                        task.results[location] = result
                        await set_cache_speedtest(task_id, task)
                    except Exception as e:
                        # Record the failure for this location and keep going.
                        logger.error(f"Error testing {location}: {str(e)}")
                        task.results[location] = LocationResult(
                            error=str(e), server_name=location, server_url=config.test_urls[location]
                        )
                        await set_cache_speedtest(task_id, task)

            # Mark task as completed
            task.completed_at = datetime.now(tz=timezone.utc)
            task.status = "completed"
            task.current_location = None
            await set_cache_speedtest(task_id, task)

        except Exception as e:
            # Setup/teardown failure: re-read the task so a partially-written
            # local copy is not persisted, then flag it as failed.
            logger.error(f"Error in speed test task {task_id}: {str(e)}")
            if task := await get_cached_speedtest(task_id):
                task.status = "failed"
                await set_cache_speedtest(task_id, task)

    async def _test_location(
        self, location: str, url: str, streamer: Streamer, test_duration: int, provider: BaseSpeedTestProvider
    ) -> LocationResult:
        """Test speed for a specific location.

        Streams the test URL for up to `test_duration` seconds and converts the
        byte count into Mbps. Exceptions propagate to run_speedtest(), which
        records them as per-location errors.
        """
        try:
            start_time = time.time()
            total_bytes = 0

            await streamer.create_streaming_response(url, headers={})

            async for chunk in streamer.stream_content():
                # Stop counting once the sampling window has elapsed.
                if time.time() - start_time >= test_duration:
                    break
                total_bytes += len(chunk)

            duration = time.time() - start_time
            # bytes -> bits, then to megabits per second.
            speed_mbps = (total_bytes * 8) / (duration * 1_000_000)

            # Get server info if available (for AllDebrid)
            server_info = getattr(provider, "servers", {}).get(location)
            server_url = server_info.url if server_info else url

            return LocationResult(
                result=SpeedTestResult(
                    speed_mbps=round(speed_mbps, 2), duration=round(duration, 2), data_transferred=total_bytes
                ),
                server_name=location,
                server_url=server_url,
            )

        except Exception as e:
            logger.error(f"Error testing {location}: {str(e)}")
            raise  # Re-raise to be handled by run_speedtest
+ MediaFlow Proxy Logo +

MediaFlow Proxy

+
+

A high-performance proxy server for streaming media, supporting HTTP(S), HLS, and MPEG-DASH with real-time DRM decryption.

+ +

Key Features

+
Convert MPEG-DASH streams (DRM-protected and non-protected) to HLS
+
Support for Clear Key DRM-protected MPD DASH streams
+
Handle both live and video-on-demand (VOD) DASH streams
+
Proxy HTTP/HTTPS links with custom headers
+
Proxy and modify HLS (M3U8) streams in real-time with custom headers and key URL modifications for bypassing some sneaky restrictions.
+
Protect against unauthorized access and network bandwidth abuses
+ +

Getting Started

+

Visit the GitHub repository for installation instructions and documentation.

+ +

Premium Hosted Service

+

For a hassle-free experience, check out the premium hosted service on ElfHosted.

+ +

API Documentation

+

Explore the Swagger UI for comprehensive details about the API endpoints and their usage.

+ + + \ No newline at end of file diff --git a/mediaflow_proxy/static/logo.png b/mediaflow_proxy/static/logo.png new file mode 100644 index 0000000000000000000000000000000000000000..d71e14fd09f0941b23258b48459fdb9459c64686 GIT binary patch literal 87421 zcmXt81yCGK(>~lKxCTjZcY-^?^>7V-K!D)x5InfMA1;SWa0%`%A-KC9{JdY)-&Iqy zTRZ*C^i0oeKf4{Fsw|6vN`eXi05IgfN~r??u_X@@FtFAKufZ6|F zfk|eSk6CeAUDD4-uH(qBI*i5$0#I|849M>870oDDxZ zu7$0Jt|ZX~?_9zJbDX)ls=GWVMC!~8>J}pLnY0|O{NV!vU;%jYEl4QH^LHAlBDCec zRXDgu{!^;T#=PVEcK=j*fl||PXO$$7nb@}bpa3^*Z;7XLMlNR&$lBVqTj?j2XSq!+QlFdnVZ1

EvbmPYoymg`^6l4h6wYu$-T{a1DHs$dNs9-s7*##NNT3{KkLukre zORJU>86Ud!`#vRC@gmdXn)AzNYLLH#Wr~}l$J{fiNf(&OW78!2N&0q@w#xtKt|q=> z8mLk~vEs9yYK*SURkJSrC+)bR89VW`U=on{PttL)N)S#yO3loQD7o>G=v=7=p2sCI z@juJOHrZKlDUI#$A7D&QRpY*0=fXTaN3^i5VMa#+-XAF&Gj;h~R)j@kdti0z93>Gk zB{-Ba@gD@$hDbr{6j{`oj1H`ks}BTQoPl)aIdms@NYh+A+edhwlt-iCwEYJrtK@&x zl`}WdvFS@Lm(nbXgu8wSZknaL5E$gxmInrkA684TCmM76vh7K#fb0#Cb5hhlf-yu$ znW`B4DQOrK%)a0+qVCcZg9ISQ!i0>f$6|=t{AF*zhC}b!@$@Msj^o&LqG92rs_g6> zdOir1!@t})UKcGeLBpk10VCugcp+trgk<;UkKP;1Qxo-E)#SY!8(br2W!vjguN>E~ zF)*E^Q?e1!YgH72me%4?x=a_TY^vaMVG{tgb9HZOT;>rGh&E{XV?hzOllb%Xs>*uV z{v2*|%=|%_jZz8KkM`Flmsj-)b-hH*pwAJV>C_y(CHM*lJmv=>;_8@!UJOme^(IZN zCX*+i^k5K1#)L+@AhT9`D|LQn1n<$T!tY9uk z8zr)!)E0-a;ylY^wmbFP2o}&4Bn`6DQmN20_DvsA@eN*_gb1c@f2KZQW`$0Dp_uWk z-VT&8l*l|q6(;1={+yUjsg9=i(9i8Wr!VXYr&Z{MYh*cGn~kq$e=^Rm?$X)30^1dO zSG(AvR7zanB_T%Yg(!%tQt9ddY1~jaCy}lEr z0!c3e$}5Pt5G<UT8b)Iw+Bvy0;&HCfrW<#;}`keBQ@dwU@~=Dg_G%Bm93-e2~UEGpz|q0cPi zuYeSCf8GjWXVd=GLeMZ;7;PQ&CO34CJaSp!NRK=vP|PtS=P{r3aH-ovu^tQ3!r)oK zAteSarY=N{2Z6?KvRr#{_;$k*(>Ba+>gF(?RfY_-ZnZ{33hUGB_t#g#9~3maKG)nc z@MX`Id&kzvkO#_W{E?^e$sDk$-1t11lq1hOG`(AMOE|O4x35Zj%9vnffBO4gp;m{N zvFA*?l;F%mmhTx9b+2b7cq&I~7G_dF7;=;ivIu6XZ>pQ(C_$+> zB)`;((i2`czK%Et;e9+MYooFFK!9Df;Zz-#s3H; zA&4~h`G<9d^dTkWmd9;ciwUSt=~{{P^yp#Vuc1WCd9VCKMRbz?Cl9o&ofCqkVP#K8 zICW%PQe0?7MylT7Kvt1^q$j=akuQmq6;>4YG&l1lLN~J}>(|nehGxb7HQMlC^nop# z#sUvjvohPZ49)VN8UY-*w1zjA6%U@D_MGAAeAj;C%lTh;dIggoX{XFgHbT8|)(}9W znzKe~71qY5?Y8SrYeQa7InDm|LZafkRfel0BkExyQHTQGz~h-5s3;`)j5R#(-)6ai zH|$f3j(oN((Fc1T#&O1%Im6BJC|;$b^4o6T8S~d` zx7)`4PPgRc2K%@jmvZAtaDxKG$%7HfVEF?lI*%=mxZQ&Fl8esLt9|;~Uc*tEEA;#{ z)(0O|K6f}<^$9&QP#BWFnkMTf_mNMt z5&LYe00*S`8Bwhwu9K4z%AWk|MGN}E8`CD1phr5V4A1F1a5u;?oImoA0qmQAmo%ga`kW`&(vlaV|4sq*xPYf0*U~fq&Vc&e{!?k|0ylA%|%ML zOFMC7~ao^TBa~-nRc>KjQM|sfT#yN|uWN-4Zyzx_RODUzuD(LxQ*kp2_c=L4U zfv!zWs2n!tX>tNe_aXpMnTZ*nu*?Z$lgh$x*zX=*>u%_8ZBuUVy2F@?n4*izo@D0q z>v2L<2iH)Pq*RP-jyz5R&pHnNy}_B zU^46(dS-FZUZi&2ZhknpyE&+hzm7ihDGlLL^>wUv5vhqWO+nuelsMD^q-nqDl<9^) 
z9y>QavFTN;^7qWoEHihMEaYE8gbfKtGu9T(e4lov-%$pOoe!N^dBne^@&~I=AaLmg z$7OA!+u(=t8hkHZO`XVQGZ*@DU?!K2Jc+1bAvP$t{qWRdH!%0f_v@`L*Zt)1@UV`N z;{N0-d)nqr5xvlHjnO>!p--!o`MYFMlsMUFx&MIEbCFWDx)#5ZOo1VO7lr7fE@OWR zz_*aZkr!d-x9E4Fk%c6eNJF2lbd5&eL5Fp;JdA1pYsD9S=X?zh|FwvU8$PBvL3TGl zO^<&Ur=DM7!r0xgX+PG*2r&PHB?P#L`)8pe(*^Db0pqIb~!$VlV~#4+Jd;h0%L_?vuGAvizoQi1o#P!yuOJx#8cKz)Y$H zaN8k#KHs|)e^(wv2*N_xe|bft20}AWa>f?^XEj>zKi=je`L8Zg!6qBe zH&|7AJ6N)!ow_Bt-V@8W3CZH!4(3l;_i>wBu7l)G;sNyJO&>Wsg*h8u-$#N5Ke?4d zFBm>E1kHb@{hY5_0Ti8j17f6x168xO}tOa+Y5GgL=1YYmY0gVbse2s7<`y zl9k8+PtpfD*v?2tzg?>=tQ<1lgWs_#gjiK-KzbrO!JB6c3f0u;3>$beg!^&vJ#LXCy46zZLiP9^j&G-i*l&y8H3vL z+A=G%!Z5#@)P(!?lD^Bs;@u~YQWca_2lB^n$C^4agICnEKV%}Cz|PzBMT652P86F5 zuxfhk$f_&)L$BP(e`^%=v&5wW$=g}RA0;)|!u1V(icX|KN2!cw)^za8&bM*d+vl~* zOe8tgUN^DWXGg??9sBfs$oiyL*F=tr%%J&A!syK8(B3N$=U?J+jzLd5ujKm4JaE_&Gz=8es=MCnv#^wY*0O%ee2QyN_&@6VUX_ z{z73m?#tRw5l3VJ-3S3(Sz#UtLWPsa`@UCh`BNh;arLmn&~y$oR_5$RPfwCRvmE~Z zm?jfH-*j6AbeXR{Byq`ZqMDp3{_wXP3qn(%vv2T&z-)OQ;&PhsA0K{W+Y0tQ5ghO3LlKKdPiAi(JISd#!*Rgm0Jq6T7Q|R+0r0rpA#T-f9c&=IV!s*!EsmRl$xla{RG8Src(_>cs0}S< zMs@W1osG98h*&ehYwiws8szpPsZQ?`uc$}-C;BrUeE=iD)QNev6pnyZ5sz}8VY5b# zH;{`J@o%+yNm*3nz_vpv0x(8B5BYRHDOBcHC1#9x_R9eZv9ARGx3?iCtRSJN?Ninh ze#)wspRrop$|91y5yfyall7SzChs1tHb1imI7caTIcAW{l<6tG%^j77cYU~ur0a| zN1Xvz2gZs9(J`+P$&0_Q@BwI6*wo85ONUpO0*Td!Ohtc`@% zTsUO!UfglksK8j6V&3-9@_j{}>J`f@GFJ9gT~33oFZ^{j5w{ABwz>v#x*? 
z({DVmT19UbLL4o>?X1!rLjZ}g^nVT5;S^A$=(BK;>DN9sQEF!LR)Kan;r$&m2_OQ; zKjwsX9xfPBq5I4?lN44a*Zeh}!uqYRluJqLuvHAp`%z&?Ze}VTJ*{Peg{s$V%F@e0 ziy}K77lreVxc8H7=pecgn_q$wYRSMU_Rn$`*P~mDE~e7PvX9i z)FJ50Wf5ioDEgNw0N3+CqHa-HHgA`xKP)_9A(u^-lruI`6NtjJV59FqOk9d8noe}Rx<^_KB!EW+Oq&fQQ?9V)@sh8K86zwej z@hLhq7=B_?U4_KEikL1~`e=MCegZFw`-WJBy<phrt#~SCB5pvjLw?|v!A`UWMWR?x0Z)af?DM1GEP>K6+B2Z z2Lef2@s;?Y${ir3ZT?yYMgVKDGjI^87{y{ z0^;)K$r>dp>%viwLXhelwZBpBl!cpefQ-;Pk2!xc*FihXkHDO0>xo6HC8E3i3$QL; z`{4WsIE@r(x#)lvdp642n_Zplb1!t%z`ZlF(tpCQksTk;0Y{Q$P~i;leaTMz*LSsj zbMBH|j7jVqcpH_?1|YUH&qC6Ku?8)m(9B zct4;?+W0GLfVz+6*K>>s9oBz|jl{$aCSl7T67c4~o8VBh7VJ#_U^0$sK$==|G~Qqp zhGXU#$>vorX1!M*L|5%6${PsD%djIcW&;r9f6-0MCTvZ8`cQg4!+&d;#Ya><33R8c zeO{u@^E>P2eho%Lx$~JAP9rS$*DNWHCeDvQq2DMLNi>7(-9FUCYXgYs!nv~?WWr?M zIC6;&mSy1}^Re`$;l0CaqMyv-RUf(k;fu*giE_+gU~}O0N89}`iW?B;23QO=MdKy@ z46GFL13v?{c7iPBM~8I?3<8#Th)V+_KF70rMAUEfet9uBH}EiD7cIoal?wu*MtIAb zk9XCO<511&4z`-OVcc{7ZDsQdctEHz8eBF*m6M7KEckeYG#^$gNKM8fnf`2*g|>I? zD2l;o=rd~kLSmW4(YU>^OdMepSa8Os>FEh>3UahC&*Iq=Xw8lTTA3?`BO=zkl8NDv z&kg;#sdKxY;oR}zAY_{KdFK!k`+=XH@GfjAt@}MX2J>fP=Et}5Bu!Cp2 zIvKs&q$Bpc;o=ndy&@Vgd!;+@2Cv#ezO5C_2kYmqlSo&pzgcKz%9P@SI9!+;KOK12 zuWBu%jjICdp8JIG3j+Qx?4gmqj;^^rDtGn@+=Tn;5PH7zto!l5V3?WeNU$|NdB{5N zJBq5H!pI8lKgeBg!0j97yI2NLpTY1Cs)bNAMq(2x!_spN87uyCdJl-xkPZV`wc5M3 zl}+j`;U&Ch`M9z}CA40+I5|rwudXot!~xj*uyei??B3nxS$y?E6Yn~)2k0wzs0gZK zWT#)8vj)U5z0>141PPWyp_Qt9Fw!A9Vvx_oXt6@-@2UbRL#e3c1L#%Gw@;nF^t?MBhMi{K6?6E< zdb4Jgw&r@Bd-(S_hs*z)`oum>TB9gxW>(%ZFaqb$*f783KWWh5h*?UW$b2XJ`x~q6 zBCp|>*|Bmu!hpIa=Z@T90+9w6EAudQ4o>x7>6@aMH{N^Hclg*avJ&PU11dGNbWe^9 z6y8reA;0J}2A2!Y=4&JpxYk+HjBwIfQvWk5Di#x=@N9x%qF^#C*ZN)oJipHj_YT3C zf(TYcI1~*b?OF0A)gdQ5m;i){5&fq^h`sLqm_TmFHV6yKeTX4$%*Iby@ukYBtw;Zl zhj^s9q}J%e19CzMdp=M}p`8v_L0vpsMH85&Q`+!}36T;}IF1>JoeM0B9WGqkkVVC} z+15VPMvww@BubX|jfRM4-Wedn-#O4^-58W>4@ppDKF>*zZw+Ow^i^fT%4vFNc{X6_ zzm3ebQZkQ%l4c<}sF-$E+9#$MiizOmUKNy=3K{<{36`CFlBA* z*quqPbsjXh?hDb;4Fl5$)#ue`%;=o4u(weG62NTjq6q?2>&JMRdVVu1$GU&kgYmy6 
z>dn;q5;;t{lwks0P-`A0`=L9qE_JO-NuV6FUBdQFPGw>`k(WnBOBpP$hyu%Nx_-Lz z75K7TQk{?Oo;$BQ?_474-4MOBdZ@mWw=_&Q(iAcGw|ebfYASriKa@qBIrLc{h21t5 zGvYa5NP6F_JFrX!2~}#8hv4#2c2#ODb7+aJBDpAA$O<{kWct&v;fwSz73p%NY1qN; zC+YZTOe37Ev=aeaE+d~B&9lg!f}TRy9WmhpdcEJq`d0~)+Xx^=?_M4)JwMGkfit`I z*y&^pRO7DNf3?~T?VuIe8IkmMII#QqWI(n@qVZ)XLTb`foYr4+9i^7~vl|Q*_Zqq* z2kh_N)2(RBhf*%qdxNj+@8nygOds|^lo?!xeLzE2&?aV5{;Ty3H&8?+?K-mF)4bjc zzw|T6X$0)MN5V%S{D;qpD>iDw6zjwYU(-5XRe^2YQY52p*t(&$9M2mpZ z)tF9M=}XUdc2DPe!%^b07p{V;!`xrZSmms^G+zTjN8UXkzXaBJ3j=~NLx07!FAk00 z!OUZo{-)Dpn&*n_vojgRV|mQ54<`h9!B=CntA(#)`o8mp()kUcA3hip=5hmxmiJvcQIwN1iXcNvV~`O-N-`x+b)#BIeQmr47a_;fWWd$%?t{7U^C za)?9vPgQDBIF_!b;S|Cz+f}lw*fV(YaLqEMHXR5e*F>0jc{zsmYc zUh|c6O+Hsf({hhBr`Y|ok^3VeEd081zYtF(T7OYWWy^}*K9&}_A zCKC#xY^46NyDaMc{5tvloJ@x!Adm*Rjiy~a`7;oE@^;F?Z_pm}ErMa6y1QZLT(_&l&g$@9I1}a&XlPi%(UsbKOq>0HE3FFM2#_;psdzxZH!FpxUp-cX&oC1 z>)J(g8CJXZdAP5|gda|Z$92q`=#U{usfBJ_swXo>iHw&m7zNQV6Rf;b}14c=)~i-$|39Ud7zZL|JdaKO>i1?zdWw$zrm#)6+Ua@==pQ_?3*ZaIFkhu?y%c zu~wO=kt&Y;{hoU=^{zA2Tw7m;RTAk=8#_5M*48+C0cZmeoN~Fk4>;d)C~8>ndmZLv|*{5q?((wx9+|6vDUN+uHN|ghkr{P+Y?FNDV?1oShLN&s>;TkJS;j1 z+;{}i(dQzTuixv%@H-2MaSPcD4ZcWp5>MivuIj3^f8-FAodWk!gU4U1eM z_uP_mS6ryXLuJQFKJO|gc_J>74Vg??V&mWI;#|K!k(NUhT8f*qA~3Zk8T{*I;FAJ) zNEf7$i255r)#j=ep|f$QU! 
zc|lMMhgiCo)898$aXqohnv_JCTV(5LcB9p9B_s%EUVl~}V~@os2#ts#JKPUFVQ{}B zK%dNdY(raal{Vi*Gu>6%fWy!wV5}1D*&$KyuIbAr6Jyp?d-lWuYvG2-ySRmMl_NYBIR#Ej=N&dM!x3PJA0iPW z^02mi=+Z8SvWm5}7)Nu=BbFX;q;lu73)wlQN7Wj9{hb%0tz<@Kjpe`=YdqgcHB!OK zRA2JlyWIr5|F|o*#g9UR0OyL0DOYt+^mB$q0Q?us6QJIiOdp-;MNXSB=PQJu{rE5S zR~*yty8iAECmOroSH* zMe8mVxOtv*#MC4Yzxw%EgN+SUus!AH_;&WaGBS8m59;wJJb4^v1{&J*!I7kih*0D{ z@)s4xm@gf;H~WsNYjtVAq?i_t9u>^r#5LSxF*}v@A6u;ltb*%fE(DRwwQ%J!G+SD* z5`H>X865ha^3ZBpVdon=Q`Y`q4C07FkDby>CTb~O^u;btGb^z<^=YeZQ(j`1@FtC@ zU==$^S*i#Dp2WyQ@~HB@RZZ7(?-llKPdeCl-ptHX0pugflIYCTl_uYI&N>IPyp& zG?7)!#}sP(8*ejJjEO4~GMDrd^ZB`#%T^w1qCO zJVNM@-uR#nYExyiS;@{PUo(*I%H%yZFQme&-K20c!yb%}-S8(*DrpU~TY|J=nR>!$ zetiX{Sq<~my>Nf&P{}j3KQhoEy7L1DanbGQ!K;?Vq+nQe{b-+z9`O|x=ir7Vp#e(c z^Dc#8>nTX|R*VE8_P{WJ2u%xts=_dh^)%At0>cSYruL}<1&5tr)r%fKPh`F5%Gm93 z&u1M+ysLXOdLsPmqq>CsenV7fCZd>FlBVI00G;j8|T|s;GZ@;XHf+c zT(ps+c8+B2EYP%7h@RYd57o~Cw@m}ZxmG-Wm0IN`lyY}483RZm&xO2J40em3G4MVB ze(UZLD)rg+$nB||mM)7n@3W8t26Pu6u^aQ^Pk3X~83vH1Z7~*x%Gr4FxAAN;O~U!M z%{(A%SMlXzQ{MU)RthPNY|&p9CvauGu&O#9X4qm1V7=o-+?cN*!-}}6R&klOl|dtU zErCO=Fx~~B{QGU2DFr#NFZnLXJg94%BJ{<1I?{(iqH;661jnMU9>=zmHHIWaDSnHe ztlg&BDP-mC)!CZ+YK%Ij84oatyh~^Jb*KboffMOkKxj$;TWFH?F`E8U#NXPliN`3C zQCn@;^I_D6TX2EGZXH7l;SD5a1(sda`K--tT_MDka!;Xo6K2355%mr<4B{kLt9mRq z&mk)4)9;W%b=2fx_P+06vzd4vaqbH=Z7m$9O7%~|#+X8iRDSq-_}`@FiFoBfgTqLQ zpu2z%1!n#HP`XbHP`GkTV%4&IA@v^D#iFh4-v3wfU+YjXdnmHfv z(a;8@FnSTTOi?2z20_tZz4V)@@Vy z5Sh`sMOj^fl2KicsR(zm6!3%)HGTeqfN5~A1b0Y1d_Q{q*9du zEV$@m;U&We@G8wdV6=w@4KOU9Sy$oaYbwB|BqL%h5|a=V8G+hZtL)xck6HC*SqL&7 zJ&YtaH|iNvlgy`q$h=xmHp54DFpcKv*3A1+`t~%NnL5BGn&pA^Oo!tu;Y-*HgslE@#i6BJi`4OSS5@PC)?n-f!UwT*MvC>$$l0JsOA$A0apuM0lPUFN zch()Iyv1`JN+!Z-eR0#3$Dvv1;3z7XU+k(*9Xb%r=zvc3k0;6J z!W-=kt`{4@o$hBMUbWti{{H^nLL1FZAps>WvOO2Bi>I{~PvOny$#07yZJNCBw)ZW*CsJ(PtxR1vdIWCQGy*P4vO<67YWuaP zaJ{bz+82EJlRdaVhh=PU)X~|Z_FIeIL2-U1#(0LwR|Y^Kdq}9>kd$eVs1FKw77sxc z62kwU6N^@iW&7zy@k2@**7um(39ZMT;PvM7(=ONj%l3N;zZTyX9+BE$d`#8|JZ^1O 
zv4Qs31gEGauB!x2b=TrEooAD7A9pJ(3I{Wvmfow#IyOUH=+U)y_v>FS%Xk=hIJzGv}>EK;^~Gkl-^tAU{Q( z5jC_o7D0xA;7KQnPh{P5_Xq-9oHH>5cXTHBRDigblq)NDUV+q~Nr?fKVlrkYKd$d~ zF89yAzwL2Vq<*acb+Xy7Tr@(5a+ONKiFZ}M<7#2k^5&Yi7WGT#8g-Z5i=unGIU>As zSTgDxEn7fB$XI;^`OSA2dUp5E<;To)^hU5lUl)aQkml@7m3eMyVPTjD0m%hTni=tF zaHGM{tJ$hxtdA$1-tNZ>o#FSGCwCql8YzNWRcXO9v8E`J!vQ-~wGx;iOfqa3|Bi|k zYSvorLQ8A9JUlPf_AXWcw$w5#`2oP%5Xg5rHEB)h0SSDqtyeWPg;3{-x+KXfr*eml z)wfF|_sy1e@9T}%sv*I4`F2J<*{F(f`^5CVgTd?kq{uBKHTQYuHu*S=i9SXT&en!SYMXD> z9#U1HymRlyGpH*q$~c`_&*AmH(4EI)-;Y{LaKgd4Cnx7U0jlK5*fTmQq2Qh=#v%T> zo{RP8aHLPwICzvFQPE;W$}xeTB>c~op+-t+n75N?h%{9Q&9b|gk=e`P#!_%h_7p1RX?=N(*4kZ?%Xkv(F%93%(M3zy;nFSPe)-M_ zq?Fj1dfTQ-CE^4Faxl#PT5{XXpuGNyZ?$qcT2?tSWpe9;Tuo+bScAtfk3&R1K;DE; zEH1;iYH``Kb7`Bq1|1o4-)wDg^>^sdm?2=YFik?>4{%cOF@^aJR*!&Nf?Hw%idmbw z%kJLwG&}eRv#ammq(2dc*e z(6kmmZQS+TZ+5?R@`<*2`oEMlBB_fZ1}kWX7m-wG)`js()dU4CEH}wppTu(IwUtRwAoy#_B`Mvbih`2uT8F@WzzVyauS7WjD zVOC}29i{usIYF#UJm};1!`O! z!i;{8hj+8Not*H%DpL8KA}U-O#YCLgpBlLdP@g=*?{7b->glshe`pIbIWM?i)}AXY zGD=6ueVTKsEySW*%(D!LH@QDQ<3Dw9NHFE$_04!K*Y}In7t_gTXN$;<^TrOw#}0uM zlL{r!E9~aM%k;{QHlLHf9Gu4aKWt>*H~k3*VQC===Xb^bxB z6m$TTrQq=}TJmcecvU`m6gl{DKkZYjb$}jbAh$K$j*onzZzrh^m4((U9bOw!~O5}swg(Po=3`vFj)I4o!Co=3Gu*ajcSFLVn=#&KWO|Ee1ui7w#!zp;!sC7kNI>_({!TZ_jF0o>36K1yZTa9({(}0W^cr& zqs9XcmXJUGu2!#XgSC*-uf&+9p*|1J*Zn}LaC@e|{R<(#7iqOnupMLpaQ(yU6e(zm zM^jetEi_JZ)3@a2vznxN-1hU1ESuB{GEhDl3@!vy%-_-+3bkvVohNId55$Jq zinew_Z}+R$h+r@LjC&6H<}4ULc7&zU2t>@g>@-^Kp7`YLe@PV2u^|=9Y;@GR06A2I z83i9`W$)ji2}HPxaRrBcsY~u&z-`p$j4~JTCF{n?_=l4<4^eOHVE7FVjk_`g=Zi)9 zUVJYXeLc8r_Iunuc6V<&sb$t`rN!pLuA?_qfb0eoEO=skS&jew1%6uB)|yF09Sw)* z7pgw2`cmKwnGwl}4TpMbbCsy)>Dk-$&VAR_++a7fc)$B`v|-YKl~<{*06R2D%Zak1 z)zI&JU2yYtz}rE1Ws@&;XfM4!k5L*QZz@_$9)&!DM1ut1ACm>Wom^3%eXeIhJzcm7G zYl%AC9iB7Uz8l~mR*7a;qm$0a^^n{RWy)7BD()~q93NJpLrxLTf;2H4--eh)Hn<%8 z+MI6(Zu9*G)gJp~=^{vnl=gZa9Y-P#!0MxuR{LE0Juw{whOOnH`>W4|@=*fv&AAe` zZ53+E0(xuQtbyKkV*8A6d(8Z-IRmrW^G0D1&zC)39O!$-^#qUZJ(*@Kr90+H?s~_w 
z)w%E0ULn*_w=NhTO>DkQ5C3+^v+`&Ofkq3sKf9wimuycpf6$Q}OrT{Ug1f4@7ZWuejEJ5_gZND-0L9H{g?!!AhN;qb}`TQT9e!9A0@Q3M6azCqubX) zIk;s#HWLs4gRUvAuM=s(pn0i$jGr5bko{qJ^-Cd3*t}nhLr5V4pZ!2+cOI&gSe?YQ zBv-vAJt~K)4(N-(*6W$Vai9PFuGQfWAgYMKZ_%g2PVXOwQ%PFUWFLK;93OI_ko}sT zFK_xfaCDfyOIsdd>@>%kKiF8*WqMacZz0y*gG1J#f;6$*k`#12ESkQJH(|RLyw-Ru zBZJgyvQ^91ch4?FJXD@>4ei{*^)kGDU0HGVD zXC|PLIoYhNj6|wNvW(!H#iZRs4NA^o*k2W{)@n4oi)GEydK^*^h(P7&xyd_N`g5%{>Aj33ItY!exWh3Rk} zU2TUR4GB;;Np`J>YfB7s`cV6;vRl?T>ywjU;GIv*GTqUzr&H#rPewH$!z#$CUu_i| zLH1K{1|Y48eY)CbWdss`Lo}a*Fn|C_fM?sVKe!shwqy52J!T~IJ zhk((9pA?dzF2zA0%q)vwf;E)6^!9g_^lc-x`|0Y*%XA2-urIuDx~i(G`J>%t$J@a~ z{>RZ`9LZ0TOpQYD3O^ZaG1B;70kY#I}m4+R*H zSD*Z&n{C>rR<-2Ig7%ENt}o&?*EcCDBk0++cp~Ki{KlVQk{aRNC|>At+n@ME-7WHn z`WS(896nY>XrI5vT&@*s0N7nhMR878RU1s?OmYWAJ&$|tDGr9G98K7<>z2H4_>5a_ z^mAM5Q4!|#=wjy8C6}hzsW+aCvlH|`OI34l9ar@0f^QL9zFQ`=tw#EJF9>oT~b zAU>d6^1WepM`eMW-7$*}v*;nO%skp)-e0er5;ed>-*oiJ z#C#?!)mlCum9RD~z$rC|YQsP#5KKb+i>a7ch-5q5!T+6%Oe9y39v=q>VJ|sSl^iQs z8OeM26Y&r3pt2ss@u%1zT$vPYwt2o)f7kgCp8x>D-?*1Oy0%57loF_t8&Vu-=WGzQ zrO2Rl4Z7;bpxRc+lC)v@DUA3Nls+dC{OnPg@m(prNlCixdP1ce+pl<-tx#i|V4Vsd&gczZbmqOXtb z{%`ZfuLpH!l}2@;45*QOZ1mz@xPg>yq1Fdg*nThpQKtpyKYE@SLB%~0j=?^nZE&)GuPe-v3OA!>IZz zWdVSCy_emoExNHxVTYWo7b!}%udQv4J*f)Rmk4c(ovsn`x4@wKEFB%R{}uHxL|%|e z?ZMzzGyPHDYg685&4CU-_jB3%dn`vA1Q}IS7L!Vj@Mpmq1O0XpJY3q3ADRznq_VH2 z@94+=gwd|ra9e*^;JZc!00JJfyPv(gIA+IKkOdC1LdRnxWdi+fH{VD`YuY+*bn_(J!QGzel zs0yw(H*LbYIgLBp^l$O^w7~kAIo4hHDhARf^;HN zO>ge7>+nbyl~_Y`Yjt20=TBFZOq@D=WuMng{L6K)sV79RdfQuPg9tR;=}1r;==z#P z{4N&SbM1^;@7}*tV3Vne3x>I7&a-;aj<_%dY~1wyh9HcRGxX?e#1+|#x2Vja*3Xdhbd|ZobN3Lr1^VR5Z#6mY$&?C*Z4pg8Z_ z`R(8PQK;c(u_Y#^c*IPc|Eh~IKBgd%m%&@)B95LQH0AlY#}{8)v8AQ_+XgD1)R?Ev zj!4n>(#Hqx*1oG8d=(X+zW<4UR_hMwf=`guBiXpMo+%)UHTuU0>l)&P@MpdBtvXI zbR@^n%VCnCUyBscUC^Gw&TYov2dpA&*lC0X`EIulaO6wxyP^GZZmMM)BTVWuVm~`C zLplwX;e`kree!ptO%_=Nj`Q!!=f1TKf;zyRe`#^tG}y(M98WVbG4memlTp06+C}D{ z&1#p@25Hg~l)-Gry^Z!igwxPNKjDVqi`hMYY3|pyZ4=q3D>A>%EKv4q7=CnyG0WUp 
zWcUAOC~VeHn@2#csW(~`|JsAK@hIwlx!pNmQTUCVK4js*BM(fbA6s4&sq;x5SkV}Z zX5Goh&3UY2x1@cFHvpXDnqm4A3lUgt{WE2s_k{a3F9DQ>K5mDfLP(%5tPCzZ{2H~l>uRmA7TaWbeLH7y~G9dS@BnU^u|dlcT+ zvksdB+x+F&_{Z~!*j~mQ$KSaBp9@fq1M5`HUtrp-TQ*TGV-2H9#k5NH`cSy~z5-`a zlftghnc54%hl!Sdh9$5=$Uy|(#3LCmTKYx3cP5?-sm6Z)Mo=y&2J76K5|CxOO_v!y zkbeLZQ}{raZ%eh&05n^ZaNyrjDH$`T|2*xoD#GAbW{Ca|bwG;06&8LRcOifN{-d|| z2V+whTH_r#MWBTmh+w?`m1}EP3ULD+Ilfl;AQI0)&9^#IhA_FNeJK~GaZ?2VlDVKz zw+YNJlUuXA#zNhr({3%}KbrjQOExi|iq_9z1u$!QjM|-7^t@6*8?T`v7@Tnklmk-K zj45wE9{o?>`{nm;Z|$#dj?F;GLQ#y$LTO->*2W0^&MZ%>j?*!^Q##{@4`!xRz@T#a zkGldv@2^@F`DW_;;JR7}bqC5snV1c!;sfd%9RRfMtP=(h9u)H7{jE>sL1h zQXU$$5UWt5YRb{3;b*R`zx>P<9gQCE9F93u83=6QVrB|ltTg8m6|Eic!OB9Xx_X2w z1xPp}Poi{Gf84*KBHqW7?u4Clzjez4Q3yp14%dpid-g{^`(Sfz{kiLFD^eCNltT!U z98E1sf%ogAg^nRVyR-k@4|^Q4V~)D0F)i<-u2YWPS=TDn~qdx z5;i`u>%YbeC)u=F@(R;M{JadF87l6I4T@5{)@Uq6X$Pdh>{I^g_Rin??k|73Gd@^Z zBgMGD&RSY`F4b{Al}hnUj+BM#WHtU6W^7Gt0&X3UaFR$l)n9Er_73?H*2&J$p-_b- z=v>*|F>R2Eaf^*9Z90^{Y45hWB?L?~X?* zs$$iYP!SlvyQ>s6aPBZvrTDWBVJAcUj*DBmkYPi(qJdNdFvR`z)*Ro93w`M^*--V zcpN0U!Bj0rU%GMWGaEzd*o$`WCklTT4pVoZO6&!*PsmiII$DAf%6-bK?`^%cBc(A$ ze9MiKTjFoIv86t}YCd~qU=Ws-R0+x!6CL4%r`E|JSseyNr_L+tiJNH}>E|r^v@lD~ zZq|BU;PwF0AHvWjirbt891lT5nX=mG{Uz}Qs-;ZV6MfowYYlW%%&h(gqpc^TXr566 z@wb6Hmp^Afp+b3J%y&P2@b~}XwRa9VT3vx+XlYc{T2mOSN>XY4DI;exfYU8C#si4{ zlb$u!LjYHO5k*4N^dF^kJwezg_Kou&!k~s<+>1_)7B3$})YzhURE}=my0f>>7jHbh zQ9x`7uho|Zqd}>w6dHL$bN_qHjcYoh0{wORJV#PBbLw7Td?^8+YAyM-UO952Y zd9*bjmxIRb1@}}myiNcdT4OLcD0r|UZ-4UXC!aq2{0mQA9T=lf8l{qBQ31q-=5{wL z94P|fo5OJCxmw)Vq`OgNnqIo> ze^J1I?q(?KM}_b0*#GaXdk4cy{(2bLaP4E*%ZWST2=ELw_|w2BfIhhqNX*5g0;i#KkSE2FiIebSgAF&NWWGZ$6IcMkviKY8u7NA^$*#T_X#mQu=Cc|eRrZ3%h*;rK`IJsJaHS8-ms{!I2Eb#or zQ@e}(<2^TPP0;u#1TIG4n`ulYS88AQz-sl7>KWd^?RF-;2a&tWMF!)6`Ecvg_wU|& z{@OFoZmh4UwPUMF6*VjN%9WKHm)Ae@)Y`9n=BaOd?#AwehacX5$iX_w0;1Nqod)A7 z9W_z66mZJEYeu^56_V8Sg*YyWoOaDnIM-GxoFz$F$_S>UO36WKE`7RJzV^GE2*kJ)P&44hI0Co`W?94SA)pqiMalb?MYpmL#VtpK{yAElbd#*|Vcn~dA^l^2kr 
zDHr=QnMc8$kEbT`IiBZl=1^aBRZOTQx7Zx4iS;>A2`~w@dIo`8?R${)c4E*OmyLob z_ErAsqwRn2>gyj3FC7jFFBAxC%2LMS$IwcQltcKK_yBzTVi%+V3JAoiT7ea>p@-dq zc!(E3jOvrnMtojo%cDhq=k%)qOe0k2%}QmAKAZr^ZjdAH$QvzQUL+1 zQ5XnNY_oXv*7mC(ZQJ3cfewutN(r$c%WxYp;Le5kbIZYCNO#DfJp9u2OP{+oWU#`+ zuLbH0%ZjoqHzz^ZaVDIzX`z71CnNmxU*3J7s{)D$=&x>$mu>pp0~$ii;m%)s@$!aI z1B8304P91k8HkO7Uw{6}bJs7w{^ret5+dugg#x7koV&g^J_$nj2e0lW zJE4_JjGyIhNW$>=1h7fDx zE;;-{GF@MtE7D2b0S$dREtB@#o-B9H0kc%4#xaYTuY5T&m=rfd;woY5m5w~RHVI-c zX!~JZk8NhgFwEMQWGdzrkBll4m3}!MbQ~eHv?xa4FCXmv!&m?HI~bNLtL~JN+R~24 zMmbb)q-I=>4-d=zgVDjk_+Y;rjWJfEEIC%f6ay4S21B#5TCA*aI2ad2sj!v`fpKBs ztiPzr*^V>@cx31kze>=mn_@@B5MY(fqEi3;gD6>o7zo+JZCR@nYG?skl~Pn-g~n_b zSO4HAZ{OSA{;&VTzrHaPP)n2qh{CU1bi66@AHQ<#`QQE<|J@(|_?HhJn=983O&Q#0 zoFzkijJO!#r%Tyj^IH{W!u_k&O4+6LmU97V2hahaDr0giR*KSyn5|1!|A+72{B(Ty zpMT?p8z{!)ha>yXet!3{t`{`WLx@_G?v!mb<;Q8`KG;}fGzqDy!lZt-Vf<{(g(ECf zeAV3%4fSdUY&3fv3V(WM{|C1o$;ves{0NYE2HdX|)LZK;Y1OJqX#vtEig}=llj4iV z);M*tBnoS!$(&GQO+tP8RwGLQrwJ$&g_nK^oqY05nrrY9Iehd%j}!bz@6KO=Bh|DP z>4+-f$S0gnj!+0KRW13^Zu!5y`ir*?&1hrASqa4ID#X|k_Yds$-u8n>qsNcRgF}p+ z2}s468Z3m?g?A`vOG0TVMpq3s*4H;KuWww&VCA6Hk?PpR!t~xAREmqmA5Fh*BDw7a z|9`*V=c*mH0Gd-NMi+IHNaH^m{KOTJH#=8RYgCnXWMHXAhvUoF{^?tHMqA(iFaORr zzi_1}TzS@6FRXGbwpdmC`m>k*o4@_dzxSuV_}05051+m<3Z7G>D#lhy!ix2zk9fk? zk3l+^DeYC-VHZe<@n}pr<8> z5<-{~eCP4_Pu~1wXK-mKR;?=B_0EjJMAfxkp|F5j>n^HPD6PVXYI6D@j_a}6$?-MR zbR~PtA{fG=L=$3?xAuQMBTQ%C8U(aUSNMAy=2Fv-pytlOO(#^PFQ2+(o?{)SNHz2zTqDKrOTs2t}+C#ltcF@CP@4`Dn~5PYnbE=nyPQj2=GPzI$)) z(IdONCstRe1SITndc(gnCj=f$KvAkKbr<8^{e!y?_sqbot*>6WvVQf-VId>v7)pT! zRRxx`R^2I4-%v$}BNTWP0Ju}cl@1ZCDAlM`iv@;nF|NP!sdo>ok87g_)KW%J($ZL? 
zYft^@?VXSR(Rcpa|McJbm8UNm?{w%^Eh|nK4S}z2n*Zu=eQoVeU;pC|@8Zf0hy}`0 zRYff|1W^*DT9lFL`%O>=uOP2!$IE9DqNpM`Xgo<5MFB8o+!!WGmIOtOpct=Q`J*@P ze(>nMy@mMj&7N_8!O zs!FMZI_DyPy|LSywa&Yglv4Tej{Wv8@7&i_8LqOVQL$D*Yg0SE8+<@Hc2TGlu+}OD z;-b`fR^m3E7`zaUiDRmS$q06`M~%rQ=bvl|HEUJ*4ph9LS}`1#j@bmE&f`=32HKv= zx8At@M{j(%zp)8117f4Je7t@C?RW3L`~LXx<6=Bs0as{@_$>rOfmC~TBH`>2NJ1zg zVkkqvh1k@CgTu#LdygIsM&;&UC^Rm(yO93s5t!&D?-+I>Xc>t;{OaaE=q<+ZCIbf7 zM6ZE(@64!-q_%ZQTu%IZssfIO=98Vho4>gIwU?i}wjl=2yB?(5MIgZfygcBqeD21l zkGJ0YWJ?Ew19NB$)!%b3-ZvB$Qc@Y?t$LT9Fct(VOL+pRqBLp6>fnt)ss2vT2d3jPj3X7BCzRpg{;$1w`Pz_@ zkc785oum`!G1Tdnr&mYbb8!EObxR9sQO;SRS!#oE`|z*u3Y7Vs-~Bx@kZxyBg)=7S z)u3tU?CBX70kLzE<|o|jq>&0)24@b$K#u^d^B7ktln0R4@9zHZKYH`d@XEMY8L~8c zyE`A;y7%_G<)g=IA}huiz!1i{l*K)m!^@HyXV+8p~kc1~cGro#6~gky(}p9)xGEV$H~l_ zx2oRKjqV1i#6xw}T{2H@XFn%{)j?|jo=DDh-+~HE-ZjjD2@)OIJJ`B4mXN%YitZVt za}>&M`5btRMSv*8>Uy(jH@8Mh!-XnTX*U`?$7a&;iHK#ZzmC z7DZy4k?}UFN_4L!B!-&DPI&S=6H^!`knkcB`Ic^B$)b)G8)b=fMF1I)qz6C*$=G!tR784k0 z3W!NCD)8P&|IRmFe{Bp~TpcouE?&Iy*4xeHYYV_of^(P7)mz5zLXbl7A3`B12=3CC zXn_dDgcL%ejyMQ{0ByT@eRKWl`mo~Ckgc^APg5~Nav92rD=cXQ{6|Fh9s24IlJoR= z{H@48>zk)T8Y!H>EGBC(1(dEEtJ{V8+~&q>FTeBYhfg0|sw&US##W+s!XQGk72$KI z@4I?^^UV8~SPuY`nCwW!Ou%3jDNOOw84&+Sn2)JTksmI0L(xMgz(sGoOEF0f7_ z1<_*BnE>vU0%4|P4kd#y2adUDxA-}Gf7`a^RdBCMukG29&mM0vzpIZ?$M}Adpzj5t za^By(XKc1MPCt`p%+m9=U7g!Pb1TIh3ujwl`X?$U1R4xxc18J*zWeq!uWz+WD;$s3 z-+K4@n`eic&7uT%{GbTvNpb=h(wVfS%b%}(zv7t-#Qz|kY%*RGNm?VpLd~VEkAC6V zBY)#(o_=P{s_}JIgH`lkfq*7xOEotWEw*jL4{Nig0*VsGQN;%&4|lRl|@D-T|Gp@YoNrD5e#vAOc2C z0#KQZt$O!H`%k}p{;hRst6^YEQxF!HOH4u~o#CO!ABqubb*KiFz%(&6$${BoTDs|F zvBzCf4)e#71E%)%9^0C_(udnH53yM9trVAiYoN}@P9GN|Prd95D6i%s4uG={Prdku zr?5Zb+rZYN{JTA$xBT?;JC$1+l!WRf@f1o5EhSk;6wCv-Y2o+Yz4DJ=dw+9rpN*Rv zZ@t@o_|bx~^1Q%|Dtd?%>YWtj3YIG;>w;XXoh$<`ene^k3R#hru(EdY*$40YkN@-! 
zJ+Z(3^h3u2@Xomp8r47%ty)7%jrj>IDH*Ke4sB+e#XP1yT9M$Tr}HRg+vbe!z+ei4 zQB=f&QbDa%q+YnV)x7%hJ1;)|(Atm`sEal*m z(9eGF4OLkA& zo3)0eN)ghsNgugGOtV;^Mx#JXb;ZUnKDf3d09ru0^B_@fy`IVFUYBBRmeVs+io3Ib z&igPlj&*!Oi^}_z053~3A_uuf7q};rup}srik-CXaDjWw=i|LGRxT<=aiNN zOvL$BWn`QJV32oiPX5j}-n<}-*lIW4{*YI$4J~Gdqx11_oqvbxlX6EyB+BBh{$UCU zBC>oC^97N{;DR6}5HQsRO#Y*v`{EPFR|s7WeB#8~slx}~`{2VXH_W)8Fo-9siNXJI z;F1d~nE#KExxf09Z+ITJtoUJXf|5vJIe(E`RnbagP$*%+s)h3#<5%B2`^-Z}*Ou!_ z=;m{a_6fFZDq;zE{OH=j<$d2d^TC!|3WO%2?D({Jgk-{Y64hLmfl8!_l3hNN)+h=A zfQM5NDdISJViF;!=ad!_CTcK2We^SA7;jFR@o+Gx5j+q$n&b+zYhIw{3KkGBYnx*$ z!_S;p9;WQwc?J^nhzS8Dkqyu&>l(iG;l^*jcHx39S-m8_4BdN)nqWpEABBapLy|GL zdWG%tr}ixi#4JXP#YfyHwWB4`MsZIO;}lxp)W7|o#oaw(yZJpy{KG9O61HjJ_A24r zAC}i_hZ>m)KxDtTU9UcAqLu*hZP};Q^Fb>+zQ>yV%Pmm5JwM8%ASV;&7?+Ai!FnDR zv;koQ)vv$&-b?Gvcw#r+ez&^5HDCzBK!&OY&9ycl;7WdpC>(UPznCLA`VqDPp0dnY ze(^sy2nTlxf<*0t>G%hpIPw?1^yz`AP%5*5;q-x}ryo9k^ZLd+AAG3QphZAPiNjLV z5D<>X0CWcDJjgH2nVO`e}ssxU2M$J{aUdR*nF|ZBN zj33!QcS6!OCy6Yl z!JR3cZ6^E|zx?HF+TRBI?^xBnD@@C28%n+{LExVLIQ#sbV7dkHops9j9mR2mB}0H! zz=ZPqAFTiSD;GE0aP!aDh*sKD4?}z4G?ihQmn(KiaEi0H9zCQHZCB z^lZVs5uF`b-;hoyh?u!3ZPSzp1IYHO^a54rUi{R`_0)rf^EXB>z4ZELpFDkFsV1HC zYz^*8YoMs8P){5f9$8)b)*ByA1`B45R4~(+#D)Qsls_)Y0+s}uG|)m<_PAolvNTtk zv*WlkB}PavM0A>vb0iz$hWH?jwwvRvstOBIJH{8fQP=!vO0hr?Y#}BlY&p=$(?^#c zJUAfRxSrr{1XNN3)tZV3Pyj9K^;W)pZu4Kh_R(9{#v{b0l!7tia}^#!S;Cr1bpl3Pz=_&e`Tk;HgL zU(e_ulpaK3EO4uDe2*d?cr3wm^2DhFKmVl{1`#S#n4iQ#*mBUvPOLq2^w67coxL{E zrm6-MYg+&jjW`XC^wO@e^hmPZ(Fd_p$sm}5o*;oT3>Vh7 zUVY`==bw1+;8Il?U~0h(Pzn(fM=Tf~T3bP*-+AvsJ6N@ri=qK7Niev3jKNG&$O00k zM)c1VA>68-^mz%m&e-BtGfxFLOP`D|OvV8)S5BCasU2?7YNQMzK;3Tr!0G+RmPC?zFF6jO@c->5c?aZjSIX4B=FAt&yE}C;qnZxq zOnC!j1XAeAdc@}AVb0qbuQ}*!e*e;$pi_^}O|bKcm!xH$Z-p-&Lwc;cQd*(Pzhq=9 zYr9P!-d#(~)9(yp^wP7a*&vi=4duHZ-gxtJGrD#I*RL;9svuP*p{^Jr!_FWQBuM{K^QWsj|w9Y46Y@j1PN6D#dQ3WKlIFMKut3?RdNeg4Rk-DphICV9WczdDZ^L|Mt7DUfODjU=mu=ga)dK2C+a3i-KSL z+=GAWkt5Y;WE_kvv{oIkbnCD#=bqhUh?KBFai>sIFT$ zv#$T-yXXJWx6ZzNd2_Q~wt8t|Ld%EGME^0Ob!Vca#ska+t!(~vg(V%cnrUs#{z0f? 
zT99NC$+I~<{XJ@McghUE+}qd1m)Yxc@kxnly0_SWVzxqdFV5-oEwS^RICk5E*W-ly zzpd~BO13TcbjYz^e|SpbsobYxn%~dQ#J|k-&+z^oea;m7HYI?)JQv~1(ME~?i#!+2 zxvT&qaJl9G@zuB9yE0lo|G}bK9R?=m!b3#!0S06-AoHE>`j5=@7yN!FCv>3!(S+c# z0pJIH+u`N#SAOPm`-L=xpoM81Zxuhs7pSZ~xF|1t>iCt5m*2X0Z8BIi5uha+*X0}O zs-xt|0w5g^Q)q}IdNh=miU~>^tHUBWl|@vUxz?KI+3d(b9aisLU4Qkp51xDCeu_jvB*Dqv((9H{ zTB^}1kVT2~-D^))LT3e>t*2R#aXT50$AhY_M8psf1d+L}On{&ybbMk#Kl|W;C0a0} znfr{wj1pSPMuRsmZT;IfKl;j7x?i`e!PAKX$@|d&TN{Mu9z1$uWAy5|^KGa_0H^}V0nw>+Ojy=m zI-M5CI#V%#l)WguV~CX@WjdchAxQsX}1Qa z6;v@@3i{~rL+@U^cy3)aEI4}~2+z4G2(-+(ml&{*QsR2F-Y<|$se4GIvncbM?nc_t z6=qaxiL)UEjke?QM977DK=ou%G3UNAw9sH%j~`lk?C4U!#Al|3v?4c6&aZF1a_+|O zpZVye_phGY;3TYwECvoL2#8kHg9#j@?5ooCHiOwMa(Z+DD1Pp|fEbUSIK1@Kk%ihk z5wxTzzb-_)t01>NC~vE4Bp%peP%x;-)d{}-&gBm; zUD2kg>tQ|;q%tAcvIX3Xn{2EdN=y+qJzwNblQmxn7&G_^5JcNLv@bk)$xrF70n+K!FF@(pp16Gy;+-=(ptOJ8rDFZqpO% zuNUh!D~A%LP-w!QN?0X=eJg+P{+0jofBnjT^%sBi(fg`OTTS+ms`YNPfSp{b|N0l6 z`R{-0yI;S$-V7J=@rwcGM&!JL5(4XwV#!2C2cmM5Ck$G8&v#iDb5&EUS45i%P`HKm z;^vJ}+uXOXJfMUa1FWP}K$tX#53iVDjC#~!W5R12&HEo+{ov}x#f`~&%O(sMR-_t2 z0|7BrKqYE+O0`F}w|714WB-^*(1le32C;U5;}Zv0D}lD`qM%b(89yvf;yoz^k3Us7 zv)?nWh_`1-Z{=a?$4)T#y!$xXR4uoPTRtu>>TRj%9sw=^VCsomB{}z~Fm1bj%tiS? 
z<`g?K9Hy~iPFCU0kSx=WC0c ze&io141y_huFw^@Spihu-Kj+OAFPN9W{39;{``+Wcd!ncOs4U}e0!_FjH2;;a21n9 z!BeLWZEo84-n-n^D=jR&)z|D;kEZr;9#PPQWH9Z^>M3=7M4aZn>VD#*WkrgZtD!y9 zLp^-w()!D1uRQhO;rmx=T6CTC9GMNJ6{)}jl{|Us&@1npzc^A0wGu*##}d1fsO3mG z9}DJ;2x*mBhO;y2I*@=1QDRA7mnZ|Goj^!-hl>G#kny-?@hqmao8!@F(hRD>K!UhG z5Zcf-3&D;oFWtD>eE;0&TW_y_^S$feK6m4-8_mU$Y;j2ktB?U%8O+#%jX_&6jVb5o zS~Opco?*huus~#cJTo^HMFcaz*fa+Q?F*;(9~ej?3)&;^z5R4Al|O0imOH+O<;(rv z-}_TgPH*p9;@PKKoVt5g$AeQpx{)a8dY+Q-%U}ML?m`K0)D6z|;S(y;Jl1R}mR(;v z+k1giv(Upfdk1GvkNR#;@urEk#-t4(6@&9C553yi*z=C02YrBFNS`edD!A-mnI?~8iOQ2L_4apqLH7DamF-v z4^^cG4mlW}{piYTubq49Q>XSX*Oec&64#KJL7@S%e-Iu&dFZ9r&t7e73nJ18S_;kM zq+Ld%#&?}P_mo3)JqT$O&&7u*XCxLnr}*dyga6K{l$;;PA|2@(L66#?30eX{CaPPL zNhNhv*A+N44h#cSSFUfqeQEvd_4e`@>&B7PDs>2TV4zws1QX9OB?Sy2Qn&*9BN?Xr zCu5F}j2CGV#{r~C0IO#F_~C`ek1a327|EdL3iLEYI&b^k6C6Tx13i6lw=FwOr)D$< zw*&|G-0qz$)1cZmH}lsR2$o5G?|xjB`O-Zj1M|4`-5W17nw-8f=X=K&Y{L=UB8WRY zB#FIsZ)*Wu^A3uswcz(JP5#l>-@MsODrhBjjaoW4Ps#tH@cqP;njhW2(p`&IgT6fg zVDud|ArWM9^N+vy=;O!thXUpz1{a=)EOCYI@E1c;7idqPTHCj@@X{M^v|$JiK`6%n z1i@9t?$a2RUP=4hpEamVOq+4$P4?>_y+ z@x#l*U?iy#|0<-Ri5Lzp$U~~6}1#|&w{jNj8BX7!Mh69V}V$7p*&#I1sc*cgw-O|w)D7NxF1f)l}HP?^*cLJ+Bl zpccSjfzSj%lEQimyo-NM&&13>$u>|T1IGAtDwIUcRtL?^7a!braG%;#v&9^f!N{ zoFprM+_Slx2WtPqUclI11BjhR+tOQinpS7D54apadeCj$?#}C79ouic{Qh^|J6ACb zq^_l|;7NQVlFy$dImk)}zuLtIWN*(H4OYI8i&R7u2ox(pfA$N{+`mwT!gSCds)+qN zgpm^A3pfOxIKJ=xmBm+He`kyZWr!IOKtDR%++rKUT07EPMhwUG=>uc=lFLn&VL-CH zIvSpu(f|<^W2vX(ym9lp-+TM{Pn|xrS_dFVJcVAPLPrTZxLhAQy7s*@Z;r6i z$WX`%B+Rn~xk3gJ`^TI-@wBgUlKb!#{gi?L6rV`*QKH(YoQZm`I1Q1Gb2J%mj>aOP zszR*P00tmBT7?+uo!un5akQ>oIB0I#&+?W#SEY&RY-n!NjwgLlS~c3}@RI)EBL`Ol z#SETDJB7Mi*47m(=U$X)oV#7wevh-HdQ;cSisz!~^4Hwf?~NR=ZO4lnF5|_G?gSuV zzk+asnHqTc1O=JBu>a`T|Je%Wnn8PdY0i6M$JX}p5K|0Lw~M_o=Pr##CEmHw{=eUR z^ZIx)kPxJfd7`89K81cfSPbAQ6giS||48BDUzQvbkX-%srUjlzJYIl4Od=^yyi z3YF;6x7hTc#8EP5=RK_rKoJ85&`0mzcYOcBm(IMsWka)p+uTIWshvr~(&@iovAphl zen_80FthfZ9y-37*K3(RnxGn5i*B(h^}>~_?f1TS@!3a?-oLM^Jo9EG9AW@M5a7h> 
z@YH>4FTZ(igOv(l!5DxF7EBD{2NHlrSE0D-CIS#cDN*$&P*P(qI4aOm8>+@wgFJ~HcibmAc_OBn=Z+hqdV(Tq zrN%a>==yWVSD!evG_+`Y>C~q^54~b`r@!2V5-}HhEKFX+)0=&Ezt1q8y_$D#d-~^` zL74rU`^@b?u=A6g`IYg!6M&eorB4B7iRwL~<#WpJY(#;OXQ}jD^xnk^+_jPse@q~d zGPcl6%wQxZlz)BZqp!Ym0km2w6(KEZ`JI0kbQnMMl>4Ei0U?Ecl_Wy?BqDl^5_I_> z2m+eXQz!5LktdHXxPqOXC8Qj-6x#2D8&gx{%rF4O?14ke4wHrI55u`w9cvOAleO2bIOxFgJCy5ZZW6Hh%nxBZme-TFof{qbpZDCUXku-V?l0QW=(C z2qgc&J=If8?tD}})#^;mrM0qqy|gIYJ)F~n)#l$Ll2r}8IDqNbXUjd4VmLjGyW{^E z(pt`AS$FT1spjtJO7#sCy=a74yP7l1kbqp?vj6W}Z=c_6{P1#!RH!o7k16eazeL4@ zq{I-I(e%V0*%2k`|2u6F$Y%7pCr>?dVy%XAxxIm6>PNy1KAIXYnTX{_7+@#X7N2_X z=xeW^xir#oHBhPCaRmS&N${MVn{JYdO9de)kKRgt{q6&l^mma17Y#H&z>ETcp9DYT z`O)|*-#zpEBd3lp4?+z6QWAs`t!vnhuGB9)d=#UtbLT(W0@_N{ESa#~yho+f?MXR`f^bLufn09wd zmSJZ1HTnc$hy6Zvg>%35e{XQZWM4`dRAW~cNlP^Aea&;KYlIu+;$~#U%R@F1>46@n(tH022DYDA&H0gAL7mVsaj5{tpbRDkRf zMfYaW6XqcyS81uOG%6;;aBadr`2E+PIeqfjfnlI38+Ti2#gqb*+U#H@&!4*Q@#BZj ze{lZBjZMmcRt1w_BB;{I4af)@B|Du)F4FBpy~|e;_V4TkNuXdhYb2jxma;}bfLo-U z&lk4BO=~xr(WW&JsickqS)3(B`ktwInXSJs?03b=*@=(T(Z$xb4%M4qeDe7HgFrK- z$9!d)mnS1@zuuV(i$sw4bBuC2LN$j9-6m%)s(3CT4_07do7g zKci(BfC)s5^s^&GG_VobY@5x=c+xfqW&w%jutQG0H9nQssGHHWO!Hl55(!p-sOdmA ze)Qq{o;tKttEri~!xN)8WbojLZYulj+-?2AcI-3y{Ppa2%v_(kRn#=66SM6WD9DPY zx@UI)^xsd!q~Lx4FXr&CXq~Y$F+bj9%>NO{V>y z_ArP56DVK-0g$V${o`++yRb11!D?3U3~%gmer9!kiM*4GPg3J_NWbs?@7Q0YJ2s%K zkD?4!sE;gGUwrcTS|y&xGvR;#W-)B7*Sqn=1F0-IWf7FoB?RRZ5gU zKh|MY=Km6$1@je&X{s)Y7e--9u-xVEh)y|7aR>@*>3E|VkF;$)d90=R?(Dxwq)K2} z`K6P-Bu_%3VPXXJ=41Quw@AAa=yp-1`_5e!J50aT%2*5*-T96%u` z7Kaau-B%yo!_~@7wdAWue(9B8w8yuRbYrYhhvg3|I(VBy>xxj z=%B6Q}SzTQX71WH(=anF# z!kK(Y3E7tF#pq8NABYQI#l&!ALH@Hp@#zKo)<5{pxn_06!q5n?6kqH#gES_6oFti~wItlHy7azO-_`U&56)=$;s{sRg$$!eyZTmbUxZ`DN zgEJj8ggzFYf^G+N(tC0W`Rr;oC|_jprEd^n7OVTV$vtDCy_9WgDl{TyRf_nWp1gS^ zVG6C@3Z&h$v&`fjD${Fk>iS$6!sdS6x$M^SXu~UVYb93MXq1*_FcI4r+glfJh=jOQ zrPzWHkH2TH&S{h(U6)GUfr*cf=q4dXD_6BiAcWfPU#Tk^)Y86Bs28Rva6O8x?hhn1 z(&;%fGYr5?sBG;}75>_vd|`Fp%HR6ES2tGk 
z+hL;GvYE8T@%0yuF8si$wFNa0np1^Zmf4u0C^p&0=N)HD#-uYlH>1%xbZDEholaS+wu_}KdUh)L}6lTznz`?~~Z4@)3X58ad4_DbVz+q|EfmFWs-HJ5VB8 zEY#F!G6?OymH*{meE0mNYk%#}{osM+!TXms-a3Ev!j-MD)WC4Sp;R`(q$L`i9IS?) zJ#qT2YuDbsd}%{CsVX;^i}~panjG4aO zpL+7-8cb-d8KABjiYx+WvbfGKcLc9lT%0AIoNjx!hS>iyi-*gleS2jto|{5M^ZC&` zh;`=_OO2#0jw47%oZ_*QPGQ62^7|cAWgAq=6{M4E-*;rlt`!An;<^9Wl{PUj;!K|$ZT=T@s$_dO& zXrT%Kl{M}0gZmFI4PU!>;iG0UlD1_agPMRTj~?9}$)z$D!kJA?XB76z6;e!Zlm_St z!*jBsVr8))GuEXb^_vopg{0fi)l9*LIx?-ap^XIMdLJg$zb7D4S_3t+8WJX(kF5=U z_Q}&ngNSJh7#Ym7={b{=W~O8WZn^AzCvtlAO_d$0>Fn?MmdHv43J>{A$4z7x7KB1iryMtis6L1_1>nv=zr;^_%OPZ~W%({rG2|edgE+ znNU>rh8d928In1r%zXemJjES~)%;?Gk+D0B$cH?Kdj zy7+U?KJ>ssRhveM%_yGNk1!L@3iz?{)!yIV?#YY?_5jRWgwT@!=2o&7Aem&8J-0j6 z#GOa}{r1^sqhR5ee)-ECkJ3GO=bKL0_T$uE@>ATls=c;w`x3xzPo&Km8A3A}-2z7w zh|so-zkTM)hg((!2;-@*iG%c`$C-?PQOZZM^_73wr3WBWddGA|eEA6D$pJY%oCJod zhJO0g+R2p#_iO3=5Da~`IuoDXh!IE{0AF2sjgbfd-RBh|ssfQp?DYN1Ck`F_>UUln zi=dLGZA7Y?LH$5;n8!35iulzC41r_?Hn{UkqTwY&P|8giD8Jw*9LL}Tnh7eR0%(b_g6O+)G2?CK_b5e2#Ke8+vLp)ctnlWZi&1V9^(^Fe97=#}ICq!CXpmQ22@f?I6 zfGD(3gZ1Y1XZA1r`4=93V5zFaU@a*z)sivg3(+ap&os_=yRi4ilGiFseLEC>|+^XC@~NYnS6QjkiTH)5Gwx z8&rCe>OHlXeI%ZqMrUxvtx{1;aea~J)d04lis`vO^nY-JdFxx%JZ7aAMD2(DP1!Q zfBoyf^Ulqd%fp5Wk)csU z8bD!&HVy_-hT&96+@{&vC*EKhl5YI-TXE3bCXiS~qXBK6DDknX2X9`wbmez{@5df{ z?3srj8c=E_Ow?4RRRPc%p`ZX&YpdjA2iA@(4PQTh<-&~{8p1>fKv)q5(~RA-Ilag7 zbt}5WKAA~#H&#i0K~7kiISM8dGt&^nS&N#+>?@>6D;bknn2Fo`2~Z)Xpg^^@$?D|h z7autM6Q4SEpr%;sspYg|13;LujdDmg__H+NPGOVTdS*=Kvv156E56K;0DK&a8=Q-6 zW_r3E3_D<|9*CJ>mp^ZB-1fl!7dQM?_@%%3E2U0hYU}K+l|4c>+bhmNDN%IZxtVer z*j?alfOp3w0Orw+8kpE0BF5}k3!GhVzWUBptLjjsc_>bPp?G*#)Y+u7zKYPWQn`cp z8LuT3Q16+p>Z}5iuq5rX$Mzjwtf=`M^Q4$aSaYU(^VV7CsF|2o5>a`PW}gruNCikR zJ+^=Gu~Wxi{_gA7#%$|4J?rP68>pUe)Rh*A8s#Q(NCk>1w*+qqvvvCL*?BTo(|dX$ z0A{8kgh`|CoI7`6{o4J94=oNC2@-Ax$jF>zOsZZPmaD;`wf(D$gVA_v(oED;gn)`8 z&z#VLAmFD9SZ|Tj{ksJ04*E$TF@|)<4*Cs4K1{O298nG;V>=d4E`xwUrE;-Dlw`0v zpsF_(h zUYz+d_wMZV@^7{-&OBefxI>-O7t^IVTD3iv+d<9YmwIOcXCUrid)qw5`IuzhD3@3A 
z;7!lTEyIO^^506?jWqc_VKU4`s_K892xv@s_2T9$mo{0|Ih(VGb;jM!ta2_`gAUQh zu-IlWX#x{5;r8SpB+Aq&R!{efCQ;xF?kUjdyCOj zpyCm12GES=c`{7v(Z7eL8jMx0xk9{wsJdoyntiRB^rT$g^xHj#C+$Tcx$cc(e;4cHdtn`f`TcU=+k(JpcC zdV>Fm+`Ie@&aPJZlEy#`7$4DaDSZRQ&~VWvBi@ zCbooUr}N2^Xdb5$Rw;vtSOD21|1vQu*akFE!m7>9qiBET;Uho$?8#G0Rb{Q2wTSdh zz#d9Uq1bMn_LLZf3Z=Iu?tF=OBX6h~4q=yfw|Q_rCutor?7iDp0d;d@MgI#0@E!|y zgzy$>+1q0V3eCV9Tay&;(`h63R`!BG%et(8Ic-3jy}0MUfedPJa9zV3UP*rG}iN$0Ho(YoyBq-)mjO*`JNlmz z;qJ=O&z9}fMSEn`x9iiC1ki(UkK>RiN4bk{x$KQ|Pgw#0sJRUwBAr{Hrnd&xoC^E< zw`+Gw0w{)rvonwRu+As)^C`eBl|55DrgM?VsQ91|YJLtpuebc_2iLAPl>|!2N*Yh` zDOtB$rhfLinJz{(bmRcLi-?j!VknZ@g2!h{lN!gtSIIP=bXqw#pKvQiC)6-CT~p)v`mz{;p9 zs-i+8bUB1WON&R>*4FCUCX;qDqQ%n{K|tKfk<#{uXr7Ng_9^fy$!!i_Pi-0{2A>E& z1fknO(UYqc|BGQ;#x~D%4phgQsVk<0)Qr(h> zpPjPmUSNN3V1_@N!W%oiyB%_v`}7RPV0k?e6Bc*b$1_u5J3GU82iX5Ty&o5hrKtKF zTa&%&Hm1wo@u5FnViQhZDX;In`rTUYRmJ}Hzuh)=erI}1Fp`5nZ2=lX;JY{VfBn|k z3(ca4^H=QGkakwba`hJ@kt3d>PhpS$FiUnoiY|aDSIbq8NlU7V&BEiH z#fMKmynkU|WnxOYf2+mi1Ed_Rnn)`_szo2$w8q#Qz;3$l9s!u;;nJ?&BkJ5Ev%lTD z+W|GV+S`x-Fb^xXd^)Alg=8JoADNgz4T5re;V!V@x z2b-?{wjuy_rITqRCzG8KFmXpY2{dKE>(`sV{hbR}v6xJ`)95|X=i*9)pzFgd82Mgz z%1Y*OCE*5PQ$tI@^;13w5#L)1neHG-8lWye zMM3|&u(tFxEwQ;sClEL}QT0e?N7F`Xi_vJM;_>?qJpJ%Pk3Mi>aVTKR)@nZ`3)(Uo zS~Wn$eT}4nY&3dhee24`#-+)|#(2^QOa?}_X&7m(pV$C%Wukfd=akARo0oF#2gcb< z`+*t)NE=UtO{iN!+IG-3E2{Sm!xQ%po;vk9&<#13`U5x2=daE=I_38 zWrKq%1%ef;w^M8|BmC{xP04~4^e|_}cLAay zAcA2Q_dWWJ=5|t_`<$3DxuGCR1|Vskde~AfHNp@b&0B^`WBh}E`|hv(&bL28g*qSv zV<1J1X02fytfGmVDAZv#$4DTe>nIGFgmu&_ldcGz6AkTFmm+t8$O>*NEew&;kg!(T z2}h&-iwlpOJpRbB`;Q&E@8If64GOfiwpCTiq-u>;Nq`~<)kGByu+`f7c=FNK%_|$5 zH%2!nI%y;zRGw>-zJ81`eisykWa!n1g_#A#me4E)da4FktJ(%bjpkr&PaHn*+~ET! 
zRu}JIk_Dm`fhrIk{C%DxW-s&mFr)u0dUgA7F0$D^ROe2W?U~0ri~ncK4idmEy;b&L z4pN$ZXYa^)k3--rHPleuok80M`^SrkL7u~d_lJZtU$$>*w=y-@cW;>{CMj;KZqqPf z=Fvo3r1QliKSf%{p|Z=uS+0qKZl~+HSAAt<_CxEFSQhsk`DWv>_4ZJ*@Hq~Z%ow_E zV|G2J{vt)Bc0Go>)MR>V>3Y)EIM${J%?8>Y4*ZKhditT`YyZPP_!n=ji!I(qsat7G zTC-L`X+tDW$-_!4w(c-??0Ppo9i6f)U-_e!o;P}mGbHs##(T`e19Q8+8pzNFa8WLs zUVY=8Z@>1&(r|ESZRNp}M@}6*bojvj<-xEP2_dww&{ANkDu7i|gR3kwwn=7!{G}pKX)l(OyJ@aU}Lc51&pGkn+60|!%mUerUTiJ=3?`KSy`dL1>d()Xw z*?Ath{r4TpTuwi~S;`Z;^mwPTHb!B9+!S&vqr0TwhRF*T;4OqMqqvpFFW{jSa5g{e8i#_WF*k_`Wqm-jM z-;6M+t=(tMmmXX_{a1hX*Z$cze&f~48}$l?F0)iIPK??nbwu=-gcPfC3U3viFQ4eI zhZHN@z3)r4KMF1jFs4P5pa`alVKqj$iRRLk_17<78GJ`q2lc__*9@@7dLKP8@F4H zZd$Xc?Zher+pHECIJtON{b&;~XEpb&+q zw&Gy{aVkZkk|(MTGeMYHS~-9~IivcRNfvUC%arV~XR83VN1K-F*@4oPIA=ab@0%di zGu_a<+U8ujlMHK4dx2;Kc1eHMyk5XK@5atB?JShd5W4<*J7M~c_{?i!?p!9UH|OGQ zl^qB`hRIS+ANTnXcWfah4FBrv&42vPb*q+y1U1_LGGFZc>GsUP)YaZ*wQjzMY|%^m zv*3|okm1(#UwHn=W2>Q3cxFc1`2 zVkXfTz$+C!b#bGV7ILGJfB22}e(j%s^+POd4wi`!%*-ZjWi4Q8!6*tzHV;oH*X=rJ zr_CtmF49_K2HrH%vPe-YSP@B`s{Of=DXTJE{BTHzb1FDdU78t2Dit$8mPR1M7j*Lx%(L^V0Jkx6+ z)u5I-aA{C4Rx+%(5HJu_V1S9WOfs9R?*VE|qJfkZg8*odq~!7u$=b6iUc`_kMpw8= z9@V?A`MLG7D<;WhC%(DZPjkAsGnPv=WLxGg5NyvG-$L14BuOp8%+CXe=*PBu_l_aX zz0}a{-tEc`v|+bqSuWI%)sdYgh@7+_@dvb|4l-k_FrzcyZrMyP5rwV=#RjyDjCL$m ze5N_>nBDX}()*r&P_il-pdHP~0!HGF6A?}PRpEydm0ffPm;#m6XG)sJZrc&KB^+llq zFuz&O=kyZESkOu@>c}($en`7%jOsO-v8AD^wGJcWh;~F#)hhA{ayt4Hl5CvN6SylVxY)Vz@CgJO|DsTSwH#vOjmT+iM_+shw&F1rx1 zj-p!LYiX2C8#NP!!lG^{VSnUb)7&;n^gGp9v3R!|frJmsfZ4R-qQRjcTGp*rCQi_L zK&I09B_;;i14NPBQ+*UH)lqOkuEMcsbrz6lbCw(==lp#&KKIb-spFsj%~wA7+rRk- z?`<|ZTo$QXxW_77U!gE%3-}o;mI1TWgF-6GUah5W%1XF8Ag9-F+Aak>aZ(nX(Ki~+ z3LhtED-dM`)F!ov$>#M3mIi<6r+(yTf9#3FL#b40s!^Losv;^_s5H>SA|m|BBNcRP zo^9}%i*qU!$6E-%gAby;BX=X>c8fm4S&^6iEoZ4C^O9Tym{-Ui(ZM|_vw&Ft-;7Dh z`?llW*{*ZD;i9g=%<=S}>8bng8TRkD2vpgzcQ2yS9VmNkWg5}!>1Esgk&He+mYZ!4 zn(Zrcgzu*;C5(|cW;1Ks$n+;~#>|VB-JGk@&X?T#FYbTNCl1U{&$&Zakg&%kxj`_a zO>_=F+uD7KcZ)V%^<;^LFbiK+Y`G+k`K30-IT;C52KxEtU@bhg$Y1=-@fT13iU0k# 
zU;2%g-dUH0wyMAZ2@|viW2LK~Zl^PIY9Hh0MtIMbb`atKjx^8aX* zxps=w!};qwKfm4bpQ*6NVdn?>^%I@e^b??;-IA`zsr7wli_H-N_W0sn%9L7dhlVn< zawj;J=_&3S$*3*vkrb^1_tbW+YfB*IY+>4iW$pb{n%ha-c&V_k=m(C1?d^k!&b$*h zJAuMwGxNqB8<^4fRPQ=LtlSp40w%hVju~lTlF(~(S}Z!(xvD0q%-Xax9X5ffsVW7w z1Uz%F{u_Vx2mjRPp8k9P>Kk8qZLPa=!&uyFVTq7S@zkY|iv*m1OHZ_j3Tf$o&LI*|% zbedJx9w2}CXw6QM`RZfg5T2aN5aw64g&_b1x zv{gP=7HM?^Idk*zwnq9A$%DAM9MlvW8vqh5pzd#DYBCvP>*kT6{@fRy`}xm5_t1Sy z0czF)Vg`=_%<7~f^IA{*>gG7{_?gk@#=B4b86Wrh#E5)ke2(vPDr1>y_Ot-S(&{^f z<#qW-5c^M+c`V#*)-zK&0?Oij?w<&wz?#LVWUI;Vl`EPM&L?(AJ@ z)zqwCd#B#*cQM_bGZMYwWQWCy`%&AU;)h|0E6+`39^J}ktaAh(?wZcISX{3>g14Bag6pbyZ=S340*YB8^QoAvyW56G3_{Va6&bc8kKI|JSI1O=0~3XxgUG>$>aN$0H9T<3H*eu z4i6V*)!7kF?gM?UAdhWn=SLjHQq`7L2a#}mW`Qg%zx)u2>0?q+S_aUj=)@oCGH1Jd zjC%5xDOPpwnQU#n12)JVf!S?-3Vm+1U7rgNuseb&t@wT$RqjY*_XtBN>D&oRz%Fg2 zy`69hqU$}f!WQaSs9Y2_-L1kCozsGR{=X+;^}ELYlL@rs?Uxp>fi(4ft}~Tj?v?3w zwtIp=QaY5>Dzn+e)^Ve5d?3dW&>-qAL#9H~SWyoT`BM+=|H6Ym@$v^Z{_gL5>)*V6 z{-Z{=7!0+a^b2VkQTGiEChjFf3y3a5n)UyYRgTfHNfwDX)6$el>WZ1Dp{0>#o`=l< z)fO!b0WGTL`pJcbKlb^j|LhMx{K(;bD*>V^G_~k-iNc4nKUed|(v>eM1!*^irTIS9 zkOS|4PKGI^36{QLWl7eW;sti*5+9#(t^K<~2|i}&^FLmu2R9C2@yuO9LAN$1RlC2i zm=Vh80N;&3wqK`&od5_Ev6gDdbIAaR(m-vEt=3Jm3j5TFeP8I`3cV46c$StZ<7hRGZMgFW;NrHgl6fj z7akXhGw#VqmSqQ<>T<`nm&-2k`OJ0Y1;_%_?2wu4oc*f%fj?87yX6s^9-mxy$Kgk& z-~51rM46fJjGtqU6immMn88Y&nZYV;_947*aQM{cAOD3Pdi;$KZv5_d-u>EZ?|lE# zwVNF3U>T?&RA6E^8InIejR*kM)ZHq(jA{Vlspm=|lxlzgI%#2oK=T93EWo5yPPUe9 zbYgY+OP_h>$DVuW^x>t03mig;1|sPI^+;B_cxlYl70ekq>jth{9X?*JoSE?`jLI7W z{4o0B=3FgOx|UYqHmpp(Jhdhb#1AjEhZcGm+F23V&`pDh>fA-!yPR^p*8~>hqx@RWI zWFs31B6|{6MUh1oQG|qmghkmEz1J(EqJWAhD&oSG9fj*vzy-OoL)ak#fg~(p3;UK3 z5J&+C!ci~&mC&-2*rOn^k_FSTghRrKN0)41Ob)~)cm`(j%{`Dmv z%wM$Zt;NgVnE&3q#VZ!A>|DX#6EQb2WzufuG6yG#vL$>JARpYb0RR9X07*naRFJQB zRv4Z-D}!5_N493e`j<8wK4|mN!*_nTYJ^|Bv0C&H442+ftFZ+BD$z@uZ%(Usv6oZXc2WTnp|E2q4R%tigrB8`Pkz6Wplod!p=+3b)Ko;%bZr ziu+fhfHW169J3sim0Wvo;HZ%7Rwc=>>cFiT4e0O2Y%+{D8SXtga$eG2U9`O8ttCs} 
zThac0TjjlFOF#Hv`N~zDd9_lhibxQ@NkK?UGPB#4wYaBox6^cj|bf0&{pBTRKbHZh*8mLlI& z=S2hHtKXY!1E|&wo09~816$y*lcdMe<^LqK?l5 zP0EuMB1+a@N&Ay@-dv8+X-*92jxEV(W)Nl}f_ZZ4vc(jUaR{wgtvs?(*KwJ+wbgAh zuyv=6TA3&p=4ZiI-Xn)@D$Bxz5=ubE>{yeIGVFX}Savl)kAnt4@ZrrsqoYI+*jEyJ zZ@fSSZ3{j{m^J-wFEffbobXd1Vqd%x>qjCXfQiUAdNOo8SmOG;{4bCx|F@Jx|EgX1 zecx0P*A@2xgeL!W96k^$uDcE-6ty5qtyNk7XqRjSHk#~Zwdv}Es_DhB>Qoksm*Q~i zhHN$)p+U)}%}7y9a0_OqV9gb!!mJXIz?OfaF_3EMRzlw6B`#jj-=udbz{TR7%(ZM) z$yE;@qTC!2bf|?8da~Wn>83aV4C&F%s;cO}m}J7HnPL0Z!YC6muDkc-xUm5D00FH{ z24pD$8%(+8afr*fFyW0~<3zViOYJuF_=4~@-Si-=qVI{r|KkBxBEL?E;Cf;K$>Z1{wq}N>rN%j45?Y73qEj8LbNkY$TK6I@na*yDw3~B{| zh)QZwvK&YhI3|qxgLS{MR|1>(uQH z5p-Uk(^C)v3#6!4sf|)!y+^9%$%Z8v#acnQv074ly|xF|V}$i*u3s;QvUs!U2li{E zgQ*CJVi;4`N>C~>t5g~DJ?R(KOdJQGb4?9hm33dP4MEhX(kSAxS)pq}xone?h1u3t zp!`fQ#&~h8I&y9@Ldg++Nu6;5qi>y!fyhq_)2D=i)*@(}GkbzfG-r#<5g+=ktj(`9Ea;Y`z)=|G+KyeLxo`MVl z0Hy)dwqw6)_ZarFFKa3dLXwkG5UY~*6HULSYhBh9^6k)C<7HQkbX8XhP_h-oVIP*r z-!@r|Cq1>*0gYI3Dn23*vce0IiBqyDlUG#Qq?$_q$t5)}MD~`r)kOhFu-l^2IYi1N zOtS!2Wv~IwgNr3((yPY_50_S?Jxq-|k+Fuy2sQ5{rO-qY5kw>>(>O~CQa=&8KRIXs zN&5+M6q={x^<+_V!@g1YpAwX^x4Rfrw~LN|R6zrx3RA;;igo>O;1>T1rqvh+?L}X^UJ5C}0uU&T zv$I0bz)n9o{b?U zC~fGP;DR)a0N+$tpzx=`pwdi;iO`IM2}N0`0GYHt6VajW2aZZzL^^@2bubco%g*DY zkt+^M=3Oc^A)JFg68pk|t}+!zRAdP?P4NKqBhcLJVIa7%sEXtyk8igUQ zt)t7bZoO_G92Kf=NWa-~4krYf0l+gdQ4Eq>2?%PW{giLj!oPOh=`xPJH@+WUiPj9M z(G!P-Uz0!XpHnQa4P@GlT-b>@sX!~gl`YiR11Xh;Yo%$Ut^_b~Bv2fQoOx&Cs$=a1 zqLIcnX~l_gijcyweB-Io`6i>OAr8|bl~VMNNxYMpiHo2sH;p()toOydKKiu=gYD!_ z3J~KIhk|}pgJ@Lv)3|iAxa52#*eQjHK40WDq4V)$P(!=!U*CM z#VU){7W-~PigSSw>yxyl=hypUJCT7(I&GV?qUxMjrNkXwW75>PAsGe> z8;E;H$a-A66iL~ZF%pgS#E`K0}E_yhs(3xH*X=n#PzLCkFn8@rVp|dJ33~S%#?^K>`xZ z^FWAsHo|>^1ZtXuA1b%JeUX?Ix{;DNPDp%GO%UA!D>D7X$f;)sa1M4xUr8P&A7uk3 zPa;`F^`m}TQank&Bp$z7enJTh!qgXyKk--aQXA0XGLa)FG>Q}|W@rFcgp2U3Sa}dt zYH*@qF-SHfP(^Z4BSUssUw1s9zN{bFsD@+}u(pnxtz8o^wO7o+L?siCOxy^lk=j?+ zy)-1EbvT&*G1)3e5$P<2?+vAs@GgLwkj5V60qqHx8$J`6l$}v!0$RbJYlrXG^%z6cKfs25> 
z7proEc)dXWlQ9H{6(3JW)Bv8g5icwVYWQGqD-~;u==D$91ld(FQLRAax|GA#uGH2; z5(Q*OnKmTHU6meptA0WocZO#XV_XWtAK}jIF;ALP- zbx?`V8A8{_;}^JSO%GXXFH~lGzjO`yL=gbNGv`8kG#FL;#|1s$pHqTEpX9ij1Z&uC zi%iMM5-vb6?5GHE2oKer1?t|)>p(`;%`tuS94OP9k+4_@k3ZlZ>dQdg2i7nLMs-IqzA ze#sga@Kd0HeYF_V?T=hSJp9^P=^-T7s_CSenh~Y4d2ztuIu(D$3hntc46+C0tLFaN z61=pxB;6ph*qDbsD8LF|W`I2ho|X_Lg8|o=oQq*~H6TpZVRAjCJ{-9TskF!Nn(Ye! zGkG9IhI(MECw>sRZb=p5ndiEm?oZD63A1O= zZ+?5>r2P-L_WJ9xa^V~&s+Aj)^b!#_YqibLEh_x9AtluWxRMa+#33D?MPw)B$^Yew zD+V!?62uDuHO>T!>{h~KM$Z+N;J*}aA@G-AW4Fx0UA zMjdG=zcnab@!^sOqz(RUe%Esg1X40W-Yw0OXhx+pSh3maStk9Iq9^qIOQ?gq#xOgv#736V_4JAt_pWgAfBp5Oq{}KU-A0t1(!q6Af8)k_1Ks*rGK8jD`P8C7g|HIf1n-F8 zl_XnoAap%0ptw|fT2!cxM@gyN!&-@FP`O0q5-|ZjSb@5N%DTxR$rx(TDh=Rtgty+B z-?3_CrP}G4`}c3%WYbNFh(JUcb5V&wYKbY>3PJ1{#Bh%0&70TJ(b3#g?p$3dl}cl` z9%piB+#OtGt6i|;RIVsoMG;L?xtLOqFm%c}q>U?=T=F`VOJz=+Fpti9w1)Zvn128L zU;FA;^P-SXDEKr)t5YGKi@mc4ikFzky|eJ`_uhNY$?*m1xdj>1O`WqcA5m;}aL)B> z>9_GF9|og;*3WU~6EK#jWPa_K=z!wp?jVZ`;0jpSQ>1G^sp>A(g+mz!N)&+F=9H4G z=d=jLxDDG!(la4;ja6H%$@B+I2|I!Rxut=uVWoE8Z_?c~qB2eRQjh|BT6zi9#xP2W zB^827puH3UEfqOLoY1hr#M2uFuFIjZ55A#%lJbz6$2rgwt5yjLvG;kYoKYs&#*Ry1 zwj7tJL%p>|!@6G8kt8-_3^_jShu@uY)6FEpB)ciM+`P|cKg%ZNM)|PN9dkK~3K{j2 z@4-(*<#JQv4B?jMW@$)#1LDycz2SzUz}cLqa(SFNk%*mQM@n64iy)^1F2C&ZbI&^` zYzAU!m&|X>% zDz2jIvd)aDs|pGxu=>d?u0v|{WVdB)J6$Wbrmlt{Ys;S~vT2}&5C_kY@Hq0I9B}L` z8(A9SS?kEPAL^q^MBqZl8jQ<)Zbmt^5mHYtq1rjLKblEMi&F~I<)p9OjCU_w7!{1v zma2IU@4@hJjQsQ;{b29CA1Rfa5eU3S^QWUG50!PzB}$Gd%lO=L&TCt~j0ir@LGa#3 zKfJ7fBh*fJ_7VW(KBi(OP`TU`TLnJ- z*?k5N8M+jSiD)Q(g@N? 
zU}qxp=`v*|N160k7UeTfKX=7nt_)n2Ndn;6AP%fa*R8GOZaU)sCg=X5pb&8HJ9ZCJMOT+q@2?cJrRK#a+TwuF|5brbnW}2E!Fe4D6NE;JH#u6|JU3=qb7UaV@l*qww%9(rA&srFaa@@ zQXzV@2n{-s65PqMz*rS+1o?+y5@#~3AntWHWGar13qMnkF)`SChWFLhel0{Gcn9xI zcJ2d_BXr6JTkIwkBKV@H6QwYL1IQ6&KR*4(2Tz_neB_AYaSAmKlRkk zN@u2^apYVyUPLK~P2#h@#s$Gj%Q@7qQzm(nq~0)F;__F8LTMj4cBvgOAx^@Se2U4X zQf6&KK(yb!`@A}L?(;9c*sp)fxN&1Mmw5y~j)ICUa5rfn2G_*;gQSO=hXZvZ0lMHD zIfrzZwE(B^o^YLNwF*-vU`}|wnMIREfFabe&jCvuOHw>*#Ee6N%f$|dZq}6grC}`x z@6gzFYP^E)5$^=`SLl z#arDm`0h(@Mi^V6xVK(+Cvo8!`@!{Hqe!dF!eI0R9B8B2fx?POcD2;;$HwD|k^70k zm+O&8$R~qy_VD@Oe&rGXJ@ms_KKyQA(mNz6?!k>BQ@Z#9%@TH@jg=zm}aozGL9al={y?V662*8u9E|L!Q$98?g)i z*aUtKFd0B)8!)!peuv%n*qwbYr(TM!pP8-S?0qgugCp8@yX{1^D9o~aG24`~7!y}0 zCS)>+ih@k!O$iLPOgUOP`m~Zzr%>vp<(dmI$~nqv4Fw7|2S?%R8kayF8|i6jw6eP- z1HCT90TBW9GBd!o6WCC`?+bUe-L=t>sCb=f?yU_JdihR~dWz-5JKLG0m>hRfBl>Xx za|6U>7nF1(zpRwOd$#CLThOx4loLFQR?!rOH5jJP*AFw=XXU37Spe{W2Q|5DSik1X zgW=Su#nhmpbuC4rt$h=jE2T~v5W-@pF@aPdarHHS`|8)evfFOEnq~k5Bepdm_6mV@ z$LdwzJ>{fi?g;>M&OWzaFm)mJk(ElDKV#em?hZ=JJK_Mo!RUqMNz;5Rv8cg6P!gUNg$X< zz)^lPTZ=z*cu)|4Ai@CwI6gyPWpv^1oX#xMJz z`lQ#Ag+)+%?V^Bs*(Y0AYpyAg`;sf+N7o(g8ce;JuDYfJso<1Wu+fRayQygtg*943 zhxCHyu%=CV*M}*{DUs=koSH7eIC4W1m6H-SZ*OY1#qKTA9{j*2H>6)#7>%SG=AMK* z_qtF5%I+Qu{!FA%E?|K2YV}*+{MO@-KT6I4U~|L?QMCt^ArtW>7ys_17hg(~nn_LR zl*X^R&=?FjbhLNOescDcPd@eXD=)RRt!OGYjoxs~*sZqOb=O_D*lG)M1X4vQEZIbo zl!GT_&jP>p>g)I2cmKS9%==);l2t2L4Qw6siJf-ZbML*j7`ufN!sC4G90Fxj9fAnA z-#K;Z2g^!XDQ6GQ8PViJ4r*?0B2zB}FERqyFJ7|bk%u3C^B-@%^6H$<&I$no`VSZ} zeB`*XWB1x~&tW4sK(LgvA#Ct`5~_JMowVG0$?y;be(h!|mJ|&qF_TX+eJq-#*>;V8 z$+2rj@BPC03-6ycea@V@3+BJIs;zz0s8J&~9I^fO+kR}PPi(Z&7*!cSOzbIlpr;;x z_Qe-oCS>e=m8x(!f>y~4@=J$yK6YAqY>MU%WRQ64qwk|@=Vl*U^3@Oq*7j!$TZe#^&6wumBK43+{j91BJ#f4zl?$X98*r3c*iB@ zgZCj`H>ijj%%%*&QMA%Ta>&4Q&pvm>mH+dVoc#VDr0v0TM`>eChCg<2I zFO8ET%v*lR-e86!_#DpB(q&7}JNNuQ{pC;ZzPkjhX9t7XGlMwGvR!xG?b|1Qd-CMT zuH=-}Mm2;S3u^$w|9$E`=bd~0jOo)&_*S}m`Jeybvh2Wv4?h3b=WM?Drdd|Dv8Vcx z0f*@7zy0;vYi;E5>9N^gAjAH$o6!}JdVuUNkP`s=R0`l_pE&3dF#t$;m1VK*5- 
zAfIK~&O7h?%@eViyzv0pB;j_4V1a zpJ3=Fh@b!Rm-~HopITV-Aqz;uT1|26o0>xlu_+ecw%H+iuLbnF<%XZeg&KVjzXW-F z4fU+us6ustoNDM7!-v=$Ph8yAZa!dpmy|egjhG53LXm!9X7Ayx@^FzodkqlcJWoEJ zz2{s$Ip-dp*+Y5-Wi26iNEgi}xbN!#)j@RW%8(uzl-Yy}?4o*nVjJMR1F_B)Hrx4uDa@P8XzWvB7nR!>9LPpZ#8D5+ z8lJacDU$6M>_rX$)=CLj-*0g7|Ag2R67izTm%C*Rg$E@Xf!Kx3T%}=3HEvfhK`G z*pqMt@|1aU9Q1&^GG<`8S>?VE|Cow|4T0BJ45BD(s(+7M3}|O#VoK_X=-nmn{q(12 z5-}0EpqyDhMTxoluBq2u_qQ+r278b$V#9yyK}4qu&%5}rLl0T_&U|Z8n6Wojt=4M! z+;!KTpZfIP3l=UU5Q#lni8rlWxoXnH&;9j^D?I01Kwu+!h8k^cEB5`&XXea#xq!!n z6zt3>>0V(rw#d#oB4rb^9)9%aXa4;C_m;p!6Js{=amIKE-brxh6<1zy%E_m|C-RCB zWlLTgrO?c|fK_(1Q8J9cDPo|MT~MvlxT7&*;|7jEFFgO^E<5i!Wy)rZ=}|xKh{KN>JY-0+e4|v_*D@0)XwaA3CGDr}z@VZA z&@+{a8o=PU;p zRls{@s#G@_*j!?G_6$+csb_Pv7E(#zl5k(Kr2qgR07*naRFP(+b#a_t6&!`O0yKKV zG33e+lPlx0OD~%}`x$2C5QzBUz|`5^dGbl8!1xCN#~gPYkSCB4avF#lKz!LB|8(l< zr*`;uRS@6+*VIzpY3H5y*>}ROpV)2G$dOU1hWDtxGUuhk4?m)_LrfMV>nD73 z<|7X>l}NW(!p4{SCKVvL_m#X7utiyZlnGMIH?TQ!S!VmQXz#uE-gn>6Anv&}-6qSj zax-NPBzsPX!5{|^F)qFA(tGc_PtAT7c26L3VIM;UdJn^>K16CEZ#uE8 zionFR&iM=GPn@{_f(35{MJ`4>2-+f-FKo)ON4{v$qWvdKnm>O&n26|0Up}s>xgXg= zWaevbxLy~u5`PH*(LgT0N99Uqe$BPlCv5>>qU?$*uJjFv2s7iR8>SfiGnbXWc-*nf zn%%eFO<7+j3<}{oB9ow%g9~9RM>QfBe@fzRJ1~NyIt;&e27`x#YFi zUnjldKC;utC+@!=5E@Q2DMO_>FV8*sJ0~$$RTl&UbmXyLdhwMx4?ptI)Ty^WIBUkU zFFyZ=OE24a;|~MDvVtcbd;H={{wpL$0OP)C4_tlC-(apFLIx@kl|K2&J%01si>FPy zXVxPR-*($=C!O;B{{06b5w`~qd$GIwccngbrm4`b2=dAGsD zngPi4B~2PZ#%I}cZqEu5^S8e7op;`O+k91mf!VNOBYyD1AKre)oiiVN@b0O9|If3} z+jhGhL8aI>^O7a+op8bl;GCF;4jVji|A|>vQXC+6^G!EOI&o>ufifaTxc9#M-h2N& zwd5KF1Tb~#-Ah0CAbOCLNzvlPi)TDEQ?C-X9ye~c-FA~0HDj_-SnjhmQ>cjR{_n#= zK_PW&K_k@iwYnbl^6uI_+5NlreN6!AF~Cwft-*!Pg7g7$&M~>FBTv4A@%SqXZ=U`X zkY%jmyOI^%&%$9bHRw>%lo>cx_GTtQ+DN`R>?Mkphd?J9Q2fIE!Yz1WcAm(CJ+t>z z0Xlp>sHHT#U*^1Qi?N7|1{y%0Az#eIZx@Q+ZE0FCx^%y(sdUcSXOVL%53+yisoDSa zyGzv+7G7q&I``H8{PnMxJu|^&$V$KX)tO~iLX@e<>26gsN&{#7>=&z7uS)tu=U;g4 zU;cdQ=nsv_ft-Rxo}lSM{q2h^^0F=zmdyW z%cb(~|M0u%)9*du8(-UV&pmef#7+|@O+5FUvtD`eg`IZXsVL*CWMgO&WQ79ENbK?e 
zbf*<>w#BAD`RUJ&{nD3Sc=7o=Zl7}ee|&kjU3U7=henSWIc)6Mtqwc%pgV56;TLE8 ztXyhRU7rEqp_vaZ{on&LcoY_v(S4CnoMTW9${9JsGw1OGFV;GCwnu`EeFI2NmJYg_ z3P&?%JaXrqw<5t9x65w3e)_ZfeE0h&&3jgy04jxQ9K_Hl`N)boQ@X&7db6x* zwy^TB;^M@c^*?Zg=naTB)!A!2ft6d-Fbq}=n%P3hGY`u1%9z0|gGz9g_l4DHw8paL zS*V3_7r$yBHCm+6=%(paE1kRSy7QO6@+AOf9H3vG`Lo4~7Fi}Y4c++mw@+$YxlAGE zdBVQ?PS|Ik)PEv8-UurJG-uAMx7;$tOhW|lVV^(jo8R~*C?gH)ClzoRqehRq?DGE} zw80STvVZ%Xg?HUOReY5%y)@^+haa-=e|2L2?Qd89+gFYw=NvQ_wXC<%9~yo4J$H>8 zKduN*Hgc~j3#-v7eIlTe`j?1s`j39_mp@&$`Q{(ah@B+=)2%BkddT-q`|c5=GQy!2A4?Oa5!pq=P0-m^5L(NfY*)xZl1LChWW4#0e87PMkPt z|4I8#oHTL5{uB0_xc`Jnt5$SaH5{0?)fDERMB%*P!V3!63w-Xt1FrbftG87bwakVQf)r=nk`cRiZ-J=Td(tY;pxz zxD}_jdeU@n`$r}Nr&qdpIBh7^JgNJZdp!@~tXxUd5KYlgGrf3=nA&Qu+u1axZk`o7WvWF~+VJNmfSVyAPgCMMXW*_1o1XqnQN8!EzfP$MC9G(u> z;TgI2-m~{UH$H(^7QL3)xM&$U~>#nU-I>UAWa_+R# zejtH;jsZ>e#Hy4qe{9E(9d^*65ucf^zxD>1ojdQm%li<+;ehO*$&(L0^dOO;y$Tb8 zPo4>bhYbF&OD>7BKM3K-Hd6;Exe~#o+`&xuVp$`SbHc>>LJ<2rBm@8@cieGbl-MtC zp4qqFoL`{91p!c%!FluM+NE+&zL@A=JXjeXH1_yWBT;z_fNb3{%Oq!r= zT{Nr`i78BtR!SFr?N$HkMe}aJ#KkXx{LvbL+dJ?Fo?`LTQiB2mOp?Ncc{SW^PrP>1 zBXje9!{1)vUwHHVTvws?&2;vzp%F|gDphKCi9M-E5y*V_&R*hzSQa&ViS$T{x7ZKQ z@^B0TwY+Zuh90_afhqT`?6=sUpYy^jg#3v)DzBM?btny>(TLWhk87$idzJ~PRy!Fq zV#M$>e)2OQ1DME>%dWfr`g`wvK!)13V#O(^e3u!L1un}>V^I9Wk@_oP!w$>_35`af4>goS9d15FUK+L4uP* zC<)}U?8NV!C{n*U913Ft@aa$QwbhnenXC^$QUfXJ7BDr;H)f5<6d=r= zJ-fZ51IhZ^XYYMRj~NXj*6EQHals}-0|yQK!eL)<6mlvO7>_^xn4tdAM;+y|l2e%s zvmTlCkJn#^#>NqWue$1LlKmxADmOL%=8}siOq>Yq6gpP7-+sp((Aa-+gef;o$@7Yy zR>1zKqmSZb+G^aQeJV3?@zdAUrwv)fCH+CEB+~O%*Q>~w_M}I>>ghuOWJQ`QHX0yA zI*QOhmFSs;ZNIzu$(7kKl(UX%>FHOOd~SzLN)`{m@TrZmQ5U!lXS`S53l57P(M?Jm zubV2%uq9eoJH(VzxMK2&86}{820~}0W0S%CNA%AeazSY(wdNum%-5))buXch-6+i_ zoM>=@pr-@A`OU9ie(7Z|zxs+WXpiiaQ@%Ir(TDo??|0_U|8wCx3xaGsew%UM`Odeb zrmBV6$fyYB@nZPgzx47ea$Q<}^B-@G+iKh9=8}e5g7TbmpM$tst@=D?&R4El#S{+_ zSn}@Tci&&K(U=Y2eDe(>lT76LwYKc|@sCPds!=KdG=n%1fKPnl<8xn~W1)p4B%KUn z2yo;Qq+$rVag15hfJ^3Ck3Ra)j7Oe*_W3v8dgJ}~-s@bwI`>sbevgb+wXFm)n^Xc& 
ztyE3qu}hpxk}bll7>=B!1XV)|oNbwu4bK!bf_bmbB@N7#YxMD*J|?_| zbmXGnBrXeJ#~pWwN2D?nxp}Yu!?SnfKC#nILoH%&SE$RnjK&$#Bg>r@(XnR9gLp@)hW zJz!A*T>~W6i5OCxiMl4%S7AWU0}#a1KJ=>X(CK1b7l2cS5JFo1xo#zJXP|h=BNMh-m)t9Z-$d4|A5aO6u8>lxpOc3<6jP% zG~vI0|6c(Qv#fmnIp+>+9UzVPT(-!0(&$(V1!PpKdE3giWZ_YzT6yEm*PUv1KA7J% z84ia3W;>~l}SGa@CT$a++6k5tC!Z~gSf)#rh%R4SEA-Wy_uqqAVT zD8L|BDp6Kq%d`W6w?bZ;R#YvG7tn?aY-x`0JJbQ>|8ovgMvXfB2!!xdT6Uz~DiH z+Ey%=3BLQTscp+w3>rKbjCrrV_S92P2_)F_q)Gda95u?D1{7za05MKLxptIry~_KI zignhqf$I4_z3oAu{@wF{o`I%7{ZU(I9{?w@M4I#g#66|V4)AcCg8}cjw36L_-#Y8T z*WTyWksjU?e5rZvyG#G^Ui+vq&1F!IObol?bMqvrV4{SiEwSjQp5Q%|svuDs>_kWw zSt@GulmH{8`64I@A+hUXCpF2P0zP`cOFA>3vGWo}ljmr4?xQ1Hn@sJqEM+d9T-g}b zYcT-@4pw*1;>3gKK_K=nH6{pRiVK@~!v6anFlpl5cTI)teD?kqKRffv%m3WDy2I!@ z9dp!CpZ@fxip|)0EyR(>5@#-CzEeW)!9%xJFlYc1 zV!l+0!__bb>z*UW@MKGXK$eKywi5{U7CM7!o*PS0D3vqkb7sdG(NRYqefBx$>145g z{ISR9&3kRjEw&<}TW-FY01pqKxvA-(gFXkM0Rvn2+I#OiZ@U9vg6~|t`i2{C`totd zQI_3t`yF|;3bRHXa@fJfG^5Z}5cc~feO6yf(mibvrAQVLMKUXE99r7rUiQ95&~Gg9 zr1DwvaN|eUR20EIF{uz|iXDRq&Uto}gHR!Ia31u)3kz;~^kvsNBzNRJ7^SS-{Qk-% zkG%BON5^bchAv-Xb{0tyg|0wc#I6~J!Kmapvlnt9+XIBK8x2o@!NqUZ+X#^$beTZM zoG@|*dGg3}${jmqLZxHN5d%jJEfXlAdQ>|YAkRE<_^?ek*+g^fg@=ojm_RI9NK;af zNGJe%^DYoDeAsYnR08MRg82&*0GMQ^B$&WVpalySI?C)!N&@_cHrmL-d3pRcTYdav zJ3jg3Y{}a} z{?Avwd>j#9cir`*Vh{$l4w!uKL2Ob;1FY>ca=pGZfO<3vcVs}NNafSkyLhRK`g_X* zinT|~scV(H_`WuU0I(AoBeHW}lLiQ~DiR&6$cZ}>QCIvmr=LHVrC`)^> z$!43JamG)6blU06A_}WHO|+L^pMBP-(IZ)>6$~z9xP&ll?UZx0*(Mvm@y0()(kEZ7 zo`1o40|&J-5Pa-N(tbD#?lK2O)ZI4UY_k~;+#f{jN~Lq!wCM*=K2V<{a1AgSmCni| z4?ilj-k1}@^T}+)KV*zdx^dZMm$!GUB0H;y;Ep)*h@buR%uP1iq}&uOX$0JG?M+7= zeT-fR5*5|YP|`{EAv8o%nyavpNIGF$K5prXIBP92hm$V}+itgA7zzW9-1G;g|KLZb zI!^eWEd~AVaL+yW2GLud89RP#lnn_-9eL!FvuDds&$#Aq*L?qbr~LJAS5_*kC8UT! 
zU;grO>H!D#ov?3Hb5m!B&vRz@S+iy>T)3c`^X#W*GgPRWbin5Z4IV6Gu>sWj0%|99 zy_B`0t802vwzb)LatH!M^xpTN)*kk(_zcxm+LNBwzhzagu?I>DogR=U5uXLgTaWGX zRhLyW;*1t|@C7$MK6iP>rDiAefWWQ_s#2ECeP`K=Z!W7U5OdPg?6h(yRn<-#NzYYj zCsaIFIcG`J5RPHD7`{R@C>nmiKtSc1&>*XWCC6M&x)0xj^J?b?F8|oZgPYltI1aSK z8tu`08{k@(E}~nA@@j!(t2;bI(lCAd+b3?n{YRWLL%h%)wS=wySPA4f+Upq01DDk!hQ&t`QS_=rbKXq1`quGrT@L< zxXnsU&d$z+8DTWP0u>YYP|}D2hr_Bemjg7+2uut+L^8=XpMC!M`|qE5>F@ublMQSzbm%_&eg-B3v<__j)Si2JuOscObk2J8(HpM4 z)>o?%gbJp^kNAT6T8#D7(EK{bZ_i0^j|&xA?6x#`tiD4!23r*lMrOKdrQ622M^6<* zjxMNpSFod+$GW@Uee+P#ZwVJUFg|ubrbZzEX!HD+2cEzE#WyQ01HE$qIdV*3RN*_F z%i2-C>yde@assmhd2miuMzFl1&X@+ZV-Tv0s`t?SSDY+2?P`EgLJ=AiO$wKC1zmVP zyl0GwkrUWawPWn4fg3bsnfGBwluBI!kw(79^-*e^<$)$qJSSay;x(19U@3cmO4*

V0eCqyltJjM zRK9+~xAMFiq;MiPapJ^5g9oYOlECmAerUrz_Slt;i4pLl(@tNoa3Pf0yMrem)URJZ zvOqT?9Cp|tUS%LSqH}+J-fu6yM26=a4I4UapMCZz=rie`2GqZ10((`up^aT!U)!dG zd74zSqY(Zd$xhD-*Zu73-Gs|}$BT|eAyCmPmn;hrqh8J7Sj)?o$_Wj<$*$6jA{ z*?o_;w>IUj6Dr<|bkEL#GuN5%{m;#rx1_yF4xE8pZoH*9s)D@eeGWo3M`snR$>sq? zb+Q$eK#OgIa`KbohWi3^iu!><+p%+Avfnn9eAdcsAKheNnb}L;p+wU3l8UE~MVM|T zr?#OQV1-qe8*7t~hi9T~wjckU6Tb~Au{d0rPdxF&t;Ua|s1+!E4kE19E9c3`&I!gw z8;?2W$Rj`+zwh8Goz)|bI^y^LeVOkR3zL9SJ;O_vEDL0{FDkPCe(`^L!;29J0-%{P2U1?z`{ArOQ4rMI|zQ1DXDHuDCfpSg;rj zYHb~0hz$nMU$C%}=MrH>x`_n@>g-&7)zw!e)1eA~!YKjSCsvpr5{r7gBy^bs6&)Zk zb(H3zV{f+Ed#_qEr>tk!RcQ(;tPuxEw)wQh=?-0v-s`9k2q?^ z%==UN>zw=Hk4_6vv$^)i9D6iWOLGqHsi$XyNvw8EIO2#e7_@bG2OfA}b5k=E4(^#} zo_=T1JE}~Pn|#ngE&W@x(FQJ=t_b$pm7*Bf;8qVS>0u$wi9~RXUh8>9y(-j2-Cgqp zE`HXsSPV#LNo9*5rI06IRe_q`SX#OGhQ}8B!Ok_8DJOVMtjW=YS}F2ma$)!QC@ zwVfT%5Q(? z#O=5L$WKl`W6Djp&U)nWnKK`{?%HclIQ|>s$B+HvpDqQtQ@{WHv(Gs{&r_PoXa4-> zS*Z*ohy-HxKmO4Vx88dE4^I8zU;ljNb=O>f-US!!yWjqw*=O$s^WPA3t5CZOP0F-kg8vJE20UZ4EjRn#NvFh3c?1A=+9*lN2CTAZ0k zL%uVPqfiRZj0`1W?quIqt%5TzQ9xqDV3&X<2s>=DUd_O3E>pN`PGAp421m@EJcB%U zma9v5`S4~#%P!CBHjTxzjQbn`da6}~)f_UsQy@%nMUe>7-t*uM2HidN&Y6$QYH4ZN zdhGZiLx!j@NWuFqG4+ag22)9itdc7;MvNM9^NqKBW}ki9+Ey}GO;60{%zNeRbAD|@ zkU)I~z*Le=ra$@d>F+IBbl$n=FvN|7@mp{8>tCJq-BZ7h5DyAs;PrpJasGuD5Fjx^ zL?qib5^t=kEH29|{Do@tF&Uay*S7-f;K_&FbkofiGwcBT=AsMlzVpsK_Sj>{&>_`I z{=)MwJpAy3E8E*_zjyRD3u{T}eGPVjkeoy#p&0Keo;r6An4-kaANKKm}tr;mE^}n0C*-VYqC~Y_Zv9d+fD45Ul@+ z0usM;LooYr9jzCdEHZ=~Tq+`yTjM)*rB|#e$acM{xoZx)ruQ9rCmYJmW2OYfv2&7e z%2naKXvvP8W9|gs_Vnx5KlxgvWe^;DB2VFvYBBbDP|if`%g!|~?98va|Je@pIFhG4 zR~^>@%#OV|BMBY^&qPcexnDtZ8|nZ6AOJ~3K~!G#w63bH1I+L&2UC&v z6mp~IUUDEQi!tK>GpgG#$J#V|ib|)PF)vtW%m|b_>NwZuCG1fKf*?a)TQJjyC7ZouW z_~=d_nfmv;$BY>%uNWuR4n^j}jyN+zG;c6$g99cVsI~!v+;>hn@$?`5$leXM5i)Wq z*)v76DVI~iy`X`5K|zik%%2NobcuUc5A87=XB{5E z%(}@uc<)mQ+B3@io2J}8Wv@N=(2){_y}vqN3~?|B;{-tD4me=a4L9DHH8%rfX>>)2 zX7MS1>9{Z2!htD*4xmF1Jv2o@G9Z5b3x~B19LVCI0mPxgOrQV4;p8&$7Zh+eY+tMo 
z>q7ClwJE8rv+lKZ-Hcg3$n?K8_p8@;x_YLunW3IWbVoAQ%tU0jib=XkV0BM>aPmt? zgUdW;zUo2Gz1MNx?K3|pw;&VIGXs^G8RWSra1$XT*IDX+>!Yu|{LZQzECyGLj84>m z2m}Jb6FRECeRb93ZL3)68x_Aqz)(GeY^bam!U7<2)-hAH>LZZ>JUd;@NWrQDW)HI@coY^Zt3^#oC1vD@ zGC12LVbu$`ci!dWPd@p?m%sAWrg95KTM4Jh;L@q%3uEKOv^+B@k?8TsSeP44<_fbEGuWO zOiq!D3Ct*!OH-!Yw9_Ych6&Ql4EF4+%z5sso^nsz(%f>{WtVTa-8P1Y1O{ak`vVp+ zKqkT)*s@YdX?K{90N(^vy%Bc?5xHD44<@VHL(@)qIeLhg*$*B%_^zpU{`ica4;r)q z2`#4xCDq*}hXlF)1N#5+muFskW%t2Cg(jfFYd&7H$FPAZJ>99a);dI-sQQ(>p}O) z7#SymC$3~o&2O)8f4TkHwhA&&na{+$#6aBD7&kOZRkWg#udaeX9=T`F>P3LcL28}q zt#-v;a{7B0REnI*v-6BfrETm6%{y+`TJ|iF|4d-#O=b?VYW+m?HA~>)*CI~^IS`mD zE+bOM3cE~?K;#5-WR)0uXSL*UQm+svLZwnA)!V8})k^2;8Y`rxd|(9nel3R|ebf_AKJkSk4q4g0DsTwESE?10lqIc#h`~8fWv((; zE7eX?XOJ8g%_ByQoId0Ji+=N)ZMWZnEK9k1J{hG_X`g-fojzm6L5Cjb!Ok(c9ORjN zM@NT1%BT)|_Ab*5Zy?XeSFT(Ya5w&Xe;*4Dz6ShwZo7cDrrG zTohS%L>ObV-IcXiI8^+s$C8Liptg?AqzApC#Opw|shj4K>y7UBq*aZ_3E~ZzV~R!n za}G@GS*`n}HgXr;`@)5Pe}S7v66e*dBPTr~xMWEZSr>~Zc9~~FHK(jx$y%2$`Okm* z%ptpcs0HlVNmLq~XJ`)oj2*$}1VYA)yXPUn^7Q2#!q`57MEG(h?PgjdEQw z1qO#x0olpfi8=Wkj;NI9)s|I@PTqU#orX3IfG5v+M!8v?%y{Hvt3aa``ieQ5NjK3r z6Mb67=SrEIH+RnJN~N0nYPHIFK79D_4}Ex(R7{}=iZpIDY((OKK?E#YzUGdS zc)qE*#lfv^U)9voyzLG<2yFtx5#D&?A4`@j>8w;lfM{-M+WsRSC33c9P>hUcKrBp<424d1xA*+2c}KSvaHa4o^q4V#ObZBD zwCJ5hixxwzRPa9EXrqmWj~vwq3pib;F>@aPz&U#6nWtBGtR`|Ub6|`ZK5XMnH?tRu zRsgTL4{x$bxm>DNy>m`ASCy?|mJMs2 z9I&!&#T#$DRq0&aS*`foH#e2H*?xyoxdg|KgMg(y$Q%*jnP;BuSiQQlv*LZ;)YP=e zhc_KIa)c%Jikd*cku_2O2Jrf4l^$`9wENW7-q{s7jr*Z(u zk-9D8?gNGyb(N~J+5XMtYO_(>X}!sma1=l{S4#|j7Bq92}MzwkBQz=6G6d1+#_sYJB2+|ty*(og>$Vu zL($)Ajt`Bxu*pm)J=M_QhGZ{LZ_+@HTFF`sMfRjEa59CIQvw(pESQ7414Iv=X*hy) zS#-Tq9hg%eKwZ1s6INIAMuRKqr<*6!>s_|mgYH-cck+R$g!Si;XFBEPUPANSC6)81 zJhnU=C}+Jh*i(r$K#w%cGFoRLn*yrDoS~Xk^Zv~*E~{L6=QB$>o!4jdq$)M!6MJ(* zbmp|EE$6H(>p1uuRkDztt5E`K#Aae9I|{(UKs6Xt%*4<(S$tWX!#h`^DwnG5qnq0H z+iv7wLPm~>a;HNOHwb~896_w^y&8(rm!(dKtetBPr`RKxQ&SMc(PBN}LJXpfj4|#@ zGC9Kw0kWOE#%tpaNY*ptgNYEN6^7S}q)_AeIa)vh_{fuFob@3RO+5XuFrt@%^aYh1FvI9aWUA+z0 
zam}nS$BjlB`W#qz45v|CH{)6&Xv>tjaM*y7W$T_j-y^alEQHj*jQXxK!_341;5uU# zJshZiI?z6+{^9wqfZN-8x-SJr7<~W@U#$;J?#q%A8!le$Vjyemjd5+Ni>YP+&S%78 zn1ln9=Oyj#;#;46c^Px&;BsHe;mU-PckoW)qAd`c6hBza)gbR2QwCRQ8F2k$^KN?Z z)zvD?1=pVvHIrW4%GH$x@3nz4XpYLb=#oKeV2&-fbZs<%pHMK;y*Xk5Vq#FvXm9)U zwi|6SxT#qvJZ0X&Q(5^EIt!L{}GO{p1xu6mWeIC+Mcf-{BdD=?yFpXf26 zWIe0z#kdFony`SxmM3R@5rcg4sA*!b&`J|$*<|p926}5JzGG$?wcr!r*fxl;95mxW zImm0B&g&*K&BiK>S@qY@Lt%$ZOjHQ{O2Q2okh{#nMvIh9VV-c#f;d)ZUTpAkI)6rt zxyS|U^!P$;Nl$C@}j>#x+3{xz2#F*wewZZadWgKlb(HSQgV$gSk2YaW|- z$4d+H)`5(QWcJKK1g|f6JIhFf7Ojc^kk1H?*gLRuzS6(-lG~s7@aPRbGk$1`bBrp( zkt7lzlGMN+{6Ty5{r1(!nheJ@auI=wz#J7d<4z0+DKJ=BlVjx4cVf!me5IOov~54K z|0g#dI*?GUO7MRnq*@(zF&}ZtgzR^pmBeIQXn_-^V;x{}F#SA2gu@Fd3X{=ygNd5Z z6DUO{AaBS5wVQ6tthM6Di? z*}|BT7noCxT0J8`TuYn|B07H3&Z8DsNQaFc8GqJ}`b!{e?_X`udbg`KF-OV;S76~i zZKIF4LQPGae7Yz-AW0^EGk(z1!#AnGDeEb1;IJ})QG%lhbt`3u9hj+{9;4cy2t9*x;9vVdt)Rv1S+&ufL5%PBdmHt{d+DorE{ciC|d|r_hirOTvHbXdNvf#maY zap`UbqRh{h<41!|G-CPbo)1r2N$!(9l?Ms{~kw|m>GJz%hFxZaOJ}BSBx@w3f zF0Yp6xo!CwSI(UGLDlE3RD}bBsvhttu}1~`+6V2cxtzQXfF)pAV~m1rKjF;j518FQ z&Bh33$0hRpnVYJNypm^~A57kP%dJN=H?e2$NuyT-4TYqBVcj?uQBT0!C(=lRDQbXC zHKZ!;(f}a4c~xdnf)|ftMCfreccgT_M%nGIUp2pjJZL1Pwjxd>!(jd|hWf8nNgFA| zt5OkoA^C^K;0yaVhd~2!)dd}l$W^5M41}QhUdV=NTScbA4U1V=48FxT6Vj~6YoqRB zG=*^$uCGb3)aN)pUSDYsOV3}hCu^B?eNTH;=NcZ4?i~PRTV9ktthu`NLKk{o)}Y=5 z4N$GYwn0R1_}t}io;;Q@{xb9B8Lus`HV>ha_pZW$*r&d$=n)K>HQW9v2apGQ$DROk z9+__%@Yp-6PW$tHuP?85YF;q!h%->;Xj$%`UA(lKHG#qEeI-_TR`UWS`aoEcWZ$FT zPn8@MJe?=siPds!d9~WvzWklNw)xP`8*k9+s~M_HJ_mcJ8O>O8`YKTBQb4cQDNfM5 zK|56Ui_>5Kr@b!^l%y)}{k~Im@7#N5g_!|a23f?10)lLU8W#lj7#E_EV3df6f=2V6 zi9VlZafyjrd{N@QYobXM)I{^#zzq--Oo)hpQITb4n1Nx|d%Np=?~k)DRbACx)m`1U ze~y>#>QiU^&bOWKoFhfN3UZN(T=2TNVXK!0<7>XIKB&16S**wU2l_}=Wl>e;l~zrJ zGtV_tYzK-aJuPp9JVcUGT-=r-IV$Q*HA~mb+~T;Vf9w@matklt1}#DiH!t;urQPVJ zLm)RwDv^u3qguzAm3N`bQ^gOmt}?%v(Kdyx|(4rE921O%%lhIaoDcr#c1Dh`L?9C#Nx zoGk$mEZu+l;R{bW_5u4RJS8cem1lXkfHexuB?v%hd6L61k|W4WTcw`{ z#b7ESX6|(`Dx=7Zr9=5SnF7#a3vb)1w>KyF=K 
ziKdymPGUquDZjhPx`3SmxE>6pjFm4!OIbtmJzsEMjyI@(rdCppxtc@itko+q`?E=q z<<1bS50#PCc-hTB_aK4Ty``M@C~W{4m5fP-z~RQtd;adL|FMm&0co5?ui1oNw}+eWXr^olEP+PN2I5IJN^OF%bo-m?$q z#m%t{+^uSs^5XnQDEgj<#%P2+E$%*cW%8nv|Hlz2OF3~u;EZu#T<$iO z86gJ*3JAAx(sS7X*{p&{bul7GlxH#;t^f|7L2*<7sdyc&@_HRrSR0YjI@Hn@x=v>p z4-7iwMk-vj_m&yMEXkht!Y%1-PYxTP<||k$0ssie2oo~3vc-V+!FyorE6=bA{`J(s z9UaC`Bcknd#6}6kbahYiXqh1@KM{8D|U=*t>KP-+R$-pZJ2uAGwaYn`!=SSN!Ux z#e^lCQOYO*7iUQw03x&X#i@^3;Nrpq0DzPL!E9&Wk#n6Fo_hFk`>jZky2KHX zQ6@GNk#ZRk$b-7nlA}X#%}}46s~4YZaX-2_`H}QzoLQGM;t1oh-}d zX6Ft(@PG#$ee}^sKZvnJnhqeFWY`;dRPDA*hx?%8NYZRknRuWwF1^4RSOL@kX% z0P=nBHFQ^@+fz(_rF0eT1PEM33_%>2?lF z_rCayBObPXc8+HqATA<2^^umOS3|_sF+#m()bGw3p}D8^$k_v|q@LO?nk{8(*R1V! z7p1DALSs>R_2eQvgF_BFL0%Ng%3aJL=vYj5NIi2mFiT$QtOoCNG&U%l{i9d zE@vexF{rv);i7sWf|Rg9I}58$NEWx(nF|gr)f)94$t!B4?2-Bdcs}=B7Gl{h@#pb| z4}h`tD&(OqzTapm#R7oz6buDG@pz49C`)K1)+7`c6`|aRg9wZhCq%|FfGd8vpUnd$S&d0>H5VP(M%tAOT#ndhMRQ zdpwS5n@2i~fpfwHe&?CLbLo38-LQTGm{1X`3@JA_S%)GhLs;KD2q3#bOIUN+i}EFU z9V2$np&C2cn|9;WY2I>zp|#*n`gj5@+U0YS`xr=@Ao2~h2Fyt)j-Xl&_F7jDt5%G%)?D*TatrG_uii-rK%54n1OPZ}HbN}zRR7i9OKY2tcuGvDV}fTV zfcWzrORxFItFGR>h%>VQiCBkaB;qK*k1W?yN=M-+@xbM&@g(=(s-%iZtGG%L|1Z{P zGoGaE;7tb|e#l{SE9Ns;L`E5884#g7%^5+RHILG53aDHIQt?MX$mCw4j97;d)6Q&X z>-LpPn_vBm(@s5T%^c4NC`04`NEs9TLO}XR9FV6V03%ZD!|BjkM$5^})tvn^6%1q7 zTP@bES+{4;9x;$}^Yh3O1mrv;LgKu$w8$wFER&QreAE#~ed~&EA9V0RVu4uc_L448 zGI|upff0%jSPsC%ekjYX67Ptuzp92tt!HN2v2^{3Z{n~?l3Q7|e@W(}caY0^nVjX; zWTq%gT05Y22oivzF#<%j?u2q&RkbwpFP^3LctHMGSDNZxp5uio21<=hw6*Pet${Mr zS32cn@jLe}SvwHbT%gc5FtQ;oC|Jz|P)tPpAp~H+h;u;9l8i%oUzfe>8#jFH2e;td z1`Z5>5p{vM+MKDhZS9ZOdZ7wD2ar|)M-BiYa0a!P2;`BYL>VW}5I}4tlb6bJqAu@t zXOg*%2W&Xv$b(nR&t#A>!PLTI%jz+J5tG>-KGZ z!^KZK@u1an8BZbW5+^nbgviBt^r8YHYKF9%oNRT}^k?mGw0M9}00b5QvXu*~mlhWp zAe&8Zy7}j8*RBQ>jw{Tv?#`V%@4EBOtFFH0;~)L#&wh3jQ6{o^>|-DM^{;;&F;UW3 zJGzzd6ielZoQMPGz!@e|0Dzo~MB-c|QtQd`u~N{OT7(szL9#Y)gO05=5on|?9oGt; zj&6~TFNd33W`$tpEz0SKLM zAPbQ%7!u(pn6(M8JoYuuN#_@CHI)MB=(T4h)bm+s%^8f4c2||Ks=e 
z&8|UUgpA0Fzf-f%Du(nQ^Al-H+jt5j-`4#}00Mg~})ZAXqEGj|OjL7ZE$GV693 zA|`C>wtH4C%*)C8(DyB>hNL1Q5l6o^rphFe*406-KQk zn|>m-bCKzqAW){+gSf>((40-ay@$a{IXJJ;nrzEa$xsO=LDLPa%Y9X0`U)LJM*)GY zE2)DFl}hb$$DyGYamjr{{DR1*+s)4Xz4Pwga%VT|KK-drd)@0_2LLzz z4J)%UKx`R6z9+`Vh<+BJtA ze%Rw4_t>+~e&Xsit4UqyOB{Z2+b{m|&3}<~y4?<6u_FEB*ZlGE$2|<0v_T07IUq4) z@OPJ9`n7-m+JPGm*te%c1dL@%oyAU3}q2%vTltEgq-NSugw0bq7EotvLqSeToipP!qZPiGUB5D{?B zyFA;sxVUfc-o>4Jx_dfHi(SkX`QCe;a>`M!`@JU~wcpGv5Qhvz(H!;dVx(3$m2&|I zt{^7Hqe#OOq^pTHzm{S*kk|h1nkAD2n zg=5$3Axd{rBk_iVJ&yXx;k#{^(VI^y>flKj&AhP)DfK#TQ@v z!S{baoLYRs2`65C_0`EtqT`7fXM}Y5m%sG8=bS^F6X8k6pZKLOec{j}4i{}-wQAL` zZ~x88Rjchjmsdg%AR+H`A9VDC?%Z-`mhq#GI^yP=Z_YRpAm{30Vl})F%+Hv{>v=g^ zF{4|L;n+8I4NA4?8*>5KHF0tq8M$aXAQ%c%&k=;J&*e!yLLhVrc0lspZ{7Ck@7=aL zT@Yb2NSQ<}*xmpDAOJ~3K~&neFYm*$xBlq$S6Q%R)#_*4CNOElE7FJ;DUcArREFreg+4G!79`mwiKjN^J z$t-aK=l}=C#C?!VEsVM@04wGfHgDOyYGHvPcDvof4&8M3mMsYQU;LaP)6A2}Erc9YcRl3T<1WAK z@*@vFTpSld#2@>ZCw%YwS0VslddbC?yzAZX0JCb0x7>Qm>5n{P$G!Iv(V8{uuD

?UTmZ&EIltWYq+&FMlMaA5F+j#MT3TF}?tRHqPJ7|w zPuPDBQgJIPGL8rYj7VMy0Gvx#HYdyg0Ei(W$4vB+zwy-cXT1ot35&t0FOC2}1V%7F zzoy$=M8KJunY%XMy?XU3laE^GQ33!7_K09-M@@S8hs-*4?&hM0A`haYzM z9e3ZwfFULsbpbL&wy>~p-MdJ77|Dfe!v4B za@=tXb1N73?fJ=1Zn$¬pHHI_g1JU3Ja=>-QIcl+ZW{g|6?xtViJ)0r=S<;zbv>r94t)px%0n8!bk0pma?p7`)gOaA$K(L2{FdG87m?$6S%iq0IQvfOhTLwR7kFLWrzcy?W)Ul_=Nu zDI>0|C=P%J9dzhrm;LLOt@m7i!}Z_4>ib6@eG~#@ls)%(&)c$nv&is(4F`Pg^Z$Iy zFK_#|uYcuVzVx}T{QFma{@=I0=WpM;di6SStHf=${o*ArxsbTHy#+TNvFVea{sgia zK>#ni=%Q`g?#(!5oX$J<1wX&_=R}A&^Ts#+`I%>(NsIx)BY*2Lk2>SGM88~r!}Zr+ ze?3E{4v3ZQM4;dN@;CqbZ(kK(FTCKw6>|%aA0dKMRzF2~i1|YV`aXb2qT09wTK~f5 zJ>G2lkqye3L8eWsoCa>ErM zs6Azx)61iQ)UAv`JymOm=YMkNR-P^?HiIcfs{c403=-bfJBTF5hIR_h{58% zxxL$;c+AF2U-hh~pM2O#B%ln5C!7*_ZkkvLH0Oqj`s+0SdFLT9b&6eXZ`X~yT(w9D z5}@;(b8*HckPOH*L?Kr2FaPqF^uaPm9DevrB96uZPFcn?twU_su<_ehT=BH0J$ZJ; ztUhCifWG*}%YOE=pNdp#*RK2472kU1bACIWO@+MkzWdG#FT4Z@-tmrie&@<7I0piD_LHCVCx7x9R13s_ z@V{RDKLO<4!w_l&4m}e)ZE`-y2*Lqo9a`UFlH!V zlC@jr%H#%V0qB)AuWGtW5Y*aOqKrQMii z86X2#k@1u=!mOKQU4}?Aj1t0(3qOgOgW`+cWd?FtL7Xqgz>JEOR=lq620>}j3sV6Z zlNY*@IB{A;1~R!~&6zsyB}3qd9FbVkr5S>hb#@<;-uud@pYY}vpMA{6`IHmqoG>HE zKo}_`-2O;LLsW86-1)9{vgxxFLNvoGLgS76-~Dg)F<}TI6iQD4wc|nzO@es;{rA1~ z&2I$)gaiQ2dCs%7Nk&BCqIHqiU%mCOjy&=Rc{!|95|9A@;U7P*$Lq~+e$z3>JlL)| ziqrNGPCDtti(hsz$ZD|1&_>On{%_C1g1Azf$U-|Mszv|Wh>m6@@M>op= z;eii4^phX`P?Ade5E}!Y`i!R?dho%bRiFRd=kC9MhoBs>RD*!~_AP$sBOe0f0QkJ; zKKI~*57yTOJHY72;M){qfGD?X%2e0%!6ffzs8_nI!t<_c!zi${{FrfPwF zBpkrzC(W=BYT53lE#>4>` zVxSl>2ABYfjeJ*Japmc!KI&&T-vq!3_J~va^!b1K z{FZw*BOo%o=%NelnQ5sg4vIwU6m3V8Pc~%4DcezSy4{Oi;&N|_IV&ZO0jR1tXcr?$7~=@{b9UK}e*Lb0y=E(0+huDRFhJ_U60i=+@S9nL6rgJzCTVjJ z)Fn$#gjjWx+o`3lNuUmsF!DfeMwQW^YW}OhQ$lI1V3#06f($rEWZiUzFj?GjP`dBj zC!Bch<4-(z<&3xt5)r#3&z4~%ud64slT?TSl21h~QCtY%Nnf(R3;GjVQ~q#Xb9OzvHg&fBy$R`}xl@-r>{*0L1jtOaK0Y zm%bPQ07xj&58QOvJ=^X9U`HMG;2UrHk0g<{n{pMh8~^LSPCVgZ!tZkGDX0A4>hFsv zf3P+}N*W;Sbos#t9dhrz+W?RO+_m|x{Wt8d?F}IM&<8&HvWqX4#6uhqKk}guz3@dZ z1nK;dfd`@vTefc5wCRD}P8WcmarzloUU?-ZjKqF5q(?mBjO(uZApkt;QIG!i72j3| 
z)v9PS>!=G&m@cLQVBDZ4LA^VHDfX1W5J)P{oMC7wJVn$~m62NtUjze_seSk&f;=n* zK{$sv=L~5-$j&|Ku%i#y@2y|_&h^{3cGLYiFh~Ii5E&8YNDSne$zrzKLtu$ZLd6(} z(2|u1%92w08G;VJ)SmF${;Ap>CYKuXSLJA5|VGoS5#(g_cI(GyR8*rDqe z7$@LMkco||D0Vr6A{2dcoH^t{y0Xyz;tZU_{VAlHp&~}IT2yd&-JiZzZ1fe2Rg!_C z)boKM5MsjK@WwY@@RC17J$k~*V$vPAdethXxUh(wM*u|FvSqVYz8>&^2bhcU0IdC4 z(&n2hv`OIek@T#k?y6)N^opRbKm>?2ed+oJ9`q8z5Krg!JB5v(9R~=;f zZp!If3{BhAo3ENAd6TlAp)Q`Ol+dd&!e7IrGSM*=ELf5eMY6*j0Ft1{Isa>*b&bypv#O0yZ!n)za zIT2ujjGgg>Cw%Rj-+2B1dA%?oK%N-^ObJQ=n4Ou?t69=PhX?@OPFIunzQsi_V@Wmv z=!$0<*8)Rs;}9Mq4uJ60uYP0mmaPO__)Zu1Ex!C^F9&^5qi(#A*BoB<(u?$L|KJBc zpkoo%Ra1%t-A0BHO@6LJq_M8A8f|5|$mj0xy32Lr*(;)1_a%?y6ty z?4&D+B>)6jmod`IAyUJ)JwK)_RNe#00_5>7Qg(<gk2&s`V~>B%bAIQc$30XE46$6S z&hlZ5p_1QRM8+DKCl6CNVB)UW-eFpEhDaHxj3O||W{*zw$lz9XIYN;)i zet&xpu%Ce^AN1EUbt0Kq`$BpeLxmZIMRTFEOYKm_;)ruTS;Dy3D+C!PjNxj|AA88k zBo`AEpLXBO1y4#@OE zEGefF0ug3_DG_5N_FQy1b_o*9*b3g6U)*ur1J*qEtVciQA)5|amChg$5l19|1Xbw1 zxp@!()cMe00%ojEAx%$;`N7x88E=_ItLiU%y@qnu?fG zJG214edQIq_v{d7em?Xe$IZGUY`=V!#=4#sHYSHWGNk(gYLWgq*Xb4%>JC zy2bm>Jaqm|=RW@Zulk+mpM3a{tJn+(X9ofilM$ujXnHPocyWYGTm~tRngF#5th)zw z0ISve?LY|{6O3LP!m}$lYDVB(G#sP8M}$0zxtFw) zA72Xq0Qj0edCi}{>5XrG^Irlc0C>+mTVMK;m&vN=bNM8p(giQQaDHJyoD}k@kAG@u zY3aRx_kRHqS@O(hKJ(B`n?#T3m;?X-L~=WF%KrO8|GcP5{v}I6d6xvJzq$X)lv}E1 z^0PV%$VCOWk4=8sZDPq2+vpVzmA)*1urQ}YsVIv_sOBvum>>rr4oDn10%OPsGolX1 zWD#fY-c8^5@txoJ;jOpr=x*oOolCpGhyw~foq*U?$~Z7!NRV+(Kp7^)7{@MQ1|%*B zK;XzIO+*L?a!wqGL57SoVAO#OS;9I?yt8|L-_9czRy_LnBhNnN!6zKKe)Sy8A*4J* z01lYR)2an$=enU%rw7hO%#rp}n_y2e%@3?ftTd{Pzz+lxsy5iVKOkeUa^>2k#l-|M zVX*n0tt(fp63hD#GgCEhRGCP3T(_GYa>$|E?zsn9@~~qcdhL&|LvdTEIzB|uu=__ULcKJ|zLAFw)|lSfnlah^z9o@uj; zBrBJP ziq2)2KIT!6d-=sLKjpMj<`?F7@7i_kwb#ApZ{PF7>#pL`He(xvVdeW)e?%PJd6P8~0c(D?+231h9Fp^KNmi z3d+q*pbs}zS3>6L@x7tn#Ndf~?r5lkB3}S_<)J*x4&?ze0F)4M0L~HuN{LdjoeIRh zE9`K#br-+sj{AOa^RKS|#pXNiU);L4voD#0WHw7!hQtsOWSj`(i8~33MU;#K0dbxI zWk3MLI8QiFI=fauXWe|de&x)uhaULI+-C+P_5w>Tpwwhvrndk_Coo2UZvg->T(xp-XK9fEqzt!iyJv1;L2*l0 z3d9~r0f5+D$`0Lh$hPg<7$zs3aMBNc_B8G|BXE8gw 
zB1zM3x4XEs2Ccih0i>nw($k;vjBkGP>!Na({{8zdc*%uau2BFI zees{a^t|Ukm#9O4#~*+E_rLeux%rjA#Ic3?8ew?UqaSn4)jzQMgZ;y2Kl`k6o(+om zTk96;kJaYbX3chT|2T&m8>kqNa*5T4lFh9uFG^+PL#ZFe2(a!1wmG?23hjLiM9BSn zrW_P*My8vMW(CH+2IC+Bd4o3cj1gzVh!QRj;Q;`E42U5DC)h`@Z4Yg^Z{KZq@BFVj zw*PAL_N{xCHt)E9Pq*9QtkWfuE3;x>XvQc5;t9emOJ--&8Q|+?lY{qLICj$k#~gCN zVF#^Qziw{z3O2_e<&;o{>a-xOk_u(t-0>|=Dr!9L+@ z!8KRuSIM}`M46an-M78@uiyUHZ|iir8Sg5;n^;380$@q9-@0{gd+T4n==?trm%?r>w;&Ke@~me*^K<|7IYMK2As`Wd>B}zq_(wm+c^3em@!QY%r+@w@WQ>5A zUZ6($#K%8%!37tPyy%#%-+#lczqoaNVF8S$X0_Hj!IotGqJuRAK3iT>juK#bt#%L; z(|#59jOE=JRoQ=y=rPwlVsQmP=u;`os*lCVy(Kr*%oQHVQG?O}ifm~qA# zA){0^k;_xqpi7(*mU3bMEaS)tI)r;T?AX)YvSaVodw1{LvxqF+-N_cai=24ElKI)0 z*_mYJOuBAicE44#YgVpUw<=voaRovOK%6phI4BV#Wa7wZIe7trNS|aag_YWop+o#d z`#1x`_o0|mSLgdffbaRSKWnQE5Z0~TZ*kuuLr59kxq0)-RV(v2h_hu6t^Xo$o*lO7 zfm=6kVM%hr!yb0cwb#gZbdSnj7qO=d32wRhmUq1C-CzIOSMR**u8emH7-MP5(j$*N z>bw`6cfm`}U%zpGM9g>w954Tu%bx%H&qHK@aKHfvUVF_o2ORVO>1`yQ2>y<=ckkX) zPC4yndEOkn@eOZy;~W2sD}g4rBXqNa4?1-F_N`)9+eH_>?A`zO-GXj?3&3u)^3&!Y z*U~m!dkE~lA!z?h>H7DfN7bWxRTThw5!ln_OYx69Ip4T!k7ccm02L!C=C#KBLVGj+Ku$QVonNR9Ji;9$%>z*m$4u||^;|s=WYIE|5deUU-SL~>AP{4Wce4Wz zIYex%^_pWHgkTlm*cEX1-FG7*@oYAoS%1JrrT_azgFLp0}ae}8Dh|PCx21G(WH#fIxVHHbL(lKYEu>^p?0Cw-*ySTIu zfEJhb&9HR+hK=gba1crhGC1S(M_v2Fs}X>) z@{9m6!3@lWJ2F%e0gxjX%w3HK5fXAHS9(c?p<4P906dvgcR&s z*~AUNWdh#O`oG9Ut(t(fyUcqGf03Dsewg# z$E2D-D4nS!%ug=1kZ5@afUtf0_QM}|#Gc(d0pP4N&;IKF`x=SM=uL0f%K&n=YA0>8 z7J%u`0oU+G?iJ~~+#wn+H565j0VtNA<`&pHpQxuVzxtLBw?^1L08ktg5YMeV)eq&Ny817L{a1RLh= z^bm!~oxDoZL#1XB=@b2B1G%yo#5zGPK{uuK*JGR-5ZNnCzOEs-J{ZGWz7xsE$^n(8 zY**ASC02cOUy+3x7aQt*l7L();!N!;6X)@X-6u+_Bj@T=R*)m8B)%XPA`%M#s%@O! 
z@VYnd*}Y3t;FYg>B@t;6A8OvHl^eoB-5=_hZ0LS2nOiv3ElrH_h;(rBDkgtt0u~k* z<`X_?ryWp|0Z1Yu8F@x^>H8(Pzxa%`@hXo(LEL*~xIb8IL4F{a2uVbDB63986#)9I zFc51ATo?_A1PsLCJ7nFrCDwD*;IA| z;8a>x*@S7I4BxA&eb12^CUE6$)QiBbl@}SU$kRTDtpSUQoDYPQ=x1p3dD1E#$|03M zDb6NV4Wx#SB+v2D6}HtFt7xY^} zSIwvB?HVnWoL|b;U(IrRipTSBp%GAp6A%zUs$xP(ZA6Z?3hR;5s^I1zE6Oq;5TW*R ztyD~H)jvWncZ7g^+nfb0)Jl8CbE&b5q$sn{xj4G3AS2^T5Z6a>)Fs>dcI_7Uy@S@@ zFj=##KT?qzvXSFKo){qaWFYxW{@1TZ$QSLL0D!!O5^Fj;nW07Sj&c|it?pRJ?b_;* z%1PeB#u*SG6F0IU01~_F&fi>j?R5Y^Kub%DOPxinPck<9cYpWZlTSOD2)VN}6TJGV z)o=G;xu#$#9yo~Xw`ZT)Q}wy$Oj%ZyJAj+gsqsHd)yPw z{>Hby_2QTOAra{Nf?^6Q8SXzM$OE>4DBpKqE*}Aro6)K3;wS3d6J>*`X>DtK<=|)m zVCqR|{@TyY-#RdyfwkHw!$5O_5qsv*76i2+d2E}O17_t^Z@`jV+-RL>sNA*#QOyFu z(NVAS3i*KCykbO$WCk*3&peA)*{gpcK@F?tG5k`fcUQ{P(sTlcO_(Hr&8CZraUT?P zYlPKCvqkS$o0%C|g^|7kdHw@2WIK26y6v`Gx8Hm3+O=zsIO2%?_TNvtk@PwS1)c3U ze24%G7J=I}J>3JrnyFi;=$olrp0T+WKwx-A@)@SiUJ9k9kqAxj_W_iBy;6R};PAXwz}F-O)FFzC$XBL%7HSR~ zQ7>3*G2g-{m-&!TMvHPdS_%L%WL};@oJ&1rg46P=)oc$H=dL$|saS0S6Zx+)u<9t1 z5pX@Hok$>Y;-}g=3IqgPa~zR%&zd^@yb;({Z8phnGfm2cK^sMT*=py`xpK`0-!Yp008a^aKE&KDrYBX#b(k4 zwz!u#=V9LZf&X4jy*(_cE!ntIs8atEPx^#(xpu7`MemEKi45bWqEQU+Sr1^2OYq2lS^8bawo9YRsQr*+adeg z7E;_3k!qDpTfH7y8BxI$YEPvl4AcZfD+K_czI76S7>HG1ax8?3+#m7{%Tdo^yIl}- z2MsR@r2^W}gXWZ7MG$Heu$=|~%ob!&h6}hxp$d_zBrIDpF0LkCWC2u%H_Yh%;2BG@ zB9OKmm8KIvim&QFeCwh`j~O= zO30GJxqLONON*K&WluiUyiWC>RKx>9?($`R)$uCbEEc0st(6EUzsSBi$$5V>505nsc;PWg1(XG^}9$3W`hBhlt)MFQg{!`7j5qWNwX`%D6Oa z($JbqsGSvae`rHX3SnEJ)#YPTp!&;j4Z2=M_u<zE-7x^jC7!XJvQ?0$XH5?3YFOWVJmQ;h{@ZG}(LTPWjMnGI znv+YMuHQ-2C(i9+z992fZ53(^g}SIx{7ou?)LNt|DXIHt)rcv$Zx?U$-DL{2K8yuN z8UcJ3SQGh-MUU&flF=yPJ~ZgcS+SjV; z@ceUmWI5&j29q%C!vO`Kd(u&a35^3{tEL54DiA_{TZslmY~|U=su|Y=&Hgaf6DLQ5&iSQ{n3AXp#3cZ6v5?s2nyS-5m7A>4dKgYcrx*t6 z@^(#$q5>w!<2){n7?*lB4bPj8QVK9}e_es&Zi>moGKyer2`0CVi`bg%UrJ0Vv0EO=ppBM?iSHD(N&#XtxfGJ4c!6q8e6poLNtcy zK}5jtE>r9KG(cujpf}}9p=#0d8X z^=7LDfZWjsZZ%t7<&gsYa|)w70H>H##Q9f$k^bm@)xS<6MTzZGz- zzVyOR(5~Yq;CMlYj3Jck2lSiIcC0^H`v82jjdEgJhMJM+ 
zZB(y}0Ly1&`vAr0UyWB?ui-nkx^A<|DgV({xj|5Rv{;%x86}A>%-7EL*r$MD5fFPvpkP&h~)*a zmULNkKXEq@KtJ!_@bt=ntbdf2$Rn zW3S}p+#vNi(J)S-HNp=~SAL7p9lc4?1tt6_;zls3J;)1#38LphkC4CWNLB*04ziPQ z<-t@8R6uAs1y9hD4b#qTBSt1_pvj7nKucxR3PArhs+f{pbIwHfsR4n(0=A(c@U|;j7b|{ibi3qiNXRQblK)ViC zFkwG=oj3JS)CbQMBqOh&at}`jq;XVoeGa@};7z^>7~df`!3kd+diPoET`mG6nneIR z*td0Xf>ojw6wWNtA18r2hmR-<9I0HZVnFm7dTyN@yX++TaRzeG;_kNFk-sP-ujsfv)8 z``6{G_I{gOj^hrkZ#n}yfafQCwNV*8Dg~gQa?8D-7h4&Q)s$mXa{)L#ps zBSItfa~3X5;wdYsggPyNK2Sh*({Zcxt{~#1@}oJw+Tt)-#s@2JgRRwZmrO8tb{trSh!Y^z*N$RXW}`EHAT-0Y6pTr3j9EE z@}07jwnI6uaoC4vO%$fxRu$N)(3cwEE$gD;eF&u20HWby@~dzX4Rh_k9JHEmwaEGF zrZbpw|5ihdoE@z;-eZQ7q)|>ZCV=%v9U}(z)sH0B0OZ`#9`is1lJm4>Q&BHkVDLAd z-ulp*M%0E$Xq4LkZGDa@1J$t~Q?wjXNTc<2r1j|&JuDd1`aS`OC?J4B3!w1n=wV0K zI*rfqfYuuw(JHich^oN0=Dd#{jgHCq>Gi7dsq!*~;3Ur1aYPR;V1eXjLXC!E|1$!Pd(Aw0#LID z0mW&c;tP}^*0z#!Ui5nOIpseeQ?ze^rY4A^6SZFt<&*YCB}5_O(?(s3DQx5ohp+=C zey~&%Q*$$NsnzfC@0fAX9H#&{#JJbsOxy8p)qlRZ#@mY-I6)y9;jrx*%sjcn{FPENM5tM?R>3)eT!$`NODA$9yr z@hQd-%Jn~vm~#FpS_K3^xwlfxC{}J}h5Pa+4inm}iN#pRg_c)uie(7LoJLlVG8Pj% zxfDIA%3AxmWD#r{jO5$kw!EHGEFU0<(;dWJzzo15P?uiGeaZRFe?B_Yr3sEV zqs+m$x5%7L8)Kql!Bxkil2a@bkhP(nfB;hA4U)$|BOw-_q1^M7wV|V~TC}4zH6v#? 
z^w@x`gPx|CVv1T|c7myW0Ay|i%bRMbv@m zn(}+rv;f*}UtI|Volp?EQ@*iM_oqld4`G|}c#lXvq87l|Q{cFuf_p?DmAqeE6A(>{ zNRgjLB+&DU^`86j;|c_=Zmn?WmG=zi#x)7W4-9oh>5aRLNUYFz)68!%(drZ$bL ztZhM++Em*q>V=Hq34MKN{@aHIKuv_K5MU5!7qvX{JeOf5TMMrVqwvK}Q3?n^kZQhP zU^%pD>;hqgQxaR2AT{)I*e$*<{!=otIWKCZ7Cu`*o1yUPk=Rny+Kjx~wOnp3KcHL# zz&>KFP}<yIMXe~OmCD8+eyYbhf5T7HF~KMNrDt$@fy58zPtx(|-_RI)ua zX$n65=W%xgV$zJ(w(Zz4QUd?B*eR9+nx<9HFAvt&^5ynX!@PEd&nf5m$Pty1qphS} zpSE>Z^9c6%nqTA^07Cvvkd)(sY!TXAw<(&z5Io?v46sWnF9sUbOze$sEt~=E(aPFW zli?xae~dUs5f~8>5%+zIPNK-eDH=!p(TFvmJvFM;*^>)DK-1cGAk?aY4ynCve&i?u zAP~050xqE05!Nv;(5pTDHWWm{eF#zjb=X7H<~46OShKN2(V5=R5k38|kY^S+4iLJwBFJp&+(B-|Nv&DPyG~3;ltJQso}XbpjDlz6)s?quKFp zer8wAwOBtGKAN6yVgCL!$PXthi02symea>K=SR(cLa{gM8c7sXqsAJxq_GYnnn=jW z)s0|8>Oh5PQ^rle4FSzX6U$L(nAcX-pxeXx1%^YCxlA^{cx+BsTG6h;KpeSywrx}~ zDvS$*G)lfWF&|UG`3shL(fRLKv~nmgap<+@Y#(jZWQR8w0zU){r#|F?HHX9M#-0>mG&0lQwLBKG77s13!r)RzYnxc0T@|* z@`_Z?8yvc(!=9=m+Ss;{O5+I6dTMdw$<|ZMrSK)a4lSd<5n@Q)&{&7}w+7H2jHEuk z5fD0s(3sgx4P7&KXMc!A7PgL5hJZFUxi~;gj1w^v--dacO4``CNe7t^?K1|JJt#I+ zECmYPApN?^G)1=tVFTM#yD6eTsm0i90A#Hx$!~-*J`|^8Iuz*Mx93%3>fYFBYV8Si zR8_!Y?r!yLLl!_A`az&I6`7(GjQD57n48ej4UY=uWVni8V@ADnHTuZrT zM%J=$!@7U|oqbJV`9^k5GRV@R<$VBhAFvn38mG2~X(2SPC2~})YVyd-WaxBFno|gE zE>GM|4Og%fe`sBAjH)_AF1Ka9K1Dg?s|mu*j4}}sF!7uR4ro#~!suc4sRi_Cc?inE zcD0Oxy3N%EWOfonYBHU9Ajb$>f8@RNO-o~(E_NwfwL|B+ir|%q$+_5ym@1bNovdTLXOb3&-#&{k(p2* zmx3|A5HLcb$WgK?X((C&AJrvdc1Z11y($G#cG476OffLfQC3o=yr8o*&_d)@jG&yy zTsf3czv6r!25gyXK2lR>3QY*w6pP#wZJt5P7L#Y34F=oapQ>Fg(ZkJKM$QAp%;-j| zlN5)%x~Q!$88qT9xR>wg;fzU(M!sS_>J41!BN{Cl=r!c}zyMsYMMGZ)(E2b)yTWm_ zBV`8fF|;ymST99=H4H&2j{8SIqTqgs$QIy{QpeyWlP%>&u>fFt*yS1JHQeFUx?L=w z3KOlj<|XY*vgA4iKh<2HD(GXrw&a}AT2Q0sFfUM}uC5Z4hN}OY909SxOIiRz8W0h= z{K>?IS`{3BjMU9laHd9kEPzqj%SG+d3sgfNJgnq>%3q_ZtJbBRoz-4(Fc~v_SPN`* zk)-V*XqNj6T}6r+X#vR7AH1Z#DLVm_5uV(wo=Y^=M)D}qB~PErqD6SE$8TmGiS$?N!q!MZE^F zERbimH7|SpaVaiooQI>IZY+2q=296__ta6$Ff1xJcd1KGhQ>Ak>^O_|Lo48ej+?J( zhQHhDL8=2ljn9AChTJtuC%SLPff@%w{mf@ny;g@FJ?x4XtVb_1^x}l=Leu=gT3WVm 
zoMM0|41iwpa?&XY8xG8mX09KO5vBG-{X{$}+M^nb(Zqe&OE69K8Xerig^$V6+jO6@{j@xndx2RHv>CNn;yYmnE{-fzYH`{p?TpyMiy z=I^f$=dJpbK}~A_!&t7iJqR&{KQuI(u2maISGITL^%yY%nm|+`KB?jU;C`5*A;cCn z%O)NR^yhmeaQM4}P^%TM#1a#@!oe1>qfkwUQJNL4RD6{Ca)V6*<2aQX0+IH2q#)0M zvyOsTz9dv%)Q6W@AHIY(C{X>OKl;&{ek3c3%ZA)83N3)}^CZDO$ScqH6<`PirfXHe zg2$K6#{ssF*^mMvsykp9$$P)-qadP0vmC?%I{)o&O;~84c{E)tM@j>e zv0%6+-ju4#0;pxb@!0lrT^2ypCJ5j1*9WALu*}`=RmKT;5`v zqFHz|(z@#%Q|A|gqT>pd6;S<02F=FSC!;8c_E{zqpl=HRgC_vSsZBajv1fOf3!N>1 zz2;$>tNcIYa(ojNle2x;)>@= zxnT%y*pHUPzNT-dZxe|QUYBBtCbWLf+n#6l*uOT{Nulq}u3kHaWw}G$4(12|1f(Jh zpp^^6eXOq*3f;)k^-&ogH|kOM%ebQweskCsHPYxR8U3gQbbR<8gZ*ovwEH*?etv&5$|)Mj^lf@ z$>?smv!YEs+=uupKO#mm4Sfg7_Hv5OvBqpOqDZn1fTHm=wR0Xub?pKL-0te08keT4 z@+s{neRq=1Ek^_gRZ^8~6HzyUaq&SS06|Ki4vFYJIPFvaC2&x6CNk4<8mmtv4vQ;R z_iSIS{bZbUqKebEd|KN#9sNr|cG1iocGipsx?p_KrbAa?=G8VgtMGGAi9v2R1}Vu( zYZG37wwiB~1*LdWuvLD!IuX;1%*py~Y%f{jTi*y~X`;D&zKx9mfI5_maL)Pk>z9(p zUV2&p&achiPf)3b6sPA>ulHRaD7FA96-vsu)eNp&T`LOzm&XJ3NXHwL3Ayuwxd0pfSeklUMX;SK>(FBB-uD@oCR(klVgXbz zR`T_0=Wgd-@ngkRqu^^b^E;Ncrq~+DP4{z7DbXk8+uKrL(3o(3x#JX{N{yl%2UjaH z)}6otMWohKIUErNbxp;Z6U}pwnS&l9px|#4$g_>|nrtZnd9Mu4K8orypx>svK+dmH zapC?rT5mb_oIA2%mzF2m%G^I%zv|@{=bxtswo&J;3lw|~E%*|a--w}9P67aRAAk%` zDN7tJYgP?)iD}B7w`V|%u)`unW8`PMv2jYMY25SF;emAj`l?i1VzCYz>(^@eR83q} zJk-;l`IcgFRVNQ|l|wMpmhnjiL;!Id1d00q0Mv!Rbh;!(h6CcQ!OpBG4ntrK>Y z5%z%8Rn@;TuShi35fSILt2;LhPxRRTqA8;}#9WCz@@cwtO-xg1DdrGQbLEZ^M9++1 zQ*EhEw-qaWBN!3@k>#i!@9{(ZC*PeNgYDq1dH68!57_l-h*nfT7-cPbPLbvQFq&*w zN&Ru~5b~gWV*$8D_u~+%@&c;#;$|d|G(sh}zygRaJrvZxt|?Y*1k^cFRxuPM5e;%R7HOb zw`#lc#7w@PP^}<82e05^`2(Zlm6tM)g%o%K%je$@%8-JkhmyDv%+#kyIgFtl+#f>c z0S0U<^wCBoyp>mtjWY+@1cg$xCjFI?=gam)Sz#xH_3`@?>YtDlPW!=8vR6Z`)@;D7FBib*Pj%ptba^4cUTBwSv`OiMIK!a~q`JDyed|Ay~}(@|#Jz+<{e@ z`r$~eRD3bcpHMeJ|5F02H@LL_t(! 
zFCUv%^xY`5iOtFlECQ^M8dQ zBJ?3zJj;3RJvG18Tb+h7_DkuJzpGpI(Bs?}BaEijm4Nzn*k0WcUY<3G29Q_(9u~mB zwp6hT!dGz$d=tfG*vJq0z#j1;1PP*A?SSy| z^he54sADziopbnZUMtGT`|z!}1|8F>>oMbswe|ioFTdYjaOZ}&KRCl3H4cU{&&ZN- zat#2Hsq%aBl2P-5k|2mo1-UwpDuFd=$ayo!s#4Lx{e?|){|YtCzpX3}r2eASpF^U8 zzBHyvz2UrAw{}L>hsZFwkNO^$;-dS=vrH+`C$-SIEYUXNU1v+*d5#dpibc~3q6>E) zfaHc$`s|b;)^rdUcOE%U^goA3>!A37DFu~`LDz9vFVOl2P^BQN2db9XgXJ^+GpoQ%y$Bx`t7Q62ngb@?v=yuCPw7FJqDm4?mpBkVy_KWs;5zUW!2Jkp+ijCf? zOD|n+o@Sb>gIk}&QiLpzkbj=9(E2s*^9d7I>{Z>Cf{K*NqOe{A4Yli&a{IV!wz~HQ z*N*jl38Up*!!w%8r9&_~f%D!ZpZixRU`Pmc7WqW0`4o8$AT6p1GM*@V93X(dSf~3WL^v0YVbJ?TYfWM=GbD9sz{UJtHYsBvbrFk4_5u-!rX> zSbx}<;#b^+Xr=FGIP&{5Qda3){ggS`^^u`11NyO(LW_TbQNo2|^`YJgoR?ecdo5&Z z=83C`$p3nz72?IZsP^28lLU4D7dZ*tmr&+7yAn$V=QZ-4mmGIT#+ zj4*n@wE#>7RE5#{8_kz|iYtwBZM3DPXkp)jlIy`&O^dh#N+LR-wkBVC7DW9HeOf&I zzdR(=R<#}yQs`VHPlP6Q8mMpxIgX${X}MX1#1js#=>eCNophQa-JeSmr{Tjk*B~on zG@-Wr6p&YFo;_?8Js?8Xub}w~i``3Jt@;?0Xerv<@rvt5n1uzz9`;93d2f|z)UrSimzjO+KLVyiga|I{Z!JPE zt=&{xjmo26S9d1xd2~sON55#2E6!xwA6nJ~b*zpOOs*Dlt>C=-8h~z(shmR|ZT3#9 zyGO`NY`s(5@B)WT-q29ElN{yqyiB7yHP~u(-UybPdNo>;(f6FWP|hNW!RMn5O6-;d55MC^G=cbOjRS7ieT zwnr9MW)L(w=$3-%Fb9_tOZ!RuigGl4gDp^;q0k8veI8}Y5gQ2OH7|x|i8|^)%=(O# zmXD})I{>1VUnPo)+9;0P7Ikibx7eC~B;&U6v33A~h*EoplyD5G`Ix6CvX%ltjk z(qz-0+rrK|@Ps@AI1D+6dfz8}+5cimRBZ`@oI%+b(GAU@jT{q9F_JliN^Y1k+i>7b%1pqL9XJ4pe@E2N7wt4johc@P#5k{Ww5WlU;p72Fx z33zFpZK%;S{dq?6->OjAW=^W{&|e98YbWBbXQS7Nm@PPPW)lqUVYTlBwisDi8=^5p zbh<#@bBto}Y0GEJ-Xe#7wS@N|uZGHUs`6SzXe-B`R}fcp+pf73J7ToL=#Q}kx`{n| z6~U|+L+P#vjX&AvIepvHQ4yZQbLP|mBvF8@Zt4Ty&jL)b>VwSAChsLKJacGGy zWnQ3zcw)jIK`VWw#r9n85cdsLq~xLkBnBL~T4aD$j|Nm|ytNZE-3qfE%nnxtVGXmO zu6#y}-uk;f46Y!F)^i6m2dYk(*ej9ciC!cXNm5G1s(@kYUyi+k%BCSV!8nOCAHE0= z^lis52sKL2R4Rwi0x)HTUD>O1JxikGTjdc|OT*{(PZbKUVmTFMq6pEBvdhc17EePT zPQM}q{V*;b))Fu-e-t-@McI4JE}$wjMG)7#;(Q+i7NGQj_zvglXJHm&m;@ntpLTa@ z%UUNGfAX|p)H?N5j;u}*V|!%LK$+pm)BOjJQWuomlQ&f=$4gB4u}P&nz+?q^Vy`^V z|6H(hqVRDA)I6hOYSfrbtRB2uaSZ~1G@4kZaAMB~>4S>zn)~N^=8&hO<(+RaW+NGc 
zrxC{#zhJixn6H2`<^f7ko5XiE&QVOpI84ncc?`qAL z2!X&@0LtwpKXZ3y*l;@|x3r3uM4d)VYalzoa@USArItXEzCRAIm8j@XwdCJW=9OVb zVJT~DIEHDBg^VGUm4)6{qeh7#x@Q^?01zS~+RHg;{T(5yjLdkli?Rr>E~s~KMn2RL zOxAEtR%;aOLpQ5Bp{8gaXm(f%q?{xxLG%OdV?#GSBRVG4>0cOf3NvaDN@gFdfYQqdm9ggAVQzItB{2W3nM$0|xYG8A`W;ogj z#Q1obqRaSUYFBhBv8kp{K=AlXEsU{j15um>BtQ4=oZ^lsQ)fXw$zDdNo6uzv=TgY z647?mXj#^UVwbUCzJt`ZJuR(@@wMIiMo*VXxlEAs5pc{p1p)!or7-5t6ao6_muX*) z3$VGt$~kXkyfGs6f7zlI`b1P~zYIVdkOV+vjpuY^qv*A3dWtFXFr}Qfqe1!K3TQ7G zDGLBB3xJ#iQ{K*Y1vQ{8PYjK;gxV*a^I46vEdT%jB0xal1+snD6Wt#k zBGGM%E_J84)3_h*d&3WPfs-?YF=k*LOSM`hmz5?fG+Ble1-lA zyS?&*J`Gg7illc1qpTcaE4pb2aM!^lbclt!Poi;RbqCUmCu_Lq21eUB9Y=TyEOc1@XVvnQA7-a z)Cvo@cIlqSQTef`=jPXjFpTE9TD#Xw>nn48K9+otF+q+V+MkrxQ_-9G5r0b`xz`7| zIfBf|NQelC)?9EaSQvpfphly@`yy7GUylE^l*NYTpB{Vd41|2Mum48rJad>!am`VR z=H4l-SHwWpH_1_`-GviD8Px7G)QUyf0jgX#Z*mQ0xNBhzUY%9bN@#_0u!6n{D@Mlg zAXA{Pht&$>{23j`DYS{}t(MI|Q1W*5ASEF56L~ulT_FHiNud#=sNLsiKbAEiOY9Wo zgnk-RPmFkJtSaQz4b)q*lAgLR{6A9b5yr3SEQI_MRBhT7j$2`GfO+i~J6O7*48pgBb_4B8e` zcvLGW>|67q(@#-tMyGNox5_+TUBm)IM==Da7_wS`a+%Gp4O*k*42^aUd-F@oN<=Ja zP3S&~>Os2B(I$r-kxxBd-;ya<%K-Htm2D&YH;*ehb+T4Ya@>!mgM6wbQ#PUGyKyIG z6hTVExO!z0t&cqbq%(1|LWk#}PEDR%^eW~tHutlgqsRSo$=!OYs3cS~bL$h&Fcdp^ ztjOY4Rg3Th9_jk4K603Tv1XyfhAoT8?HQtgdVAE+5O>)BApwUqB0qu4CZi4NMXtq_ z_gfbbmCA%-6kV#mT(xj>;Pp1vpaEPgC%#`B6xb;sxuR zRnSO@v9D(6)}7k-EsH+=2kC8V|m=aOq0vfsCpRJg-bY0N|&Shzc4*jp_&Ml z>4+Lswv>7-)w}9@l`-i2Xf?t|1*PO%colgq92X-aNU1eeB`ZV|83i(;`Sme_2`$?z zR$9mp-HHG(P)d}WjV(8n&fcSqXr(l?MXI?LEs9v99;8PmpD1SxA4)X{ zTdM!sm2~G)?r3RhSC_2C3(jdv^huudi9e4FCO8ggcfrR?>9)8B5A8t^%ID)($!b;D z|FK5tc|)K)mc@F8F30ErE*<(gyWitat!#62w8&5s|uXd$R0ROGS*r z|HUFgo2SXoF-5!ps?O#OM*_Raax-E21?xSJw<3TY{$;hoDaP>Pe4B{ezj<>>rm;%Q z(*kTd_T+?li^v;(kKKgrr69Lx{*jtnVRER%oK7F9gxAkOyS!3#CVq#I!)J3OlXUm{ zloz(5Tpe=Y|CpoAk^LmECRTas>be+D&7+X$csb9NNQlt9PNEWsS&C|r#2yHH>UncJ zr%`?|rTS}DL+z=Q*E$U9Z1Y)%ky))kEtUh_uqT32{PBo#{DPSkr7LO(?$0n!uo6J{H(1guhS%>HDN3abA~}r#&D`D( zr#dLQkG2G?*6Q@=<>_{Vm8T`ZJp>QQ`l%S6F!!?+Qqt`1a;TqzzwtU)W%<=n{3$|| 
zaZgCR+`KyI^r3JnuBTT~Z;bLvff#LIUiZphosx7W-iK&y5sX0{Fjm_F#p3o-P-6HQ zz-EGqE<8$ecSVAnIkG#^O6VcCZ*$rZO^*OxY)`aBcE+LfidK)viSO${>K5%fGSBk- zL}zQB^aZQE#dI!<37-Z1uL&|Hm?M0^YJjPslJWk$4yiZnWC+rDt+V++G(ClsALEUZ zDlJ9C&Mk`2No6u-inCMCOMx*IBTfT!VO~~9zTzpWgSG&7YZvKdw4PhE=seMRqFm$w z^<4aepqqS-5I|Vu6pTYYJM-OEx(Mu5)*v9!^SQs8bvfN*d5NR>=w5|homMdUW&Sx+ z#ij>urbR`gH`_H0#Kdy0-eB5gkFjnk$>)gnq%m?S{O5IoO+SX(Rjv?FSG1*eUq$7F zSAmeoqwNl}HGk3W8N;`nvPk_ad7n7;ZcFSX|2w*k1@|+!5937$8l(753M}kQJAZKJ zMfj^%XVCo?%}sGavY!Nco64bRr9F>|?6Q={U~#dV0jcu!d=oPQo$qc%tXv7~Xc?E; z8?v!}2lt`VNd1LORuuwVtn`|r*oqMvfdl}K-^8hdRo5Vt#VO7Rbg~T#Q+sRv=o*sS zPSbKbmDT_Di1_2;)z<%PeULQ_6N_KmpNPmT>v{T6(0KCN7tn()1?)Pb)&Fi0_N%&6 zqn<_cu2><^v=Y**_0Z1NfioJBb{cAoxzZvra3SgX6t5nWJ7}i8J_YHz_!?p`E$zBF zskD4^o`h+gw_+o2Di1-QXeU5 literal 0 HcmV?d00001 diff --git a/mediaflow_proxy/static/speedtest.html b/mediaflow_proxy/static/speedtest.html new file mode 100644 index 0000000..386fa8e --- /dev/null +++ b/mediaflow_proxy/static/speedtest.html @@ -0,0 +1,697 @@ + + + + + + Debrid Speed Test + + + + + + +
+ +
+ +
+ +
+ +
+

+ Enter API Password +

+ +
+
+
+ + +
+
+ + +
+ +
+
+
+ + + + + + + + + + + + + + + + +
+
@dataclass
class CacheEntry:
    """Represents a cache entry with metadata."""

    data: bytes  # cached payload
    expires_at: float  # absolute epoch time after which the entry is stale
    access_count: int = 0  # number of successful get() hits
    last_access: float = 0.0  # epoch time of the most recent hit
    size: int = 0  # payload size in bytes, used for capacity accounting


class LRUMemoryCache:
    """Thread-safe LRU memory cache with byte-size capacity accounting.

    Capacity is expressed in bytes (sum of ``CacheEntry.size``), not entry
    count. All operations take a single lock, so the cache is safe to share
    across threads.
    """

    def __init__(self, maxsize: int):
        """
        Args:
            maxsize: Maximum total payload size (bytes) held in memory.
        """
        self.maxsize = maxsize
        self._cache: OrderedDict[str, CacheEntry] = OrderedDict()
        self._lock = threading.Lock()
        self._current_size = 0

    def get(self, key: str) -> Optional[CacheEntry]:
        """Return the entry for *key* (refreshing LRU order), or None if absent or expired."""
        with self._lock:
            if key not in self._cache:
                return None
            entry = self._cache.pop(key)  # removed; re-inserted below if still valid (LRU refresh)
            if time.time() >= entry.expires_at:
                # Expired: it was already popped above, just fix the accounting.
                self._current_size -= entry.size
                return None
            entry.access_count += 1
            entry.last_access = time.time()
            self._cache[key] = entry
            return entry

    def set(self, key: str, entry: CacheEntry) -> None:
        """Insert or replace *entry*, evicting least-recently-used entries to stay under maxsize.

        Popping an existing entry up front (rather than only subtracting its
        size) prevents the eviction loop from removing the same key again and
        double-subtracting its size, and makes an update count as a "use".
        """
        with self._lock:
            if key in self._cache:
                old_entry = self._cache.pop(key)
                self._current_size -= old_entry.size

            # Evict LRU entries until the new entry fits (or the cache is empty).
            while self._current_size + entry.size > self.maxsize and self._cache:
                _, removed_entry = self._cache.popitem(last=False)
                self._current_size -= removed_entry.size

            self._cache[key] = entry
            self._current_size += entry.size

    def remove(self, key: str) -> None:
        """Remove *key* if present; no-op otherwise."""
        with self._lock:
            if key in self._cache:
                entry = self._cache.pop(key)
                self._current_size -= entry.size
class HybridCache:
    """High-performance hybrid cache combining memory and file storage.

    Keys are MD5-hashed before use, so both the in-memory keys and on-disk
    file names are safe fixed-length hex strings. On-disk entries are laid
    out as: 8-byte big-endian metadata length | JSON metadata | raw payload.
    """

    def __init__(
        self,
        cache_dir_name: str,
        ttl: int,
        max_memory_size: int = 100 * 1024 * 1024,  # 100MB default
        executor_workers: int = 4,
    ):
        self.cache_dir = Path(tempfile.gettempdir()) / cache_dir_name
        self.ttl = ttl
        self.memory_cache = LRUMemoryCache(maxsize=max_memory_size)
        self._executor = ThreadPoolExecutor(max_workers=executor_workers)
        self._lock = asyncio.Lock()

        # Initialize cache directories
        self._init_cache_dirs()

    def _init_cache_dirs(self):
        """Create the on-disk cache directory if it does not exist."""
        os.makedirs(self.cache_dir, exist_ok=True)

    def _get_md5_hash(self, key: str) -> str:
        """Return the MD5 hex digest used as the canonical cache key."""
        return hashlib.md5(key.encode()).hexdigest()

    def _get_file_path(self, hashed_key: str) -> Path:
        """Return the on-disk path for an already-hashed cache key."""
        return self.cache_dir / hashed_key

    async def get(self, key: str, default: Any = None) -> Optional[bytes]:
        """
        Get value from cache, trying memory first then file.

        Args:
            key: Cache key (raw, unhashed)
            default: Default value if key not found

        Returns:
            Cached value or default if not found
        """
        hashed = self._get_md5_hash(key)
        # Try memory cache first
        entry = self.memory_cache.get(hashed)
        if entry is not None:
            return entry.data

        # Try file cache
        try:
            file_path = self._get_file_path(hashed)
            async with aiofiles.open(file_path, "rb") as f:
                metadata_size = await f.read(8)
                metadata_length = int.from_bytes(metadata_size, "big")
                metadata_bytes = await f.read(metadata_length)
                metadata = json.loads(metadata_bytes.decode())

                # Check expiration; delete() hashes the key itself, so pass the raw key.
                if metadata["expires_at"] < time.time():
                    await self.delete(key)
                    return default

                # Read data (everything after the metadata header)
                data = await f.read()

                # Promote to memory cache for subsequent hits
                entry = CacheEntry(
                    data=data,
                    expires_at=metadata["expires_at"],
                    access_count=metadata["access_count"] + 1,
                    last_access=time.time(),
                    size=len(data),
                )
                self.memory_cache.set(hashed, entry)

                return data

        except FileNotFoundError:
            return default
        except Exception as e:
            logger.error(f"Error reading from cache: {e}")
            return default

    async def set(self, key: str, data: Union[bytes, bytearray, memoryview], ttl: Optional[int] = None) -> bool:
        """
        Set value in both memory and file cache.

        Args:
            key: Cache key (raw, unhashed)
            data: Data to cache
            ttl: Optional TTL override

        Returns:
            bool: Success status
        """
        if not isinstance(data, (bytes, bytearray, memoryview)):
            raise ValueError("Data must be bytes, bytearray, or memoryview")

        expires_at = time.time() + (ttl or self.ttl)

        # Create cache entry
        entry = CacheEntry(data=data, expires_at=expires_at, access_count=0, last_access=time.time(), size=len(data))

        hashed = self._get_md5_hash(key)
        # Update memory cache
        self.memory_cache.set(hashed, entry)
        file_path = self._get_file_path(hashed)
        temp_path = file_path.with_suffix(".tmp")

        # Update file cache: write to a temp file then rename for atomicity.
        try:
            metadata = {"expires_at": expires_at, "access_count": 0, "last_access": time.time()}
            metadata_bytes = json.dumps(metadata).encode()
            metadata_size = len(metadata_bytes).to_bytes(8, "big")

            async with aiofiles.open(temp_path, "wb") as f:
                await f.write(metadata_size)
                await f.write(metadata_bytes)
                await f.write(data)

            await aiofiles.os.rename(temp_path, file_path)
            return True

        except Exception as e:
            logger.error(f"Error writing to cache: {e}")
            # Best-effort temp-file cleanup; the temp file may never have been created.
            try:
                await aiofiles.os.remove(temp_path)
            except Exception:
                pass
            return False

    async def delete(self, key: str) -> bool:
        """Delete item from both caches.

        Accepts the raw (unhashed) key. Hashing here fixes external callers
        that pass the original key: entries are stored under the MD5 hash, so
        deleting by raw key previously removed nothing.
        """
        hashed = self._get_md5_hash(key)
        self.memory_cache.remove(hashed)

        try:
            await aiofiles.os.remove(self._get_file_path(hashed))
            return True
        except FileNotFoundError:
            return True
        except Exception as e:
            logger.error(f"Error deleting from cache: {e}")
            return False
class AsyncMemoryCache:
    """Async facade over LRUMemoryCache (pure in-memory, no file layer)."""

    def __init__(self, max_memory_size: int):
        self.memory_cache = LRUMemoryCache(maxsize=max_memory_size)

    async def get(self, key: str, default: Any = None) -> Optional[bytes]:
        """Return the cached bytes for *key*, or *default* when absent/expired."""
        hit = self.memory_cache.get(key)
        if hit is None:
            return default
        return hit.data

    async def set(self, key: str, data: Union[bytes, bytearray, memoryview], ttl: Optional[int] = None) -> bool:
        """Store *data* under *key*; TTL defaults to one hour. Returns a success flag."""
        try:
            deadline = time.time() + (ttl or 3600)  # Default 1 hour TTL if not specified
            self.memory_cache.set(
                key,
                CacheEntry(data=data, expires_at=deadline, access_count=0, last_access=time.time(), size=len(data)),
            )
        except Exception as e:
            logger.error(f"Error setting cache value: {e}")
            return False
        return True

    async def delete(self, key: str) -> bool:
        """Remove *key* from the cache; returns False only on unexpected errors."""
        try:
            self.memory_cache.remove(key)
        except Exception as e:
            logger.error(f"Error deleting from cache: {e}")
            return False
        return True
async def get_cached_speedtest(task_id: str) -> Optional[SpeedTestTask]:
    """Get speed test results from cache."""
    raw = await SPEEDTEST_CACHE.get(task_id)
    if raw is None:
        return None
    try:
        return SpeedTestTask.model_validate_json(raw.decode())
    except ValidationError as e:
        # Corrupt/stale payload: log, drop it, and report a miss.
        logger.error(f"Error parsing cached speed test data: {e}")
        await SPEEDTEST_CACHE.delete(task_id)
    return None


async def set_cache_speedtest(task_id: str, task: SpeedTestTask) -> bool:
    """Cache speed test results."""
    try:
        payload = task.model_dump_json().encode()
        return await SPEEDTEST_CACHE.set(task_id, payload)
    except Exception as e:
        logger.error(f"Error caching speed test data: {e}")
        return False


async def get_cached_extractor_result(key: str) -> Optional[dict]:
    """Get extractor result from cache."""
    raw = await EXTRACTOR_CACHE.get(key)
    if raw is None:
        return None
    try:
        return json.loads(raw)
    except json.JSONDecodeError:
        # Undecodable payload: purge and report a miss.
        await EXTRACTOR_CACHE.delete(key)
    return None


async def set_cache_extractor_result(key: str, result: dict) -> bool:
    """Cache extractor result."""
    try:
        payload = json.dumps(result).encode()
        return await EXTRACTOR_CACHE.set(key, payload)
    except Exception as e:
        logger.error(f"Error caching extractor result: {e}")
        return False
class EncryptionHandler:
    """AES-256-CBC token encryption for proxied query parameters.

    A token is the URL-safe base64 of: 16-byte random IV || AES-CBC
    ciphertext of the PKCS#7-padded JSON payload. Optional "exp"
    (absolute epoch seconds) and "ip" claims are embedded at encryption
    time and enforced at decryption time.
    """

    def __init__(self, secret_key: str):
        # Derive a fixed 32-byte AES-256 key by padding/truncating the configured secret.
        self.secret_key = secret_key.encode("utf-8").ljust(32)[:32]

    def encrypt_data(self, data: dict, expiration: int = None, ip: str = None) -> str:
        """Serialize *data* (plus optional expiry/IP claims) into an encrypted token.

        Args:
            data: Payload to protect. NOTE: mutated in place with "exp"/"ip" claims.
            expiration: Lifetime in seconds from now, enforced at decryption.
            ip: Client IP the token is bound to, enforced at decryption.

        Returns:
            URL-safe base64 token string.
        """
        if expiration:
            data["exp"] = int(time.time()) + expiration
        if ip:
            data["ip"] = ip
        json_data = json.dumps(data).encode("utf-8")
        iv = get_random_bytes(16)
        cipher = AES.new(self.secret_key, AES.MODE_CBC, iv)
        encrypted_data = cipher.encrypt(pad(json_data, AES.block_size))
        return base64.urlsafe_b64encode(iv + encrypted_data).decode("utf-8")

    def decrypt_data(self, token: str, client_ip: str) -> dict:
        """Decrypt *token* and validate its embedded claims.

        Args:
            token: URL-safe base64 token produced by encrypt_data.
            client_ip: Caller's IP, compared against an embedded "ip" claim.

        Raises:
            HTTPException: 401 for malformed/expired tokens, 403 on IP mismatch.
        """
        try:
            encrypted_data = base64.urlsafe_b64decode(token.encode("utf-8"))
            iv = encrypted_data[:16]
            cipher = AES.new(self.secret_key, AES.MODE_CBC, iv)
            decrypted_data = unpad(cipher.decrypt(encrypted_data[16:]), AES.block_size)
            data = json.loads(decrypted_data)

            if "exp" in data:
                if data["exp"] < time.time():
                    raise HTTPException(status_code=401, detail="Token has expired")
                del data["exp"]  # Remove expiration from the data

            if "ip" in data:
                if data["ip"] != client_ip:
                    raise HTTPException(status_code=403, detail="IP address mismatch")
                del data["ip"]  # Remove IP from the data

            return data
        except HTTPException:
            # Bug fix: re-raise our own validation errors. The blanket handler
            # below used to swallow them and rewrite the 403 mismatch (and the
            # specific expiry message) into a generic 401.
            raise
        except Exception:
            raise HTTPException(status_code=401, detail="Invalid or expired token")
class DownloadError(Exception):
    """Raised when an upstream fetch fails; carries an HTTP-style status code."""

    def __init__(self, status_code, message):
        # Initialize Exception with the message so str(err) renders it.
        super().__init__(message)
        self.status_code = status_code
        self.message = message
@retry(
    stop=stop_after_attempt(3),
    wait=wait_exponential(multiplier=1, min=4, max=10),
    retry=retry_if_exception_type(DownloadError),
)
async def fetch_with_retry(client, method, url, headers, follow_redirects=True, **kwargs):
    """
    Fetches a URL with retry logic (3 attempts, exponential backoff, retried
    only on DownloadError).

    Args:
        client (httpx.AsyncClient): The HTTP client to use for the request.
        method (str): The HTTP method to use (e.g., GET, POST).
        url (str): The URL to fetch.
        headers (dict): The headers to include in the request.
        follow_redirects (bool, optional): Whether to follow redirects. Defaults to True.
        **kwargs: Additional arguments to pass to the request.

    Returns:
        httpx.Response: The HTTP response.

    Raises:
        DownloadError: On timeout or non-404 HTTP errors (these are retried).
        httpx.HTTPStatusError: On 404, re-raised as-is (not retried).
    """
    try:
        response = await client.request(method, url, headers=headers, follow_redirects=follow_redirects, **kwargs)
        response.raise_for_status()
    except httpx.TimeoutException:
        logger.warning(f"Timeout while downloading {url}")
        raise DownloadError(409, f"Timeout while downloading {url}")
    except httpx.HTTPStatusError as e:
        status = e.response.status_code
        logger.error(f"HTTP error {status} while downloading {url}")
        if status == 404:
            # 404 is final: surface the original httpx error so it is not retried.
            logger.error(f"Segment Resource not found: {url}")
            raise e
        raise DownloadError(status, f"HTTP error {status} while downloading {url}")
    except Exception as e:
        logger.error(f"Error downloading {url}: {e}")
        raise
    return response
+ + """ + request = self.client.build_request("GET", url, headers=headers) + self.response = await self.client.send(request, stream=True, follow_redirects=True) + self.response.raise_for_status() + + async def stream_content(self) -> typing.AsyncGenerator[bytes, None]: + """ + Streams the content from the response. + """ + if not self.response: + raise RuntimeError("No response available for streaming") + + try: + self.parse_content_range() + + if settings.enable_streaming_progress: + with tqdm_asyncio( + total=self.total_size, + initial=self.start_byte, + unit="B", + unit_scale=True, + unit_divisor=1024, + desc="Streaming", + ncols=100, + mininterval=1, + ) as self.progress_bar: + async for chunk in self.response.aiter_bytes(): + yield chunk + chunk_size = len(chunk) + self.bytes_transferred += chunk_size + self.progress_bar.set_postfix_str( + f"📥 : {self.format_bytes(self.bytes_transferred)}", refresh=False + ) + self.progress_bar.update(chunk_size) + else: + async for chunk in self.response.aiter_bytes(): + yield chunk + self.bytes_transferred += len(chunk) + + except httpx.TimeoutException: + logger.warning("Timeout while streaming") + raise DownloadError(409, "Timeout while streaming") + except GeneratorExit: + logger.info("Streaming session stopped by the user") + except Exception as e: + logger.error(f"Error streaming content: {e}") + raise + + @staticmethod + def format_bytes(size) -> str: + power = 2**10 + n = 0 + units = {0: "B", 1: "KB", 2: "MB", 3: "GB", 4: "TB"} + while size > power: + size /= power + n += 1 + return f"{size:.2f} {units[n]}" + + def parse_content_range(self): + content_range = self.response.headers.get("Content-Range", "") + if content_range: + range_info = content_range.split()[-1] + self.start_byte, self.end_byte, self.total_size = map(int, range_info.replace("/", "-").split("-")) + else: + self.start_byte = 0 + self.total_size = int(self.response.headers.get("Content-Length", 0)) + self.end_byte = self.total_size - 1 if 
self.total_size > 0 else 0 + + async def get_text(self, url: str, headers: dict): + """ + Sends a GET request to a URL and returns the response text. + + Args: + url (str): The URL to send the GET request to. + headers (dict): The headers to include in the request. + + Returns: + str: The response text. + """ + try: + self.response = await fetch_with_retry(self.client, "GET", url, headers) + except tenacity.RetryError as e: + raise e.last_attempt.result() + return self.response.text + + async def close(self): + """ + Closes the HTTP client and response. + """ + if self.response: + await self.response.aclose() + if self.progress_bar: + self.progress_bar.close() + await self.client.aclose() + + +async def download_file_with_retry(url: str, headers: dict): + """ + Downloads a file with retry logic. + + Args: + url (str): The URL of the file to download. + headers (dict): The headers to include in the request. + + Returns: + bytes: The downloaded file content. + + Raises: + DownloadError: If the download fails after retries. + """ + async with create_httpx_client() as client: + try: + response = await fetch_with_retry(client, "GET", url, headers) + return response.content + except DownloadError as e: + logger.error(f"Failed to download file: {e}") + raise e + except tenacity.RetryError as e: + raise DownloadError(502, f"Failed to download file: {e.last_attempt.result()}") + + +async def request_with_retry(method: str, url: str, headers: dict, **kwargs) -> httpx.Response: + """ + Sends an HTTP request with retry logic. + + Args: + method (str): The HTTP method to use (e.g., GET, POST). + url (str): The URL to send the request to. + headers (dict): The headers to include in the request. + **kwargs: Additional arguments to pass to the request. + + Returns: + httpx.Response: The HTTP response. + + Raises: + DownloadError: If the request fails after retries. 
def encode_mediaflow_proxy_url(
    mediaflow_proxy_url: str,
    endpoint: typing.Optional[str] = None,
    destination_url: typing.Optional[str] = None,
    query_params: typing.Optional[dict] = None,
    request_headers: typing.Optional[dict] = None,
    response_headers: typing.Optional[dict] = None,
    encryption_handler: typing.Optional["EncryptionHandler"] = None,
    expiration: typing.Optional[int] = None,
    ip: typing.Optional[str] = None,
) -> str:
    """Encode (and optionally encrypt) a MediaFlow proxy URL.

    Args:
        mediaflow_proxy_url (str): The base MediaFlow proxy URL.
        endpoint (str, optional): Endpoint appended to the base URL. Defaults to None.
        destination_url (str, optional): Destination URL placed in the ``d`` query
            parameter. Defaults to None.
        query_params (dict, optional): Additional query parameters. The dict is
            copied, never mutated. Defaults to None.
        request_headers (dict, optional): Headers encoded as ``h_``-prefixed
            query parameters. Defaults to None.
        response_headers (dict, optional): Headers encoded as ``r_``-prefixed
            query parameters. Defaults to None.
        encryption_handler (EncryptionHandler, optional): When given, all params
            are folded into a single encrypted ``token`` parameter. Defaults to None.
        expiration (int, optional): Expiration for the encrypted token. Defaults to None.
        ip (str, optional): Public IP to bind the encrypted token to. Defaults to None.

    Returns:
        str: The encoded MediaFlow proxy URL.
    """
    # Fix: copy instead of mutating the caller's dict (the original added
    # "d"/header keys into the dict the caller passed in).
    params = dict(query_params) if query_params else {}
    if destination_url is not None:
        params["d"] = destination_url

    # Headers travel as query parameters; keep an existing prefix as-is.
    if request_headers:
        params.update({key if key.startswith("h_") else f"h_{key}": value for key, value in request_headers.items()})
    if response_headers:
        params.update({key if key.startswith("r_") else f"r_{key}": value for key, value in response_headers.items()})

    if encryption_handler:
        encrypted_token = encryption_handler.encrypt_data(params, expiration, ip)
        encoded_params = urlencode({"token": encrypted_token})
    else:
        encoded_params = urlencode(params)

    if endpoint is None:
        return f"{mediaflow_proxy_url}?{encoded_params}"

    base_url = parse.urljoin(mediaflow_proxy_url, endpoint)
    return f"{base_url}?{encoded_params}"
class EnhancedStreamingResponse(Response):
    """Streaming response that races body delivery against client disconnect.

    Behaves like Starlette's ``StreamingResponse`` but swallows mid-stream
    client disconnects instead of surfacing broken-pipe errors.
    """

    body_iterator: typing.AsyncIterable[typing.Any]

    def __init__(
        self,
        content: typing.Union[typing.AsyncIterable[typing.Any], typing.Iterable[typing.Any]],
        status_code: int = 200,
        headers: typing.Optional[typing.Mapping[str, str]] = None,
        media_type: typing.Optional[str] = None,
        background: typing.Optional[BackgroundTask] = None,
    ) -> None:
        # Sync iterables are driven from a threadpool so they don't block the loop.
        if isinstance(content, typing.AsyncIterable):
            self.body_iterator = content
        else:
            self.body_iterator = iterate_in_threadpool(content)
        self.status_code = status_code
        self.media_type = media_type if media_type is not None else self.media_type
        self.background = background
        self.init_headers(headers)

    @staticmethod
    async def listen_for_disconnect(receive: Receive) -> None:
        """Consume ASGI events until the client disconnects."""
        try:
            while True:
                event = await receive()
                if event["type"] == "http.disconnect":
                    logger.debug("Client disconnected")
                    break
        except Exception as e:
            logger.error(f"Error in listen_for_disconnect: {str(e)}")

    async def stream_response(self, send: Send) -> None:
        """Send the start frame, each body chunk, then the terminating frame."""
        try:
            await send(
                {
                    "type": "http.response.start",
                    "status": self.status_code,
                    "headers": self.raw_headers,
                }
            )
            async for part in self.body_iterator:
                payload = part if isinstance(part, (bytes, memoryview)) else part.encode(self.charset)
                try:
                    await send({"type": "http.response.body", "body": payload, "more_body": True})
                except (ConnectionResetError, anyio.BrokenResourceError):
                    # A vanished client is expected during long streams; stop quietly.
                    logger.info("Client disconnected during streaming")
                    return

            await send({"type": "http.response.body", "body": b"", "more_body": False})
        except Exception as e:
            logger.exception(f"Error in stream_response: {str(e)}")

    async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:
        async with anyio.create_task_group() as task_group:

            async def supervised(func: typing.Callable[[], typing.Awaitable[None]]) -> None:
                # Whichever task finishes first cancels its sibling via the scope.
                try:
                    await func()
                except Exception as e:
                    if not isinstance(e, anyio.get_cancelled_exc_class()):
                        logger.exception("Error in streaming task")
                        raise
                finally:
                    task_group.cancel_scope.cancel()

            task_group.start_soon(supervised, partial(self.stream_response, send))
            await supervised(partial(self.listen_for_disconnect, receive))

        if self.background is not None:
            await self.background()
async def process_key_line(self, line: str, base_url: str) -> str:
    """Rewrite the URI attribute of an ``#EXT-X-KEY`` line to go through the proxy.

    Args:
        line (str): The key line to process.
        base_url (str): Base URL used to resolve relative URIs.

    Returns:
        str: The line with its URI replaced by the proxied URL (unchanged when
        no URI attribute is present).
    """
    match = re.search(r'URI="([^"]+)"', line)
    if not match:
        return line

    original_uri = match.group(1)
    parsed = parse.urlparse(original_uri)
    # Optionally redirect the key request to a dedicated key server.
    if self.key_url:
        parsed = parsed._replace(scheme=self.key_url.scheme, netloc=self.key_url.netloc)
    proxied = await self.proxy_url(parsed.geturl(), base_url)
    return line.replace(f'URI="{original_uri}"', f'URI="{proxied}"')
def parse_mpd(mpd_content: Union[str, bytes]) -> dict:
    """Parse raw MPD XML into a nested dictionary.

    Args:
        mpd_content (Union[str, bytes]): The MPD content to parse.

    Returns:
        dict: The parsed MPD content as a dictionary (xmltodict layout).
    """
    return xmltodict.parse(mpd_content)


def parse_mpd_dict(
    mpd_dict: dict, mpd_url: str, parse_drm: bool = True, parse_segment_profile_id: Optional[str] = None
) -> dict:
    """Parse the MPD dictionary and extract profiles, DRM info and timing data.

    Args:
        mpd_dict (dict): The MPD content as a dictionary (from ``parse_mpd``).
        mpd_url (str): The URL of the MPD manifest (used to resolve relative URLs).
        parse_drm (bool, optional): Whether to parse DRM information. Defaults to True.
        parse_segment_profile_id (str, optional): Profile ID to parse segments for.
            Defaults to None.

    Returns:
        dict: The parsed MPD information including profiles and DRM info.

    Handles both live (dynamic) and static manifests.
    """
    profiles = []
    parsed_dict = {}
    # Base directory of the manifest, used to resolve relative media URLs.
    source = "/".join(mpd_url.split("/")[:-1])

    is_live = mpd_dict["MPD"].get("@type", "static").lower() == "dynamic"
    parsed_dict["isLive"] = is_live

    media_presentation_duration = mpd_dict["MPD"].get("@mediaPresentationDuration")

    # Live streams carry extra timing attributes needed to compute the window.
    if is_live:
        parsed_dict["minimumUpdatePeriod"] = parse_duration(mpd_dict["MPD"].get("@minimumUpdatePeriod", "PT0S"))
        parsed_dict["timeShiftBufferDepth"] = parse_duration(mpd_dict["MPD"].get("@timeShiftBufferDepth", "PT2M"))
        parsed_dict["availabilityStartTime"] = datetime.fromisoformat(
            mpd_dict["MPD"]["@availabilityStartTime"].replace("Z", "+00:00")
        )
        # NOTE(review): assumes @publishTime is present on dynamic manifests; a
        # missing attribute makes fromisoformat("") raise ValueError — confirm upstream.
        parsed_dict["publishTime"] = datetime.fromisoformat(
            mpd_dict["MPD"].get("@publishTime", "").replace("Z", "+00:00")
        )

    periods = mpd_dict["MPD"]["Period"]
    periods = periods if isinstance(periods, list) else [periods]

    for period in periods:
        parsed_dict["PeriodStart"] = parse_duration(period.get("@start", "PT0S"))
        # Fix: xmltodict yields a plain dict (not a list) when a Period has a
        # single AdaptationSet; normalize so we don't iterate over its keys.
        adaptation_sets = period["AdaptationSet"]
        if not isinstance(adaptation_sets, list):
            adaptation_sets = [adaptation_sets]
        for adaptation in adaptation_sets:
            representations = adaptation["Representation"]
            representations = representations if isinstance(representations, list) else [representations]

            for representation in representations:
                profile = parse_representation(
                    parsed_dict,
                    representation,
                    adaptation,
                    source,
                    media_presentation_duration,
                    parse_segment_profile_id,
                )
                if profile:
                    profiles.append(profile)
    parsed_dict["profiles"] = profiles

    parsed_dict["drmInfo"] = extract_drm_info(periods, mpd_url) if parse_drm else {}

    return parsed_dict


def pad_base64(encoded_key_id):
    """Pad a base64 string so its length is a multiple of 4.

    Args:
        encoded_key_id (str): The base64 encoded key ID.

    Returns:
        str: The padded base64 string (unchanged when already aligned).
    """
    # Fix: the original appended four "=" when the length was already a
    # multiple of 4, producing invalid base64; -len % 4 adds 0..3 as needed.
    return encoded_key_id + "=" * (-len(encoded_key_id) % 4)


def extract_drm_info(periods: List[Dict], mpd_url: str) -> Dict:
    """Collect DRM information from every ContentProtection element.

    Args:
        periods (List[Dict]): The list of periods in the MPD.
        mpd_url (str): The URL of the MPD manifest.

    Returns:
        Dict: The extracted DRM information (ClearKey / Widevine / PlayReady).

    Inspects ContentProtection at both AdaptationSet and Representation level
    and resolves a relative license URL against the manifest URL.
    """
    drm_info = {"isDrmProtected": False}

    for period in periods:
        adaptation_sets: Union[list[dict], dict] = period.get("AdaptationSet", [])
        if not isinstance(adaptation_sets, list):
            adaptation_sets = [adaptation_sets]

        for adaptation_set in adaptation_sets:
            # ContentProtection may appear directly on the AdaptationSet...
            process_content_protection(adaptation_set.get("ContentProtection", []), drm_info)

            # ...and/or inside each Representation.
            representations: Union[list[dict], dict] = adaptation_set.get("Representation", [])
            if not isinstance(representations, list):
                representations = [representations]
            for representation in representations:
                process_content_protection(representation.get("ContentProtection", []), drm_info)

    # License acquisition URLs may be relative to the manifest location.
    if "laUrl" in drm_info and not drm_info["laUrl"].startswith(("http://", "https://")):
        drm_info["laUrl"] = urljoin(mpd_url, drm_info["laUrl"])

    return drm_info


def process_content_protection(content_protection: Union[list[dict], dict], drm_info: dict):
    """Fold ContentProtection element(s) into *drm_info* in place.

    Args:
        content_protection (Union[list[dict], dict]): The ContentProtection element(s).
        drm_info (dict): Accumulator updated with DRM system details.

    Returns:
        dict: The same *drm_info* dict, for convenience.
    """
    if not isinstance(content_protection, list):
        content_protection = [content_protection]

    for protection in content_protection:
        # The mere presence of a ContentProtection element marks the stream protected.
        drm_info["isDrmProtected"] = True
        scheme_id_uri = protection.get("@schemeIdUri", "").lower()

        if "clearkey" in scheme_id_uri:
            drm_info["drmSystem"] = "clearkey"
            if "clearkey:Laurl" in protection:
                la_url = protection["clearkey:Laurl"].get("#text")
                if la_url and "laUrl" not in drm_info:
                    drm_info["laUrl"] = la_url

        elif "widevine" in scheme_id_uri or "edef8ba9-79d6-4ace-a3c8-27dcd51d21ed" in scheme_id_uri:
            drm_info["drmSystem"] = "widevine"
            pssh = protection.get("cenc:pssh", {}).get("#text")
            if pssh:
                drm_info["pssh"] = pssh

        elif "playready" in scheme_id_uri or "9a04f079-9840-4286-ab92-e65be0885f95" in scheme_id_uri:
            drm_info["drmSystem"] = "playready"

        # Key ID and Microsoft license URL can accompany any scheme; first wins.
        if "@cenc:default_KID" in protection:
            key_id = protection["@cenc:default_KID"].replace("-", "")
            if "keyId" not in drm_info:
                drm_info["keyId"] = key_id

        if "ms:laurl" in protection:
            la_url = protection["ms:laurl"].get("@licenseUrl")
            if la_url and "laUrl" not in drm_info:
                drm_info["laUrl"] = la_url

    return drm_info
+ """ + mime_type = _get_key(adaptation, representation, "@mimeType") or ( + "video/mp4" if "avc" in representation["@codecs"] else "audio/mp4" + ) + if "video" not in mime_type and "audio" not in mime_type: + return None + + profile = { + "id": representation.get("@id") or adaptation.get("@id"), + "mimeType": mime_type, + "lang": representation.get("@lang") or adaptation.get("@lang"), + "codecs": representation.get("@codecs") or adaptation.get("@codecs"), + "bandwidth": int(representation.get("@bandwidth") or adaptation.get("@bandwidth")), + "startWithSAP": (_get_key(adaptation, representation, "@startWithSAP") or "1") == "1", + "mediaPresentationDuration": media_presentation_duration, + } + + if "audio" in profile["mimeType"]: + profile["audioSamplingRate"] = representation.get("@audioSamplingRate") or adaptation.get("@audioSamplingRate") + profile["channels"] = representation.get("AudioChannelConfiguration", {}).get("@value", "2") + else: + profile["width"] = int(representation["@width"]) + profile["height"] = int(representation["@height"]) + frame_rate = representation.get("@frameRate") or adaptation.get("@maxFrameRate") or "30000/1001" + frame_rate = frame_rate if "/" in frame_rate else f"{frame_rate}/1" + profile["frameRate"] = round(int(frame_rate.split("/")[0]) / int(frame_rate.split("/")[1]), 3) + profile["sar"] = representation.get("@sar", "1:1") + + if parse_segment_profile_id is None or profile["id"] != parse_segment_profile_id: + return profile + + item = adaptation.get("SegmentTemplate") or representation.get("SegmentTemplate") + if item: + profile["segments"] = parse_segment_template(parsed_dict, item, profile, source) + else: + profile["segments"] = parse_segment_base(representation, source) + + return profile + + +def _get_key(adaptation: dict, representation: dict, key: str) -> Optional[str]: + """ + Retrieves a key from the representation or adaptation set. + + Args: + adaptation (dict): The adaptation set data. 
+ representation (dict): The representation data. + key (str): The key to retrieve. + + Returns: + Optional[str]: The value of the key or None if not found. + """ + return representation.get(key, adaptation.get(key, None)) + + +def parse_segment_template(parsed_dict: dict, item: dict, profile: dict, source: str) -> List[Dict]: + """ + Parses a segment template and extracts segment information. + + Args: + parsed_dict (dict): The parsed MPD data. + item (dict): The segment template data. + profile (dict): The profile information. + source (str): The source URL. + + Returns: + List[Dict]: The list of parsed segments. + """ + segments = [] + timescale = int(item.get("@timescale", 1)) + + # Initialization + if "@initialization" in item: + media = item["@initialization"] + media = media.replace("$RepresentationID$", profile["id"]) + media = media.replace("$Bandwidth$", str(profile["bandwidth"])) + if not media.startswith("http"): + media = f"{source}/{media}" + profile["initUrl"] = media + + # Segments + if "SegmentTimeline" in item: + segments.extend(parse_segment_timeline(parsed_dict, item, profile, source, timescale)) + elif "@duration" in item: + segments.extend(parse_segment_duration(parsed_dict, item, profile, source, timescale)) + + return segments + + +def parse_segment_timeline(parsed_dict: dict, item: dict, profile: dict, source: str, timescale: int) -> List[Dict]: + """ + Parses a segment timeline and extracts segment information. + + Args: + parsed_dict (dict): The parsed MPD data. + item (dict): The segment timeline data. + profile (dict): The profile information. + source (str): The source URL. + timescale (int): The timescale for the segments. + + Returns: + List[Dict]: The list of parsed segments. 
def preprocess_timeline(
    timelines: List[Dict], start_number: int, period_start: datetime, presentation_time_offset: int, timescale: int
) -> List[Dict]:
    """Expand ``<S>`` timeline entries (honoring ``@r`` repeats) into segment dicts.

    Args:
        timelines (List[Dict]): The ``<S>`` entries of a SegmentTimeline.
        start_number (int): Number assigned to the first emitted segment.
        period_start (datetime): Absolute start time of the period.
        presentation_time_offset (int): Offset, in timescale units.
        timescale (int): Ticks per second for @t/@d values.

    Returns:
        List[Dict]: One dict per segment with number, wall-clock start/end,
        raw duration and raw start time.
    """
    expanded = []
    cursor = 0
    number = start_number
    for entry in timelines:
        repeats = int(entry.get("@r", 0))
        duration = int(entry["@d"])
        # A missing @t means "continue where the previous entry stopped".
        tick = int(entry.get("@t", cursor))

        for _ in range(repeats + 1):
            begin = period_start + timedelta(seconds=(tick - presentation_time_offset) / timescale)
            expanded.append(
                {
                    "number": number,
                    "start_time": begin,
                    "end_time": begin + timedelta(seconds=duration / timescale),
                    "duration": duration,
                    "time": tick,
                }
            )
            tick += duration
            number += 1

        cursor = tick

    return expanded


def parse_segment_duration(parsed_dict: dict, item: dict, profile: dict, source: str, timescale: int) -> List[Dict]:
    """Parse a fixed-duration SegmentTemplate into concrete segments.

    Used for templates that declare ``@duration`` instead of a SegmentTimeline;
    live manifests get a sliding window, static ones the full list.

    Args:
        parsed_dict (dict): The parsed MPD data.
        item (dict): The SegmentTemplate element.
        profile (dict): The profile information.
        source (str): Base URL for relative media paths.
        timescale (int): Ticks per second.

    Returns:
        List[Dict]: The parsed segments.
    """
    duration = int(item["@duration"])
    start_number = int(item.get("@startNumber", 1))

    if parsed_dict["isLive"]:
        raw_segments = generate_live_segments(parsed_dict, duration / timescale, start_number)
    else:
        raw_segments = generate_vod_segments(profile, duration, timescale, start_number)

    return [create_segment_data(seg, item, profile, source, timescale) for seg in raw_segments]
def generate_vod_segments(profile: dict, duration: int, timescale: int, start_number: int) -> List[Dict]:
    """Generate the full, numbered segment list for a static (VOD) manifest.

    Args:
        profile (dict): The profile information (supplies the total duration).
        duration (int): Segment duration in timescale units.
        timescale (int): Ticks per second.
        start_number (int): Number of the first segment.

    Returns:
        List[Dict]: One dict per segment with its number and duration in seconds.
    """
    total = profile.get("mediaPresentationDuration") or 0
    if isinstance(total, str):
        total = parse_duration(total)
    count = math.ceil(total * timescale / duration)
    seconds_per_segment = duration / timescale

    return [{"number": start_number + i, "duration": seconds_per_segment} for i in range(count)]


def create_segment_data(segment: Dict, item: dict, profile: dict, source: str, timescale: Optional[int] = None) -> Dict:
    """Build a segment record: resolved media URL plus timing metadata.

    Args:
        segment (Dict): Raw segment info (number, optional start/end/duration/time).
        item (dict): The SegmentTemplate element (supplies the @media template).
        profile (dict): The profile information.
        source (str): Base URL for relative media paths.
        timescale (int, optional): Ticks per second. Defaults to None.

    Returns:
        Dict: The segment data with a fully-resolved ``media`` URL.
    """
    media = (
        item["@media"]
        .replace("$RepresentationID$", profile["id"])
        .replace("$Number%04d$", f"{segment['number']:04d}")
        .replace("$Number$", str(segment["number"]))
        .replace("$Bandwidth$", str(profile["bandwidth"]))
    )

    # NOTE(review): segment["time"] already appears to be in timescale units
    # (it comes from @t), so multiplying by timescale again for $Time$ looks
    # suspicious — verify against the DASH $Time$ template semantics.
    if "time" in segment and timescale is not None:
        media = media.replace("$Time$", str(int(segment["time"] * timescale)))

    if not media.startswith("http"):
        media = f"{source}/{media}"

    data = {"type": "segment", "media": media, "number": segment["number"]}

    has_start = "start_time" in segment
    if has_start and "end_time" in segment:
        begin, finish = segment["start_time"], segment["end_time"]
        data.update(
            {
                "start_time": begin,
                "end_time": finish,
                "extinf": (finish - begin).total_seconds(),
                "program_date_time": begin.isoformat() + "Z",
            }
        )
    elif has_start and "duration" in segment:
        begin = segment["start_time"]
        length = segment["duration"]
        data.update(
            {
                "start_time": begin,
                "end_time": begin + timedelta(seconds=length),
                "extinf": length,
                "program_date_time": begin.isoformat() + "Z",
            }
        )
    elif "duration" in segment:
        data["extinf"] = segment["duration"]

    return data
def parse_duration(duration_str: str) -> float:
    """Parse an ISO 8601 duration string (e.g. ``"PT1H30M5S"``) into seconds.

    Months are approximated as 30 days and years as 365 days. Weeks (``PnW``)
    are not supported.

    Args:
        duration_str (str): The duration string to parse.

    Returns:
        float: The parsed duration in seconds.

    Raises:
        ValueError: If the string is not a valid ISO 8601 duration.
    """
    # Fixes vs the original: fullmatch() rejects trailing garbage that match()
    # silently ignored (e.g. "P5X" parsed as 0.0), and the T designator is now
    # required before time components, per ISO 8601.
    pattern = re.compile(
        r"P(?:(\d+)Y)?(?:(\d+)M)?(?:(\d+)D)?(?:T(?:(\d+)H)?(?:(\d+)M)?(?:(\d+(?:\.\d+)?)S)?)?"
    )
    match = pattern.fullmatch(duration_str)
    if not match:
        raise ValueError(f"Invalid duration format: {duration_str}")

    years, months, days, hours, minutes, seconds = (float(g) if g else 0.0 for g in match.groups())
    return (
        years * 365 * 24 * 3600
        + months * 30 * 24 * 3600
        + days * 24 * 3600
        + hours * 3600
        + minutes * 60
        + seconds
    )