Merge pull request #135 from luigi311/dev

Dev
Luigi311 authored on 2024-01-06 04:45:19 -07:00 (committed by GitHub)
12 changed files with 637 additions and 267 deletions


@@ -1,5 +1,6 @@
 name: CI
 on:
+  workflow_dispatch:
   push:
     paths-ignore:
       - .gitignore
@@ -44,10 +45,16 @@ jobs:
           sudo chown -R $PUID:$PGID JellyPlex-Watched-CI
+          docker pull lscr.io/linuxserver/plex &
+          docker pull lscr.io/linuxserver/jellyfin &
+          wait
           docker-compose -f JellyPlex-Watched-CI/plex/docker-compose.yml up -d
           docker-compose -f JellyPlex-Watched-CI/jellyfin/docker-compose.yml up -d
           # Wait for containers to start
-          sleep 15
+          sleep 5
           docker-compose -f JellyPlex-Watched-CI/plex/docker-compose.yml logs
           docker-compose -f JellyPlex-Watched-CI/jellyfin/docker-compose.yml logs
@@ -59,6 +66,7 @@ jobs:
           python main.py
           cat mark.log
+          python test/validate_ci_marklog.py

   docker:
     runs-on: ubuntu-latest

.vscode/launch.json (vendored), 11 lines changed

@@ -11,6 +11,17 @@
             "program": "main.py",
             "console": "integratedTerminal",
             "justMyCode": true
+        },
+        {
+            "name": "Pytest",
+            "type": "python",
+            "request": "launch",
+            "module": "pytest",
+            "args": [
+                "-vv"
+            ],
+            "console": "integratedTerminal",
+            "justMyCode": true
         }
     ]
 }


@@ -33,7 +33,8 @@ ENV BLACKLIST_USERS ''
 ENV WHITELIST_USERS ''

-RUN addgroup --system jellyplex_user && \
+RUN apk add --no-cache tini && \
+    addgroup --system jellyplex_user && \
     adduser --system --no-create-home jellyplex_user --ingroup jellyplex_user && \
     mkdir -p /app && \
     chown -R jellyplex_user:jellyplex_user /app
@@ -48,4 +49,5 @@ COPY --chown=jellyplex_user:jellyplex_user . .
 USER jellyplex_user

+ENTRYPOINT ["/sbin/tini", "--"]
 CMD ["python", "-u", "main.py"]


@@ -33,7 +33,11 @@ ENV BLACKLIST_USERS ''
 ENV WHITELIST_USERS ''

-RUN addgroup --system jellyplex_user && \
+RUN apt-get update && \
+    apt-get install tini --yes --no-install-recommends && \
+    apt-get clean && \
+    rm -rf /var/lib/apt/lists/* && \
+    addgroup --system jellyplex_user && \
     adduser --system --no-create-home jellyplex_user --ingroup jellyplex_user && \
     mkdir -p /app && \
     chown -R jellyplex_user:jellyplex_user /app
@@ -48,4 +52,5 @@ COPY --chown=jellyplex_user:jellyplex_user . .
 USER jellyplex_user

+ENTRYPOINT ["/bin/tini", "--"]
 CMD ["python", "-u", "main.py"]


@@ -1,9 +1,9 @@
 import sys

 if __name__ == "__main__":
-    # Check python version 3.6 or higher
-    if not (3, 6) <= tuple(map(int, sys.version_info[:2])):
-        print("This script requires Python 3.6 or higher")
+    # Check python version 3.9 or higher
+    if not (3, 9) <= tuple(map(int, sys.version_info[:2])):
+        print("This script requires Python 3.9 or higher")
         sys.exit(1)

     from src.main import main
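Worth noting: the Plex refactor later in this diff builds GUID dictionaries with the dict union operator (|), which dict only supports from Python 3.9 (PEP 584), so the raised floor is not cosmetic. A small illustrative sketch (the sample values are made up):

import sys

# Same tuple comparison as main.py: (major, minor) against (3, 9).
if not (3, 9) <= tuple(map(int, sys.version_info[:2])):
    raise SystemExit("This script requires Python 3.9 or higher")

# PEP 584 dict union, the 3.9+ feature the new get_guids() relies on;
# the right-hand operand wins when keys collide.
metadata = {"title": "Example", "locations": ("Example.mkv",)}
provider_ids = {"imdb": "tt0000000"}
print(metadata | provider_ids)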


@@ -64,9 +64,16 @@ def str_to_bool(value: any) -> bool:

 # Search for nested element in list
 def contains_nested(element, lst):
+    if lst is None:
+        return None
+
     for i, item in enumerate(lst):
+        if item is None:
+            continue
         if element in item:
             return i
+        elif element == item:
+            return i
     return None

@@ -92,6 +99,13 @@ def future_thread_executor(args: list, threads: int = 32):
     workers = min(int(os.getenv("MAX_THREADS", 32)), os.cpu_count() * 2, threads)

+    # If only one worker, run in main thread to avoid overhead
+    if workers == 1:
+        results = []
+        for arg in args:
+            results.append(arg[0](*arg[1:]))
+        return results
+
     with ThreadPoolExecutor(max_workers=workers) as executor:
         for arg in args:
             # * arg unpacks the list into actual arguments
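Two behaviour changes here are easy to miss in the diff: contains_nested() now tolerates a None list and None entries, and it also accepts exact matches, not just substring containment. A self-contained sketch (the sample tuple is made up, shaped like the location tuples used elsewhere in this PR):

# Standalone sketch of the hardened contains_nested() shown above.
def contains_nested(element, lst):
    if lst is None:
        return None
    for i, item in enumerate(lst):
        if item is None:
            continue
        if element in item:
            return i
        elif element == item:
            return i
    return None


locations = ("Doctor Who (2005) {tvdb-78804}.mkv", None, "S01E01.mkv")
print(contains_nested("S01E01", locations))   # 2: substring match on a nested entry
print(contains_nested("missing", locations))  # None: None entries are skipped, not fatal
print(contains_nested("anything", None))      # None: a missing list short-circuits

Separately, the workers == 1 shortcut in future_thread_executor() means MAX_THREADS=1 (or threads=1) now runs the [callable, *args] entries as a plain sequential loop instead of spinning up a single-thread pool.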


@@ -158,7 +158,7 @@ def show_title_dict(user_list: dict):

         return show_output_dict
     except Exception:
-        logger("Generating show_output_dict failed, skipping", 1)
+        logger("Skipping show_output_dict", 1)
         return {}

@@ -168,12 +168,28 @@ def episode_title_dict(user_list: dict):
         episode_output_dict["completed"] = []
         episode_output_dict["time"] = []
         episode_output_dict["locations"] = []
+        episode_output_dict["show"] = []
+        episode_output_dict["season"] = []
         episode_counter = 0  # Initialize a counter for the current episode position

         # Iterate through the shows, seasons, and episodes in user_list
         for show in user_list:
             for season in user_list[show]:
                 for episode in user_list[show][season]:
+                    # Add the show title to the episode_output_dict if it doesn't exist
+                    if "show" not in episode_output_dict:
+                        episode_output_dict["show"] = [None] * episode_counter
+
+                    # Add the season number to the episode_output_dict if it doesn't exist
+                    if "season" not in episode_output_dict:
+                        episode_output_dict["season"] = [None] * episode_counter
+
+                    # Add the show title to the episode_output_dict
+                    episode_output_dict["show"].append(dict(show))
+
+                    # Add the season number to the episode_output_dict
+                    episode_output_dict["season"].append(season)
+
                     # Iterate through the keys and values in each episode
                     for episode_key, episode_value in episode.items():
                         # If the key is not "status", add the key to episode_output_dict if it doesn't exist
@@ -213,7 +229,7 @@ def episode_title_dict(user_list: dict):

         return episode_output_dict
     except Exception:
-        logger("Generating episode_output_dict failed, skipping", 1)
+        logger("Skipping episode_output_dict", 1)
         return {}

@@ -246,7 +262,7 @@ def movies_title_dict(user_list: dict):

         return movies_output_dict
     except Exception:
-        logger("Generating movies_output_dict failed, skipping", 1)
+        logger("Skipping movies_output_dict failed", 1)
         return {}
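The new "show" and "season" columns stay index-aligned with the existing "completed"/"time"/"locations" lists, which is what the per-show, per-season filtering added in src.watched below depends on. A sketch of the resulting shape, copied from the test fixture added further down in this PR:

# Flattened, index-aligned structure that episode_title_dict() now emits.
episode_titles = {
    "tvdb": ["8444132"],
    "completed": [True],
    "time": [0],
    "season": ["Season 1"],
    "show": [
        {
            "imdb": "tt3581920",
            "locations": ("The Last of Us",),
            "title": "The Last of Us",
            "tmdb": "100088",
            "tvdb": "392256",
        }
    ],
}

Index i across all of these lists describes the same episode, so show/season filtering can mask positions without re-deriving anything.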


@@ -1,7 +1,14 @@
-import re, requests, traceback
+import re, requests, os, traceback
+from typing import Dict, Union, FrozenSet
+import operator
+from itertools import groupby as itertools_groupby
 from urllib3.poolmanager import PoolManager
 from math import floor
+from requests.adapters import HTTPAdapter as RequestsHTTPAdapter
+from plexapi.video import Episode, Movie
 from plexapi.server import PlexServer
 from plexapi.myplex import MyPlexAccount
@@ -19,7 +26,7 @@ from src.library import (

 # Bypass hostname validation for ssl. Taken from https://github.com/pkkid/python-plexapi/issues/143#issuecomment-775485186
-class HostNameIgnoringAdapter(requests.adapters.HTTPAdapter):
+class HostNameIgnoringAdapter(RequestsHTTPAdapter):
     def init_poolmanager(self, connections, maxsize, block=..., **pool_kwargs):
         self.poolmanager = PoolManager(
             num_pools=connections,
@@ -30,107 +37,77 @@ class HostNameIgnoringAdapter(requests.adapters.HTTPAdapter):
         )

-def get_movie_guids(video, completed=True):
-    logger(f"Plex: {video.title} {video.guids} {video.locations}", 3)
-
-    movie_guids = {}
-    try:
-        for guid in video.guids:
-            # Extract source and id from guid.id
-            m = re.match(r"(.*)://(.*)", guid.id)
-            guid_source, guid_id = m.group(1).lower(), m.group(2)
-
-            movie_guids[guid_source] = guid_id
-    except Exception:
-        logger(f"Plex: Failed to get guids for {video.title}, Using location only", 1)
-
-    movie_guids["title"] = video.title
-    movie_guids["locations"] = tuple([x.split("/")[-1] for x in video.locations])
-
-    movie_guids["status"] = {
-        "completed": completed,
-        "time": video.viewOffset,
-    }
-
-    return movie_guids
-
-
-def get_episode_guids(episode, show, completed=True):
-    episode_guids_temp = {}
-    try:
-        for guid in episode.guids:
-            # Extract after :// from guid.id
-            m = re.match(r"(.*)://(.*)", guid.id)
-            guid_source, guid_id = m.group(1).lower(), m.group(2)
-
-            episode_guids_temp[guid_source] = guid_id
-    except Exception:
+def extract_guids_from_item(item: Union[Movie, Episode]) -> Dict[str, str]:
+    guids: Dict[str, str] = dict(
+        guid.id.split("://")
+        for guid in item.guids
+        if guid.id is not None and len(guid.id.strip()) > 0
+    )
+
+    if len(guids) == 0:
         logger(
-            f"Plex: Failed to get guids for {episode.title} in {show.title}, Using location only",
+            f"Plex: Failed to get any guids for {item.title}, Using location only",
             1,
         )

-    episode_guids_temp["title"] = episode.title
-    episode_guids_temp["locations"] = tuple(
-        [x.split("/")[-1] for x in episode.locations]
-    )
-
-    episode_guids_temp["status"] = {
-        "completed": completed,
-        "time": episode.viewOffset,
-    }
-
-    return episode_guids_temp
+    return guids
+
+
+def get_guids(item: Union[Movie, Episode], completed=True):
+    return {
+        "title": item.title,
+        "locations": tuple([location.split("/")[-1] for location in item.locations]),
+        "status": {
+            "completed": completed,
+            "time": item.viewOffset,
+        },
+    } | extract_guids_from_item(
+        item
+    )  # Merge the metadata and guid dictionaries


 def get_user_library_watched_show(show):
     try:
-        show_guids = {}
-        try:
-            for show_guid in show.guids:
-                # Extract source and id from guid.id
-                m = re.match(r"(.*)://(.*)", show_guid.id)
-                show_guid_source, show_guid_id = m.group(1).lower(), m.group(2)
-
-                show_guids[show_guid_source] = show_guid_id
-        except Exception:
-            logger(
-                f"Plex: Failed to get guids for {show.title}, Using location only", 1
-            )
-
-        show_guids["title"] = show.title
-        show_guids["locations"] = tuple([x.split("/")[-1] for x in show.locations])
-        show_guids = frozenset(show_guids.items())
-
-        # Get all watched episodes for show
-        episode_guids = {}
-        watched = show.watched()
-        for episode in show.episodes():
-            if episode in watched:
-                if episode.parentIndex not in episode_guids:
-                    episode_guids[episode.parentIndex] = []
-                episode_guids[episode.parentIndex].append(
-                    get_episode_guids(episode, show, completed=True)
-                )
-            elif episode.viewOffset > 0:
-                if episode.parentIndex not in episode_guids:
-                    episode_guids[episode.parentIndex] = []
-                episode_guids[episode.parentIndex].append(
-                    get_episode_guids(episode, show, completed=False)
-                )
+        show_guids: FrozenSet = frozenset(
+            (
+                {
+                    "title": show.title,
+                    "locations": tuple(
+                        [location.split("/")[-1] for location in show.locations]
+                    ),
+                }
+                | extract_guids_from_item(show)
+            ).items()  # Merge the metadata and guid dictionaries
+        )
+
+        watched_episodes = show.watched()
+        episode_guids = {
+            # Offset group data because the first value will be the key
+            season: [episode[1] for episode in episodes]
+            for season, episodes
+            # Group episodes by first element of tuple (episode.parentIndex)
+            in itertools_groupby(
+                [
+                    (
+                        episode.parentIndex,
+                        get_guids(episode, completed=episode in watched_episodes),
+                    )
+                    for episode in show.episodes()
+                    # Only include watched or partially-watched more than a minute episodes
+                    if episode in watched_episodes or episode.viewOffset >= 60000
+                ],
+                operator.itemgetter(0),
+            )
+        }

         return show_guids, episode_guids
     except Exception:
         return {}, {}


 def get_user_library_watched(user, user_plex, library):
+    user_name: str = user.title.lower()
     try:
-        user_name = user.username.lower() if user.username else user.title.lower()
-        user_watched = {}
-        user_watched[user_name] = {}
-
         logger(
             f"Plex: Generating watched for {user_name} in library {library.title}",
             0,
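The two helpers above replace the per-type regex loops: extract_guids_from_item() splits each Plex guid of the form "imdb://tt0436992" into a provider-to-id mapping, and get_guids() layers that mapping over the title/location/status metadata with dict union. A standalone sketch with made-up stand-ins (FakeGuid is hypothetical, not a plexapi class):

from typing import Dict


class FakeGuid:
    # Hypothetical stand-in for the objects plexapi exposes as item.guids.
    def __init__(self, guid_id):
        self.id = guid_id


def extract_guids(guids) -> Dict[str, str]:
    # "imdb://tt0436992" -> {"imdb": "tt0436992"}; empty or None ids are dropped.
    return dict(
        g.id.split("://") for g in guids if g.id is not None and len(g.id.strip()) > 0
    )


guids = [FakeGuid("imdb://tt0436992"), FakeGuid("tvdb://78804"), FakeGuid("")]
metadata = {
    "title": "Doctor Who (2005)",
    "locations": ("S01E01.mkv",),
    "status": {"completed": True, "time": 0},
}

# dict union keeps the metadata and adds the provider ids; on a key clash the
# right-hand side (the guids) wins, mirroring get_guids() above.
print(metadata | extract_guids(guids))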
@@ -139,58 +116,49 @@ def get_user_library_watched(user, user_plex, library):
         library_videos = user_plex.library.section(library.title)

         if library.type == "movie":
-            user_watched[user_name][library.title] = []
-
-            # Get all watched movies
-            for video in library_videos.search(unwatched=False):
-                logger(f"Plex: Adding {video.title} to {user_name} watched list", 3)
-
-                movie_guids = get_movie_guids(video, completed=True)
-
-                user_watched[user_name][library.title].append(movie_guids)
-
-            # Get all partially watched movies greater than 1 minute
-            for video in library_videos.search(inProgress=True):
-                if video.viewOffset < 60000:
-                    continue
-
-                logger(f"Plex: Adding {video.title} to {user_name} watched list", 3)
-
-                movie_guids = get_movie_guids(video, completed=False)
-                user_watched[user_name][library.title].append(movie_guids)
+            watched = []
+
+            args = [
+                [get_guids, video, True]
+                for video
+                # Get all watched movies
+                in library_videos.search(unwatched=False)
+            ] + [
+                [get_guids, video, False]
+                for video
+                # Get all partially watched movies
+                in library_videos.search(inProgress=True)
+                # Only include partially-watched movies more than a minute
+                if video.viewOffset >= 60000
+            ]
+
+            for guid in future_thread_executor(args, threads=min(os.cpu_count(), 4)):
+                logger(f"Plex: Adding {guid['title']} to {user_name} watched list", 3)
+                watched.append(guid)

         elif library.type == "show":
-            user_watched[user_name][library.title] = {}
-
-            # Parallelize show processing
-            args = []
-
-            # Get all watched shows
-            for show in library_videos.search(unwatched=False):
-                args.append([get_user_library_watched_show, show])
-
-            # Get all partially watched shows
-            for show in library_videos.search(inProgress=True):
-                args.append([get_user_library_watched_show, show])
+            watched = {}
+
+            # Get all watched shows and partially watched shows
+            args = [
+                (get_user_library_watched_show, show)
+                for show in library_videos.search(unwatched=False)
+                + library_videos.search(inProgress=True)
+            ]

             for show_guids, episode_guids in future_thread_executor(args, threads=4):
                 if show_guids and episode_guids:
-                    # append show, season, episode
-                    if show_guids not in user_watched[user_name][library.title]:
-                        user_watched[user_name][library.title][show_guids] = {}
-
-                    user_watched[user_name][library.title][show_guids] = episode_guids
+                    watched[show_guids] = episode_guids
                     logger(
                         f"Plex: Added {episode_guids} to {user_name} {show_guids} watched list",
                         3,
                     )
+        else:
+            watched = None

         logger(f"Plex: Got watched for {user_name} in library {library.title}", 1)
-        if library.title in user_watched[user_name]:
-            logger(f"Plex: {user_watched[user_name][library.title]}", 3)
+        logger(f"Plex: {watched}", 3)

-        return user_watched
+        return {user_name: {library.title: watched} if watched is not None else {}}
     except Exception as e:
         logger(
             f"Plex: Failed to get watched for {user_name} in library {library.title}, Error: {e}",
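The per-library work is now expressed as a list of [callable, *args] (or (callable, arg)) entries handed to future_thread_executor, and seasons are rebuilt by grouping (parentIndex, guids) tuples. A minimal sketch of that grouping idiom, with plain tuples standing in for the pairs built above:

# Sketch of the season-grouping idiom used in get_user_library_watched_show().
import operator
from itertools import groupby as itertools_groupby

pairs = [
    (1, {"title": "Rose"}),
    (1, {"title": "The End of the World"}),
    (2, {"title": "S02E01"}),
]

episode_guids = {
    season: [episode[1] for episode in episodes]
    for season, episodes in itertools_groupby(pairs, operator.itemgetter(0))
}

print(episode_guids)
# {1: [{'title': 'Rose'}, {'title': 'The End of the World'}], 2: [{'title': 'S02E01'}]}

groupby() only merges runs of equal keys, so this comprehension (and the real one above) assumes the episode list arrives already ordered by season.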


@@ -6,11 +6,17 @@ from src.library import generate_library_guids_dict

 def combine_watched_dicts(dicts: list):
+    # Ensure that the input is a list of dictionaries
+    if not all(isinstance(d, dict) for d in dicts):
+        raise ValueError("Input must be a list of dictionaries")
+
     combined_dict = {}
     for single_dict in dicts:
         for key, value in single_dict.items():
             if key not in combined_dict:
                 combined_dict[key] = {}
             for subkey, subvalue in value.items():
                 if subkey in combined_dict[key]:
                     # If the subkey already exists in the combined dictionary,
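The new guard turns a wrong argument into an immediate ValueError instead of an AttributeError somewhere inside the merge loop. A tiny illustration (the data shape is made up):

# Minimal illustration of the input check added to combine_watched_dicts().
dicts = [{"user_a": {"Movies": []}}, {"user_b": {"Movies": []}}]

if not all(isinstance(d, dict) for d in dicts):
    raise ValueError("Input must be a list of dictionaries")

Passing, say, [None] or a single dict instead of a list of them now fails at the door rather than part-way through the merge.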
@@ -117,11 +123,18 @@ def cleanup_watched(
                 show_key_dict = dict(show_key_1)

                 for season in watched_list_1[user_1][library_1][show_key_1]:
+                    # Filter the episode_watched_list_2_keys_dict dictionary to handle cases
+                    # where episode location names are not unique such as S01E01.mkv
+                    filtered_episode_watched_list_2_keys_dict = (
+                        filter_episode_watched_list_2_keys_dict(
+                            episode_watched_list_2_keys_dict, show_key_dict, season
+                        )
+                    )
                     for episode in watched_list_1[user_1][library_1][show_key_1][
                         season
                     ]:
                         episode_index = get_episode_index_in_dict(
-                            episode, episode_watched_list_2_keys_dict
+                            episode, filtered_episode_watched_list_2_keys_dict
                         )
                         if episode_index is not None:
                             if check_remove_entry(
@@ -217,6 +230,59 @@ def get_movie_index_in_dict(movie, movies_watched_list_2_keys_dict):
     return None


+def filter_episode_watched_list_2_keys_dict(
+    episode_watched_list_2_keys_dict, show_key_dict, season
+):
+    # Filter the episode_watched_list_2_keys_dict dictionary to only include values for the correct show and season
+    filtered_episode_watched_list_2_keys_dict = {}
+    show_indecies = []
+    season_indecies = []
+
+    # Iterate through episode_watched_list_2_keys_dict["season"] and find the indecies that match season
+    for season_index, season_value in enumerate(
+        episode_watched_list_2_keys_dict["season"]
+    ):
+        if season_value == season:
+            season_indecies.append(season_index)
+
+    # Iterate through episode_watched_list_2_keys_dict["show"] and find the indecies that match show_key_dict
+    for show_index, show_value in enumerate(episode_watched_list_2_keys_dict["show"]):
+        # Iterate through the keys and values of the show_value dictionary and check if they match show_key_dict
+        for show_key, show_key_value in show_value.items():
+            if show_key == "locations":
+                # Iterate through the locations in the show_value dictionary
+                for location in show_key_value:
+                    # If the location is in the episode_watched_list_2_keys_dict dictionary, return index of the key
+                    if (
+                        contains_nested(location, show_key_dict["locations"])
+                        is not None
+                    ):
+                        show_indecies.append(show_index)
+                        break
+            else:
+                if show_key in show_key_dict.keys():
+                    if show_key_value == show_key_dict[show_key]:
+                        show_indecies.append(show_index)
+                        break
+
+    # Find the intersection of the show_indecies and season_indecies lists
+    indecies = list(set(show_indecies) & set(season_indecies))
+
+    # Create a copy of the dictionary with indecies that match the show and season and none that don't
+    filtered_episode_watched_list_2_keys_dict = {}
+    for key, value in episode_watched_list_2_keys_dict.items():
+        if key not in filtered_episode_watched_list_2_keys_dict:
+            filtered_episode_watched_list_2_keys_dict[key] = []
+        for index, _ in enumerate(value):
+            if index in indecies:
+                filtered_episode_watched_list_2_keys_dict[key].append(value[index])
+            else:
+                filtered_episode_watched_list_2_keys_dict[key].append(None)
+
+    return filtered_episode_watched_list_2_keys_dict
+
+
 def get_episode_index_in_dict(episode, episode_watched_list_2_keys_dict):
     # Iterate through the keys and values of the episode dictionary
     for episode_key, episode_value in episode.items():
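Because non-matching positions are replaced with None rather than removed, every list in the filtered dictionary keeps its original length, so the positional lookups in get_episode_index_in_dict() stay aligned; that is what makes repeated file names such as S01E01.mkv safe to match. A toy sketch of that masking (shapes simplified):

# Toy illustration of the index-preserving masking; the fixture is simplified.
episode_keys = {
    "season": [1, 1, 2],
    "show": ["Doctor Who (2005)", "Monarch: Legacy of Monsters", "Doctor Who (2005)"],
    "locations": [("S01E01.mkv",), ("S01E01.mkv",), ("S02E01.mkv",)],
}

# Keep only the positions belonging to Doctor Who season 1.
keep = [
    i
    for i in range(len(episode_keys["season"]))
    if episode_keys["show"][i] == "Doctor Who (2005)" and episode_keys["season"][i] == 1
]

filtered = {
    key: [value if i in keep else None for i, value in enumerate(values)]
    for key, values in episode_keys.items()
}

print(filtered["locations"])  # [('S01E01.mkv',), None, None]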


@@ -83,6 +83,16 @@ episode_titles = {
     "tvdb": ["8444132"],
     "completed": [True],
     "time": [0],
+    "season": ["Season 1"],
+    "show": [
+        {
+            "imdb": "tt3581920",
+            "locations": ("The Last of Us",),
+            "title": "The Last of Us",
+            "tmdb": "100088",
+            "tvdb": "392256",
+        }
+    ],
 }

 movie_titles = {
     "imdb": ["tt2380307"],


@@ -18,102 +18,225 @@ from src.watched import cleanup_watched, combine_watched_dicts
tv_shows_watched_list_1 = { tv_shows_watched_list_1 = {
frozenset( frozenset(
{ {
("tvdb", "75710"), ("locations", ("Doctor Who (2005) {tvdb-78804} {imdb-tt0436992}",)),
("title", "Criminal Minds"), ("imdb", "tt0436992"),
("imdb", "tt0452046"), ("tmdb", "57243"),
("locations", ("Criminal Minds",)), ("tvdb", "78804"),
("tmdb", "4057"), ("title", "Doctor Who (2005)"),
} }
): { ): {
"Season 1": [ 1: [
{ {
"imdb": "tt0550489", "imdb": "tt0563001",
"tmdb": "282843", "tmdb": "968589",
"tvdb": "176357", "tvdb": "295296",
"title": "Extreme Aggressor", "title": "The Unquiet Dead",
"locations": ( "locations": ("S01E03.mkv",),
"Criminal Minds S01E01 Extreme Aggressor WEBDL-720p.mkv",
),
"status": {"completed": True, "time": 0}, "status": {"completed": True, "time": 0},
}, },
{ {
"imdb": "tt0550487", "imdb": "tt0562985",
"tmdb": "282861", "tmdb": "968590",
"tvdb": "300385", "tvdb": "295297",
"title": "Compulsion", "title": "Aliens of London (1)",
"locations": ("Criminal Minds S01E02 Compulsion WEBDL-720p.mkv",), "locations": ("S01E04.mkv",),
"status": {"completed": False, "time": 240000},
},
{
"imdb": "tt0563003",
"tmdb": "968592",
"tvdb": "295298",
"title": "World War Three (2)",
"locations": ("S01E05.mkv",),
"status": {"completed": True, "time": 0}, "status": {"completed": True, "time": 0},
}, },
] ]
}, },
frozenset({("title", "Test"), ("locations", ("Test",))}): { frozenset(
"Season 1": [ {
("title", "Monarch: Legacy of Monsters"),
("imdb", "tt17220216"),
("tvdb", "422598"),
("tmdb", "202411"),
(
"locations",
("Monarch - Legacy of Monsters {tvdb-422598} {imdb-tt17220216}",),
),
}
): {
1: [
{ {
"title": "S01E01", "imdb": "tt21255044",
"locations": ("Test S01E01.mkv",), "tmdb": "4661246",
"tvdb": "10009418",
"title": "Secrets and Lies",
"locations": ("S01E03.mkv",),
"status": {"completed": True, "time": 0}, "status": {"completed": True, "time": 0},
}, },
{ {
"title": "S01E02", "imdb": "tt21255050",
"locations": ("Test S01E02.mkv",), "tmdb": "4712059",
"tvdb": "10009419",
"title": "Parallels and Interiors",
"locations": ("S01E04.mkv",),
"status": {"completed": False, "time": 240000},
},
{
"imdb": "tt23787572",
"tmdb": "4712061",
"tvdb": "10009420",
"title": "The Way Out",
"locations": ("S01E05.mkv",),
"status": {"completed": True, "time": 0},
},
]
},
frozenset(
{
("tmdb", "125928"),
("imdb", "tt14681924"),
("tvdb", "403172"),
(
"locations",
("My Adventures with Superman {tvdb-403172} {imdb-tt14681924}",),
),
("title", "My Adventures with Superman"),
}
): {
1: [
{
"imdb": "tt15699926",
"tmdb": "3070048",
"tvdb": "8438181",
"title": "Adventures of a Normal Man (1)",
"locations": ("S01E01.mkv",),
"status": {"completed": True, "time": 0}, "status": {"completed": True, "time": 0},
}, },
{ {
"title": "S01E04", "imdb": "tt20413322",
"locations": ("Test S01E04.mkv",), "tmdb": "4568681",
"status": {"completed": False, "time": 5}, "tvdb": "9829910",
"title": "Adventures of a Normal Man (2)",
"locations": ("S01E02.mkv",),
"status": {"completed": True, "time": 0},
},
{
"imdb": "tt20413328",
"tmdb": "4497012",
"tvdb": "9870382",
"title": "My Interview with Superman",
"locations": ("S01E03.mkv",),
"status": {"completed": True, "time": 0},
}, },
] ]
}, },
} }
tv_shows_watched_list_2 = { tv_shows_watched_list_2 = {
frozenset( frozenset(
{ {
("tvdb", "75710"), ("locations", ("Doctor Who (2005) {tvdb-78804} {imdb-tt0436992}",)),
("title", "Criminal Minds"), ("imdb", "tt0436992"),
("imdb", "tt0452046"), ("tmdb", "57243"),
("locations", ("Criminal Minds",)), ("title", "Doctor Who"),
("tmdb", "4057"), ("tvdb", "78804"),
("tvrage", "3332"),
} }
): { ): {
"Season 1": [ 1: [
{ {
"imdb": "tt0550487", "tvdb": "295294",
"tmdb": "282861", "imdb": "tt0562992",
"tvdb": "300385", "title": "Rose",
"title": "Compulsion", "locations": ("S01E01.mkv",),
"locations": ("Criminal Minds S01E02 Compulsion WEBDL-720p.mkv",),
"status": {"completed": True, "time": 0}, "status": {"completed": True, "time": 0},
}, },
{ {
"imdb": "tt0550498", "tvdb": "295295",
"tmdb": "282865", "imdb": "tt0562997",
"tvdb": "300474", "title": "The End of the World",
"title": "Won't Get Fooled Again", "locations": ("S01E02.mkv",),
"locations": ( "status": {"completed": False, "time": 300670},
"Criminal Minds S01E03 Won't Get Fooled Again WEBDL-720p.mkv", },
), {
"tvdb": "295298",
"imdb": "tt0563003",
"title": "World War Three (2)",
"locations": ("S01E05.mkv",),
"status": {"completed": True, "time": 0}, "status": {"completed": True, "time": 0},
}, },
] ]
}, },
frozenset({("title", "Test"), ("locations", ("Test",))}): { frozenset(
"Season 1": [ {
("title", "Monarch: Legacy of Monsters"),
("imdb", "tt17220216"),
("tvdb", "422598"),
("tmdb", "202411"),
(
"locations",
("Monarch - Legacy of Monsters {tvdb-422598} {imdb-tt17220216}",),
),
}
): {
1: [
{ {
"title": "S01E02", "tvdb": "9959300",
"locations": ("Test S01E02.mkv",), "imdb": "tt20412166",
"status": {"completed": False, "time": 10}, "title": "Aftermath",
}, "locations": ("S01E01.mkv",),
{
"title": "S01E03",
"locations": ("Test S01E03.mkv",),
"status": {"completed": True, "time": 0}, "status": {"completed": True, "time": 0},
}, },
{ {
"title": "S01E04", "tvdb": "10009417",
"locations": ("Test S01E04.mkv",), "imdb": "tt22866594",
"status": {"completed": False, "time": 10}, "title": "Departure",
"locations": ("S01E02.mkv",),
"status": {"completed": False, "time": 300741},
},
{
"tvdb": "10009420",
"imdb": "tt23787572",
"title": "The Way Out",
"locations": ("S01E05.mkv",),
"status": {"completed": True, "time": 0},
},
]
},
frozenset(
{
("tmdb", "125928"),
("imdb", "tt14681924"),
("tvdb", "403172"),
(
"locations",
("My Adventures with Superman {tvdb-403172} {imdb-tt14681924}",),
),
("title", "My Adventures with Superman"),
}
): {
1: [
{
"tvdb": "8438181",
"imdb": "tt15699926",
"title": "Adventures of a Normal Man (1)",
"locations": ("S01E01.mkv",),
"status": {"completed": True, "time": 0},
},
{
"tvdb": "9829910",
"imdb": "tt20413322",
"title": "Adventures of a Normal Man (2)",
"locations": ("S01E02.mkv",),
"status": {"completed": True, "time": 0},
},
{
"tvdb": "9870382",
"imdb": "tt20413328",
"title": "My Interview with Superman",
"locations": ("S01E03.mkv",),
"status": {"completed": True, "time": 0},
}, },
] ]
}, },
@@ -122,38 +245,61 @@ tv_shows_watched_list_2 = {
expected_tv_show_watched_list_1 = { expected_tv_show_watched_list_1 = {
frozenset( frozenset(
{ {
("tvdb", "75710"), ("locations", ("Doctor Who (2005) {tvdb-78804} {imdb-tt0436992}",)),
("title", "Criminal Minds"), ("imdb", "tt0436992"),
("imdb", "tt0452046"), ("tmdb", "57243"),
("locations", ("Criminal Minds",)), ("tvdb", "78804"),
("tmdb", "4057"), ("title", "Doctor Who (2005)"),
} }
): { ): {
"Season 1": [ 1: [
{ {
"imdb": "tt0550489", "imdb": "tt0563001",
"tmdb": "282843", "tmdb": "968589",
"tvdb": "176357", "tvdb": "295296",
"title": "Extreme Aggressor", "title": "The Unquiet Dead",
"locations": ( "locations": ("S01E03.mkv",),
"Criminal Minds S01E01 Extreme Aggressor WEBDL-720p.mkv",
),
"status": {"completed": True, "time": 0},
}
]
},
frozenset({("title", "Test"), ("locations", ("Test",))}): {
"Season 1": [
{
"title": "S01E01",
"locations": ("Test S01E01.mkv",),
"status": {"completed": True, "time": 0}, "status": {"completed": True, "time": 0},
}, },
{ {
"title": "S01E02", "imdb": "tt0562985",
"locations": ("Test S01E02.mkv",), "tmdb": "968590",
"tvdb": "295297",
"title": "Aliens of London (1)",
"locations": ("S01E04.mkv",),
"status": {"completed": False, "time": 240000},
},
]
},
frozenset(
{
("title", "Monarch: Legacy of Monsters"),
("imdb", "tt17220216"),
("tvdb", "422598"),
("tmdb", "202411"),
(
"locations",
("Monarch - Legacy of Monsters {tvdb-422598} {imdb-tt17220216}",),
),
}
): {
1: [
{
"imdb": "tt21255044",
"tmdb": "4661246",
"tvdb": "10009418",
"title": "Secrets and Lies",
"locations": ("S01E03.mkv",),
"status": {"completed": True, "time": 0}, "status": {"completed": True, "time": 0},
}, },
{
"imdb": "tt21255050",
"tmdb": "4712059",
"tvdb": "10009419",
"title": "Parallels and Interiors",
"locations": ("S01E04.mkv",),
"status": {"completed": False, "time": 240000},
},
] ]
}, },
} }
@@ -161,37 +307,57 @@ expected_tv_show_watched_list_1 = {
expected_tv_show_watched_list_2 = { expected_tv_show_watched_list_2 = {
frozenset( frozenset(
{ {
("tvdb", "75710"), ("locations", ("Doctor Who (2005) {tvdb-78804} {imdb-tt0436992}",)),
("title", "Criminal Minds"), ("imdb", "tt0436992"),
("imdb", "tt0452046"), ("tmdb", "57243"),
("locations", ("Criminal Minds",)), ("title", "Doctor Who"),
("tmdb", "4057"), ("tvdb", "78804"),
("tvrage", "3332"),
} }
): { ): {
"Season 1": [ 1: [
{ {
"imdb": "tt0550498", "tvdb": "295294",
"tmdb": "282865", "imdb": "tt0562992",
"tvdb": "300474", "title": "Rose",
"title": "Won't Get Fooled Again", "locations": ("S01E01.mkv",),
"locations": (
"Criminal Minds S01E03 Won't Get Fooled Again WEBDL-720p.mkv",
),
"status": {"completed": True, "time": 0},
}
]
},
frozenset({("title", "Test"), ("locations", ("Test",))}): {
"Season 1": [
{
"title": "S01E03",
"locations": ("Test S01E03.mkv",),
"status": {"completed": True, "time": 0}, "status": {"completed": True, "time": 0},
}, },
{ {
"title": "S01E04", "tvdb": "295295",
"locations": ("Test S01E04.mkv",), "imdb": "tt0562997",
"status": {"completed": False, "time": 10}, "title": "The End of the World",
"locations": ("S01E02.mkv",),
"status": {"completed": False, "time": 300670},
},
]
},
frozenset(
{
("title", "Monarch: Legacy of Monsters"),
("imdb", "tt17220216"),
("tvdb", "422598"),
("tmdb", "202411"),
(
"locations",
("Monarch - Legacy of Monsters {tvdb-422598} {imdb-tt17220216}",),
),
}
): {
1: [
{
"tvdb": "9959300",
"imdb": "tt20412166",
"title": "Aftermath",
"locations": ("S01E01.mkv",),
"status": {"completed": True, "time": 0},
},
{
"tvdb": "10009417",
"imdb": "tt22866594",
"title": "Departure",
"locations": ("S01E02.mkv",),
"status": {"completed": False, "time": 300741},
}, },
] ]
}, },
@@ -199,61 +365,92 @@ expected_tv_show_watched_list_2 = {
movies_watched_list_1 = [ movies_watched_list_1 = [
{ {
"imdb": "tt2380307", "imdb": "tt1254207",
"tmdb": "354912", "tmdb": "10378",
"title": "Coco", "tvdb": "12352",
"locations": ("Coco (2017) Remux-1080p.mkv",), "title": "Big Buck Bunny",
"locations": ("Big Buck Bunny.mkv",),
"status": {"completed": True, "time": 0}, "status": {"completed": True, "time": 0},
}, },
{ {
"tmdbcollection": "448150", "imdb": "tt16431870",
"imdb": "tt1431045", "tmdb": "1029575",
"tmdb": "293660", "tvdb": "351194",
"title": "Deadpool", "title": "The Family Plan",
"locations": ("Deadpool (2016) Remux-1080p.mkv",), "locations": ("The Family Plan (2023).mkv",),
"status": {"completed": True, "time": 0}, "status": {"completed": True, "time": 0},
}, },
{
"imdb": "tt5537002",
"tmdb": "466420",
"tvdb": "135852",
"title": "Killers of the Flower Moon",
"locations": ("Killers of the Flower Moon (2023).mkv",),
"status": {"completed": False, "time": 240000},
},
] ]
movies_watched_list_2 = [ movies_watched_list_2 = [
{ {
"imdb": "tt2380307", "imdb": "tt16431870",
"tmdb": "354912", "tmdb": "1029575",
"title": "Coco", "title": "The Family Plan",
"locations": ("Coco (2017) Remux-1080p.mkv",), "locations": ("The Family Plan (2023).mkv",),
"status": {"completed": True, "time": 0}, "status": {"completed": True, "time": 0},
}, },
{ {
"imdb": "tt0384793", "imdb": "tt4589218",
"tmdb": "9788", "tmdb": "507089",
"tvdb": "9103", "title": "Five Nights at Freddy's",
"title": "Accepted", "locations": ("Five Nights at Freddy's (2023).mkv",),
"locations": ("Accepted (2006) Remux-1080p.mkv",),
"status": {"completed": True, "time": 0}, "status": {"completed": True, "time": 0},
}, },
{
"imdb": "tt10545296",
"tmdb": "695721",
"tmdbcollection": "131635",
"title": "The Hunger Games: The Ballad of Songbirds & Snakes",
"locations": ("The Hunger Games The Ballad of Songbirds & Snakes (2023).mkv",),
"status": {"completed": False, "time": 301215},
},
] ]
expected_movie_watched_list_1 = [ expected_movie_watched_list_1 = [
{ {
"tmdbcollection": "448150", "imdb": "tt1254207",
"imdb": "tt1431045", "tmdb": "10378",
"tmdb": "293660", "tvdb": "12352",
"title": "Deadpool", "title": "Big Buck Bunny",
"locations": ("Deadpool (2016) Remux-1080p.mkv",), "locations": ("Big Buck Bunny.mkv",),
"status": {"completed": True, "time": 0}, "status": {"completed": True, "time": 0},
} },
{
"imdb": "tt5537002",
"tmdb": "466420",
"tvdb": "135852",
"title": "Killers of the Flower Moon",
"locations": ("Killers of the Flower Moon (2023).mkv",),
"status": {"completed": False, "time": 240000},
},
] ]
expected_movie_watched_list_2 = [ expected_movie_watched_list_2 = [
{ {
"imdb": "tt0384793", "imdb": "tt4589218",
"tmdb": "9788", "tmdb": "507089",
"tvdb": "9103", "title": "Five Nights at Freddy's",
"title": "Accepted", "locations": ("Five Nights at Freddy's (2023).mkv",),
"locations": ("Accepted (2006) Remux-1080p.mkv",),
"status": {"completed": True, "time": 0}, "status": {"completed": True, "time": 0},
} },
{
"imdb": "tt10545296",
"tmdb": "695721",
"tmdbcollection": "131635",
"title": "The Hunger Games: The Ballad of Songbirds & Snakes",
"locations": ("The Hunger Games The Ballad of Songbirds & Snakes (2023).mkv",),
"status": {"completed": False, "time": 301215},
},
] ]
# Test to see if objects get deleted all the way up to the root. # Test to see if objects get deleted all the way up to the root.


@@ -0,0 +1,73 @@
# Check the mark.log file that is generated by the CI to make sure it contains the expected values
import os
def read_marklog():
marklog = os.path.join(os.getcwd(), "mark.log")
with open(marklog, "r") as f:
lines = f.readlines()
return lines
def check_marklog(lines, expected_values):
try:
# Check to make sure the marklog contains all the expected values and nothing else
found_values = []
for line in lines:
# Remove the newline character
line = line.strip()
if line not in expected_values:
raise Exception("Line not found in marklog: " + line)
found_values.append(line)
# Check to make sure the marklog contains the same number of values as the expected values
if len(found_values) != len(expected_values):
raise Exception(
"Marklog did not contain the same number of values as the expected values, found "
+ str(len(found_values))
+ " values, expected "
+ str(len(expected_values))
+ " values"
)
# Check that the two lists contain the same values
if sorted(found_values) != sorted(expected_values):
raise Exception(
"Marklog did not contain the same values as the expected values, found:\n"
+ "\n".join(sorted(found_values))
+ "\n\nExpected:\n"
+ "\n".join(sorted(expected_values))
)
return True
except Exception as e:
print(e)
return False
def main():
expected_values = [
"jellyplex_watched/Movies/Five Nights at Freddy's",
"jellyplex_watched/Movies/The Hunger Games: The Ballad of Songbirds & Snakes/301215",
"jellyplex_watched/TV Shows/Doctor Who (2005)/Rose",
"jellyplex_watched/TV Shows/Doctor Who (2005)/The End of the World/300670",
"jellyplex_watched/TV Shows/Monarch: Legacy of Monsters/Aftermath",
"jellyplex_watched/TV Shows/Monarch: Legacy of Monsters/Departure/300741",
"JellyUser/Movies/Big Buck Bunny",
"JellyUser/Shows/Doctor Who/The Unquiet Dead",
"JellyUser/Shows/Monarch: Legacy of Monsters/Secrets and Lies",
]
lines = read_marklog()
if not check_marklog(lines, expected_values):
print("Failed to validate marklog")
exit(1)
print("Successfully validated marklog")
exit(0)
if __name__ == "__main__":
main()
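In CI this script runs right after python main.py and cat mark.log (see the workflow change above), so a bad sync fails the job via the non-zero exit. check_marklog() can also be exercised on its own; a small sketch (assumes check_marklog from this file is in scope, with two lines copied from the expected_values in main()):

lines = [
    "JellyUser/Movies/Big Buck Bunny\n",
    "jellyplex_watched/TV Shows/Doctor Who (2005)/Rose\n",
]
expected = [
    "JellyUser/Movies/Big Buck Bunny",
    "jellyplex_watched/TV Shows/Doctor Who (2005)/Rose",
]

print(check_marklog(lines, expected))      # True: every line accounted for
print(check_marklog(lines, expected[:1]))  # False: second line is not expected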