Merge pull request #135 from luigi311/dev

Dev
Luigi311 2024-01-06 04:45:19 -07:00 committed by GitHub
commit 6ec003f899
12 changed files with 637 additions and 267 deletions


@@ -1,5 +1,6 @@
name: CI
on:
workflow_dispatch:
push:
paths-ignore:
- .gitignore
@@ -44,10 +45,16 @@ jobs:
sudo chown -R $PUID:$PGID JellyPlex-Watched-CI
docker pull lscr.io/linuxserver/plex &
docker pull lscr.io/linuxserver/jellyfin &
wait
docker-compose -f JellyPlex-Watched-CI/plex/docker-compose.yml up -d
docker-compose -f JellyPlex-Watched-CI/jellyfin/docker-compose.yml up -d
# Wait for containers to start
sleep 15
sleep 5
docker-compose -f JellyPlex-Watched-CI/plex/docker-compose.yml logs
docker-compose -f JellyPlex-Watched-CI/jellyfin/docker-compose.yml logs
@@ -59,6 +66,7 @@ jobs:
python main.py
cat mark.log
python test/validate_ci_marklog.py
docker:
runs-on: ubuntu-latest

.vscode/launch.json

@@ -11,6 +11,17 @@
"program": "main.py",
"console": "integratedTerminal",
"justMyCode": true
},
{
"name": "Pytest",
"type": "python",
"request": "launch",
"module": "pytest",
"args": [
"-vv"
],
"console": "integratedTerminal",
"justMyCode": true
}
]
}


@@ -33,7 +33,8 @@ ENV BLACKLIST_USERS ''
ENV WHITELIST_USERS ''
RUN addgroup --system jellyplex_user && \
RUN apk add --no-cache tini && \
addgroup --system jellyplex_user && \
adduser --system --no-create-home jellyplex_user --ingroup jellyplex_user && \
mkdir -p /app && \
chown -R jellyplex_user:jellyplex_user /app
@@ -48,4 +49,5 @@ COPY --chown=jellyplex_user:jellyplex_user . .
USER jellyplex_user
ENTRYPOINT ["/sbin/tini", "--"]
CMD ["python", "-u", "main.py"]


@@ -33,7 +33,11 @@ ENV BLACKLIST_USERS ''
ENV WHITELIST_USERS ''
RUN addgroup --system jellyplex_user && \
RUN apt-get update && \
apt-get install tini --yes --no-install-recommends && \
apt-get clean && \
rm -rf /var/lib/apt/lists/* && \
addgroup --system jellyplex_user && \
adduser --system --no-create-home jellyplex_user --ingroup jellyplex_user && \
mkdir -p /app && \
chown -R jellyplex_user:jellyplex_user /app
@@ -48,4 +52,5 @@ COPY --chown=jellyplex_user:jellyplex_user . .
USER jellyplex_user
ENTRYPOINT ["/bin/tini", "--"]
CMD ["python", "-u", "main.py"]


@@ -1,9 +1,9 @@
import sys
if __name__ == "__main__":
# Check python version 3.6 or higher
if not (3, 6) <= tuple(map(int, sys.version_info[:2])):
print("This script requires Python 3.6 or higher")
# Check python version 3.9 or higher
if not (3, 9) <= tuple(map(int, sys.version_info[:2])):
print("This script requires Python 3.9 or higher")
sys.exit(1)
from src.main import main
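The jump from 3.6 to 3.9 lines up with the dict union operator ("|") used in get_guids further down in this diff, which only exists for dicts from Python 3.9 onward. A minimal sketch of the same tuple-based version gate (sys.version_info[:2] is already a tuple of ints, so the map call above is optional):

import sys

# Hedged sketch of the check in main.py, not the file itself.
if not (3, 9) <= sys.version_info[:2]:
    print("This script requires Python 3.9 or higher")
    sys.exit(1)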


@@ -64,9 +64,16 @@ def str_to_bool(value: any) -> bool:
# Search for nested element in list
def contains_nested(element, lst):
if lst is None:
return None
for i, item in enumerate(lst):
if item is None:
continue
if element in item:
return i
elif element == item:
return i
return None
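A quick usage sketch of the rewritten contains_nested: it returns the index of the first entry that contains or equals the element, skipping None placeholders. The import path and values below are assumptions for illustration only:

from src.functions import contains_nested  # module path assumed; the filename is not shown in this view

locations = (("S01E03.mkv", "S01E04.mkv"), None, "S01E05.mkv")
print(contains_nested("S01E04.mkv", locations))  # 0 -- found inside the first nested tuple
print(contains_nested("S01E05.mkv", locations))  # 2 -- the None entry at index 1 is skipped
print(contains_nested("missing.mkv", locations))  # None -- nothing matched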
@@ -92,6 +99,13 @@ def future_thread_executor(args: list, threads: int = 32):
workers = min(int(os.getenv("MAX_THREADS", 32)), os.cpu_count() * 2, threads)
# If only one worker, run in main thread to avoid overhead
if workers == 1:
results = []
for arg in args:
results.append(arg[0](*arg[1:]))
return results
with ThreadPoolExecutor(max_workers=workers) as executor:
for arg in args:
# * arg unpacks the list into actual arguments
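The new single-worker branch runs each entry as arg[0](*arg[1:]), so callers hand future_thread_executor a list of [callable, arg1, arg2, ...] items, exactly as the Plex changes below do with [get_guids, video, True]. A self-contained sketch of that calling convention:

def add(a, b):
    return a + b

args = [[add, 1, 2], [add, 3, 4]]
# Same unpacking the single-worker path above performs.
results = [arg[0](*arg[1:]) for arg in args]
print(results)  # [3, 7]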


@@ -158,7 +158,7 @@ def show_title_dict(user_list: dict):
return show_output_dict
except Exception:
logger("Generating show_output_dict failed, skipping", 1)
logger("Skipping show_output_dict ", 1)
return {}
@@ -168,12 +168,28 @@ def episode_title_dict(user_list: dict):
episode_output_dict["completed"] = []
episode_output_dict["time"] = []
episode_output_dict["locations"] = []
episode_output_dict["show"] = []
episode_output_dict["season"] = []
episode_counter = 0 # Initialize a counter for the current episode position
# Iterate through the shows, seasons, and episodes in user_list
for show in user_list:
for season in user_list[show]:
for episode in user_list[show][season]:
# Add the show title to the episode_output_dict if it doesn't exist
if "show" not in episode_output_dict:
episode_output_dict["show"] = [None] * episode_counter
# Add the season number to the episode_output_dict if it doesn't exist
if "season" not in episode_output_dict:
episode_output_dict["season"] = [None] * episode_counter
# Add the show title to the episode_output_dict
episode_output_dict["show"].append(dict(show))
# Add the season number to the episode_output_dict
episode_output_dict["season"].append(season)
# Iterate through the keys and values in each episode
for episode_key, episode_value in episode.items():
# If the key is not "status", add the key to episode_output_dict if it doesn't exist
@@ -213,7 +229,7 @@ def episode_title_dict(user_list: dict):
return episode_output_dict
except Exception:
logger("Generating episode_output_dict failed, skipping", 1)
logger("Skipping episode_output_dict", 1)
return {}
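The new "show" and "season" entries extend the column-oriented layout these dictionaries already use: every key holds a list, and index i across all of those lists describes the same episode (compare the episode_titles fixture later in this diff). A minimal sketch of reading one episode back out of that layout, with illustrative values:

episode_output_dict = {
    "tvdb": ["8444132"],
    "completed": [True],
    "time": [0],
    "season": ["Season 1"],
    "show": [{"title": "The Last of Us", "tvdb": "392256"}],
}
i = 0  # all lists line up by position
print(episode_output_dict["show"][i]["title"], episode_output_dict["season"][i])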
@@ -246,7 +262,7 @@ def movies_title_dict(user_list: dict):
return movies_output_dict
except Exception:
logger("Generating movies_output_dict failed, skipping", 1)
logger("Skipping movies_output_dict failed", 1)
return {}


@@ -1,7 +1,14 @@
import re, requests, traceback
import re, requests, os, traceback
from typing import Dict, Union, FrozenSet
import operator
from itertools import groupby as itertools_groupby
from urllib3.poolmanager import PoolManager
from math import floor
from requests.adapters import HTTPAdapter as RequestsHTTPAdapter
from plexapi.video import Episode, Movie
from plexapi.server import PlexServer
from plexapi.myplex import MyPlexAccount
@@ -19,7 +26,7 @@ from src.library import (
# Bypass hostname validation for ssl. Taken from https://github.com/pkkid/python-plexapi/issues/143#issuecomment-775485186
class HostNameIgnoringAdapter(requests.adapters.HTTPAdapter):
class HostNameIgnoringAdapter(RequestsHTTPAdapter):
def init_poolmanager(self, connections, maxsize, block=..., **pool_kwargs):
self.poolmanager = PoolManager(
num_pools=connections,
@@ -30,107 +37,77 @@ class HostNameIgnoringAdapter(requests.adapters.HTTPAdapter):
)
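An adapter subclass like this is normally mounted on a requests.Session so that every HTTPS request goes through the relaxed pool manager; the exact PoolManager keyword arguments are truncated in this view, so the sketch below only shows the mounting step and is not the project's actual wiring:

import requests

# Assumes HostNameIgnoringAdapter from the hunk above is in scope.
session = requests.Session()
session.mount("https://", HostNameIgnoringAdapter())
# Requests made through this session now use the custom pool manager.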
def get_movie_guids(video, completed=True):
logger(f"Plex: {video.title} {video.guids} {video.locations}", 3)
def extract_guids_from_item(item: Union[Movie, Episode]) -> Dict[str, str]:
guids: Dict[str, str] = dict(
guid.id.split("://")
for guid in item.guids
if guid.id is not None and len(guid.id.strip()) > 0
)
movie_guids = {}
try:
for guid in video.guids:
# Extract source and id from guid.id
m = re.match(r"(.*)://(.*)", guid.id)
guid_source, guid_id = m.group(1).lower(), m.group(2)
movie_guids[guid_source] = guid_id
except Exception:
logger(f"Plex: Failed to get guids for {video.title}, Using location only", 1)
movie_guids["title"] = video.title
movie_guids["locations"] = tuple([x.split("/")[-1] for x in video.locations])
movie_guids["status"] = {
"completed": completed,
"time": video.viewOffset,
}
return movie_guids
def get_episode_guids(episode, show, completed=True):
episode_guids_temp = {}
try:
for guid in episode.guids:
# Extract after :// from guid.id
m = re.match(r"(.*)://(.*)", guid.id)
guid_source, guid_id = m.group(1).lower(), m.group(2)
episode_guids_temp[guid_source] = guid_id
except Exception:
if len(guids) == 0:
logger(
f"Plex: Failed to get guids for {episode.title} in {show.title}, Using location only",
f"Plex: Failed to get any guids for {item.title}, Using location only",
1,
)
episode_guids_temp["title"] = episode.title
episode_guids_temp["locations"] = tuple(
[x.split("/")[-1] for x in episode.locations]
)
return guids
episode_guids_temp["status"] = {
"completed": completed,
"time": episode.viewOffset,
}
return episode_guids_temp
def get_guids(item: Union[Movie, Episode], completed=True):
return {
"title": item.title,
"locations": tuple([location.split("/")[-1] for location in item.locations]),
"status": {
"completed": completed,
"time": item.viewOffset,
},
} | extract_guids_from_item(
item
) # Merge the metadata and guid dictionaries
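The merge in get_guids uses the dict union operator, where the right-hand operand wins on duplicate keys, so a guid would override a metadata field of the same name (none collide here). A small standalone sketch with made-up values:

metadata = {"title": "Example", "locations": ("Example.mkv",)}
guids = {"imdb": "tt0000000", "tmdb": "12345"}  # illustrative ids only
merged = metadata | guids  # right-hand side wins on duplicate keys (Python 3.9+)
print(merged)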
def get_user_library_watched_show(show):
try:
show_guids = {}
try:
for show_guid in show.guids:
# Extract source and id from guid.id
m = re.match(r"(.*)://(.*)", show_guid.id)
show_guid_source, show_guid_id = m.group(1).lower(), m.group(2)
show_guids[show_guid_source] = show_guid_id
except Exception:
logger(
f"Plex: Failed to get guids for {show.title}, Using location only", 1
show_guids: FrozenSet = frozenset(
(
{
"title": show.title,
"locations": tuple(
[location.split("/")[-1] for location in show.locations]
),
}
| extract_guids_from_item(show)
).items() # Merge the metadata and guid dictionaries
)
watched_episodes = show.watched()
episode_guids = {
# Offset group data because the first value will be the key
season: [episode[1] for episode in episodes]
for season, episodes
# Group episodes by first element of tuple (episode.parentIndex)
in itertools_groupby(
[
(
episode.parentIndex,
get_guids(episode, completed=episode in watched_episodes),
)
for episode in show.episodes()
# Only include watched episodes, or partially watched episodes with at least a minute viewed
if episode in watched_episodes or episode.viewOffset >= 60000
],
operator.itemgetter(0),
)
show_guids["title"] = show.title
show_guids["locations"] = tuple([x.split("/")[-1] for x in show.locations])
show_guids = frozenset(show_guids.items())
# Get all watched episodes for show
episode_guids = {}
watched = show.watched()
for episode in show.episodes():
if episode in watched:
if episode.parentIndex not in episode_guids:
episode_guids[episode.parentIndex] = []
episode_guids[episode.parentIndex].append(
get_episode_guids(episode, show, completed=True)
)
elif episode.viewOffset > 0:
if episode.parentIndex not in episode_guids:
episode_guids[episode.parentIndex] = []
episode_guids[episode.parentIndex].append(
get_episode_guids(episode, show, completed=False)
)
}
return show_guids, episode_guids
except Exception:
return {}, {}
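One subtlety of the grouping above: itertools.groupby only merges consecutive equal keys, which works here as long as show.episodes() yields episodes grouped by season (assumed from plexapi's ordering). A reduced sketch of the same pattern:

import operator
from itertools import groupby

episodes = [(1, "S01E01"), (1, "S01E02"), (2, "S02E01")]  # (parentIndex, guids) stand-ins
by_season = {
    season: [item[1] for item in items]
    for season, items in groupby(episodes, operator.itemgetter(0))
}
print(by_season)  # {1: ['S01E01', 'S01E02'], 2: ['S02E01']}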
def get_user_library_watched(user, user_plex, library):
user_name: str = user.title.lower()
try:
user_name = user.username.lower() if user.username else user.title.lower()
user_watched = {}
user_watched[user_name] = {}
logger(
f"Plex: Generating watched for {user_name} in library {library.title}",
0,
@@ -139,58 +116,49 @@ def get_user_library_watched(user, user_plex, library):
library_videos = user_plex.library.section(library.title)
if library.type == "movie":
user_watched[user_name][library.title] = []
watched = []
# Get all watched movies
for video in library_videos.search(unwatched=False):
logger(f"Plex: Adding {video.title} to {user_name} watched list", 3)
movie_guids = get_movie_guids(video, completed=True)
user_watched[user_name][library.title].append(movie_guids)
# Get all partially watched movies greater than 1 minute
for video in library_videos.search(inProgress=True):
if video.viewOffset < 60000:
continue
logger(f"Plex: Adding {video.title} to {user_name} watched list", 3)
movie_guids = get_movie_guids(video, completed=False)
user_watched[user_name][library.title].append(movie_guids)
args = [
[get_guids, video, True]
for video
# Get all watched movies
in library_videos.search(unwatched=False)
] + [
[get_guids, video, False]
for video
# Get all partially watched movies
in library_videos.search(inProgress=True)
# Only include partially watched movies with at least a minute viewed
if video.viewOffset >= 60000
]
for guid in future_thread_executor(args, threads=min(os.cpu_count(), 4)):
logger(f"Plex: Adding {guid['title']} to {user_name} watched list", 3)
watched.append(guid)
elif library.type == "show":
user_watched[user_name][library.title] = {}
watched = {}
# Parallelize show processing
args = []
# Get all watched shows
for show in library_videos.search(unwatched=False):
args.append([get_user_library_watched_show, show])
# Get all partially watched shows
for show in library_videos.search(inProgress=True):
args.append([get_user_library_watched_show, show])
# Get all watched shows and partially watched shows
args = [
(get_user_library_watched_show, show)
for show in library_videos.search(unwatched=False)
+ library_videos.search(inProgress=True)
]
for show_guids, episode_guids in future_thread_executor(args, threads=4):
if show_guids and episode_guids:
# append show, season, episode
if show_guids not in user_watched[user_name][library.title]:
user_watched[user_name][library.title][show_guids] = {}
user_watched[user_name][library.title][show_guids] = episode_guids
watched[show_guids] = episode_guids
logger(
f"Plex: Added {episode_guids} to {user_name} {show_guids} watched list",
3,
)
else:
watched = None
logger(f"Plex: Got watched for {user_name} in library {library.title}", 1)
if library.title in user_watched[user_name]:
logger(f"Plex: {user_watched[user_name][library.title]}", 3)
logger(f"Plex: {watched}", 3)
return user_watched
return {user_name: {library.title: watched} if watched is not None else {}}
except Exception as e:
logger(
f"Plex: Failed to get watched for {user_name} in library {library.title}, Error: {e}",


@@ -6,11 +6,17 @@ from src.library import generate_library_guids_dict
def combine_watched_dicts(dicts: list):
# Ensure that the input is a list of dictionaries
if not all(isinstance(d, dict) for d in dicts):
raise ValueError("Input must be a list of dictionaries")
combined_dict = {}
for single_dict in dicts:
for key, value in single_dict.items():
if key not in combined_dict:
combined_dict[key] = {}
for subkey, subvalue in value.items():
if subkey in combined_dict[key]:
# If the subkey already exists in the combined dictionary,
@@ -117,11 +123,18 @@ def cleanup_watched(
show_key_dict = dict(show_key_1)
for season in watched_list_1[user_1][library_1][show_key_1]:
# Filter the episode_watched_list_2_keys_dict dictionary to handle cases
# where episode location names are not unique such as S01E01.mkv
filtered_episode_watched_list_2_keys_dict = (
filter_episode_watched_list_2_keys_dict(
episode_watched_list_2_keys_dict, show_key_dict, season
)
)
for episode in watched_list_1[user_1][library_1][show_key_1][
season
]:
episode_index = get_episode_index_in_dict(
episode, episode_watched_list_2_keys_dict
episode, filtered_episode_watched_list_2_keys_dict
)
if episode_index is not None:
if check_remove_entry(
@@ -217,6 +230,59 @@ def get_movie_index_in_dict(movie, movies_watched_list_2_keys_dict):
return None
def filter_episode_watched_list_2_keys_dict(
episode_watched_list_2_keys_dict, show_key_dict, season
):
# Filter the episode_watched_list_2_keys_dict dictionary to only include values for the correct show and season
filtered_episode_watched_list_2_keys_dict = {}
show_indecies = []
season_indecies = []
# Iterate through episode_watched_list_2_keys_dict["season"] and find the indecies that match season
for season_index, season_value in enumerate(
episode_watched_list_2_keys_dict["season"]
):
if season_value == season:
season_indecies.append(season_index)
# Iterate through episode_watched_list_2_keys_dict["show"] and find the indecies that match show_key_dict
for show_index, show_value in enumerate(episode_watched_list_2_keys_dict["show"]):
# Iterate through the keys and values of the show_value dictionary and check if they match show_key_dict
for show_key, show_key_value in show_value.items():
if show_key == "locations":
# Iterate through the locations in the show_value dictionary
for location in show_key_value:
# If the location is in the episode_watched_list_2_keys_dict dictionary, return index of the key
if (
contains_nested(location, show_key_dict["locations"])
is not None
):
show_indecies.append(show_index)
break
else:
if show_key in show_key_dict.keys():
if show_key_value == show_key_dict[show_key]:
show_indecies.append(show_index)
break
# Find the intersection of the show_indecies and season_indecies lists
indecies = list(set(show_indecies) & set(season_indecies))
filtered_episode_watched_list_2_keys_dict = {}
# Create a copy of the dictionary with indecies that match the show and season and none that don't
for key, value in episode_watched_list_2_keys_dict.items():
if key not in filtered_episode_watched_list_2_keys_dict:
filtered_episode_watched_list_2_keys_dict[key] = []
for index, _ in enumerate(value):
if index in indecies:
filtered_episode_watched_list_2_keys_dict[key].append(value[index])
else:
filtered_episode_watched_list_2_keys_dict[key].append(None)
return filtered_episode_watched_list_2_keys_dict
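The helper pads non-matching positions with None instead of dropping them, so positions keep lining up across all of the parallel lists when episodes are looked up afterwards. A reduced sketch of that masking step, with illustrative column names:

columns = {"season": [1, 1, 2], "tvdb": ["a", "b", "c"]}
keep = {0, 1}  # positions whose show and season both matched
filtered = {
    key: [value if i in keep else None for i, value in enumerate(values)]
    for key, values in columns.items()
}
print(filtered)  # {'season': [1, 1, None], 'tvdb': ['a', 'b', None]}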
def get_episode_index_in_dict(episode, episode_watched_list_2_keys_dict):
# Iterate through the keys and values of the episode dictionary
for episode_key, episode_value in episode.items():


@@ -83,6 +83,16 @@ episode_titles = {
"tvdb": ["8444132"],
"completed": [True],
"time": [0],
"season": ["Season 1"],
"show": [
{
"imdb": "tt3581920",
"locations": ("The Last of Us",),
"title": "The Last of Us",
"tmdb": "100088",
"tvdb": "392256",
}
],
}
movie_titles = {
"imdb": ["tt2380307"],


@@ -18,102 +18,225 @@ from src.watched import cleanup_watched, combine_watched_dicts
tv_shows_watched_list_1 = {
frozenset(
{
("tvdb", "75710"),
("title", "Criminal Minds"),
("imdb", "tt0452046"),
("locations", ("Criminal Minds",)),
("tmdb", "4057"),
("locations", ("Doctor Who (2005) {tvdb-78804} {imdb-tt0436992}",)),
("imdb", "tt0436992"),
("tmdb", "57243"),
("tvdb", "78804"),
("title", "Doctor Who (2005)"),
}
): {
"Season 1": [
1: [
{
"imdb": "tt0550489",
"tmdb": "282843",
"tvdb": "176357",
"title": "Extreme Aggressor",
"locations": (
"Criminal Minds S01E01 Extreme Aggressor WEBDL-720p.mkv",
),
"imdb": "tt0563001",
"tmdb": "968589",
"tvdb": "295296",
"title": "The Unquiet Dead",
"locations": ("S01E03.mkv",),
"status": {"completed": True, "time": 0},
},
{
"imdb": "tt0550487",
"tmdb": "282861",
"tvdb": "300385",
"title": "Compulsion",
"locations": ("Criminal Minds S01E02 Compulsion WEBDL-720p.mkv",),
"imdb": "tt0562985",
"tmdb": "968590",
"tvdb": "295297",
"title": "Aliens of London (1)",
"locations": ("S01E04.mkv",),
"status": {"completed": False, "time": 240000},
},
{
"imdb": "tt0563003",
"tmdb": "968592",
"tvdb": "295298",
"title": "World War Three (2)",
"locations": ("S01E05.mkv",),
"status": {"completed": True, "time": 0},
},
]
},
frozenset({("title", "Test"), ("locations", ("Test",))}): {
"Season 1": [
frozenset(
{
("title", "Monarch: Legacy of Monsters"),
("imdb", "tt17220216"),
("tvdb", "422598"),
("tmdb", "202411"),
(
"locations",
("Monarch - Legacy of Monsters {tvdb-422598} {imdb-tt17220216}",),
),
}
): {
1: [
{
"title": "S01E01",
"locations": ("Test S01E01.mkv",),
"imdb": "tt21255044",
"tmdb": "4661246",
"tvdb": "10009418",
"title": "Secrets and Lies",
"locations": ("S01E03.mkv",),
"status": {"completed": True, "time": 0},
},
{
"title": "S01E02",
"locations": ("Test S01E02.mkv",),
"imdb": "tt21255050",
"tmdb": "4712059",
"tvdb": "10009419",
"title": "Parallels and Interiors",
"locations": ("S01E04.mkv",),
"status": {"completed": False, "time": 240000},
},
{
"imdb": "tt23787572",
"tmdb": "4712061",
"tvdb": "10009420",
"title": "The Way Out",
"locations": ("S01E05.mkv",),
"status": {"completed": True, "time": 0},
},
]
},
frozenset(
{
("tmdb", "125928"),
("imdb", "tt14681924"),
("tvdb", "403172"),
(
"locations",
("My Adventures with Superman {tvdb-403172} {imdb-tt14681924}",),
),
("title", "My Adventures with Superman"),
}
): {
1: [
{
"imdb": "tt15699926",
"tmdb": "3070048",
"tvdb": "8438181",
"title": "Adventures of a Normal Man (1)",
"locations": ("S01E01.mkv",),
"status": {"completed": True, "time": 0},
},
{
"title": "S01E04",
"locations": ("Test S01E04.mkv",),
"status": {"completed": False, "time": 5},
"imdb": "tt20413322",
"tmdb": "4568681",
"tvdb": "9829910",
"title": "Adventures of a Normal Man (2)",
"locations": ("S01E02.mkv",),
"status": {"completed": True, "time": 0},
},
{
"imdb": "tt20413328",
"tmdb": "4497012",
"tvdb": "9870382",
"title": "My Interview with Superman",
"locations": ("S01E03.mkv",),
"status": {"completed": True, "time": 0},
},
]
},
}
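Each show key in these fixtures is a frozenset of (key, value) pairs rather than a dict, because dicts are unhashable and cannot key another dict; cleanup_watched above turns it back into a dict with dict(show_key_1). A tiny sketch of that round trip:

show = {"title": "Doctor Who (2005)", "tvdb": "78804"}
show_key = frozenset(show.items())  # hashable, so it can be used as a dict key
watched = {show_key: {1: []}}       # season -> episode dicts, as in the fixtures
print(dict(show_key)["title"])      # round-trips back to a plain dict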
tv_shows_watched_list_2 = {
frozenset(
{
("tvdb", "75710"),
("title", "Criminal Minds"),
("imdb", "tt0452046"),
("locations", ("Criminal Minds",)),
("tmdb", "4057"),
("locations", ("Doctor Who (2005) {tvdb-78804} {imdb-tt0436992}",)),
("imdb", "tt0436992"),
("tmdb", "57243"),
("title", "Doctor Who"),
("tvdb", "78804"),
("tvrage", "3332"),
}
): {
"Season 1": [
1: [
{
"imdb": "tt0550487",
"tmdb": "282861",
"tvdb": "300385",
"title": "Compulsion",
"locations": ("Criminal Minds S01E02 Compulsion WEBDL-720p.mkv",),
"tvdb": "295294",
"imdb": "tt0562992",
"title": "Rose",
"locations": ("S01E01.mkv",),
"status": {"completed": True, "time": 0},
},
{
"imdb": "tt0550498",
"tmdb": "282865",
"tvdb": "300474",
"title": "Won't Get Fooled Again",
"locations": (
"Criminal Minds S01E03 Won't Get Fooled Again WEBDL-720p.mkv",
),
"tvdb": "295295",
"imdb": "tt0562997",
"title": "The End of the World",
"locations": ("S01E02.mkv",),
"status": {"completed": False, "time": 300670},
},
{
"tvdb": "295298",
"imdb": "tt0563003",
"title": "World War Three (2)",
"locations": ("S01E05.mkv",),
"status": {"completed": True, "time": 0},
},
]
},
frozenset({("title", "Test"), ("locations", ("Test",))}): {
"Season 1": [
frozenset(
{
("title", "Monarch: Legacy of Monsters"),
("imdb", "tt17220216"),
("tvdb", "422598"),
("tmdb", "202411"),
(
"locations",
("Monarch - Legacy of Monsters {tvdb-422598} {imdb-tt17220216}",),
),
}
): {
1: [
{
"title": "S01E02",
"locations": ("Test S01E02.mkv",),
"status": {"completed": False, "time": 10},
},
{
"title": "S01E03",
"locations": ("Test S01E03.mkv",),
"tvdb": "9959300",
"imdb": "tt20412166",
"title": "Aftermath",
"locations": ("S01E01.mkv",),
"status": {"completed": True, "time": 0},
},
{
"title": "S01E04",
"locations": ("Test S01E04.mkv",),
"status": {"completed": False, "time": 10},
"tvdb": "10009417",
"imdb": "tt22866594",
"title": "Departure",
"locations": ("S01E02.mkv",),
"status": {"completed": False, "time": 300741},
},
{
"tvdb": "10009420",
"imdb": "tt23787572",
"title": "The Way Out",
"locations": ("S01E05.mkv",),
"status": {"completed": True, "time": 0},
},
]
},
frozenset(
{
("tmdb", "125928"),
("imdb", "tt14681924"),
("tvdb", "403172"),
(
"locations",
("My Adventures with Superman {tvdb-403172} {imdb-tt14681924}",),
),
("title", "My Adventures with Superman"),
}
): {
1: [
{
"tvdb": "8438181",
"imdb": "tt15699926",
"title": "Adventures of a Normal Man (1)",
"locations": ("S01E01.mkv",),
"status": {"completed": True, "time": 0},
},
{
"tvdb": "9829910",
"imdb": "tt20413322",
"title": "Adventures of a Normal Man (2)",
"locations": ("S01E02.mkv",),
"status": {"completed": True, "time": 0},
},
{
"tvdb": "9870382",
"imdb": "tt20413328",
"title": "My Interview with Superman",
"locations": ("S01E03.mkv",),
"status": {"completed": True, "time": 0},
},
]
},
@@ -122,38 +245,61 @@ tv_shows_watched_list_2 = {
expected_tv_show_watched_list_1 = {
frozenset(
{
("tvdb", "75710"),
("title", "Criminal Minds"),
("imdb", "tt0452046"),
("locations", ("Criminal Minds",)),
("tmdb", "4057"),
("locations", ("Doctor Who (2005) {tvdb-78804} {imdb-tt0436992}",)),
("imdb", "tt0436992"),
("tmdb", "57243"),
("tvdb", "78804"),
("title", "Doctor Who (2005)"),
}
): {
"Season 1": [
1: [
{
"imdb": "tt0550489",
"tmdb": "282843",
"tvdb": "176357",
"title": "Extreme Aggressor",
"locations": (
"Criminal Minds S01E01 Extreme Aggressor WEBDL-720p.mkv",
),
"status": {"completed": True, "time": 0},
}
]
},
frozenset({("title", "Test"), ("locations", ("Test",))}): {
"Season 1": [
{
"title": "S01E01",
"locations": ("Test S01E01.mkv",),
"imdb": "tt0563001",
"tmdb": "968589",
"tvdb": "295296",
"title": "The Unquiet Dead",
"locations": ("S01E03.mkv",),
"status": {"completed": True, "time": 0},
},
{
"title": "S01E02",
"locations": ("Test S01E02.mkv",),
"imdb": "tt0562985",
"tmdb": "968590",
"tvdb": "295297",
"title": "Aliens of London (1)",
"locations": ("S01E04.mkv",),
"status": {"completed": False, "time": 240000},
},
]
},
frozenset(
{
("title", "Monarch: Legacy of Monsters"),
("imdb", "tt17220216"),
("tvdb", "422598"),
("tmdb", "202411"),
(
"locations",
("Monarch - Legacy of Monsters {tvdb-422598} {imdb-tt17220216}",),
),
}
): {
1: [
{
"imdb": "tt21255044",
"tmdb": "4661246",
"tvdb": "10009418",
"title": "Secrets and Lies",
"locations": ("S01E03.mkv",),
"status": {"completed": True, "time": 0},
},
{
"imdb": "tt21255050",
"tmdb": "4712059",
"tvdb": "10009419",
"title": "Parallels and Interiors",
"locations": ("S01E04.mkv",),
"status": {"completed": False, "time": 240000},
},
]
},
}
@@ -161,37 +307,57 @@ expected_tv_show_watched_list_1 = {
expected_tv_show_watched_list_2 = {
frozenset(
{
("tvdb", "75710"),
("title", "Criminal Minds"),
("imdb", "tt0452046"),
("locations", ("Criminal Minds",)),
("tmdb", "4057"),
("locations", ("Doctor Who (2005) {tvdb-78804} {imdb-tt0436992}",)),
("imdb", "tt0436992"),
("tmdb", "57243"),
("title", "Doctor Who"),
("tvdb", "78804"),
("tvrage", "3332"),
}
): {
"Season 1": [
1: [
{
"imdb": "tt0550498",
"tmdb": "282865",
"tvdb": "300474",
"title": "Won't Get Fooled Again",
"locations": (
"Criminal Minds S01E03 Won't Get Fooled Again WEBDL-720p.mkv",
),
"status": {"completed": True, "time": 0},
}
]
},
frozenset({("title", "Test"), ("locations", ("Test",))}): {
"Season 1": [
{
"title": "S01E03",
"locations": ("Test S01E03.mkv",),
"tvdb": "295294",
"imdb": "tt0562992",
"title": "Rose",
"locations": ("S01E01.mkv",),
"status": {"completed": True, "time": 0},
},
{
"title": "S01E04",
"locations": ("Test S01E04.mkv",),
"status": {"completed": False, "time": 10},
"tvdb": "295295",
"imdb": "tt0562997",
"title": "The End of the World",
"locations": ("S01E02.mkv",),
"status": {"completed": False, "time": 300670},
},
]
},
frozenset(
{
("title", "Monarch: Legacy of Monsters"),
("imdb", "tt17220216"),
("tvdb", "422598"),
("tmdb", "202411"),
(
"locations",
("Monarch - Legacy of Monsters {tvdb-422598} {imdb-tt17220216}",),
),
}
): {
1: [
{
"tvdb": "9959300",
"imdb": "tt20412166",
"title": "Aftermath",
"locations": ("S01E01.mkv",),
"status": {"completed": True, "time": 0},
},
{
"tvdb": "10009417",
"imdb": "tt22866594",
"title": "Departure",
"locations": ("S01E02.mkv",),
"status": {"completed": False, "time": 300741},
},
]
},
@@ -199,61 +365,92 @@ expected_tv_show_watched_list_2 = {
movies_watched_list_1 = [
{
"imdb": "tt2380307",
"tmdb": "354912",
"title": "Coco",
"locations": ("Coco (2017) Remux-1080p.mkv",),
"imdb": "tt1254207",
"tmdb": "10378",
"tvdb": "12352",
"title": "Big Buck Bunny",
"locations": ("Big Buck Bunny.mkv",),
"status": {"completed": True, "time": 0},
},
{
"tmdbcollection": "448150",
"imdb": "tt1431045",
"tmdb": "293660",
"title": "Deadpool",
"locations": ("Deadpool (2016) Remux-1080p.mkv",),
"imdb": "tt16431870",
"tmdb": "1029575",
"tvdb": "351194",
"title": "The Family Plan",
"locations": ("The Family Plan (2023).mkv",),
"status": {"completed": True, "time": 0},
},
{
"imdb": "tt5537002",
"tmdb": "466420",
"tvdb": "135852",
"title": "Killers of the Flower Moon",
"locations": ("Killers of the Flower Moon (2023).mkv",),
"status": {"completed": False, "time": 240000},
},
]
movies_watched_list_2 = [
{
"imdb": "tt2380307",
"tmdb": "354912",
"title": "Coco",
"locations": ("Coco (2017) Remux-1080p.mkv",),
"imdb": "tt16431870",
"tmdb": "1029575",
"title": "The Family Plan",
"locations": ("The Family Plan (2023).mkv",),
"status": {"completed": True, "time": 0},
},
{
"imdb": "tt0384793",
"tmdb": "9788",
"tvdb": "9103",
"title": "Accepted",
"locations": ("Accepted (2006) Remux-1080p.mkv",),
"imdb": "tt4589218",
"tmdb": "507089",
"title": "Five Nights at Freddy's",
"locations": ("Five Nights at Freddy's (2023).mkv",),
"status": {"completed": True, "time": 0},
},
{
"imdb": "tt10545296",
"tmdb": "695721",
"tmdbcollection": "131635",
"title": "The Hunger Games: The Ballad of Songbirds & Snakes",
"locations": ("The Hunger Games The Ballad of Songbirds & Snakes (2023).mkv",),
"status": {"completed": False, "time": 301215},
},
]
expected_movie_watched_list_1 = [
{
"tmdbcollection": "448150",
"imdb": "tt1431045",
"tmdb": "293660",
"title": "Deadpool",
"locations": ("Deadpool (2016) Remux-1080p.mkv",),
"imdb": "tt1254207",
"tmdb": "10378",
"tvdb": "12352",
"title": "Big Buck Bunny",
"locations": ("Big Buck Bunny.mkv",),
"status": {"completed": True, "time": 0},
}
},
{
"imdb": "tt5537002",
"tmdb": "466420",
"tvdb": "135852",
"title": "Killers of the Flower Moon",
"locations": ("Killers of the Flower Moon (2023).mkv",),
"status": {"completed": False, "time": 240000},
},
]
expected_movie_watched_list_2 = [
{
"imdb": "tt0384793",
"tmdb": "9788",
"tvdb": "9103",
"title": "Accepted",
"locations": ("Accepted (2006) Remux-1080p.mkv",),
"imdb": "tt4589218",
"tmdb": "507089",
"title": "Five Nights at Freddy's",
"locations": ("Five Nights at Freddy's (2023).mkv",),
"status": {"completed": True, "time": 0},
}
},
{
"imdb": "tt10545296",
"tmdb": "695721",
"tmdbcollection": "131635",
"title": "The Hunger Games: The Ballad of Songbirds & Snakes",
"locations": ("The Hunger Games The Ballad of Songbirds & Snakes (2023).mkv",),
"status": {"completed": False, "time": 301215},
},
]
# Test to see if objects get deleted all the way up to the root.


@@ -0,0 +1,73 @@
# Check the mark.log file that is generated by the CI to make sure it contains the expected values
import os
def read_marklog():
marklog = os.path.join(os.getcwd(), "mark.log")
with open(marklog, "r") as f:
lines = f.readlines()
return lines
def check_marklog(lines, expected_values):
try:
# Check to make sure the marklog contains all the expected values and nothing else
found_values = []
for line in lines:
# Remove the newline character
line = line.strip()
if line not in expected_values:
raise Exception("Line not found in marklog: " + line)
found_values.append(line)
# Check to make sure the marklog contains the same number of values as the expected values
if len(found_values) != len(expected_values):
raise Exception(
"Marklog did not contain the same number of values as the expected values, found "
+ str(len(found_values))
+ " values, expected "
+ str(len(expected_values))
+ " values"
)
# Check that the two lists contain the same values
if sorted(found_values) != sorted(expected_values):
raise Exception(
"Marklog did not contain the same values as the expected values, found:\n"
+ "\n".join(sorted(found_values))
+ "\n\nExpected:\n"
+ "\n".join(sorted(expected_values))
)
return True
except Exception as e:
print(e)
return False
def main():
expected_values = [
"jellyplex_watched/Movies/Five Nights at Freddy's",
"jellyplex_watched/Movies/The Hunger Games: The Ballad of Songbirds & Snakes/301215",
"jellyplex_watched/TV Shows/Doctor Who (2005)/Rose",
"jellyplex_watched/TV Shows/Doctor Who (2005)/The End of the World/300670",
"jellyplex_watched/TV Shows/Monarch: Legacy of Monsters/Aftermath",
"jellyplex_watched/TV Shows/Monarch: Legacy of Monsters/Departure/300741",
"JellyUser/Movies/Big Buck Bunny",
"JellyUser/Shows/Doctor Who/The Unquiet Dead",
"JellyUser/Shows/Monarch: Legacy of Monsters/Secrets and Lies",
]
lines = read_marklog()
if not check_marklog(lines, expected_values):
print("Failed to validate marklog")
exit(1)
print("Successfully validated marklog")
exit(0)
if __name__ == "__main__":
main()
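check_marklog can also be exercised on its own when adjusting expected_values; a minimal sketch, assuming the functions above are importable as test.validate_ci_marklog:

from test.validate_ci_marklog import check_marklog  # import path assumed

lines = ["JellyUser/Movies/Big Buck Bunny\n"]
print(check_marklog(lines, ["JellyUser/Movies/Big Buck Bunny"]))  # True
print(check_marklog(lines, []))  # False, and the reason is printed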