#!/usr/bin/python
"""
Update the cache (or skip the update with --noupdate) and search it for rec_ids:

    tablocache3.py [--noupdate] --search search_pattern

Update the cache with new recordings and deletions:

    tablocache3.py
"""
import urllib2
import urllib
import json
import os
import sys
import time
import re
import getopt


def metadir(type_dir, new_id):
    # Create (if needed) and return the per-recording metadata directory.
    meta_dir = type_dir + '/' + str(new_id)
    if (not os.path.isdir(meta_dir)):
        os.makedirs(meta_dir)
    return meta_dir


def usage():
    print __doc__
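
# The code below repeatedly pairs urllib2.urlopen() with json.load().
# As an optional sketch (a convenience helper, not wired into the flow
# below), that pattern could be centralized like this:
def get_json(url):
    """Fetch url and decode its JSON body (convenience sketch; unused)."""
    return json.load(urllib2.urlopen(url))
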
mediatypes = ["TV", "Movies", "Sports", "Manual"]

# Some constants (maybe we find TABLOIP dynamically? Multiple Tablos, etc.)
GET_TABLO_ROOT = '/tmp/GetTablo'
TABLO_IPS = ['192.168.0.70']

# Get a timestamp string (currently unused)
timestamp = time.strftime('%Y%m%d%H%M%S')

# If GET_TABLO_ROOT does not exist create it.
if (not os.path.isdir(GET_TABLO_ROOT)):
    os.mkdir(GET_TABLO_ROOT)

try:
    opts, args = getopt.getopt(sys.argv[1:], 's:n', ['search=', 'noupdate'])
except getopt.GetoptError:
    usage()
    sys.exit(2)

search_pat = ''
update = True
for opt, arg in opts:
    if opt in ("-s", "--search"):
        search_pat = arg
    elif opt in ("-n", "--noupdate"):
        update = False

# For sake of simplicity, a "tablo" is a tablo IP
for tablo in TABLO_IPS:
    # If the tablo subdir, named for the tablo IP, does not exist create it.
    tablo_dir = GET_TABLO_ROOT + '/' + tablo
    if (not os.path.isdir(tablo_dir)):
        os.mkdir(tablo_dir)
    tablo_vid_dir = tablo_dir + '/Tablo'
    if (not os.path.isdir(tablo_vid_dir)):
        os.mkdir(tablo_vid_dir)

    # Set cache file names and the url to nab recording ids from
    rec_ids_cache_file = tablo_dir + '/rec_ids_cache.json'
    rec_ids_db_file = tablo_dir + '/rec_ids_db.json'
    rec_ids_url = 'http://' + tablo + ':18080/plex/rec_ids'

    # Load the cache db (the file stores bare "id": {...} records, so wrap
    # the contents in braces to form a valid JSON object)
    if (os.path.isfile(rec_ids_db_file)):
        with open(rec_ids_db_file, 'r') as f:
            db_recs = json.loads("{" + f.read() + "}")
    else:
        db_recs = {}

    # Default is to always update the cache unless --noupdate
    if (update):
        # Channel ids
        ch_ids_url = 'http://' + tablo + ':18080/plex/ch_ids'

        # Get channel ids and info
        jch_ids = urllib2.urlopen(ch_ids_url)
        ch_ids = json.load(jch_ids)
        channels = {}
        for ch_id in ch_ids["ids"]:
            ch_info_url = 'http://' + tablo + ':18080/plex/ch_info?id={}'
            ch_data_json = urllib2.urlopen(ch_info_url.format(ch_id))
            ch_data = json.load(ch_data_json)
            channel_num = (str(ch_data["meta"]["channelNumberMajor"]) + "." +
                           str(ch_data["meta"]["channelNumberMinor"]))
            channel_sign = ch_data["meta"]["callSign"].encode('utf-8').strip()
            try:
                channel_affiliate = ch_data["meta"]["affiliateCallSign"].encode('utf-8').strip()
            except Exception:
                channel_affiliate = ""
            channel_res_name = ch_data["meta"]["resolution"]["title"].encode('utf-8').strip()
            channel_res_width = str(ch_data["meta"]["resolution"]["width"])
            channel_res_height = str(ch_data["meta"]["resolution"]["height"])
            channels[channel_num] = {"channel_num": channel_num,
                                     "channel_sign": channel_sign,
                                     "channel_affiliate": channel_affiliate,
                                     "channel_res_name": channel_res_name,
                                     "channel_res_width": channel_res_width,
                                     "channel_res_height": channel_res_height}
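        # Illustrative shape of one channels entry (values are made up):
        #   channels["7.1"] = {"channel_num": "7.1", "channel_sign": "KABC",
        #                      "channel_affiliate": "ABC",
        #                      "channel_res_name": "1080i",
        #                      "channel_res_width": "1920",
        #                      "channel_res_height": "1080"}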
        # Get current known recording ids from the tablo
        jrec_ids = urllib2.urlopen(rec_ids_url)
        rec_ids = json.load(jrec_ids)

        # Get recording ids from the last run's cache file
        # and try to figure out new recordings and deletions
        if (os.path.isfile(rec_ids_cache_file)):
            with open(rec_ids_cache_file, 'r') as f:
                oldrec_ids = json.load(f)
        else:
            # You could erase the rec_ids_cache_file to force a re-get of
            # all recording meta data
            oldrec_ids = {"ids": []}

        # Find new rec_ids and perhaps removed ones
        oldrec_ids_set = set(oldrec_ids["ids"])
        newrec_ids_set = set(rec_ids["ids"])
        new_ids = [aa for aa in newrec_ids_set if aa not in oldrec_ids_set]
        removed_ids = [aa for aa in oldrec_ids_set if aa not in newrec_ids_set]
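        # For example, with toy ids: old {1, 2, 3} and new {2, 3, 4} yield
        # new_ids == [4] and removed_ids == [1].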
        # Create the Trash dirs if not present
        trash_root = tablo_dir + "/Trash"
        for mediatype in mediatypes:
            trash_dir = trash_root + "/" + mediatype
            if (not os.path.isdir(trash_dir)):
                os.makedirs(trash_dir)

        # Now let us loop through removed_ids (if we know/have them in the
        # local meta db, that is) and remove them from the cache db
        for remove_id in removed_ids:
            remove_id_s = str(remove_id)
            print "Trying to remove " + remove_id_s
            try:
                del db_recs[remove_id_s]
            except KeyError:
                print "Error: Could not find rec_id: " + remove_id_s
                print "Sync problem?"

        # Preserve known db_recs from the existing cache db first
        records = []
        for rec_id in db_recs:
            records.append('"' + str(rec_id) + '": ' + json.dumps(db_recs[rec_id]))

        # Now let us loop through the new_ids and add their tablo meta data
        # to our cache db
        for new_id in new_ids:
            print 'Try to add ' + str(new_id)
            new_data_url = 'http://' + tablo + ':18080/plex/rec_info?id={}'
            new_data_json = urllib2.urlopen(new_data_url.format(new_id))
            new_data = json.load(new_data_json)
            try:
                new_data_meta = new_data["meta"]
            except KeyError:
                print 'No meta for ' + str(new_id)
                continue

            # Start each recording with a clean slate so an unrecognized
            # type cannot reuse values from the previous iteration
            rec_val = None
            # Just in case? Manual key type?
            title = 'Unknown ' + str(new_id)

            # Tablo uses non-uniform meta data content based on type
            if 'recSportEvent' in new_data_meta:
                teamnames = []
                print "Sporting Event"
                meta_type = "Sports"
                type_dir = tablo_vid_dir + "/" + meta_type
                meta_dir = metadir(type_dir, new_id)
                try:
                    air_date = new_data_meta['recSportEvent']['jsonForClient']['airDate'].encode('utf-8').strip()
                except Exception:
                    air_date = "1900-01-01T00:00Z"
                try:
                    title = new_data_meta['recSportEvent']['jsonForClient']['eventTitle'].encode('utf-8').strip()
                except Exception:
                    title = "Unknown " + str(new_id)
                try:
                    teams = new_data_meta['recSportEvent']['jsonForClient']['teams']
                except Exception:
                    teams = []
                for team in teams:
                    teamnames.append(team['title'].encode('utf-8').strip())
                try:
                    game_date = new_data_meta['recSportEvent']['jsonFromTribune']['program']['gameDate'].encode('utf-8').strip()
                except Exception:
                    game_date = "1900-01-01"
                try:
                    images1 = new_data_meta['recSportOrganization']['imageJson']['images']
                except Exception:
                    images1 = []
                try:
                    images2 = new_data_meta['recSportEvent']['imageJson']['images']
                except Exception:
                    images2 = []
                try:
                    channel_num = new_data_meta['recSportEvent']['jsonFromTribune']['channels'][0].encode('utf-8').strip()
                    chan_data = channels[channel_num]
                except Exception:
                    chan_data = {}
                rec_val = {'air_date': air_date, 'rec_id': new_id,
                           'meta_type': meta_type, 'title': title,
                           'teams': teamnames, 'game_date': game_date,
                           'channel': chan_data}
            elif 'recEpisode' in new_data_meta:
                print "TV Series/Program"
                meta_type = "TV"
                type_dir = tablo_vid_dir + "/" + meta_type
                meta_dir = metadir(type_dir, new_id)
                try:
                    air_date = new_data_meta['recEpisode']['jsonForClient']['airDate'].encode('utf-8').strip()
                except Exception:
                    air_date = "1900-01-01T00:00Z"
                try:
                    original_air_date = new_data_meta['recEpisode']['jsonForClient']['originalAirDate'].encode('utf-8').strip()
                except Exception:
                    original_air_date = "1900-01-01"
                try:
                    series = new_data_meta['recSeries']['jsonForClient']['title'].encode('utf-8').strip()
                except Exception:
                    series = "Unknown " + str(new_id)
                try:
                    episodetitle = new_data_meta['recEpisode']['jsonForClient']['title'].encode('utf-8').strip()
                except Exception:
                    episodetitle = "Unknown"
                try:
                    seasonnum = new_data_meta['recEpisode']['jsonForClient']['seasonNumber']
                except Exception:
                    seasonnum = -1
                try:
                    episodenum = new_data_meta['recEpisode']['jsonForClient']['episodeNumber']
                except Exception:
                    episodenum = -1
                # Build e.g. "Series - s01e05 - Episode Title" when season
                # and episode numbers are known
                if (seasonnum >= 0 and episodenum >= 0):
                    title = series + " - " + "s{0:02d}e{1:02d}".format(seasonnum, episodenum) + " - " + episodetitle
                else:
                    title = series + " - " + episodetitle
                try:
                    cast = new_data_meta['recSeries']['jsonForClient']['cast']
                except Exception:
                    cast = []
                try:
                    images1 = new_data_meta['recSeries']['imageJson']['images']
                except Exception:
                    images1 = []
                try:
                    images2 = new_data_meta['recEpisode']['imageJson']['images']
                except Exception:
                    images2 = []
                try:
                    channel_num = new_data_meta['recEpisode']['jsonFromTribune']['channels'][0].encode('utf-8').strip()
                    chan_data = channels[channel_num]
                except Exception:
                    chan_data = {}
                rec_val = {'air_date': air_date, 'rec_id': new_id,
                           'meta_type': meta_type, 'title': title,
                           'cast': cast,
                           'original_air_date': original_air_date,
                           'channel': chan_data}
            elif 'recMovie' in new_data_meta:
                print "Movie"
                # "Movies" matches the mediatypes list and the Trash dir names
                meta_type = "Movies"
                type_dir = tablo_vid_dir + "/" + meta_type
                meta_dir = metadir(type_dir, new_id)
                try:
                    air_date = new_data_meta['recMovieAiring']['jsonForClient']['airDate'].encode('utf-8').strip()
                except Exception:
                    air_date = "1900-01-01T00:00Z"
                try:
                    release_year = new_data_meta['recMovie']['jsonForClient']['releaseYear']
                except Exception:
                    release_year = 0
                try:
                    title = new_data_meta['recMovie']['jsonForClient']['title'].encode('utf-8').strip()
                except Exception:
                    title = "Unknown " + str(new_id)
                if (release_year):
                    title = title + " (" + str(release_year) + ")"
                try:
                    cast = new_data_meta['recMovie']['jsonForClient']['cast']
                except Exception:
                    cast = []
                try:
                    images1 = new_data_meta['recMovie']['imageJson']['images']
                except Exception:
                    images1 = []
                try:
                    images2 = new_data_meta['recMovieAiring']['imageJson']['images']
                except Exception:
                    images2 = []
                try:
                    channel_num = new_data_meta['recMovieAiring']['jsonFromTribune']['channels'][0].encode('utf-8').strip()
                    chan_data = channels[channel_num]
                except Exception:
                    chan_data = {}
                rec_val = {'air_date': air_date, 'rec_id': new_id,
                           'meta_type': meta_type, 'title': title,
                           'cast': cast, 'release_year': release_year,
                           'channel': chan_data}
            elif 'recManual' in new_data_meta:
                print "Manual"
                meta_type = "Manual"
                type_dir = tablo_vid_dir + "/" + meta_type
                meta_dir = metadir(type_dir, new_id)
                try:
                    air_date = new_data_meta['recManualProgramAiring']['jsonForClient']['airDate'].encode('utf-8').strip()
                except Exception:
                    air_date = "1900-01-01T00:00Z"
                try:
                    title = new_data_meta['recManualProgram']['jsonForClient']['title'].encode('utf-8').strip()
                except Exception:
                    title = "Unknown " + str(new_id)
                images1 = []
                images2 = []
                chan_data = {}
                rec_val = {'air_date': air_date, 'rec_id': new_id,
                           'meta_type': meta_type, 'title': title,
                           'channel': chan_data}

            if rec_val is None:
                # Unrecognized recording type; skip rather than writing
                # stale data from a previous iteration
                print 'Unknown recording type for ' + str(new_id)
                continue

            # Also add the new recording to db_recs so a --search in this
            # same run can see it
            db_recs[str(new_id)] = rec_val
            records.append('"' + str(new_id) + '": ' + json.dumps(rec_val))

            # Grab any poster/thumbnail images for the recording
            images = images1 + images2
            for image in images:
                imageid = image['imageID']
                image_url = 'http://' + tablo + '/stream/thumb?id={}'
                imagefile = meta_dir + '/' + image['imageStyle'] + '.jpg'
                urllib.urlretrieve(image_url.format(imageid), imagefile)

            titlemeta_file = meta_dir + '/title.txt'
            with open(titlemeta_file, 'w') as f:
                f.write(title)
            print title

            meta_file = meta_dir + '/meta.json'
            with open(meta_file, 'w') as f:
                json.dump(new_data_meta, f)

        # Write the db back out (note: if the db ends up empty, the old
        # file is left in place)
        if records:
            with open(rec_ids_db_file, 'w') as f:
                f.write(",\n".join(records))

        # Note, maybe we do this last in case of code problems?
        # Save current recording ids to use as the difference in the next run
        with open(rec_ids_cache_file, 'w') as f:
            json.dump(rec_ids, f)

    # Attempt the search if a pattern was provided
    # Note, --noupdate with no search_pat does nothing
    if (search_pat):
        for rec_id in db_recs:
            record_s = json.dumps(db_recs[rec_id])
            if (re.search(search_pat, record_s)):
                print rec_id
                sys.stderr.write(json.dumps(db_recs[rec_id], sort_keys=True,
                                            indent=4) + "\n")

sys.exit()
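
# Example invocations (the search pattern is a regular expression that
# re.search() runs against each cached record's JSON text; the patterns
# below are illustrative):
#   tablocache3.py                          # update the cache from the Tablo
#   tablocache3.py -n -s 'Nova'             # search the cache without updating
#   tablocache3.py -s '"meta_type": "TV"'   # match on a specific field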