cast/ChannelProvider/SVT/__init__.py

#!/usr/bin/env python3
import hashlib
import io
import json
import os
import pickle

import feedparser
import requests
import wx
from bs4 import BeautifulSoup

import ChannelProvider
from Items import Item
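
# Channel provider for SVT Play: parses the svtplay.se RSS feed, scrapes
# each entry's page for its video id, resolves a stream URL through the
# SVT video API, and pickles the resolved entries to a small on-disk cache.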
class SVT(ChannelProvider.ChannelProvider):
    m_cachefile = '/tmp/svt_cache'
    def __init__(self) -> None:
        super().__init__('SVT', 'http://www.svtplay.se/rss.xml')
        # Per-instance cache of already-resolved entries, persisted with
        # pickle so restarts skip the scraping round trips.
        self.m_cache: dict = dict()
        if os.path.exists(self.m_cachefile):
            with open(self.m_cachefile, 'rb') as cachehandle:
                self.m_cache = pickle.load(cachehandle)
        self.m_items: list[Item] = self.parse_feed()

    def refresh_items(self) -> None:
        self.m_items = self.parse_feed()
    def parse_feed(self) -> list[Item]:
        feed = feedparser.parse(self.get_feed())
        entries = feed['entries']
        items: list[Item] = list()
        for entry in entries:
            key = hashlib.sha256(entry['link'].encode('utf-8')).hexdigest()
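            # Entries are keyed by a SHA-256 digest of their link; a
            # previously seen entry is served straight from the cache.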
            if key in self.m_cache:
                cached = self.m_cache[key]
                thumbnail_link = cached['thumbnail_link']
                content = cached['content']
                resolved_link = cached['resolved_link']
                description = cached['description']
                published_parsed = cached['published_parsed']
                title = cached['title']
            else:
                svt_id = ''
                thumbnail_link = ''
                # The RSS entry carries the thumbnail as an image/* link.
                for link in entry['links']:
                    if str(link['type']).startswith('image/'):
                        thumbnail_link = str(link['href'])
                        break
                # Scrape the episode page for the play button, whose href
                # carries the video id as a query parameter.
                page = requests.get(str(entry['link']))
                soup = BeautifulSoup(page.text, 'html.parser')
                for element in soup.find_all('a'):
                    href = element.get('href')
                    datart = element.get('data-rt')
                    if datart == 'top-area-play-button':
                        svt_id = href.split('=')[1].split('&')[0]
                api = json.loads(
                    requests.get(
                        'https://api.svt.se/video/{}'.format(svt_id)).text)
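                # Pick the stream URL for the 'dashhbbtv' format out of the
                # video references returned by the API.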
                resolved_link = ''
                for reference in api['videoReferences']:
                    if reference['format'] == 'dashhbbtv':
                        resolved_link = reference['url']
                print(resolved_link)
                description = str(entry['description'])
                published_parsed = entry['published_parsed']
                title = str(entry['title'])
                res = requests.get(thumbnail_link)
                content = res.content
                self.m_cache[key] = {
                    'thumbnail_link': thumbnail_link,
                    'content': content,
                    'resolved_link': resolved_link,
                    'description': description,
                    'published_parsed': published_parsed,
                    'title': title,
                }
            # Decode the thumbnail bytes at loop level so cache hits get a
            # bitmap too, instead of reusing a buffer from a prior iteration.
            content_bytes = io.BytesIO(content)
            image = wx.Image(content_bytes,
                             type=wx.BITMAP_TYPE_ANY,
                             index=-1)
            thumbnail = wx.Bitmap(image)
            item = Item(description, resolved_link, self.m_provider_name,
                        published_parsed, thumbnail, title)
            items.append(item)
        # write to cache file
        with open(self.m_cachefile, 'wb') as cachehandle:
            pickle.dump(self.m_cache, cachehandle)
        return items
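
# A minimal usage sketch, not part of the module: it assumes the host app
# creates a wx.App first (wx.Image/wx.Bitmap need one) and that the
# inherited ChannelProvider.get_feed() serves the RSS URL given above.
#
#   app = wx.App()
#   provider = SVT()
#   for item in provider.m_items:
#       ...  # hand items to the UI
#   provider.refresh_items()  # re-parse the feed on demand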