#!/bin/bash
# rsstube - get RSS feed for YouTube channels and other sites
# Copyright (C) 2019
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
# change to the directory containing the rsstube executable,
# so relative paths to the scripts/ directory resolve
cd "$(dirname "$(readlink -f "$0")")" || exit 1
### UTILITIES
ERR='\033[1;31m'  # bold red
WARN='\033[1;33m' # bold yellow
NC='\033[0m'      # reset colour
## for general output
notify() {
    if [[ -z "$quiet" ]]
    then
        output "${1}"
    fi
}
## for warning-level information, output in yellow
warn() {
    if [[ -z "$quiet" ]]
    then
        echo -e "${WARN}${1}${NC}"
    fi
}
## for errors; cannot be silenced, output in red
error() {
    echo -e "${ERR}${1}${NC}"
}
## failed to get feed; report and terminate with an error status
fail() {
    error "Unable to get RSS feed."
    exit 1
}
## general output, no formatting
output() {
    echo "${1}"
}
## determine whether the URL specifies HTTP or HTTPS
## (some sites may not support HTTPS, so default to HTTP when no scheme is given)
determine_protocol() {
    protocol=${url%%//*}
    if [[ "$protocol" == "$url" ]]
    then
        # no "//" in the URL: no scheme given
        protocol="http://"
    else
        protocol="${protocol}//"
    fi
}
## get domain of URL
get_domain() {
    domain=${url#*//}
    domain=${domain%%/*}
}
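## worked example (illustrative values only, not executed):
##   url="https://www.youtube.com/channel/abc"
##     determine_protocol -> protocol="https://"
##     get_domain         -> domain="www.youtube.com"
##   url="example.com/feed" (no scheme)
##     determine_protocol -> protocol="http://"
##     get_domain         -> domain="example.com"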
## attempt to get feed from the URL alone
## if successful, output feed and terminate
url_extract() {
    if [[ -f "scripts/url-extractors/${site}" ]]
    then
        source "scripts/url-extractors/${site}"
    fi
    if [[ -n "$url_ext" ]]
    then
        output "$url_ext"
        exit
    fi
}
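## a url-extractor is sourced with $url, $protocol and $domain already set,
## and signals success by setting url_ext to the feed URL.
## hypothetical sketch of scripts/url-extractors/youtube (not the real file):
##   case "$url" in
##       *"/channel/"*)
##           channel_id=${url##*/channel/}
##           channel_id=${channel_id%%[/?]*}
##           url_ext="${protocol}www.youtube.com/feeds/videos.xml?channel_id=${channel_id}"
##           ;;
##   esac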
## attempt to get feed from downloaded page data
## if successful, output feed and terminate
page_extract() {
    if [[ -f "scripts/page-extractors/${site}" ]]
    then
        source "scripts/page-extractors/${site}"
    fi
    if [[ -n "$page_ext" ]]
    then
        output "$page_ext"
        exit
    fi
}
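## a page-extractor is sourced after the page has been downloaded, with the
## response in $content, and signals success by setting page_ext.
## hypothetical sketch of a generic extractor (not the real file):
##   page_ext=$(echo "$content" \
##       | grep -o '<link[^>]*application/rss+xml[^>]*>' \
##       | grep -o 'href="[^"]*"' | head -n 1 \
##       | sed 's/^href="//;s/"$//')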
# get user arguments
source scripts/options
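## scripts/options presumably sets $url from the arguments, along with the
## optional variables used below: $quiet, $proxy, $curl_args and $network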
# determine site ("" if not supported)
source scripts/determine_site
# get protocol and domain
determine_protocol
get_domain
if [[ -n "$site" ]]
then
notify "Site identified as $site"
fi
url_extract
if [[ -n $site ]]
then
notify "Unable to get feed from URL alone."
fi
# give up now if the user has disabled network access, or if the site is known
# but has no page extractor (downloading the page would be pointless)
if [[ "$network" == "false" ]] || [[ -n "$site" && ! -f "scripts/page-extractors/${site}" ]]
then
    fail
fi
# determine flags to pass to curl
args=" ${curl_args}"
# proxy and quiet are expected to hold curl flags; append each non-empty one
# using indirect expansion (${!i} expands the variable named by $i)
vars=("proxy" "quiet")
for i in "${vars[@]}"
do
    if [[ -n "${!i}" ]]
    then
        args="$args ${!i}"
    fi
done
# -L follows redirects; -i and -D - include the response headers in the
# output so that scripts/http_errors can inspect the status line
command="curl -L -i -D -$args $url"
notify "Downloading page..."
# download page with curl
content=$(eval "$command")
# check download for errors
source scripts/http_errors
# if the domain is unknown, try to determine whether the site uses known software
if [[ -z "$site" ]]
then
    notify "Attempting to determine if site uses known software..."
    source scripts/determine_software
    if [[ -n "$site" ]]
    then
        notify "Software identified as $site"
        software_found="true"
    fi
fi
# try to extract the feed from the downloaded page
notify "Attempting to get feed from page..."
page_extract
notify "Unable to get feed from page."
# if the software was only identified after downloading the page, the URL
# extractor for it has not run yet, so try it now
if [[ -n "$software_found" ]]
then
    url_extract
    notify "Unable to get feed from URL based on software."
fi
# every method failed
fail
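## example invocation (illustrative):
##   rsstube "https://www.youtube.com/channel/abc"
## on success the feed URL is printed to stdout; on failure the script
## exits with status 1 via fail()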