Add handler for Mediafire downloads
This commit is contained in:
parent
aa680dadc2
commit
447eb4120a
155
c3dbdl/c3dbdl.py
155
c3dbdl/c3dbdl.py
|
@ -153,8 +153,6 @@ def fetchSongData(entries):
|
|||
for link_entry in download_links:
|
||||
link = link_entry.get("href")
|
||||
description = link_entry.get_text().strip()
|
||||
if "c3universe.com" not in link:
|
||||
continue
|
||||
messages.append(f"Found download link: {link} ({description})")
|
||||
dl_links.append(
|
||||
{
|
||||
|
@ -252,66 +250,7 @@ def buildDatabase(pages, concurrency):
|
|||
return found_songs
|
||||
|
||||
|
||||
def downloadSong(destination, filename, entry, dlid, dldesc):
|
||||
click.echo(
|
||||
f"""> Downloading song "{entry['artist']} - {entry['title']}" by {entry['author']}..."""
|
||||
)
|
||||
|
||||
if dlid is None:
|
||||
dl_links = entry["dl_links"]
|
||||
else:
|
||||
try:
|
||||
dl_links = [entry["dl_links"][dlid - 1]]
|
||||
except Exception:
|
||||
click.echo(f"Invalid download link ID {dlid}.")
|
||||
return
|
||||
|
||||
if dldesc is not None:
|
||||
new_dl_links = list()
|
||||
for link in dl_links:
|
||||
if dldesc in link["description"]:
|
||||
new_dl_links.append(link)
|
||||
dl_links = new_dl_links
|
||||
|
||||
if not dl_links:
|
||||
click.echo(f'No download link matching description "{dldesc}" found.')
|
||||
return
|
||||
|
||||
for dl_link in dl_links:
|
||||
try:
|
||||
p = requests.get(dl_link["link"])
|
||||
if p.status_code != 200:
|
||||
raise HTTPError(dl_link["link"], p.status_code, "", None, None)
|
||||
|
||||
parsed_html = BeautifulSoup(p.text, "html.parser")
|
||||
download_url = (
|
||||
parsed_html.body.find("div", attrs={"class": "lock-head"})
|
||||
.find("a")
|
||||
.get("href")
|
||||
)
|
||||
except Exception as e:
|
||||
click.echo(f"Failed parsing or retrieving HTML link: {e}")
|
||||
continue
|
||||
|
||||
download_filename = filename.format(
|
||||
genre=entry["genre"],
|
||||
artist=entry["artist"],
|
||||
album=entry["album"],
|
||||
title=entry["title"],
|
||||
year=entry["year"],
|
||||
author=entry["author"],
|
||||
orig_name=download_url.split("/")[-1],
|
||||
)
|
||||
download_filename = f"{destination}/{download_filename}"
|
||||
download_path = "/".join(f"{download_filename}".split("/")[0:-1])
|
||||
|
||||
click.echo(
|
||||
f"""Downloading file "{dl_link['description']}" from {download_url}..."""
|
||||
)
|
||||
if os.path.exists(download_filename):
|
||||
click.echo(f"File exists at {download_filename}")
|
||||
continue
|
||||
|
||||
def downloadFile(download_url, download_path, download_filename):
|
||||
attempts = 1
|
||||
p = None
|
||||
try:
|
||||
|
@ -342,8 +281,100 @@ def downloadSong(destination, filename, entry, dlid, dldesc):
|
|||
click.echo(f"Successfully downloaded to {download_filename}")
|
||||
except Exception as e:
|
||||
click.echo(f"Download attempt failed: {e}")
|
||||
return None
|
||||
|
||||
def parseC3Universe(dl_link):
    """Resolve a dl.c3universe.com page into its direct download URL.

    Fetches the page, locates the anchor inside the "lock-head" div, and
    returns its href. Returns None (after echoing the error) when the page
    cannot be retrieved or parsed.
    """
    try:
        response = requests.get(dl_link)
        if response.status_code != 200:
            raise HTTPError(dl_link, response.status_code, "", None, None)

        page = BeautifulSoup(response.text, "html.parser")
        lock_head = page.body.find("div", attrs={"class": "lock-head"})
        return lock_head.find("a").get("href")
    except Exception as e:
        click.echo(f"Failed parsing or retrieving HTML link: {e}")
        return None
|
||||
|
||||
|
||||
def parseMediafire(dl_link):
    """Resolve a www.mediafire.com file page into its direct download URL.

    Fetches the page and reads the href off the download button anchor
    (matched by id/rel/aria-label). Returns None (after echoing the error)
    when the page cannot be retrieved or parsed.
    """
    try:
        response = requests.get(dl_link)
        if response.status_code != 200:
            raise HTTPError(dl_link, response.status_code, "", None, None)

        page = BeautifulSoup(response.text, "html.parser")
        button_attrs = {
            "id": "downloadButton",
            "rel": "nofollow",
            "aria-label": "Download file",
        }
        return page.find("a", attrs=button_attrs).get("href")
    except Exception as e:
        click.echo(f"Failed parsing or retrieving HTML link: {e}")
        return None
|
||||
|
||||
def downloadSong(destination, filename, entry, dlid, dldesc):
    """Download every matching file for one song entry.

    Parameters:
        destination: base directory that the formatted file path is rooted at.
        filename: format template with {genre}/{artist}/{album}/{title}/{year}/
            {author}/{orig_name} placeholders.
        entry: song dict; must provide 'artist', 'title', 'author', 'dl_links'
            (list of {'link', 'description'} dicts) and the template fields.
        dlid: 1-based index selecting a single download link, or None for all.
        dldesc: substring filter on each link's description, or None to skip.
    """
    click.echo(
        f"""> Downloading song "{entry['artist']} - {entry['title']}" by {entry['author']}..."""
    )

    if dlid is None:
        dl_links = entry["dl_links"]
    else:
        try:
            dl_links = [entry["dl_links"][dlid - 1]]
        except Exception:
            click.echo(f"Invalid download link ID {dlid}.")
            return

    if dldesc is not None:
        new_dl_links = list()
        for link in dl_links:
            if dldesc in link["description"]:
                new_dl_links.append(link)
        dl_links = new_dl_links

        if not dl_links:
            click.echo(f'No download link matching description "{dldesc}" found.')
            return

    for dl_link in dl_links:
        # Dispatch to the host-specific resolver; skip hosts we can't handle.
        if 'dl.c3universe.com' in dl_link['link']:
            download_url = parseC3Universe(dl_link["link"])
        elif 'www.mediafire.com' in dl_link["link"]:
            download_url = parseMediafire(dl_link["link"])
        else:
            click.echo("Download URL is not valid for CLI download; skipping...")
            click.echo(f"URL: {dl_link['link']}")
            continue

        # Resolver already echoed the failure; move on to the next link.
        if download_url is None:
            continue

        # NOTE: removed leftover debug `print(entry)` that dumped the whole
        # entry dict to stdout on every download.
        download_filename = filename.format(
            genre=entry["genre"],
            artist=entry["artist"],
            album=entry["album"],
            title=entry["title"],
            year=entry["year"],
            author=entry["author"],
            orig_name=download_url.split("/")[-1],
        )
        download_filename = f"{destination}/{download_filename}"
        # Directory portion of the target path (everything before the last '/').
        download_path = "/".join(f"{download_filename}".split("/")[0:-1])

        click.echo(
            f"""Downloading file "{dl_link['description']}" from {download_url}..."""
        )
        if os.path.exists(download_filename):
            click.echo(f"File exists at {download_filename}")
            continue

        downloadFile(download_url, download_path, download_filename)
|
||||
|
||||
@click.command(name="build", short_help="Build the local database.")
|
||||
@click.option(
|
||||
|
|
Loading…
Reference in New Issue