
Scrape anime and play episodes on browser without ads from terminal #5975


Merged · 9 commits · Feb 1, 2022
205 changes: 205 additions & 0 deletions web_programming/fetch_anime_and_play.py
@@ -0,0 +1,205 @@
from xml.dom import NotFoundErr

import requests
from bs4 import BeautifulSoup, NavigableString
from fake_useragent import UserAgent
from requests.exceptions import RequestException

BASE_URL = "https://ww1.gogoanime2.org"


def search_scraper(anime_name: str) -> list:
"""[summary]

This function will take an url and
Member:

Suggested change:
-    This function will take an url and
+    Take an url and

return list of anime after scraping the site.

>>> type(search_scraper("demon_slayer"))
<class 'list'>

Args:
anime_name (str): [Name of anime]

Raises:
e: [Raises exception on failure]

Returns:
[list]: [List of animes]
"""

# concat the name to form the search url.
search_url = f"{BASE_URL}/search/{anime_name}"

try:
        response = requests.get(
            search_url, headers={"User-Agent": UserAgent().chrome}
        )  # request the url.

# Is the response ok?
response.raise_for_status()
except RequestException as e:
raise e
Member:

There is no need to catch an Exception if you are only going to raise the same Exception unmodified.

Contributor Author:

ok. i've used response.raise_for_status() instead.
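
For reference, the simplified form presumably reduces to a bare request plus raise_for_status(), which raises requests.HTTPError (a RequestException subclass) on 4xx/5xx responses — a sketch, not the exact merged code:

    response = requests.get(search_url, headers={"User-Agent": UserAgent().chrome})
    response.raise_for_status()  # HTTP errors propagate; no redundant try/except needed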


# parse with soup.
soup = BeautifulSoup(response.text, "html.parser")

# get list of anime
items_ul = soup.find("ul", {"class": "items"})
items_li = items_ul.children

    # for each anime, insert its name and url into the list.
anime_list = []
for li in items_li:
if not isinstance(li, NavigableString):
try:
anime_url, anime_title = li.find("a")["href"], li.find("a")["title"]
anime_list.append(
{
"title": anime_title,
"url": anime_url,
}
)
except (NotFoundErr, KeyError):
pass

return anime_list
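
For illustration, a usage sketch of the function above ("naruto" is just a placeholder query; real results depend on the live site):

    results = search_scraper("naruto")  # placeholder query
    # each entry is a dict of the shape built in the loop above
    for result in results[:3]:
        print(result["title"], "->", result["url"])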


def search_anime_episode_list(episode_endpoint: str) -> list:
"""[summary]

This function will take an url and
Member:

Suggested change:
-    This function will take an url and
+    Take an url and

Contributor Author:

changed them

return list of episodes after scraping the site
for an url.

>>> type(search_anime_episode_list("/anime/kimetsu-no-yaiba"))
<class 'list'>

Args:
episode_endpoint (str): [Endpoint of episode]

Raises:
e: [description]

Returns:
[list]: [List of episodes]
"""

request_url = f"{BASE_URL}{episode_endpoint}"

try:
        response = requests.get(
            url=request_url, headers={"User-Agent": UserAgent().chrome}
        )
response.raise_for_status()
except RequestException as e:
raise e

soup = BeautifulSoup(response.text, "html.parser")

    # With this id, get the episode list.
episode_page_ul = soup.find("ul", {"id": "episode_related"})
episode_page_li = episode_page_ul.children

episode_list = []
for children in episode_page_li:
try:
if not isinstance(children, NavigableString):
episode_list.append(
{
"title": children.find("div", {"class": "name"}).text.replace(
" ", ""
),
"url": children.find("a")["href"],
Member:

It confuses the reader that children is plural yet these items are singular. It is easier to understand when each item is an episode...

Suggested change:
-    for children in episode_page_li:
-        try:
-            if not isinstance(children, NavigableString):
-                episode_list.append(
-                    {
-                        "title": children.find("div", {"class": "name"}).text.replace(
-                            " ", ""
-                        ),
-                        "url": children.find("a")["href"],
+    for episode in episode_page_li:
+        try:
+            if not isinstance(episode, NavigableString):
+                episode_list.append(
+                    {
+                        "title": episode.find("div", {"class": "name"}).text.replace(
+                            " ", ""
+                        ),
+                        "url": episode.find("a")["href"],

Contributor Author:

for episode in episode_page_li:
for anime in anime_li:

Like this now

}
)
except (KeyError, NotFoundErr):
pass

return episode_list
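
The isinstance(..., NavigableString) guard appears in both loops because BeautifulSoup's .children iterator yields the whitespace text nodes between tags as well as the tags themselves. A minimal standalone demonstration:

    from bs4 import BeautifulSoup, NavigableString

    soup = BeautifulSoup("<ul>\n<li>EP 1</li>\n<li>EP 2</li>\n</ul>", "html.parser")
    children = list(soup.find("ul").children)
    # children interleaves "\n" NavigableStrings with the <li> tags
    tags_only = [c for c in children if not isinstance(c, NavigableString)]
    print(len(children), len(tags_only))  # 5 2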


def get_anime_episode(episode_endpoint: str) -> list:
"""[summary]

Get click url and download url from episode url

>>> type(get_anime_episode("/watch/kimetsu-no-yaiba/1"))
<class 'list'>

Args:
episode_endpoint (str): [Endpoint of episode]

Raises:
e: [description]

Returns:
[list]: [List of download and watch url]
"""

episode_page_url = f"{BASE_URL}{episode_endpoint}"

try:
response = requests.get(
url=episode_page_url, headers={"User-Agent": UserAgent().chrome}
)
except RequestException as e:
raise e

soup = BeautifulSoup(response.text, "html.parser")

try:
episode_url = soup.find("iframe", {"id": "playerframe"})["src"]
download_url = episode_url.replace("/embed/", "/playlist/") + ".m3u8"
except (KeyError, NotFoundErr) as e:
raise e

return [f"{BASE_URL}{episode_url}", f"{BASE_URL}{download_url}"]
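
To make the embed-to-playlist rewrite above concrete, with a hypothetical iframe src (the real value depends on the site):

    episode_url = "/embed/kimetsu-no-yaiba/1"  # assumed shape of the iframe src
    download_url = episode_url.replace("/embed/", "/playlist/") + ".m3u8"
    # download_url -> "/playlist/kimetsu-no-yaiba/1.m3u8", an HLS playlist
    # that a player such as mpv or VLC can stream directly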


if __name__ == "__main__":

try:

anime_name = input("Enter anime name: ").strip()
anime_list = search_scraper(anime_name)
print("\n")

if len(anime_list) == 0:
print("No anime found with this name")
else:

print(f"Found {len(anime_list)} results: ")
for (i, anime) in enumerate(anime_list):
anime_title = anime["title"]
print(f"{i+1}. {anime_title}")

anime_choice = int(
input("\nPlease choose from the following list: ").strip()
)
chosen_anime = anime_list[anime_choice - 1]
print(f"You chose {chosen_anime['title']}. Searching for episodes...")

episode_list = search_anime_episode_list(chosen_anime["url"])
if len(episode_list) == 0:
print("No episode found for this anime")
else:
print(f"Found {len(episode_list)} results: ")
for (i, episode) in enumerate(episode_list):
print(f"{i+1}. {episode['title']}")

episode_choice = int(
input("\nChoose an episode by serial no: ").strip()
)
chosen_episode = episode_list[episode_choice - 1]
print(f"You chose {chosen_episode['title']}. Searching...")

episode_url, download_url = get_anime_episode(chosen_episode["url"])
print(f"\nTo watch, ctrl+click on {episode_url}.")
print(f"To download, ctrl+click on {download_url}.")

except (ValueError, IndexError, TypeError) as e:
raise e
Member:

We can lose the try / except if we are not going to handle the errors.

Contributor Author:

removed the block.
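
With the block removed, the entry point presumably reads roughly as follows, letting ValueError/IndexError propagate naturally — a sketch of the simplified flow, not the exact merged code:

    if __name__ == "__main__":
        anime_name = input("Enter anime name: ").strip()
        anime_list = search_scraper(anime_name)
        if not anime_list:
            print("No anime found with this name")
        else:
            for i, anime in enumerate(anime_list, start=1):
                print(f"{i}. {anime['title']}")
            chosen_anime = anime_list[int(input("Choose an anime: ")) - 1]
            episode_list = search_anime_episode_list(chosen_anime["url"])
            for i, episode in enumerate(episode_list, start=1):
                print(f"{i}. {episode['title']}")
            chosen_episode = episode_list[int(input("Choose an episode: ")) - 1]
            episode_url, download_url = get_anime_episode(chosen_episode["url"])
            print(f"To watch, ctrl+click on {episode_url}.")
            print(f"To download, ctrl+click on {download_url}.")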