From efac2730ca54586113fcc2bfa622b1dee3564694 Mon Sep 17 00:00:00 2001
From: Nikhil Raj
Date: Fri, 24 Feb 2023 21:14:31 +0530
Subject: [PATCH] Formatted

---
 src/scrape_up/github/issue.py        |  40 +++---
 src/scrape_up/github/organization.py | 180 ++++++++++++++++-----------
 src/scrape_up/github/pull_request.py |  48 +++----
 src/scrape_up/github/respository.py  |  62 +++++----
 src/scrape_up/github/users.py        | 127 ++++++++++---------
 5 files changed, 259 insertions(+), 198 deletions(-)

diff --git a/src/scrape_up/github/issue.py b/src/scrape_up/github/issue.py
index c4a50254..61c6373c 100644
--- a/src/scrape_up/github/issue.py
+++ b/src/scrape_up/github/issue.py
@@ -1,28 +1,32 @@
 import requests
 from bs4 import BeautifulSoup
 
 
-class Issue:
-    def __init__(self, username: str, repository_name:str, issue_number:int):
+class Issue:
+    def __init__(self, username: str, repository_name: str, issue_number: int):
         self.username = username
         self.repository = repository_name
         self.issue_number = issue_number
 
     def __scrape_page(self):
-        data = requests.get(f"https://github.com/{self.username}/{self.repository}/issues/{self.issue_number}")
-        data = BeautifulSoup(data.text,"html.parser")
+        data = requests.get(
+            f"https://github.com/{self.username}/{self.repository}/issues/{self.issue_number}"
+        )
+        data = BeautifulSoup(data.text, "html.parser")
         return data
-    
+
     def assignees(self):
         """
        Fetch list of assignees
         """
         data = self.__scrape_page()
         try:
-            assignees_body = data.find('span', class_='css-truncate js-issue-assignees')
+            assignees_body = data.find("span", class_="css-truncate js-issue-assignees")
             assignees = []
-            for assignee in assignees_body.find_all('a', class_='assignee Link--primary css-truncate-target width-fit'):
-                assignees.append(assignee.text.replace('\n','').strip())
+            for assignee in assignees_body.find_all(
+                "a", class_="assignee Link--primary css-truncate-target width-fit"
+            ):
+                assignees.append(assignee.text.replace("\n", "").strip())
             return assignees
         except:
             message = "No assignees found"
@@ -34,8 +38,10 @@ def labels(self):
         """
         data = self.__scrape_page()
         try:
-            labelsDiv = data.find(class_ = "js-issue-labels d-flex flex-wrap")
-            allLabelsHtml = labelsDiv.find_all(class_="css-truncate css-truncate-target width-fit")
+            labelsDiv = data.find(class_="js-issue-labels d-flex flex-wrap")
+            allLabelsHtml = labelsDiv.find_all(
+                class_="css-truncate css-truncate-target width-fit"
+            )
             allLabels = []
             for label in allLabelsHtml:
                 allLabels.append(label.text)
@@ -44,36 +50,34 @@ def labels(self):
             message = "No label found"
             return message
 
-
     def opened_by(self):
         """
         Fetch the name of the user who opened the issue
         """
         data = self.__scrape_page()
-        author_name = data.find('a', class_='author text-bold Link--secondary').text
+        author_name = data.find("a", class_="author text-bold Link--secondary").text
         return author_name
 
-
     def title(self):
         """
         Fetch title of the issue
         """
         data = self.__scrape_page()
         try:
-            title_body = data.find('bdi', class_="js-issue-title markdown-title")
+            title_body = data.find("bdi", class_="js-issue-title markdown-title")
             title = title_body.text.strip()
             return title
         except:
             message = "No title found"
             return message
-    
+
     def opened_at(self):
         """
         Returns a string containing the time when the issue was opened in ISO format
         """
         try:
             data = self.__scrape_page()
-            return data.find('relative-time').text
+            return data.find("relative-time").text
         except:
             message = "Unable to fetch time"
             return message
@@ -84,7 +88,9 @@ def is_milestone(self):
         """
         data = self.__scrape_page()
         try:
-            milestone = data.find('a', class_='Link--secondary mt-1 d-block text-bold css-truncate').text.strip()
+            milestone = data.find(
+                "a", class_="Link--secondary mt-1 d-block text-bold css-truncate"
+            ).text.strip()
             return milestone
         except:
             message = "No milestone"
diff --git a/src/scrape_up/github/organization.py b/src/scrape_up/github/organization.py
index 635aefbf..d7202c4c 100644
--- a/src/scrape_up/github/organization.py
+++ b/src/scrape_up/github/organization.py
@@ -2,66 +2,68 @@
 from bs4 import BeautifulSoup
 
 
-
 class Organization:
-    def __init__(self,organization_name: str):
-        self.organization = organization_name
+    def __init__(self, organization_name: str):
+        self.organization = organization_name
 
     def __scrape_page(self):
         data = requests.get(f"https://github.com/{self.organization}")
         data = BeautifulSoup(data.text, "html.parser")
         return data
+
     def top_languages(self):
         """
         Returns a list of the most used languages in an organization
         """
         try:
-            languages=[]
-            data=self.__scrape_page()
-            lang_raw=data.find_all("a",class_="no-wrap color-fg-muted d-inline-block Link--muted mt-2")
+            languages = []
+            data = self.__scrape_page()
+            lang_raw = data.find_all(
+                "a", class_="no-wrap color-fg-muted d-inline-block Link--muted mt-2"
+            )
             for lang in lang_raw:
-
                 languages.append(lang.get_text().strip())
             return languages
         except:
             return "An exception occurred, cannot get the languages"
 
-
     def top_topics(self):
         """
         Returns list of the most used topics in an organization
         """
         page = self.__scrape_page()
-        all_topics = page.find_all(class_='topic-tag topic-tag-link')
+        all_topics = page.find_all(class_="topic-tag topic-tag-link")
         topics = []
         for topic in all_topics:
             topics.append(topic.text.strip())
         return topics
-    
+
     def followers(self):
         """
         Returns number of followers of an organization
         """
         page = self.__scrape_page()
         try:
-            followers_body = page.find('a', class_='Link--secondary no-underline no-wrap')
+            followers_body = page.find(
+                "a", class_="Link--secondary no-underline no-wrap"
+            )
             followers = followers_body.span.text.strip()
             return followers
         except:
             return "No followers found for this organization"
-    
+
     def avatar(self):
         """
         Returns the URL of the avatar of an organization
         """
         page = self.__scrape_page()
         try:
-            avatar = page.find('a', attrs = {'itemprop': 'url'})
+            avatar = page.find("a", attrs={"itemprop": "url"})
             url = avatar.text.strip()
             return url
         except:
             return "No avatar found for this organization"
-    
+
     def __scrape_repositories_page(self):
         """
         scrapes the head page of repositories of an organization
         """
         organization = self.organization
         data = requests.get(f"https://github.com/orgs/{organization}/repositories")
         data = BeautifulSoup(data.text, "html.parser")
         return data
@@ -78,7 +80,7 @@ def __scrape_repositories(self, page):
         data = requests.get(page)
         data = BeautifulSoup(data.text, "html.parser")
         return data
-    
+
     def repositories(self):
         """
         Returns List of repositories of an organization
@@ -86,30 +88,34 @@ def repositories(self):
         organization = self.organization
         data = self.__scrape_repositories_page()
         try:
-            pages_body = data.find('div', class_='paginate-container')
-            current_page = pages_body.find('em', class_='current')
+            pages_body = data.find("div", class_="paginate-container")
+            current_page = pages_body.find("em", class_="current")
             total_pages = 1
             if current_page != None:
-                total_pages = (int)(current_page['data-total-pages'])
-
+                total_pages = (int)(current_page["data-total-pages"])
+
             pages = []
             if total_pages == 1:
                 pages.append(f"https://github.com/orgs/{organization}/repositories")
             else:
                 for i in range(1, total_pages + 1):
-                    pages.append(f"https://github.com/orgs/{organization}/repositories?page={i}")
-
+                    pages.append(
f"https://github.com/orgs/{organization}/repositories?page={i}" + ) + repositories = [] for page in pages: page_data = self.__scrape_repositories(page) - repositories_body = page_data.find('div', id = 'org-repositories') - for repo in repositories_body.find_all('a', attrs = {'itemprop': 'name codeRepository'}): + repositories_body = page_data.find("div", id="org-repositories") + for repo in repositories_body.find_all( + "a", attrs={"itemprop": "name codeRepository"} + ): repositories.append(repo.text.strip()) return repositories except: return "No repositories found for this organization" - + def __scrape_people_page(self): """ scrapes the head page of people of an organization @@ -118,7 +124,7 @@ def __scrape_people_page(self): data = requests.get(f"https://github.com/orgs/{organization}/people") data = BeautifulSoup(data.text, "html.parser") return data - + def __scrape_people(self, page): """ scrapes the people page of an organization @@ -135,43 +141,44 @@ def people(self): organization = self.organization data = self.__scrape_people_page() try: - pages_body = data.find('div', class_='paginate-container') - current_page = pages_body.find('em', class_='current') + pages_body = data.find("div", class_="paginate-container") + current_page = pages_body.find("em", class_="current") total_pages = 1 if current_page != None: - total_pages = (int)(current_page['data-total-pages']) - + total_pages = (int)(current_page["data-total-pages"]) + pages = [] if total_pages == 1: pages.append(f"https://github.com/orgs/{organization}/people") else: for i in range(1, total_pages + 1): - pages.append(f"https://github.com/orgs/{organization}/people?page={i}") - + pages.append( + f"https://github.com/orgs/{organization}/people?page={i}" + ) + people = [] for page in pages: page_data = self.__scrape_people(page) - people_body = page_data.find('div', id = 'org-members-table') - for person in people_body.find_all('li'): - person_username = person.find('a', class_='d-inline-block') - people.append(person_username['href'][1:]) - + people_body = page_data.find("div", id="org-members-table") + for person in people_body.find_all("li"): + person_username = person.find("a", class_="d-inline-block") + people.append(person_username["href"][1:]) + return people except: return "No people found for this organization" - def peoples(self): """ Return number of people in a organizaton """ data = self.__scrape_people_page() try: - body = data.find('div', class_='paginate-container') - current_page = body.find('em', class_='current') + body = data.find("div", class_="paginate-container") + current_page = body.find("em", class_="current") page_count = 1 if current_page != None: - page_count = int((current_page['data-total-pages'])) + page_count = int((current_page["data-total-pages"])) pages = [] @@ -179,13 +186,15 @@ def peoples(self): pages.append(f"https://github.com/orgs/{self.organization}/people") else: for i in range(1, page_count + 1): - pages.append(f"https://github.com/orgs/{self.organization}/people?page={i}") - + pages.append( + f"https://github.com/orgs/{self.organization}/people?page={i}" + ) + people_count = 0 for page in pages: page_data = self.__scrape_people(page) - people_body = page_data.find('div', id = 'org-members-table') - people_count = len(people_body.find_all('li')) + people_body = page_data.find("div", id="org-members-table") + people_count = len(people_body.find_all("li")) return people_count except: @@ -198,27 +207,20 @@ def repository_stats(self, repo_url): data = 
self.__scrape_repositories(repo_url) try: # forks - forksCount = ( - data.find("span", id="repo-network-counter").text.strip() - ) + forksCount = data.find("span", id="repo-network-counter").text.strip() # stars - starCount = ( - data.find('span', id = 'repo-stars-counter-star').text.strip() - ) + starCount = data.find("span", id="repo-stars-counter-star").text.strip() # issues - issuesCount = ( - data.find("span", id= "issues-repo-tab-count").text.strip() - ) + issuesCount = data.find("span", id="issues-repo-tab-count").text.strip() # pull requests - pullRequests = ( - data.find("span", id="pull-requests-repo-tab-count").text.strip() - ) + pullRequests = data.find( + "span", id="pull-requests-repo-tab-count" + ).text.strip() return forksCount, starCount, issuesCount, pullRequests except: return "No such repository found" - def repository_details(self): """ Returns the details of all the repositories of an organization @@ -226,33 +228,65 @@ def repository_details(self): organization = self.organization data = self.__scrape_repositories_page() try: - pages_body = data.find('div', class_='paginate-container') - current_page = pages_body.find('em', class_='current') + pages_body = data.find("div", class_="paginate-container") + current_page = pages_body.find("em", class_="current") total_pages = 1 if current_page != None: - total_pages = (int)(current_page['data-total-pages']) - + total_pages = (int)(current_page["data-total-pages"]) + pages = [] if total_pages == 1: pages.append(f"https://github.com/orgs/{organization}/repositories") else: for i in range(1, total_pages + 1): - pages.append(f"https://github.com/orgs/{organization}/repositories?page={i}") - + pages.append( + f"https://github.com/orgs/{organization}/repositories?page={i}" + ) + repositories = [] for page in pages: page_data = self.__scrape_repositories(page) - repositories_body = page_data.find('div', id = 'org-repositories') - for repo in repositories_body.find_all('li'): - repo_name = repo.find('a', attrs = {'itemprop': 'name codeRepository'}).text.strip() + repositories_body = page_data.find("div", id="org-repositories") + for repo in repositories_body.find_all("li"): + repo_name = repo.find( + "a", attrs={"itemprop": "name codeRepository"} + ).text.strip() repo_url = f"https://github.com{repo.find('a', attrs = {'itemprop': 'name codeRepository'})['href']}" - repo_description_body = repo.find('p', attrs = {'itemprop': 'description'}) - repo_description = repo_description_body.text.strip() if repo_description_body != None else "No description" - repo_language_body = repo.find('span', attrs = {'itemprop': 'programmingLanguage'}) - repo_language = repo_language_body.text.strip() if repo_language_body != None else "No language" - repo_forks, repo_stars, repo_issues, repo_pull_requests = self.repository_stats(repo_url) - repositories.append({"name": repo_name, "url": repo_url, "description": repo_description, "language": repo_language, "forks": repo_forks, "stars": repo_stars, "issues": repo_issues, "pull_requests": repo_pull_requests}) - + repo_description_body = repo.find( + "p", attrs={"itemprop": "description"} + ) + repo_description = ( + repo_description_body.text.strip() + if repo_description_body != None + else "No description" + ) + repo_language_body = repo.find( + "span", attrs={"itemprop": "programmingLanguage"} + ) + repo_language = ( + repo_language_body.text.strip() + if repo_language_body != None + else "No language" + ) + ( + repo_forks, + repo_stars, + repo_issues, + repo_pull_requests, + ) = 
+                    ) = self.repository_stats(repo_url)
+                    repositories.append(
+                        {
+                            "name": repo_name,
+                            "url": repo_url,
+                            "description": repo_description,
+                            "language": repo_language,
+                            "forks": repo_forks,
+                            "stars": repo_stars,
+                            "issues": repo_issues,
+                            "pull_requests": repo_pull_requests,
+                        }
+                    )
+
             return repositories
         except:
             return "No repositories found for this organization"
diff --git a/src/scrape_up/github/pull_request.py b/src/scrape_up/github/pull_request.py
index bed1a065..0bfda375 100644
--- a/src/scrape_up/github/pull_request.py
+++ b/src/scrape_up/github/pull_request.py
@@ -3,67 +3,69 @@
 
 
 class PullRequest:
-
-    def __init__(self, username: str, repository_name:str, pull_request_number:int):
+    def __init__(self, username: str, repository_name: str, pull_request_number: int):
         self.username = username
         self.repository = repository_name
         self.pr_number = pull_request_number
 
     def __scrape_page(self):
-        data = requests.get(f"https://github.com/{self.username}/{self.repository}/pull/{self.pr_number}")
-        data = BeautifulSoup(data.text,"html.parser")
+        data = requests.get(
+            f"https://github.com/{self.username}/{self.repository}/pull/{self.pr_number}"
+        )
+        data = BeautifulSoup(data.text, "html.parser")
         return data
+
     def labels(self):
-        labels_found=[]
-        data=self.__scrape_page()
-        label_raw=data.find_all("a",class_="IssueLabel hx_IssueLabel width-fit mb-1 mr-1")
+        labels_found = []
+        data = self.__scrape_page()
+        label_raw = data.find_all(
+            "a", class_="IssueLabel hx_IssueLabel width-fit mb-1 mr-1"
+        )
         try:
             for d in label_raw:
                 labels_found.append(d.get_text().strip())
-            labels_found+1
+            labels_found + 1
             return labels_found
         except:
-            return "An exception occured"
-
-
+            return "An exception occurred"
 
     def commits(self):
         """
         Fetch the number of commits made in a pull request
         """
         data = self.__scrape_page()
-        commits_count = data.find('span', id='commits_tab_counter').text.strip()
+        commits_count = data.find("span", id="commits_tab_counter").text.strip()
         return commits_count
-    
+
     def title(self):
         """
         Fetch the title of a pull request
         """
         data = self.__scrape_page()
         try:
-            title_body = data.find('bdi', class_='js-issue-title markdown-title')
+            title_body = data.find("bdi", class_="js-issue-title markdown-title")
             title = title_body.text.strip()
             return title
         except:
             Message = "No title found"
             return Message
-    
+
     def __files_changed_body(self):
-        '''
+        """
         scrape the data of files changed in a pull request
-        '''
+        """
         link = f"https://github.com/{self.username}/{self.repository}/pull/{self.pr_number}/files"
         data = requests.get(link)
-        data = BeautifulSoup(data.text,"html.parser")
+        data = BeautifulSoup(data.text, "html.parser")
         return data
-    
+
     def files_changed(self):
         """
         Fetch the number of files changed in a pull request
         """
         data = self.__files_changed_body()
         try:
-            files_changed_body = data.find('span', id='files_tab_counter')
+            files_changed_body = data.find("span", id="files_tab_counter")
             files_changed = files_changed_body.text.strip()
             return files_changed
         except:
@@ -77,7 +79,9 @@ def reviewers(self):
         data = self.__scrape_page()
         try:
             reviewerList = []
-            reviewers = data.find_all('span',class_ = 'css-truncate-target width-fit v-align-middle')
+            reviewers = data.find_all(
+                "span", class_="css-truncate-target width-fit v-align-middle"
+            )
             if len(reviewers) == 0:
                 return f"Oops, the repository {self.repository} doesn't have any reviewers yet!"
             else:
@@ -85,4 +89,4 @@ def reviewers(self):
                 reviewerList.append(reviewer.text)
             return reviewerList
         except:
-            return "Oops! An Error Occured"
\ No newline at end of file
+            return "Oops! An error occurred"
diff --git a/src/scrape_up/github/respository.py b/src/scrape_up/github/respository.py
index 96c9eb24..cb7f08b5 100644
--- a/src/scrape_up/github/respository.py
+++ b/src/scrape_up/github/respository.py
@@ -3,7 +3,6 @@
 import requests_html
 
 
-
 class Repository:
     def __init__(self, username: str, repository_name: str):
         self.username = username
@@ -29,11 +28,12 @@ def __scrape_releases_page(self):
         return data
 
     def __scrape_issues_page(self):
-        data = requests.get(f"https://github.com/{self.username}/{self.repository}/issues"
+        data = requests.get(
+            f"https://github.com/{self.username}/{self.repository}/issues"
         )
         data = BeautifulSoup(data.text, "html.parser")
         return data
-    
+
     def __scrape_pull_requests_page(self):
         data = requests.get(
             f"https://github.com/{self.username}/{self.repository}/pulls"
@@ -121,7 +121,6 @@ def star_count(self):
             message = "Oops! No Stars found"
             return message
 
-
     def pull_requests(self):
         """
         Get the number of pull requests opened in a repository.
@@ -130,7 +129,7 @@ def pull_requests(self):
         data = self.__scrape_page()
         try:
             pull_requests = (
                 data.find_all(class_="UnderlineNav-item mr-0 mr-md-1 mr-lg-3")[2]
-                .find_all("span")[1]
+                .find_all("span")[1]
                 .text.strip()
             )
             return pull_requests
@@ -171,7 +170,6 @@ def releases(self):
         return message
 
     def issues_count(self):
-
         """
         Fetch total issues in a repository
         """
@@ -182,47 +180,57 @@ def issues_count(self):
         except:
             message = "Failed to fetch no. of issues"
             return message
-    
+
     def readme(self):
         """
         Fetch the README.md of a user
         """
         session = requests_html.HTMLSession()
-        r = session.get(f"https://github.com/{self.username}/{self.username}/blob/main/README.md")
+        r = session.get(
+            f"https://github.com/{self.username}/{self.username}/blob/main/README.md"
+        )
         markdown_content = r.text
         try:
-            with open('out.md', 'w', encoding='utf-8') as f:
+            with open("out.md", "w", encoding="utf-8") as f:
                 f.write(markdown_content)
         except:
-            err=f"No readme found for {self.username}"
+            err = f"No readme found for {self.username}"
             return err
-    
+
     def get_pull_requests_ids(self):
         """
         Fetch all opened pull request IDs of a repository
         """
         data = self.__scrape_pull_requests_page()
         try:
-            pr_body = data.find('div', class_='js-navigation-container js-active-navigation-container')
+            pr_body = data.find(
+                "div", class_="js-navigation-container js-active-navigation-container"
+            )
             pull_requests_ids = []
-            for each_pr in pr_body.find_all('a', class_='Link--primary v-align-middle no-underline h4 js-navigation-open markdown-title'):
-                pr_id = each_pr['href'].split('/')[-1]
+            for each_pr in pr_body.find_all(
+                "a",
+                class_="Link--primary v-align-middle no-underline h4 js-navigation-open markdown-title",
+            ):
+                pr_id = each_pr["href"].split("/")[-1]
                 pull_requests_ids.append(pr_id)
-            return pull_requests_ids
+            return pull_requests_ids
         except:
             message = "No pull requests found"
             return message
 
-
     def commits(self):
         """
-        Fetch the number of commits in a repository
+        Fetch the number of commits in a repository
         """
         data = self.__scrape_page()
         try:
-            commits = (data.find("a",href=f"/{self.username}/{self.repository}/commits").find("span").text.strip())
+            commits = (
+                data.find("a", href=f"/{self.username}/{self.repository}/commits")
+                .find("span")
+                .text.strip()
+            )
             return commits
         except:
             message = "No commits found"
@@ -233,7 +241,9 @@ def get_issues(self):
         """
         data = self.__scrape_issues_page()
         try:
-            issues = data.find_all(class_="Link--primary v-align-middle no-underline h4 js-navigation-open markdown-title")
+            issues = data.find_all(
+                class_="Link--primary v-align-middle no-underline h4 js-navigation-open markdown-title"
+            )
             allIssues = []
 
             for item in issues:
@@ -244,14 +254,16 @@ def get_issues(self):
             return message
 
     def get_contributors(self):
-        data= self.__scrape_page()
+        data = self.__scrape_page()
         try:
-            contributors = data.find_all("a", href=f"/{self.username}/{self.repository}/graphs/contributors")
-            contributor=[]
+            contributors = data.find_all(
+                "a", href=f"/{self.username}/{self.repository}/graphs/contributors"
+            )
+            contributor = []
             for it in contributors:
                 contributor.append(it.get_text())
-            return contributor[0].strip()
+            return contributor[0].strip()
         except:
-            message="Oops! No contributors found"
-            return message
+            message = "Oops! No contributors found"
+            return message
diff --git a/src/scrape_up/github/users.py b/src/scrape_up/github/users.py
index a69d697a..81a5961b 100644
--- a/src/scrape_up/github/users.py
+++ b/src/scrape_up/github/users.py
@@ -3,7 +3,6 @@
 
 
 class Users:
-
     def __init__(self, username: str):
         self.username = username
 
@@ -26,12 +25,12 @@ def followers(self):
             return message
 
     def following(self):
-        """"
+        """
         Fetch the number of following of a GitHub user.
         """
         page = self.__scrape_page()
         try:
-            following=page.find_all(class_="text-bold color-fg-default")
+            following = page.find_all(class_="text-bold color-fg-default")
             # print(page.find_all("span"))
             return following[1].text
         except:
@@ -45,7 +44,8 @@ def get_avatar(self):
         page = self.__scrape_page()
         try:
             avatar = page.find(
-                class_="avatar avatar-user width-full border color-bg-default")
+                class_="avatar avatar-user width-full border color-bg-default"
+            )
             return avatar["src"]
         except:
             message = f"Avatar not found for username {self.username}"
@@ -57,24 +57,29 @@ def get_bio(self):
         """
         page = self.__scrape_page()
         try:
-            bio = page.find(class_="p-note user-profile-bio mb-3 js-user-profile-bio f4")
+            bio = page.find(
+                class_="p-note user-profile-bio mb-3 js-user-profile-bio f4"
+            )
             return bio.text
         except:
             message = f"Bio not found for username {self.username}"
             return message
-    
+
     def get_repo(self):
         """
         Fetch the titles of all pinned repositories of a GitHub user.
         """
         page = self.__scrape_page()
         try:
-            pinned_repos = page.find_all(class_="mb-3 d-flex flex-content-stretch col-12 col-md-6 col-lg-6")
-            titles = [repo.find('span', class_='repo').text for repo in pinned_repos]
+            pinned_repos = page.find_all(
+                class_="mb-3 d-flex flex-content-stretch col-12 col-md-6 col-lg-6"
+            )
+            titles = [repo.find("span", class_="repo").text for repo in pinned_repos]
             return titles
         except:
-            message=f"pinned repositories not found for username {self.username}"
+            message = f"Pinned repositories not found for username {self.username}"
             return message
+
     def repo_count(self):
         """
         Fetch the number of repositories of a GitHub user.
@@ -82,12 +87,12 @@ def repo_count(self):
         page = self.__scrape_page()
         try:
             count_repo = page.find_all(class_="Counter")
-            count_repo_list=[]
+            count_repo_list = []
             for word in count_repo:
-                find_all_example=word.get_text()
+                find_all_example = word.get_text()
                 count_repo_list.append(find_all_example)
-            return(count_repo_list[0])
-        except:
+            return count_repo_list[0]
+        except:
             message = f"No. of Repos not found for username {self.username}"
             return message
@@ -96,30 +101,29 @@ def star_count(self):
         Fetch the number of stars of a GitHub user.
         """
         page = self.__scrape_page()
-        try:
+        try:
             count_star = page.find_all(class_="Counter")
-            count_star_list=[]
+            count_star_list = []
             for words in count_star:
-                find_all_example=words.get_text()
+                find_all_example = words.get_text()
                 count_star_list.append(find_all_example)
-            return(count_star_list[3])
-        except:
+            return count_star_list[3]
+        except:
             message = f"Starred repo not found for username {self.username}"
             return message
 
-
     def get_yearly_contributions(self):
-        """
+        """
         Fetch the contributions made in 365 days frame
-        """
-        page=self.__scrape_page()
+        """
+        page = self.__scrape_page()
         try:
-            contributions = page.find('h2',class_="f4 text-normal mb-2")
-            return ''.join(contributions.text.split())
+            contributions = page.find("h2", class_="f4 text-normal mb-2")
+            return "".join(contributions.text.split())
         except:
             message = f"Yearly contributions not found for username {self.username}"
             return message
-    
+
     def __get_repo_page(self):
         """
         Scrape the repositories page of a GitHub user.
@@ -135,19 +139,19 @@ def get_repositories(self):
         """
         page = self.__get_repo_page()
         try:
-            repo_body = page.find('div', id = 'user-repositories-list')
+            repo_body = page.find("div", id="user-repositories-list")
             repositories = []
             if repo_body != None:
-                for repo in repo_body.find_all('div', class_='col-10 col-lg-9 d-inline-block'):
-                    repositories.append('https://github.com' + repo.a['href'])
+                for repo in repo_body.find_all(
+                    "div", class_="col-10 col-lg-9 d-inline-block"
+                ):
+                    repositories.append("https://github.com" + repo.a["href"])
             return repositories
         except:
             message = f"Repositories not found for username {self.username}"
             return message
 
-
     def get_organizations(self):
-
         """
         Fetch the names of organizations a user is part of
         """
@@ -159,25 +163,23 @@ def get_organizations(self):
         except:
             message = f"No organizations found for the username {self.username}"
             return message
 
-
     def get_achievements(self):
         """
         Fetch the names of achievements a user has achieved
         """
         try:
             achievement = []
-            data=self.__scrape_page()
-            data=data.find_all("img", class_="achievement-badge-sidebar",alt=True)
-            itr=0
-            while itr