diff --git a/.github/workflows/pylint.yml b/.github/workflows/pylint.yml
index 6cd7a31..2e7ef4f 100644
--- a/.github/workflows/pylint.yml
+++ b/.github/workflows/pylint.yml
@@ -7,22 +7,22 @@ jobs:
     runs-on: ubuntu-latest
     strategy:
       matrix:
-        python-version: [3.11]
+        python-version: ["3.11"]
     steps:
     - uses: actions/checkout@v3
-    - name: Set up Conda environment with Python ${{ matrix.python-version }}
-      uses: s-weigand/setup-conda@v1
+    - name: Set up Python ${{ matrix.python-version }}
+      uses: actions/setup-python@v3
       with:
         python-version: ${{ matrix.python-version }}
-        conda-channels: 'conda-forge'
     - name: Install dependencies
       run: |
-        conda env update --file environment.yml
-    - name: Activate conda environment
-      run: |
-        conda init bash
-        source /home/runner/.bashrc
-        conda activate waldo-dev-env
+        python -m pip install --upgrade pip
+        pip install opencv-python
+        pip install pylint
+        pip install pytube
+        pip install pandas
+        pip install requests
+        pip install validators
     - name: Analyze the code with pylint
       run: |
         pylint $(git ls-files '*.py')
diff --git a/utils/link_retrieval.py b/utils/link_retrieval.py
index 50d51c4..f22cf77 100644
--- a/utils/link_retrieval.py
+++ b/utils/link_retrieval.py
@@ -1,10 +1,14 @@
+'''
+Retrieves gameplay URLs from Waldo Vision API and stores them locally in a CSV.
+'''
+
 import argparse
 import os
+from pathlib import Path
 import pandas as pd
 import requests
 import validators
 from common import ensure_dir_exists
-from pathlib import Path
 
 # Set up command line arguments
 parser = argparse.ArgumentParser(description="Get URL's from API and store them locally")
@@ -43,8 +47,8 @@ def parse_data(data):
                 return print("Invalid URL: " + row['url'])
 
         return response_dataframe
-    except Exception as e:
-        print(f"Error while parsing data: {e}")
+    except Exception as error:
+        print(f"Error while parsing data: {error}")
         return pd.DataFrame(columns=['id', 'url', 'game'])
 
 def main():
@@ -85,14 +89,16 @@ def main():
 
         # Save the downloaded links to a file
         valid_urls_df = pd.DataFrame(valid_urls)
-        valid_urls_df.to_csv(os.path.join(Path(download_dir), "links.csv"), index=True, columns=["id", "url", "game"])
+        valid_urls_df.to_csv(os.path.join(Path(download_dir), "links.csv"),
+                             index=True, columns=["id", "url", "game"])
     except requests.exceptions.Timeout as timeout_error:
         print(f"Request timed out: {timeout_error}")
     except requests.exceptions.TooManyRedirects as redirect_error:
         print(f"Too many redirects: {redirect_error}")
     except requests.exceptions.RequestException as request_error:
         print(f"Request failed: {request_error}")
-    except Exception as e:
-        print(f"An error occurred: {e}")
+    except Exception as other_error:
+        print(f"An error occurred: {other_error}")
+
 if __name__ == "__main__":
-    main()
\ No newline at end of file
+    main()
diff --git a/utils/segmentation.py b/utils/segmentation.py
index 6ed82eb..250ca86 100644
--- a/utils/segmentation.py
+++ b/utils/segmentation.py
@@ -1,3 +1,7 @@
+'''
+Code for testing OCR for clip segmentation
+'''
+
 import argparse
 import os
 from pathlib import Path
@@ -37,4 +41,4 @@
 for line in result:
     for word in line:
         if word[1][1] >= 0.85:
-            print(word[1])
\ No newline at end of file
+            print(word[1])
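
Note on the parse_data hunks above: since the diff only shows fragments of utils/link_retrieval.py, here is a minimal, self-contained sketch of the validation loop those hunks appear to belong to. The 'id'/'url'/'game' columns, the early-exit print on an invalid URL, and the renamed error variable come straight from the diff's own lines; the overall loop structure and the validators.url() call are assumptions (the file imports validators, but the call site is not shown).

import pandas as pd
import validators

def parse_data(data):
    '''Validate each record's URL and return the valid rows as a DataFrame.'''
    try:
        response_dataframe = pd.DataFrame(data, columns=['id', 'url', 'game'])
        for _, row in response_dataframe.iterrows():
            # validators.url() returns True for a well-formed URL and a
            # falsy ValidationFailure otherwise (assumed check; not in the diff).
            if not validators.url(row['url']):
                # Matches the early-exit context line above; print() returns
                # None, so the caller receives None when any URL is invalid.
                return print("Invalid URL: " + row['url'])
        return response_dataframe
    except Exception as error:
        print(f"Error while parsing data: {error}")
        return pd.DataFrame(columns=['id', 'url', 'game'])

For example, parse_data([{'id': 1, 'url': 'https://example.com/clip', 'game': 'csgo'}]) returns a one-row DataFrame, while a malformed URL prints a warning and yields None.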