Skip to content

Commit

Permalink
Add caching to prevent redundant calls to LLMs.
Browse files Browse the repository at this point in the history
  • Loading branch information
fniessink committed Mar 27, 2024
1 parent 1702159 commit df09629
Show file tree
Hide file tree
Showing 3 changed files with 7 additions and 2 deletions.
2 changes: 1 addition & 1 deletion .gitignore
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
.*_cache
.*_cache*
.coverage*
.DS_Store
.venv
Expand Down
3 changes: 2 additions & 1 deletion pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,8 @@
name = "Summarizer"
version = "0.1"
dependencies = [
"langchain-openai == 0.0.8",
"langchain == 0.1.13",
"langchain-openai == 0.1.1",
]

[project.scripts]
Expand Down
4 changes: 4 additions & 0 deletions src/summarizer/app.py
Original file line number Diff line number Diff line change
@@ -1,10 +1,14 @@
"""Main module for the application."""

from langchain.cache import SQLiteCache
from langchain.globals import set_llm_cache

from summarizer.cli import create_argument_parser
from summarizer.summarize import summarize_path


def main() -> None:
    """Entry point: enable LLM response caching, then summarize the given path.

    The SQLite-backed cache is installed globally before any LLM call so that
    repeated summarization requests do not trigger redundant LLM invocations.
    """
    # Install the global LLM cache first, so every downstream call benefits.
    set_llm_cache(SQLiteCache(database_path=".summarizer_cache.db"))
    parser = create_argument_parser()
    arguments = parser.parse_args()
    summary = summarize_path(arguments.path)
    print(summary)

0 comments on commit df09629

Please sign in to comment.