Skip to content

Commit

Permalink
Implement core application structure with logging, path resolution, and context preparation
Browse files Browse the repository at this point in the history
  • Loading branch information
whoisdsmith committed Dec 23, 2024
1 parent 7941bd4 commit 70e4a7b
Show file tree
Hide file tree
Showing 22 changed files with 1,910 additions and 57 deletions.
21 changes: 21 additions & 0 deletions app/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,21 @@
import logging
import sys
from rich.logging import RichHandler


def setup_logging(log_level: str = "INFO"):
    """Configure the root logger to emit through a RichHandler.

    Args:
        log_level: Name of the minimum level to record (e.g. "DEBUG", "INFO").
    """
    rich_handler = RichHandler(
        rich_tracebacks=True,
        tracebacks_show_locals=True,
        show_time=True,
        show_level=True,
        show_path=True,
    )
    logging.basicConfig(
        level=log_level,
        format="%(name)s - %(message)s",
        datefmt="[%X]",
        handlers=[rich_handler],
    )
    logging.getLogger().info("Logging initialized")
112 changes: 112 additions & 0 deletions app/chapter.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,112 @@
import logging
from typing import Optional
from pathlib import Path

from rich.console import Console
from rich.prompt import Prompt, Confirm
from rich.table import Table
from rich.progress import track
from rich.panel import Panel
from rich.live import Live

from .utils import (
generate_story,
load_config,
load_api_key
)

logger = logging.getLogger("chapter")
console = Console()


def generate_chapter_tui():
    """TUI workflow for generating a single chapter.

    Collects the query, style, and optional fields from the user, optionally
    asks for advanced generation settings (falling back to config defaults),
    then calls generate_story(). Errors are printed to the console instead of
    being raised to the caller.
    """
    console.clear()
    console.print(Panel("Generate a Single Chapter", style="bold blue"))

    config = load_config()
    api_key = load_api_key(config)

    query = Prompt.ask("[bold blue]Enter your story query")
    style = Prompt.ask("[bold blue]Enter the story style",
                       default=config['generation'].get('style', "dark fantasy"))
    # NOTE(review): skipped optional prompts yield "" rather than None —
    # confirm generate_story treats "" as "not provided".
    character = Prompt.ask("[bold blue]Enter the focus character (optional)")
    situation = Prompt.ask("[bold blue]Enter the story situation (optional)")
    output_file = Prompt.ask("[bold blue]Enter the output file (optional)")

    # Advanced options (optional)
    if Confirm.ask("Configure advanced options?"):
        top_n = int(Prompt.ask(
            "[bold blue]Enter the number of relevant chunks (top_n)", default="3"))
        temperature = float(
            Prompt.ask("[bold blue]Enter the temperature",
                       default=str(config['generation']['temperature']))
        )
        max_tokens = int(
            Prompt.ask("[bold blue]Enter the max tokens",
                       default=str(config['generation']['max_tokens']))
        )
        max_iterations = int(
            Prompt.ask("[bold blue]Enter the max iterations",
                       default=str(config['evaluation']['max_iterations']))
        )
        min_quality = float(
            Prompt.ask(
                "[bold blue]Enter the min quality", default=str(config['evaluation']['min_quality_score'])
            )
        )
        force = Confirm.ask("Force regeneration (ignore cache)?")
    else:
        # Defaults mirror the config so both paths agree.
        top_n = 3
        temperature = config['generation']['temperature']
        max_tokens = config['generation']['max_tokens']
        max_iterations = config['evaluation']['max_iterations']
        min_quality = config['evaluation']['min_quality_score']
        force = False

    log_level = "INFO"  # Set log level for generate_story

    try:
        with Live(console=console, refresh_per_second=4) as live:
            def update_progress(step, total, description):
                # Render a one-row table with the current step and percent.
                table = Table(title=description)
                table.add_column("Step", justify="right",
                                 style="cyan", no_wrap=True)
                table.add_column("Progress", style="magenta")
                table.add_row(
                    f"{step}/{total}", f"[progress.percentage]{(step + 1) / total * 100:>3.0f}%")
                live.update(table)

            # NOTE(review): this progress loop is cosmetic — it animates
            # before generate_story() runs rather than tracking its actual
            # progress. Confirm whether real progress callbacks are intended.
            for step in track(range(100), description="Generating story..."):
                if step % 20 == 0:
                    update_progress(step + 1, 100, "Generating Story")

            generate_story(
                query,
                Path("data/embeddings.json"),
                Path(output_file) if output_file else None,
                "config.yaml",
                style,
                character,
                situation,
                top_n,
                temperature,
                max_tokens,
                max_iterations,
                min_quality,
                api_key,
                log_level,
                force,
            )

        console.print(
            Panel(
                # Single-line f-string: the original split the replacement
                # field across lines, which is a SyntaxError on Python < 3.12.
                f"Story generation complete. Check the output file: {output_file}",
                style="bold green",
            )
        )
    except Exception as e:
        console.print(
            f"[bold red]Error during story generation: {e}[/bold red]")
44 changes: 44 additions & 0 deletions app/character.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,44 @@
import json
import logging
from typing import Any, Dict
from pathlib import Path

from .path_utils import resolve_data_path

# Set up a logger for this module.
logger = logging.getLogger('character')
logger.info("Character module initialized")


def load_character_profiles(character_profiles_path: str) -> Dict[str, Any]:
    """Loads character profiles from a JSON file.

    Args:
        character_profiles_path: The path to the JSON file containing the
            character profiles; resolved via resolve_data_path().

    Returns:
        A dictionary containing the character profiles.

    Raises:
        FileNotFoundError: If the character profiles file is not found.
        json.JSONDecodeError: If there is an error decoding the JSON file.
    """
    logger.debug(f"Loading character profiles from {character_profiles_path}")

    filepath = resolve_data_path(character_profiles_path)
    if not filepath.exists():
        logger.error(f"Character profiles file not found: {filepath}")
        raise FileNotFoundError(
            f"Character profiles file not found: {filepath}")

    try:
        # Explicit encoding so parsing does not depend on the platform default.
        with open(filepath, 'r', encoding='utf-8') as f:
            profiles = json.load(f)
        logger.info(f"Character profiles successfully loaded from {filepath}")
        return profiles
    except json.JSONDecodeError as e:
        # Single-line f-string: the original split the replacement field
        # across lines, which is a SyntaxError on Python < 3.12.
        logger.error(
            f"Error decoding JSON from character profiles file '{filepath}': {e}"
        )
        raise
44 changes: 44 additions & 0 deletions app/context.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,44 @@
import logging
from typing import List, Tuple, Dict, Any
from pathlib import Path

from .text_processing import chunk_text

# Set up a logger for this module.
logger = logging.getLogger('context')
logger.info("Context module initialized")


def prepare_context(embeddings_dict: Dict[str, Any], relevant_chunks: List[Tuple[str, int, float]]) -> str:
    """Prepare context from relevant chunks.

    Args:
        embeddings_dict: A dictionary where keys are file paths and values are
            dictionaries containing 'content' (and optionally 'chunk_content',
            a precomputed list of text chunks).
        relevant_chunks: A list of (file_path, chunk_index, similarity_score)
            tuples, as returned by semantic_search.

    Returns:
        A string containing the formatted context; one "Source/Relevance"
        section per chunk, joined by "---" separators.
    """
    # Max characters of raw content used when a chunk index is unusable.
    CONTEXT_CHUNK_FALLBACK_SIZE = 5000
    logger.debug("Preparing context from relevant chunks")
    context_parts = []

    for file_path, chunk_idx, similarity in relevant_chunks:
        data = embeddings_dict[file_path]
        content = data['content']
        # Only re-chunk when precomputed chunks are absent: the original
        # data.get('chunk_content', chunk_text(content)) evaluated chunk_text
        # eagerly for every entry, even when 'chunk_content' existed.
        chunks = data['chunk_content'] if 'chunk_content' in data else chunk_text(content)

        if chunk_idx is not None and chunk_idx < len(chunks):
            context_chunk = chunks[chunk_idx]
            source = f"{Path(file_path).name} (chunk {chunk_idx + 1})"
        else:
            # Fall back to a prefix of the whole document. (Single-line
            # f-strings here: the originals were SyntaxErrors on Python < 3.12.)
            logger.warning(f"Invalid chunk index {chunk_idx} for {file_path}")
            context_chunk = content[:CONTEXT_CHUNK_FALLBACK_SIZE]
            source = Path(file_path).name

        context_parts.append(
            f"Source: {source}\nRelevance: {similarity:.2f}\n\n{context_chunk}\n")

    logger.info(f"Prepared context with {len(context_parts)} parts")
    return "\n---\n".join(context_parts)
65 changes: 65 additions & 0 deletions app/export.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,65 @@
import logging
from typing import Optional
from pathlib import Path
from datetime import datetime
import markdown

# Set up a logger for this module.
logger = logging.getLogger('export')
logger.info("Export module initialized")


def export_story(text: str, format: str = "txt", filename: Optional[str] = None):
    """Exports the story in the specified format.

    Args:
        text: The story text to export.
        format: The format to export the story in (txt, md, or html).
        filename: The name of the file to export the story to. If None, a
            timestamped filename is generated.
    """
    # Validate the format before touching the filesystem; the original opened
    # (and so created/truncated) the output file even for unsupported formats.
    if format not in ("txt", "md", "html"):
        logger.error(f"Unsupported format: {format}")
        return

    if filename is None:
        timestamp = datetime.now().strftime("%Y%m%d-%H%M%S")
        filename = f"story_{timestamp}.{format}"

    # Output lands next to this module, not in the current working directory.
    filepath = Path(__file__).parent / filename

    try:
        with open(filepath, 'w', encoding='utf-8') as f:
            if format == "txt":
                f.write(text)
            elif format == "md":
                # NOTE(review): this converts the text to HTML before writing
                # the .md file — confirm that is intended for "md" export.
                f.write(markdown.markdown(text))
            else:  # format == "html"
                f.write(f"""
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>Story Export</title>
<style>
body {{
font-family: Arial, sans-serif;
line-height: 1.6;
color: #333;
margin: 20px;
}}
h1, h2, h3, h4, h5, h6 {{
color: #0056b3;
}}
p {{
margin-bottom: 1em;
}}
</style>
</head>
<body>
{markdown.markdown(text)}
</body>
</html>
""")
        # Log the actual destination; the original messages said "(unknown)".
        logger.info(f"Story exported to {filepath} in {format} format")
    except Exception as e:
        logger.error(f"Error exporting story to {filepath}: {e}")
Loading

0 comments on commit 70e4a7b

Please sign in to comment.