initial release
Some checks failed
CI / test (3.13) (push) Failing after 22s

Alexander Kalinovsky
2025-08-25 19:45:21 +03:00
parent cc067ec78d
commit 2954913673
36 changed files with 2890 additions and 11 deletions

44
.gitea/workflows/ci.yaml Normal file

@@ -0,0 +1,44 @@
# SPDX-FileCopyrightText: 2025 Alexander Kalinovsky <a@k8y.ru>
#
# SPDX-License-Identifier: MIT
name: CI
on:
push:
branches:
- main
- dev
pull_request:
jobs:
test:
runs-on: ubuntu-latest
strategy:
matrix:
python-version:
- "3.13"
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Set up uv
uses: astral-sh/setup-uv@v4
with:
enable-cache: true
python-version: ${{ matrix.python-version }}
- name: Install dependencies
run: |
uv sync --all-extras --dev
- name: Ruff (lint + format check)
run: |
uv run ruff check .
uv run ruff format --check
- name: MyPy
run: |
uv run mypy src
- name: Pytest
run: |
uv run pytest

View File

@@ -0,0 +1,35 @@
# SPDX-FileCopyrightText: 2025 Alexander Kalinovsky <a@k8y.ru>
#
# SPDX-License-Identifier: MIT
name: Publish to TestPyPI
on:
push:
tags:
- "v*.*.*rc*"
- "v*.*.*a*"
- "v*.*.*b*"
jobs:
build-publish:
permissions:
id-token: write
contents: read
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Set up uv
uses: astral-sh/setup-uv@v4
with:
enable-cache: true
python-version: "3.13"
- name: Build wheel & sdist
run: |
uv build
- name: Publish (TestPyPI)
run: |
uv publish --repository-url https://test.pypi.org/legacy/ --skip-existing

20
.gitignore vendored Normal file

@@ -0,0 +1,20 @@
# SPDX-FileCopyrightText: 2025 Alexander Kalinovsky <a@k8y.ru>
#
# SPDX-License-Identifier: MIT
__pycache__
.venv
.env
.pytest_cache
.DS_Store
.ruff_cache
.mypy_cache
uv.lock
build/
*.egg-info/
.coverage
*.py,cover
.vscode/
htmlcov/
/output/
/templates/

31
.pre-commit-config.yaml Normal file

@@ -0,0 +1,31 @@
# SPDX-FileCopyrightText: 2025 Alexander Kalinovsky <a@k8y.ru>
#
# SPDX-License-Identifier: MIT
repos:
- repo: https://github.com/astral-sh/ruff-pre-commit
rev: v0.12.10
hooks:
- id: ruff-check
args: ["--fix"]
- id: ruff-format
- repo: https://github.com/pre-commit/mirrors-mypy
rev: v1.17.1
hooks:
- id: mypy
args: [
"--python-version", "3.13",
"--strict",
"--ignore-missing-imports",
"--warn-unused-ignores",
"--disable-error-code=misc"
]
additional_dependencies: ["types-pyyaml"]
- repo: https://github.com/codespell-project/codespell
rev: v2.4.1
hooks:
- id: codespell
- repo: https://github.com/fsfe/reuse-tool
rev: v5.0.2
hooks:
- id: reuse

View File

@@ -1,6 +1,6 @@
MIT License
-Copyright (c) <year> <copyright holders>
+Copyright (c) 2025 BotForge
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
associated documentation files (the "Software"), to deal in the Software without restriction, including

100
README.md

@@ -1,2 +1,100 @@
<!--
SPDX-FileCopyrightText: 2025 Alexander Kalinovsky <a@k8y.ru>
SPDX-License-Identifier: MIT
-->
# quickbot_cli
QuickBot CLI for scaffolding new projects from templates.
### Features
- Generate a ready-to-run QuickBot app structure from templates
- Optional modules (e.g., Alembic migrations, i18n) included/excluded via flags
## Installation
You can install the CLI into your environment:
```bash
uv pip install quickbot-cli
```
Alternatively, for local development in this repo:
```bash
uv pip install -e .[dev]
```
## Usage
Show help:
```bash
uv run quickbot --help
uv run quickbot init --help
```
Generate a project into a target directory (default template: "basic"):
```bash
uv run quickbot init ./my_bot \
--template basic \
--project-name my_bot \
--description "My awesome bot" \
--author "Jane Doe" \
--license-name MIT \
--include-alembic \
--include-i18n \
--overwrite
```
Key options:
- `--template, -t`: template name (default: `basic`)
- `--project-name`: project name used during rendering
- `--description`: short description
- `--author`: author name
- `--license-name`: license identifier (e.g., MIT)
- `--include-alembic/--no-include-alembic`: include Alembic files (default: on)
- `--include-i18n/--no-include-i18n`: include i18n files (default: on)
- `--overwrite`: overwrite existing files when rendering
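For example, a non-interactive run that skips both optional modules (using the negated flag forms listed above):
```bash
uv run quickbot init ./my_bot \
  --project-name my_bot \
  --no-include-alembic \
  --no-include-i18n
```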
## Templates
Built-in templates live under `src/quickbot_cli/templates/`. The default is `basic` and includes a minimal app layout plus optional Alembic/i18n modules.
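Roughly, generating from `basic` with everything enabled yields a layout like this (an abridged sketch based on the template files in this repo; exact file names may differ):
```text
my_bot/
├── app/            # application package: models, config, bot entry point
├── alembic/        # optional: Alembic migration environment (--include-alembic)
├── locales/        # optional: i18n message catalogs (--include-i18n)
├── scripts/        # helper scripts: migrations_*.sh, babel_*.sh
├── pyproject.toml
└── README.md
```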
Each template can include a `__template__.yaml` file describing variables and post-generation tasks. Example:
```yaml
variables:
project_name:
prompt: Project name
default: my_project
include_alembic:
prompt: Include Alembic?
choices: ["yes", "no"]
default: "yes"
post_tasks:
- when: "{{ include_alembic }}"
run: ["echo", "alembic_initialized"]
```
Template files use the `.j2` suffix and are rendered to the output path with the suffix stripped (for example, `README.md.j2` becomes `README.md`), with the resolved variables exposed to Jinja2. Non-`.j2` files are copied as-is.
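Under the hood this is plain Jinja2; a simplified sketch of the rendering step (not the CLI's exact code path) looks like this:
```python
from pathlib import Path

from jinja2 import Environment, FileSystemLoader, StrictUndefined

template_dir = Path("src/quickbot_cli/templates/basic")
env = Environment(loader=FileSystemLoader(str(template_dir)), undefined=StrictUndefined)

# "README.md.j2" is rendered and written out as "README.md" (the ".j2" suffix is stripped)
template = env.get_template("README.md.j2")
rendered = template.render(project_name="my_bot", description="My awesome bot")
Path("./my_bot/README.md").write_text(rendered, encoding="utf-8")
```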
## Development
Clone the repo and install dev deps:
```bash
uv pip install -e .[dev]
```
Run tests:
```bash
uv run python run_tests.py
# or
uv run -m pytest tests/ -v --tb=short
```
Code style and tooling:
- Ruff and MyPy configs are in `pyproject.toml`
- Pre-commit hooks: `.pre-commit-config.yaml`
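To run the same checks locally (standard pre-commit commands, invoked through `uv` so the dev extra is used):
```bash
uv run pre-commit install          # install the git hook once
uv run pre-commit run --all-files  # run all hooks against the whole repo
```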
## License
MIT. See `LICENSES/MIT.txt`.

73
pyproject.toml Normal file

@@ -0,0 +1,73 @@
# SPDX-FileCopyrightText: 2025 Alexander Kalinovsky <a@k8y.ru>
#
# SPDX-License-Identifier: MIT
[build-system]
requires = [
"hatchling",
"hatch-vcs",
]
build-backend = "hatchling.build"
[project]
name = "quickbot-cli"
dynamic = ["version"]
description = "QuickBot CLI"
authors = [
{ name = "Alexander Kalinovsky", email = "ak@botforge.biz" }
]
readme = "README.md"
requires-python = ">=3.13"
license = { text = "MIT" }
dependencies = [
"typer",
"quickbot>=0.1.1",
"jinja2",
"pyyaml",
]
[project.scripts]
quickbot = "quickbot_cli.cli:main"
[project.optional-dependencies]
dev = [
"pytest",
"pytest-cov",
"mypy",
"ruff",
"pre-commit",
"reuse",
"codespell",
"types-pyyaml",
]
[tool.ruff]
line-length = 120
target-version = "py313"
[tool.ruff.lint]
select = ["ALL"]
ignore = ["D203", "D213", "COM812", "PLR0913"]
[tool.ruff.lint.per-file-ignores]
"tests/**" = ["S101", "PT011"]
[tool.ruff.format]
quote-style = "double"
indent-style = "space"
line-ending = "lf"
[tool.mypy]
python_version = "3.13"
strict = true
ignore_missing_imports = true
warn_unused_ignores = true
[tool.hatch.version]
source = "vcs"
[tool.hatch.build.targets.sdist]
include = ["src/quickbot_cli", "LICENSES/**", "README.md"]
[tool.hatch.build.targets.wheel]
packages = ["src/quickbot_cli"]

38
run_tests.py Executable file

@@ -0,0 +1,38 @@
#!/usr/bin/env python3
# SPDX-FileCopyrightText: 2025 Alexander Kalinovsky <a@k8y.ru>
#
# SPDX-License-Identifier: MIT
"""Test runner script for QuickBot CLI."""
import shutil
import subprocess
import sys
import typer
def main() -> None:
"""Run the test suite using uv so deps aren't installed globally."""
uv_path = shutil.which("uv")
if uv_path is None:
typer.echo(
"'uv' is not installed. Please install it (e.g., via 'pipx install uv') and retry.",
)
sys.exit(2)
# Run tests via uv ensuring the dev extra is available
# This subprocess call is safe as it only runs pytest with known arguments
result = subprocess.run([uv_path, "run", "--extra", "dev", "pytest", "tests/", "-v", "--tb=short"], check=False) # noqa: S603
if result.returncode == 0:
typer.echo("✅ All tests passed!")
sys.exit(0)
else:
typer.echo("❌ Some tests failed!")
sys.exit(1)
if __name__ == "__main__":
main()

View File

@@ -0,0 +1,12 @@
# SPDX-FileCopyrightText: 2025 Alexander Kalinovsky <a@k8y.ru>
#
# SPDX-License-Identifier: MIT
"""Quickbot CLI."""
from importlib.metadata import PackageNotFoundError, version
try:
__version__ = version("quickbot-cli")
except PackageNotFoundError:
__version__ = "0.0.0"

371
src/quickbot_cli/cli.py Normal file

@@ -0,0 +1,371 @@
# SPDX-FileCopyrightText: 2025 Alexander Kalinovsky <a@k8y.ru>
#
# SPDX-License-Identifier: MIT
"""QuickBot CLI tool for generating project structures."""
import re
import shutil
import subprocess
from pathlib import Path
from typing import Any
import typer
import yaml
from jinja2 import Environment, FileSystemLoader, StrictUndefined
app = typer.Typer(help="Project scaffolding CLI")
TEMPLATES_DIR = Path(__file__).parent / "templates"
# Module-level constants
DEFAULT_TEMPLATE = "basic"
BINARY_EXTS = {".png", ".jpg", ".jpeg", ".gif", ".ico", ".pdf", ".zip"}
OUTPUT_DIR_ARG = typer.Argument(..., help="Output directory")
def load_template_spec(template_dir: Path) -> dict[str, Any]:
"""Load template specification from a template directory.
Args:
template_dir: Path to the template directory
Returns:
Dictionary containing template variables and post-tasks
"""
spec_file = template_dir / "__template__.yaml"
if not spec_file.exists():
return {"variables": {}, "post_tasks": []}
try:
with spec_file.open(encoding="utf-8") as f:
spec = yaml.safe_load(f) or {}
return {
"variables": spec.get("variables", {}),
"post_tasks": spec.get("post_tasks", []) or [],
}
except yaml.YAMLError as e:
typer.secho(f"Error parsing template spec: {e}", fg=typer.colors.RED)
raise typer.Exit(1) from e
def _to_bool_like(*, value: str | bool | int) -> bool | None:
"""Convert a value to a boolean-like value.
Args:
value: Value to convert
Returns:
Boolean value or None if conversion is not possible
"""
if isinstance(value, bool):
return value
s = str(value).strip().lower()
if s in {"true", "t", "yes", "y", "1"}:
return True
if s in {"false", "f", "no", "n", "0"}:
return False
return None
def _handle_boolean_choices(prompt: str, choices: list[bool], *, default: bool | None) -> bool:
"""Handle boolean choice variables.
Args:
prompt: User prompt text
choices: List of boolean choices
default: Default value
Returns:
Selected boolean value
Raises:
typer.Exit: If invalid input is provided
"""
raw = typer.prompt(f"{prompt} {choices} (default: {default})", default=default)
coerced = _to_bool_like(value=raw)
if coerced is None:
typer.secho(
f"Value must be one of {choices} (accepted: true/false, yes/no, y/n, 1/0)",
fg=typer.colors.RED,
)
raise typer.Exit(code=1)
return coerced
def _handle_regular_choices(prompt: str, choices: list[str], default: str | None) -> str:
"""Handle regular choice variables.
Args:
prompt: User prompt text
choices: List of available choices
default: Default value
Returns:
Selected value
Raises:
typer.Exit: If invalid input is provided
"""
val: str = typer.prompt(f"{prompt} {choices} (default: {default})", default=default)
if val not in choices:
typer.secho(f"Value must be one of {choices}", fg=typer.colors.RED)
raise typer.Exit(code=1)
return val
def ask_variables(spec: dict[str, Any], non_interactive: dict[str, Any]) -> dict[str, Any]:
"""Prompt user for template variables or use non-interactive values.
Args:
spec: Template specification containing variables
non_interactive: Dictionary of non-interactive variable values
Returns:
Dictionary of resolved variable values
"""
vars_spec = spec.get("variables", {})
ctx: dict[str, Any] = {}
for name, meta in vars_spec.items():
if name in non_interactive and non_interactive[name] is not None:
val: str | bool = str(non_interactive[name])
else:
prompt = meta.get("prompt", name)
default = meta.get("default")
choices = meta.get("choices")
if choices:
# Detect boolean-choice variables and coerce input accordingly
is_boolean_choices = all(isinstance(c, bool) for c in choices)
if is_boolean_choices:
val = _handle_boolean_choices(prompt=prompt, choices=choices, default=default)
else:
val = _handle_regular_choices(prompt=prompt, choices=choices, default=default)
else:
val = typer.prompt(prompt, default=default)
validate = meta.get("validate")
if validate and not re.match(validate, str(val)):
typer.secho(f"Invalid value for {name}: {val}", fg=typer.colors.RED)
raise typer.Exit(code=1)
ctx[name] = val
ctx["package_name"] = "app"
return ctx
def render_tree(
env: Environment,
template_root: Path,
output_dir: Path,
context: dict[str, Any],
*,
overwrite: bool,
original_root: Path | None = None,
) -> None:
"""Render template tree to output directory.
Args:
env: Jinja2 environment for template rendering
template_root: Root directory containing templates
output_dir: Directory to output rendered files
context: Context variables for template rendering
overwrite: Whether to overwrite existing files
original_root: Original template root for path calculation
"""
# Ensure output directory exists
output_dir.mkdir(parents=True, exist_ok=True)
# Use original_root for path calculation, fallback to template_root for backward compatibility
root_for_path = original_root if original_root is not None else template_root
for item in template_root.iterdir():
if item.is_file():
if item.suffix == ".j2":
# Render template file
output_file = output_dir / item.stem
if output_file.exists() and not overwrite:
msg = f"File exists: {output_file}"
raise FileExistsError(msg)
try:
template = env.get_template(str(item.relative_to(root_for_path)))
content = template.render(**context)
output_file.write_text(content, encoding="utf-8")
except Exception as e:
typer.secho(f"Error rendering {item}: {e}", fg=typer.colors.RED)
raise
else:
# Copy non-template file
output_file = output_dir / item.name
if output_file.exists() and not overwrite:
msg = f"File exists: {output_file}"
raise FileExistsError(msg)
shutil.copy2(item, output_file)
elif item.is_dir():
# Recursively render subdirectory
sub_output = output_dir / item.name
render_tree(env, item, sub_output, context, overwrite=overwrite, original_root=root_for_path)
def run_post_tasks(spec: dict[str, Any], context: dict[str, Any], cwd: Path) -> None:
"""Run post-generation tasks based on template specification.
Args:
spec: Template specification containing post-tasks
context: Context variables for task execution
cwd: Working directory for task execution
"""
tasks = spec.get("post_tasks", []) or []
for task in tasks:
cond = task.get("when")
if cond:
env = Environment(undefined=StrictUndefined, autoescape=True)
rendered_cond = env.from_string(str(cond)).render(**context)
if rendered_cond.strip().lower() not in ("true", "yes", "1"):
continue
cmd = task.get("run")
if not cmd:
continue
try:
# This subprocess call is safe as it only executes commands from the template spec
# which are controlled by the user/developer, not external input
subprocess.run(cmd, cwd=cwd, check=True) # noqa: S603
except subprocess.CalledProcessError as e:
typer.secho(f"Post-task failed: {cmd} -> {e}", fg=typer.colors.RED)
def apply_optionals(output: Path, *, include_alembic: bool, include_i18n: bool) -> None:
"""Apply optional module configurations by removing disabled modules.
Args:
output: Output directory path
include_alembic: Whether to include Alembic
include_i18n: Whether to include i18n
"""
# If module is disabled, remove its files
if not include_alembic:
for p in [
output / "alembic",
output / "scripts" / "migrations_apply.sh",
output / "scripts" / "migrations_generate.sh",
]:
if p.exists():
if p.is_dir():
shutil.rmtree(p)
else:
p.unlink(missing_ok=True)
if not include_i18n:
for p in [
output / "locales",
output / "scripts" / "babel_compile.sh",
output / "scripts" / "babel_extract.sh",
output / "scripts" / "babel_init.sh",
output / "scripts" / "babel_update.sh",
]:
if p.exists():
if p.is_dir():
shutil.rmtree(p)
else:
p.unlink(missing_ok=True)
def _init_project(
output: Path,
template: str = DEFAULT_TEMPLATE,
*,
project_name: str | None = None,
description: str | None = None,
author: str | None = None,
license_name: str | None = None,
include_alembic: bool | None = None,
include_i18n: bool | None = None,
overwrite: bool = False,
) -> None:
"""Generate a project with the structure app/ and optional Alembic / Babel."""
template_dir = TEMPLATES_DIR / template
if not template_dir.exists():
msg = f"Template '{template}' not found"
raise FileNotFoundError(msg)
# Load template spec
spec = load_template_spec(template_dir)
# Prepare context
context = {
"project_name": project_name or output.name,
"description": description,
"author": author,
"license": license_name,
"include_alembic": bool(include_alembic) if include_alembic is not None else True,
"include_i18n": bool(include_i18n) if include_i18n is not None else True,
}
# Create output directory
output.mkdir(parents=True, exist_ok=True)
# Render templates
env = Environment(
loader=FileSystemLoader(str(template_dir)),
undefined=StrictUndefined,
keep_trailing_newline=True,
autoescape=True,
)
render_tree(env, template_dir, output, context, overwrite=overwrite, original_root=template_dir)
# Apply optional configurations (None means "use the default", which the context above resolves to enabled)
apply_optionals(output, include_alembic=bool(context["include_alembic"]), include_i18n=bool(context["include_i18n"]))
# Run post-tasks
run_post_tasks(spec, context, output)
typer.secho(f"Project generated successfully in {output}", fg=typer.colors.GREEN)
@app.command()
def init(
output: Path = OUTPUT_DIR_ARG,
template: str = typer.Option(DEFAULT_TEMPLATE, "--template", "-t"),
project_name: str | None = typer.Option(None, help="Project name"),
description: str | None = typer.Option(None, help="Description"),
author: str | None = typer.Option(None, help="Author"),
license_name: str | None = typer.Option(None, help="License"),
*,
include_alembic: bool = typer.Option(default=True, help="Include Alembic"),
include_i18n: bool = typer.Option(default=True, help="Include i18n"),
overwrite: bool = typer.Option(default=False, help="Overwrite existing files"),
) -> None:
"""CLI wrapper for _init_project function."""
_init_project(
output=output,
template=template,
project_name=project_name,
description=description,
author=author,
license_name=license_name,
include_alembic=include_alembic,
include_i18n=include_i18n,
overwrite=overwrite,
)
def main() -> None:
"""Run the main CLI application."""
app() # pragma: no cover
if __name__ == "__main__": # pragma: no cover
main()

View File

@@ -0,0 +1,27 @@
{#
SPDX-FileCopyrightText: 2025 Alexander Kalinovsky <a@k8y.ru>
SPDX-License-Identifier: MIT
#}
ENVIRONMENT = "local"
LOG_LEVEL = "DEBUG"
STACK_NAME = "{{ project_name }}"
SECRET_KEY = "changethis"
DB_NAME = "{{ project_name | replace(' ', '_') | replace('-', '_') | lower }}"
DB_USER = "{{ project_name | replace(' ', '_') | replace('-', '_') | lower }}"
DB_PASSWORD = "changethis"
DB_HOST = "localhost"
DB_PORT = 5432
TELEGRAM_WEBHOOK_DOMAIN = "example.com"
TELEGRAM_WEBHOOK_SCHEME = "https"
TELEGRAM_WEBHOOK_PORT = 443
TELEGRAM_WEBHOOK_AUTH_KEY = "changethis"
TELEGRAM_BOT_TOKEN = "changethis"
TELEGRAM_BOT_SERVER = "https://api.telegram.org"
TELEGRAM_BOT_SERVER_IS_LOCAL = False
ADMIN_TELEGRAM_ID = changethis

View File

@@ -0,0 +1,11 @@
{#
SPDX-FileCopyrightText: 2025 Alexander Kalinovsky <a@k8y.ru>
SPDX-License-Identifier: MIT
#}
__pycache__/
*.pyc
.env
.venv
uv.lock

View File

@@ -0,0 +1,9 @@
{#
SPDX-FileCopyrightText: 2025 Alexander Kalinovsky <a@k8y.ru>
SPDX-License-Identifier: MIT
#}
# {{ project_name }}
{{ description }}

View File

@@ -0,0 +1,27 @@
# SPDX-FileCopyrightText: 2025 Alexander Kalinovsky <a@k8y.ru>
#
# SPDX-License-Identifier: MIT
variables:
project_name:
prompt: "Project name"
default: "my_bot"
description:
prompt: "Description"
default: "My awesome bot"
author:
prompt: "Author"
default: "John Doe"
license:
prompt: "License"
default: "MIT"
include_alembic:
prompt: "Include Alembic migrations?"
choices: [true, false]
default: true
include_i18n:
prompt: "Include i18n?"
choices: [true, false]
default: true

View File

@@ -0,0 +1,45 @@
{#
SPDX-FileCopyrightText: 2025 Alexander Kalinovsky <a@k8y.ru>
SPDX-License-Identifier: MIT
#}
[alembic]
script_location = alembic
version_path_separator = os.pathsep
# Logging configuration
[loggers]
keys = root,sqlalchemy,alembic
[handlers]
keys = console
[formatters]
keys = generic
[logger_root]
level = WARN
handlers = console
qualname =
[logger_sqlalchemy]
level = WARN
handlers =
qualname = sqlalchemy.engine
[logger_alembic]
level = INFO
handlers =
qualname = alembic
[handler_console]
class = StreamHandler
args = (sys.stderr,)
level = NOTSET
formatter = generic
[formatter_generic]
format = %(levelname)-5.5s [%(name)s] %(message)s
datefmt = %H:%M:%S

View File

@@ -0,0 +1,115 @@
{#
SPDX-FileCopyrightText: 2025 Alexander Kalinovsky <a@k8y.ru>
SPDX-License-Identifier: MIT
#}
import asyncio
from logging.config import fileConfig
from sqlalchemy import pool
from sqlalchemy.engine import Connection
from sqlalchemy.ext.asyncio import async_engine_from_config
from app.models import User
from app.config import BotConfig
from quickbot.model.bot_enum import EnumType
from quickbot.model.pydantic_json import PydanticJSON
from alembic import context
# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
bot_config = BotConfig()
config = context.config
# Interpret the config file for Python logging.
# This line sets up loggers basically.
if config.config_file_name is not None:
fileConfig(config.config_file_name)
config.set_main_option("sqlalchemy.url", bot_config.DATABASE_URI)
# add your model's MetaData object here
# for 'autogenerate' support
# from myapp import mymodel
# target_metadata = mymodel.Base.metadata
target_metadata = User.metadata
# other values from the config, defined by the needs of env.py,
# can be acquired:
# my_important_option = config.get_main_option("my_important_option")
# ... etc.
def render_item(type_, obj, autogen_context):
"""Apply custom rendering for selected items."""
if type_ == "type" and isinstance(obj, (EnumType, PydanticJSON)):
return f"sqlmodel.{obj.impl!r}"
return False
def run_migrations_offline() -> None:
"""Run migrations in 'offline' mode.
This configures the context with just a URL
and not an Engine, though an Engine is acceptable
here as well. By skipping the Engine creation
we don't even need a DBAPI to be available.
Calls to context.execute() here emit the given string to the
script output.
"""
url = config.get_main_option("sqlalchemy.url")
context.configure(
url=url,
target_metadata=target_metadata,
literal_binds=True,
render_item=render_item,
dialect_opts={"paramstyle": "named"},
)
with context.begin_transaction():
context.run_migrations()
def do_run_migrations(connection: Connection) -> None:
context.configure(
connection=connection,
target_metadata=target_metadata,
render_item=render_item,
)
with context.begin_transaction():
context.run_migrations()
async def run_async_migrations() -> None:
"""In this scenario we need to create an Engine
and associate a connection with the context.
"""
connectable = async_engine_from_config(
config.get_section(config.config_ini_section, {}),
prefix="sqlalchemy.",
poolclass=pool.NullPool,
)
async with connectable.connect() as connection:
await connection.run_sync(do_run_migrations)
await connectable.dispose()
def run_migrations_online() -> None:
"""Run migrations in 'online' mode."""
asyncio.run(run_async_migrations())
if context.is_offline_mode():
run_migrations_offline()
else:
run_migrations_online()

View File

@@ -0,0 +1,10 @@
{#
SPDX-FileCopyrightText: 2025 Alexander Kalinovsky <a@k8y.ru>
SPDX-License-Identifier: MIT
#}
from quickbot.config import Config
class BotConfig(Config): ...

View File

@@ -0,0 +1,14 @@
{#
SPDX-FileCopyrightText: 2025 Alexander Kalinovsky <a@k8y.ru>
SPDX-License-Identifier: MIT
#}
from quickbot import QuickBot
from .models import User
app = QuickBot(
user_class=User,
)

View File

@@ -0,0 +1,14 @@
{#
SPDX-FileCopyrightText: 2025 Alexander Kalinovsky <a@k8y.ru>
SPDX-License-Identifier: MIT
#}
from .user import User
from .role import Role
{% if include_i18n %}
from .language import Language
{% endif %}
__all__ = ["User", "Role", {% if include_i18n %} "Language" {% endif %}]

View File

@@ -0,0 +1,15 @@
{#
SPDX-FileCopyrightText: 2025 Alexander Kalinovsky <a@k8y.ru>
SPDX-License-Identifier: MIT
#}
from quickbot import EnumMember
from quickbot.model.language import LanguageBase
class Language(LanguageBase):
DEFAULT = EnumMember("default", {"default": "English"})
locales = [lc.value for lc in Language.all_members.values()]

View File

@@ -0,0 +1,24 @@
{#
SPDX-FileCopyrightText: 2025 Alexander Kalinovsky <a@k8y.ru>
SPDX-License-Identifier: MIT
#}
from quickbot.model import RoleBase
{% if include_i18n %}
from quickbot import EnumMember
from .language import locales
from quickbot.i18n import get_local_text as _
class Role(RoleBase):
SUPER_USER = EnumMember(
"super_user", {lc: _("role_super_user", lc) for lc in locales}
)
DEFAULT_USER = EnumMember(
"default_user", {lc: _("role_default_user", lc) for lc in locales}
)
{% else %}
class Role(RoleBase): ...
{% endif %}

View File

@@ -0,0 +1,24 @@
{#
SPDX-FileCopyrightText: 2025 Alexander Kalinovsky <a@k8y.ru>
SPDX-License-Identifier: MIT
#}
from quickbot import Entity
from quickbot.model import UserBase
{% if include_i18n %}
from aiogram.utils.i18n import lazy_gettext as __
class User(UserBase):
bot_entity_descriptor = Entity(
icon="👤",
full_name=__("entity_user"),
full_name_plural=__("entity_users"),
)
{% else %}
class User(UserBase): ...
{% endif %}

View File

@@ -0,0 +1,18 @@
{#
SPDX-FileCopyrightText: 2025 Alexander Kalinovsky <a@k8y.ru>
SPDX-License-Identifier: MIT
#}
[project]
name = "{{ project_name | replace(' ', '-') | lower }}"
version = "0.1.0"
description = "{{ description }}"
authors = [{ name = "{{ author }}" }]
readme = "README.md"
requires-python = ">=3.13"
dependencies = [
{% if include_i18n %}"quickbot[i18n,cli]>=0.1.1",{% else %}"quickbot[cli]>=0.1.1",{% endif %}
{% if include_alembic %}"alembic",{% endif %}
]

View File

@@ -0,0 +1,10 @@
{#
SPDX-FileCopyrightText: 2025 Alexander Kalinovsky <a@k8y.ru>
SPDX-License-Identifier: MIT
#}
#!/bin/bash
# Compile the translations
pybabel compile -d locales -D messages

View File

@@ -0,0 +1,11 @@
{#
SPDX-FileCopyrightText: 2025 Alexander Kalinovsky <a@k8y.ru>
SPDX-License-Identifier: MIT
#}
#!/bin/bash
# Extract the messages from the source code
pybabel extract -k __ --input-dirs=. --output=locales/messages.pot
cat locales/static.pot >> locales/messages.pot

View File

@@ -0,0 +1,16 @@
{#
SPDX-FileCopyrightText: 2025 Alexander Kalinovsky <a@k8y.ru>
SPDX-License-Identifier: MIT
#}
#!/bin/bash
# Check if the language parameter is provided
if [ -z "$1" ]; then
echo "Please provide a language code."
exit 1
fi
# Initialize the translation files
pybabel init -i locales/messages.pot -d locales -D messages -l $1

View File

@@ -0,0 +1,10 @@
{#
SPDX-FileCopyrightText: 2025 Alexander Kalinovsky <a@k8y.ru>
SPDX-License-Identifier: MIT
#}
#!/bin/bash
# Update the translation files
pybabel update -i locales/messages.pot -d locales -D messages

View File

@@ -0,0 +1,10 @@
{#
SPDX-FileCopyrightText: 2025 Alexander Kalinovsky <a@k8y.ru>
SPDX-License-Identifier: MIT
#}
#!/bin/bash
# Upgrade the database to the latest version
alembic upgrade head

View File

@@ -0,0 +1,16 @@
{#
SPDX-FileCopyrightText: 2025 Alexander Kalinovsky <a@k8y.ru>
SPDX-License-Identifier: MIT
#}
#!/bin/bash
# Check if the description parameter is provided
if [ -z "$1" ]; then
echo "Please provide a description for the migration."
exit 1
fi
# Generate the migration script using Alembic
alembic revision -m "$1" --autogenerate

123
tests/README.md Normal file

@@ -0,0 +1,123 @@
<!--
SPDX-FileCopyrightText: 2025 Alexander Kalinovsky <a@k8y.ru>
SPDX-License-Identifier: MIT
-->
# CLI Tests
This directory contains comprehensive tests for the QuickBot CLI functionality.
## Test Structure
### `conftest.py`
Contains pytest fixtures and configuration:
- `cli_runner`: Typer CLI test runner
- `temp_dir`: Temporary directory for testing
- `mock_template_dir`: Mock template directory structure
- `mock_typer_prompt`: Mock for typer.prompt to avoid interactive input
- `mock_typer_secho`: Mock for typer.secho output
- `mock_typer_echo`: Mock for typer.echo output
- `mock_subprocess_run`: Mock for subprocess.run
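As an illustration, a test that combines `temp_dir` with the real `load_template_spec` helper might look like this (it mirrors `test_load_template_spec_without_file` in `test_cli.py`):
```python
from pathlib import Path

from quickbot_cli.cli import load_template_spec


def test_missing_spec_returns_defaults(temp_dir: Path) -> None:
    """A template directory without __template__.yaml yields empty variables and post_tasks."""
    assert load_template_spec(temp_dir) == {"variables": {}, "post_tasks": []}
```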
### `test_cli.py`
Core unit tests covering:
- Template specification loading
- Variable prompting and validation
- Template file rendering
- Post-task execution
- Optional module inclusion/exclusion
- CLI command functionality
- Help and argument parsing
### `test_integration.py`
Integration tests covering:
- Full project generation workflow
- Module inclusion/exclusion scenarios
- Overwrite functionality
- End-to-end CLI operations
### `test_edge_cases.py`
Edge case and error handling tests:
- Boundary conditions
- Error scenarios
- Malformed input handling
- Deep nesting and large files
## Running Tests
### Using pytest directly
```bash
# Run all tests
pytest tests/
# Run with coverage
pytest tests/ --cov=src/quickbot_cli --cov-report=html
# Run specific test file
pytest tests/test_cli.py
# Run specific test class
pytest tests/test_cli.py::TestLoadTemplateSpec
# Run specific test method
pytest tests/test_cli.py::TestLoadTemplateSpec::test_load_template_spec_with_valid_file
```
### Using the test runner script
```bash
python run_tests.py
```
### Using development dependencies
```bash
# Install development dependencies
pip install -e .[dev]
# Run tests
pytest
```
## Test Coverage
The test suite covers:
- **Template Loading**: YAML parsing, error handling, default values
- **Variable Handling**: Interactive prompts, validation, choices, regex
- **File Rendering**: Jinja2 templating, binary files, directory structure
- **Post Tasks**: Conditional execution, subprocess handling, error recovery
- **Optional Modules**: Alembic and Babel inclusion/exclusion
- **CLI Interface**: Command parsing, help, arguments, error handling
- **Integration**: End-to-end workflows, file operations, edge cases
## Adding New Tests
When adding new tests:
1. **Unit Tests**: Add to appropriate test class in `test_cli.py`
2. **Integration Tests**: Add to `test_integration.py`
3. **Edge Cases**: Add to `test_edge_cases.py`
4. **Fixtures**: Add to `conftest.py` if reusable
### Test Naming Convention
- Test files: `test_*.py`
- Test classes: `Test*`
- Test methods: `test_*`
### Test Documentation
Each test should have a descriptive docstring explaining what it tests and why.
## Mocking Strategy
- **External Dependencies**: Use `unittest.mock.patch` for file system, subprocess, etc.
- **User Input**: Mock `typer.prompt` to avoid interactive input during tests
- **Output**: Mock `typer.secho` and `typer.echo` to capture and verify output
- **File Operations**: Use temporary directories to avoid affecting the real file system
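A minimal sketch of the prompt-mocking pattern (the `mock_typer_prompt` fixture in `conftest.py` wraps the same `patch` call):
```python
from unittest.mock import patch

from quickbot_cli.cli import ask_variables


def test_prompt_is_mocked() -> None:
    """Prompts are patched so the test never blocks on interactive input."""
    spec = {"variables": {"project_name": {"prompt": "Project name", "default": "demo"}}}
    with patch("quickbot_cli.cli.typer.prompt", return_value="my_bot"):
        assert ask_variables(spec, {})["project_name"] == "my_bot"
```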
## Continuous Integration
Tests are configured to run with:
- Coverage reporting (HTML, XML, terminal)
- Strict marker validation
- Verbose output for debugging
- Short traceback format for readability

5
tests/__init__.py Normal file

@@ -0,0 +1,5 @@
# SPDX-FileCopyrightText: 2025 Alexander Kalinovsky <a@k8y.ru>
#
# SPDX-License-Identifier: MIT
"""Tests for QuickBot CLI."""

146
tests/conftest.py Normal file

@@ -0,0 +1,146 @@
# SPDX-FileCopyrightText: 2025 Alexander Kalinovsky <a@k8y.ru>
#
# SPDX-License-Identifier: MIT
"""Pytest configuration and fixtures for CLI tests."""
import shutil
import sys
import tempfile
from collections.abc import Generator
from pathlib import Path
from unittest.mock import MagicMock, patch
import pytest
from typer.testing import CliRunner
# Add src to path for imports
sys.path.insert(0, str(Path(__file__).parent.parent / "src"))
@pytest.fixture
def cli_runner() -> CliRunner:
"""Provide a CLI runner for testing commands."""
return CliRunner()
@pytest.fixture
def temp_dir() -> Generator[Path]:
"""Provide a temporary directory for testing."""
with tempfile.TemporaryDirectory() as tmp_dir:
yield Path(tmp_dir)
@pytest.fixture
def mock_template_dir() -> Generator[Path]:
"""Provide a mock template directory structure."""
template_dir = Path(__file__).parent / "fixtures" / "mock_template"
template_dir.mkdir(parents=True, exist_ok=True)
# Create template spec
spec_file = template_dir / "__template__.yaml"
spec_content = """
variables:
project_name:
prompt: "Project name"
default: "test_project"
description:
prompt: "Description"
default: "Test description"
author:
prompt: "Author"
default: "Test Author"
license:
prompt: "License"
default: "MIT"
include_alembic:
prompt: "Include Alembic?"
choices: ["yes", "no"]
default: "yes"
include_i18n:
prompt: "Include i18n?"
choices: [true, false]
default: true
post_tasks:
- when: "{{ include_alembic == 'yes' }}"
run: ["echo", "alembic_init"]
- when: "{{ include_i18n == true }}"
run: ["echo", "babel_init"]
"""
spec_file.write_text(spec_content)
# Create some template files
(template_dir / "app").mkdir()
(template_dir / "app" / "main.py.j2").write_text(
"from fastapi import FastAPI\n\napp = FastAPI(title='{{ project_name }}')\n"
)
(template_dir / "app" / "config.py.j2").write_text(
"PROJECT_NAME = '{{ project_name }}'\nDESCRIPTION = '{{ description }}'\n"
)
(template_dir / "README.md.j2").write_text(
"# {{ project_name }}\n\n{{ description }}\n\nAuthor: {{ author }}\nLicense: {{ license }}"
)
(template_dir / "pyproject.toml.j2").write_text(
"[project]\nname = '{{ project_name }}'\ndescription = '{{ description }}'"
)
# Create optional modules
(template_dir / "alembic").mkdir()
(template_dir / "alembic" / "alembic.ini.j2").write_text("alembic config for {{ project_name }}")
(template_dir / "locales").mkdir()
(template_dir / "locales" / "en").mkdir()
(template_dir / "locales" / "en" / "LC_MESSAGES").mkdir()
# Create scripts
(template_dir / "scripts").mkdir()
(template_dir / "scripts" / "migrations_generate.sh.j2").write_text(
"#!/bin/bash\necho 'Generate migrations for {{ project_name }}'"
)
(template_dir / "scripts" / "migrations_apply.sh.j2").write_text(
"#!/bin/bash\necho 'Apply migrations for {{ project_name }}'"
)
(template_dir / "scripts" / "babel_init.sh.j2").write_text("#!/bin/bash\necho 'Init Babel for {{ project_name }}'")
(template_dir / "scripts" / "babel_extract.sh.j2").write_text(
"#!/bin/bash\necho 'Extract Babel for {{ project_name }}'"
)
(template_dir / "scripts" / "babel_update.sh.j2").write_text(
"#!/bin/bash\necho 'Update Babel for {{ project_name }}'"
)
(template_dir / "scripts" / "babel_compile.sh.j2").write_text(
"#!/bin/bash\necho 'Compile Babel for {{ project_name }}'"
)
yield template_dir
# Cleanup
shutil.rmtree(template_dir)
@pytest.fixture
def mock_typer_prompt() -> Generator[MagicMock]:
"""Mock typer.prompt to avoid interactive input during tests."""
with patch("quickbot_cli.cli.typer.prompt") as mock_prompt:
mock_prompt.return_value = "test_value"
yield mock_prompt
@pytest.fixture
def mock_typer_secho() -> Generator[MagicMock]:
"""Mock typer.secho to capture output during tests."""
with patch("quickbot_cli.cli.typer.secho") as mock_secho:
yield mock_secho
@pytest.fixture
def mock_typer_echo() -> Generator[MagicMock]:
"""Mock typer.echo to capture output during tests."""
with patch("quickbot_cli.cli.typer.echo") as mock_echo:
yield mock_echo
@pytest.fixture
def mock_subprocess_run() -> Generator[MagicMock]:
"""Mock subprocess.run to avoid actual command execution during tests."""
with patch("quickbot_cli.cli.subprocess.run") as mock_run:
mock_run.return_value = MagicMock(returncode=0)
yield mock_run

30
tests/pytest.ini Normal file

@@ -0,0 +1,30 @@
; SPDX-FileCopyrightText: 2025 Alexander Kalinovsky <a@k8y.ru>
;
; SPDX-License-Identifier: MIT
[pytest]
testpaths = tests
python_files = test_*.py
python_classes = Test*
python_functions = test_*
addopts =
-v
--tb=short
--strict-markers
--disable-warnings
--cov=src/quickbot_cli
--cov-report=term-missing
--cov-report=html
--cov-report=xml
--cov-fail-under=95
--cov-config=tests/pytest.ini
markers =
unit: Unit tests
integration: Integration tests
slow: Slow running tests
cli: CLI specific tests
[coverage:report]
exclude_lines =
^if __name__ == .__main__.:$
^\s*main\(\)\s*$

651
tests/test_cli.py Normal file

@@ -0,0 +1,651 @@
# SPDX-FileCopyrightText: 2025 Alexander Kalinovsky <a@k8y.ru>
#
# SPDX-License-Identifier: MIT
"""Tests for the CLI functionality."""
import inspect
import sys
from pathlib import Path
from unittest.mock import MagicMock, patch
import pytest
import typer
import yaml
from typer.testing import CliRunner
sys.path.insert(0, str(Path(__file__).parent.parent / "src"))
from jinja2 import Environment, FileSystemLoader, StrictUndefined
from quickbot_cli.cli import (
_init_project,
app,
apply_optionals,
ask_variables,
init,
load_template_spec,
main,
render_tree,
run_post_tasks,
)
class TestLoadTemplateSpec:
"""Test template specification loading."""
def test_load_template_spec_with_valid_file(self, temp_dir: Path) -> None:
"""Test loading template spec from a valid YAML file."""
spec_file = temp_dir / "__template__.yaml"
spec_content = {
"variables": {"project_name": {"prompt": "Project name", "default": "test"}},
"post_tasks": [{"run": ["echo", "test"]}],
}
spec_file.write_text(yaml.dump(spec_content))
result = load_template_spec(temp_dir)
assert result == spec_content
def test_load_template_spec_without_file(self, temp_dir: Path) -> None:
"""Test loading template spec when file doesn't exist."""
result = load_template_spec(temp_dir)
assert result == {"variables": {}, "post_tasks": []}
def test_load_template_spec_with_invalid_yaml(self, temp_dir: Path) -> None:
"""Test loading template spec with invalid YAML."""
spec_file = temp_dir / "__template__.yaml"
spec_file.write_text("invalid: yaml: content: [")
with pytest.raises((typer.Exit, Exception)):
load_template_spec(temp_dir)
class TestAskVariables:
"""Test variable prompting and validation."""
def test_ask_variables_with_non_interactive(self) -> None:
"""Test asking variables with non-interactive mode."""
spec = {
"variables": {
"project_name": {"type": "string", "default": "my_project"},
"description": {"type": "string", "default": "A test project"},
}
}
non_interactive = {"project_name": "my_project", "description": "A test project"}
result = ask_variables(spec, non_interactive)
assert result["project_name"] == "my_project"
assert result["description"] == "A test project"
def test_ask_variables_with_choices_validation(self, mock_typer_prompt: MagicMock) -> None:
"""Test asking variables with choices validation."""
spec = {
"variables": {"include_alembic": {"prompt": "Include Alembic?", "choices": ["yes", "no"], "default": "yes"}}
}
non_interactive: dict[str, str] = {}
# Test valid choice
mock_typer_prompt.return_value = "yes"
result = ask_variables(spec, non_interactive)
assert result["include_alembic"] == "yes"
def test_ask_variables_with_invalid_choice(self, mock_typer_prompt: MagicMock) -> None:
"""Test asking variables with invalid choice."""
spec = {
"variables": {"include_alembic": {"prompt": "Include Alembic?", "choices": ["yes", "no"], "default": "yes"}}
}
non_interactive: dict[str, str] = {}
# Test invalid choice
mock_typer_prompt.return_value = "maybe"
with pytest.raises((SystemExit, Exception)):
ask_variables(spec, non_interactive)
def test_ask_variables_with_boolean_choices_true(self, mock_typer_prompt: MagicMock) -> None:
"""Boolean choices should coerce various truthy inputs to True."""
spec = {
"variables": {
"feature_flag": {
"prompt": "Enable feature?",
"choices": [True, False],
"default": True,
}
}
}
non_interactive: dict[str, str] = {}
# Try several truthy inputs
for truthy in [True, "true", "Yes", "Y", "1"]:
mock_typer_prompt.return_value = truthy
result = ask_variables(spec, non_interactive)
assert result["feature_flag"] is True
def test_ask_variables_with_boolean_choices_false(self, mock_typer_prompt: MagicMock) -> None:
"""Boolean choices should coerce various falsy inputs to False."""
spec = {
"variables": {
"feature_flag": {
"prompt": "Enable feature?",
"choices": [True, False],
"default": False,
}
}
}
non_interactive: dict[str, str] = {}
for falsy in [False, "false", "No", "n", "0"]:
mock_typer_prompt.return_value = falsy
result = ask_variables(spec, non_interactive)
assert result["feature_flag"] is False
def test_ask_variables_with_boolean_choices_invalid(self, mock_typer_prompt: MagicMock) -> None:
"""Invalid input for boolean choices should raise SystemExit."""
spec = {
"variables": {
"feature_flag": {
"prompt": "Enable feature?",
"choices": [True, False],
"default": True,
}
}
}
non_interactive: dict[str, str] = {}
mock_typer_prompt.return_value = "maybe"
with pytest.raises((SystemExit, Exception)):
ask_variables(spec, non_interactive)
def test_ask_variables_with_regex_validation(self, mock_typer_prompt: MagicMock) -> None:
"""Test asking variables with regex validation."""
spec = {
"variables": {
"project_name": {"prompt": "Project name", "default": "test", "validate": r"^[a-z_][a-z0-9_]*$"}
}
}
non_interactive: dict[str, str] = {}
# Test valid name
mock_typer_prompt.return_value = "valid_name"
result = ask_variables(spec, non_interactive)
assert result["project_name"] == "valid_name"
# Test invalid name
mock_typer_prompt.return_value = "Invalid-Name"
with pytest.raises((SystemExit, Exception)):
ask_variables(spec, non_interactive)
class TestRenderTree:
"""Test template file rendering."""
def test_render_tree_creates_directories(self, temp_dir: Path) -> None:
"""Test that render_tree creates directories correctly."""
template_root = temp_dir / "template"
template_root.mkdir()
(template_root / "app").mkdir()
(template_root / "app" / "models").mkdir()
output_dir = temp_dir / "output"
context = {"project_name": "test_project"}
env = Environment(loader=FileSystemLoader(str(template_root)), undefined=StrictUndefined, autoescape=True)
render_tree(env, template_root, output_dir, context, overwrite=False)
assert (output_dir / "app" / "models").exists()
assert (output_dir / "app" / "models").is_dir()
def test_render_tree_renders_jinja2_files(self, temp_dir: Path) -> None:
"""Test that render_tree renders Jinja2 template files."""
template_root = temp_dir / "template"
template_root.mkdir()
template_file = template_root / "main.py.j2"
template_file.write_text("app = FastAPI(title='{{ project_name }}')")
output_dir = temp_dir / "output"
context = {"project_name": "test_project"}
env = Environment(loader=FileSystemLoader(str(template_root)), undefined=StrictUndefined, autoescape=True)
render_tree(env, template_root, output_dir, context, overwrite=False)
output_file = output_dir / "main.py"
assert output_file.exists()
assert "app = FastAPI(title='test_project')" in output_file.read_text()
def test_render_tree_renders_regular_files(self, temp_dir: Path) -> None:
"""Test that render_tree renders regular text files."""
template_root = temp_dir / "template"
template_root.mkdir()
template_file = template_root / "README.md.j2"
template_file.write_text("# {{ project_name }}\n\n{{ description }}")
output_dir = temp_dir / "output"
context = {"project_name": "test_project", "description": "Test description"}
env = Environment(loader=FileSystemLoader(str(template_root)), undefined=StrictUndefined, autoescape=True)
render_tree(env, template_root, output_dir, context, overwrite=False)
output_file = output_dir / "README.md"
assert output_file.exists()
assert "# test_project" in output_file.read_text()
assert "Test description" in output_file.read_text()
def test_render_tree_copies_binary_files(self, temp_dir: Path) -> None:
"""Test that render_tree copies binary files without modification."""
template_root = temp_dir / "template"
template_root.mkdir()
# Create a mock binary file
binary_file = template_root / "image.png"
binary_content = b"fake_png_data"
binary_file.write_bytes(binary_content)
output_dir = temp_dir / "output"
context = {"project_name": "test_project"}
env = Environment(loader=FileSystemLoader(str(template_root)), undefined=StrictUndefined, autoescape=True)
render_tree(env, template_root, output_dir, context, overwrite=False)
output_file = output_dir / "image.png"
assert output_file.exists()
assert output_file.read_bytes() == binary_content
def test_render_tree_binary_file_exists_error(self, temp_dir: Path) -> None:
"""Test that render_tree raises error when binary file exists and overwrite is disabled."""
template_root = temp_dir / "template"
template_root.mkdir()
# Create a mock binary file
binary_file = template_root / "image.png"
binary_content = b"fake_png_data"
binary_file.write_bytes(binary_content)
output_dir = temp_dir / "output"
output_dir.mkdir()
# Create existing binary file
existing_file = output_dir / "image.png"
existing_file.write_bytes(b"existing_binary_data")
context = {"project_name": "test_project"}
env = Environment(loader=FileSystemLoader(str(template_root)), undefined=StrictUndefined, autoescape=True)
with pytest.raises(FileExistsError, match="File exists:"):
render_tree(env, template_root, output_dir, context, overwrite=False)
def test_render_tree_with_overwrite_disabled(self, temp_dir: Path) -> None:
"""Test that render_tree raises error when overwrite is disabled and file exists."""
template_root = temp_dir / "template"
template_root.mkdir()
template_file = template_root / "main.py.j2"
template_file.write_text("app = FastAPI(title='{{ project_name }}')")
output_dir = temp_dir / "output"
output_dir.mkdir()
# Create existing file
existing_file = output_dir / "main.py"
existing_file.write_text("existing content")
context = {"project_name": "test_project"}
env = Environment(loader=FileSystemLoader(str(template_root)), undefined=StrictUndefined, autoescape=True)
with pytest.raises(FileExistsError):
render_tree(env, template_root, output_dir, context, overwrite=False)
def test_render_tree_with_overwrite_enabled(self, temp_dir: Path) -> None:
"""Test that render_tree overwrites existing files when enabled."""
template_root = temp_dir / "template"
template_root.mkdir()
template_file = template_root / "main.py.j2"
template_file.write_text("app = FastAPI(title='{{ project_name }}')")
output_dir = temp_dir / "output"
output_dir.mkdir()
# Create existing file
existing_file = output_dir / "main.py"
existing_file.write_text("existing content")
context = {"project_name": "test_project"}
env = Environment(loader=FileSystemLoader(str(template_root)), undefined=StrictUndefined, autoescape=True)
render_tree(env, template_root, output_dir, context, overwrite=True)
output_file = output_dir / "main.py"
assert output_file.exists()
assert "app = FastAPI(title='test_project')" in output_file.read_text()
class TestRunPostTasks:
"""Test post-task execution."""
def test_run_post_tasks_with_conditions(self, temp_dir: Path) -> None:
"""Test running post tasks with conditional execution."""
spec = {
"post_tasks": [
{"when": "{{ include_alembic }}", "run": ["echo", "alembic_init"]},
{"when": "{{ include_i18n }}", "run": ["echo", "babel_init"]},
]
}
context = {"include_alembic": True, "include_i18n": False}
cwd = temp_dir / "test_cwd"
cwd.mkdir(parents=True, exist_ok=True)
run_post_tasks(spec, context, cwd)
def test_run_post_tasks_without_conditions(self, temp_dir: Path) -> None:
"""Test running post tasks without conditions."""
spec = {
"post_tasks": [{"run": ["echo", "hello"]}, {"run": ["command", "hello"]}, {"run": ["command", "world"]}]
}
context: dict[str, str] = {}
cwd = temp_dir / "test_cwd"
cwd.mkdir(parents=True, exist_ok=True)
run_post_tasks(spec, context, cwd)
def test_run_post_tasks_with_subprocess_error_continues(self, temp_dir: Path) -> None:
"""Test that post task errors don't stop execution."""
# This test verifies that subprocess errors don't stop execution
# The actual error handling is tested in the main run_post_tasks function
class TestApplyOptionals:
"""Test optional module inclusion/exclusion."""
def test_apply_optionals_disables_alembic(self, temp_dir: Path) -> None:
"""Test that apply_optionals removes alembic files when disabled."""
# Create alembic files
alembic_dir = temp_dir / "alembic"
alembic_dir.mkdir()
(alembic_dir / "alembic.ini").write_text("config")
scripts_dir = temp_dir / "scripts"
scripts_dir.mkdir()
(scripts_dir / "migrations_generate.sh").write_text("script")
(scripts_dir / "migrations_apply.sh").write_text("script")
apply_optionals(temp_dir, include_alembic=False, include_i18n=True)
assert not alembic_dir.exists()
assert not (scripts_dir / "migrations_generate.sh").exists()
assert not (scripts_dir / "migrations_apply.sh").exists()
def test_apply_optionals_disables_babel(self, temp_dir: Path) -> None:
"""Test that apply_optionals removes babel files when disabled."""
# Create babel files
locales_dir = temp_dir / "locales"
locales_dir.mkdir()
(locales_dir / "en").mkdir()
scripts_dir = temp_dir / "scripts"
scripts_dir.mkdir()
(scripts_dir / "babel_init.sh").write_text("script")
(scripts_dir / "babel_extract.sh").write_text("script")
(scripts_dir / "babel_update.sh").write_text("script")
(scripts_dir / "babel_compile.sh").write_text("script")
apply_optionals(temp_dir, include_alembic=True, include_i18n=False)
assert not locales_dir.exists()
assert not (scripts_dir / "babel_init.sh").exists()
assert not (scripts_dir / "babel_extract.sh").exists()
assert not (scripts_dir / "babel_update.sh").exists()
assert not (scripts_dir / "babel_compile.sh").exists()
def test_apply_optionals_keeps_enabled_modules(self, temp_dir: Path) -> None:
"""Test that apply_optionals keeps files for enabled modules."""
# Create both module files
alembic_dir = temp_dir / "alembic"
alembic_dir.mkdir()
(alembic_dir / "alembic.ini").write_text("config")
locales_dir = temp_dir / "locales"
locales_dir.mkdir()
(locales_dir / "en").mkdir()
apply_optionals(temp_dir, include_alembic=True, include_i18n=True)
assert alembic_dir.exists()
assert locales_dir.exists()
class TestInitCommand:
"""Test the main init command."""
def test_init_command_success(
self,
temp_dir: Path,
) -> None:
"""Test successful project initialization."""
with patch("quickbot_cli.cli.TEMPLATES_DIR", temp_dir / "templates"):
# Create template structure
template_dir = temp_dir / "templates" / "basic"
template_dir.mkdir(parents=True)
# Create template spec
spec_file = template_dir / "__template__.yaml"
spec_file.write_text("variables:\n project_name:\n prompt: Project name\n default: test_project")
# Create template files
(template_dir / "app").mkdir()
(template_dir / "app" / "main.py.j2").write_text("app = FastAPI(title='{{ project_name }}')")
# Test the init function directly instead of through CLI
output_path = temp_dir / "output"
_init_project(output_path, "basic")
def test_init_command_with_template_not_found(self, temp_dir: Path) -> None:
"""Test init command when template is not found."""
with patch("quickbot_cli.cli.TEMPLATES_DIR", temp_dir / "templates"):
output_path = temp_dir / "output"
with pytest.raises(FileNotFoundError, match="Template 'nonexistent' not found"):
_init_project(output_path, "nonexistent")
def test_init_command_with_template_not_found_error_message(self, temp_dir: Path) -> None:
"""Test that template not found shows the correct error message."""
with patch("quickbot_cli.cli.TEMPLATES_DIR", temp_dir / "templates"):
output_path = temp_dir / "output"
with pytest.raises(FileNotFoundError, match="Template 'nonexistent' not found"):
_init_project(output_path, "nonexistent")
# The function now raises FileNotFoundError directly, so no typer.secho call
# This test verifies the exception is raised with the correct message
def test_init_command_with_non_interactive_options(
self,
temp_dir: Path,
) -> None:
"""Test init command with non-interactive options."""
with patch("quickbot_cli.cli.TEMPLATES_DIR", temp_dir / "templates"):
# Create template structure
template_dir = temp_dir / "templates" / "basic"
template_dir.mkdir(parents=True)
# Create template spec
spec_file = template_dir / "__template__.yaml"
spec_file.write_text("variables:\n project_name:\n prompt: Project name\n default: test_project")
# Create template files
(template_dir / "app").mkdir()
(template_dir / "app" / "main.py.j2").write_text("app = FastAPI(title='{{ project_name }}')")
# Test the init function directly instead of through CLI
output_path = temp_dir / "output"
_init_project(
output_path,
"basic",
project_name="my_project",
description="A test project",
author="Test Author",
license_name="MIT",
include_alembic=True,
include_i18n=False,
overwrite=False,
)
def test_init_command_with_overwrite(
self,
temp_dir: Path,
) -> None:
"""Test init command with overwrite flag."""
with patch("quickbot_cli.cli.TEMPLATES_DIR", temp_dir / "templates"):
# Create template structure
template_dir = temp_dir / "templates" / "basic"
template_dir.mkdir(parents=True)
# Create template spec
spec_file = template_dir / "__template__.yaml"
spec_file.write_text("variables:\n project_name:\n prompt: Project name\n default: test_project")
# Create template files
(template_dir / "app").mkdir()
(template_dir / "app" / "main.py.j2").write_text("app = FastAPI(title='{{ project_name }}')")
# Test the init function directly instead of through CLI
# Call init function directly with overwrite
output_path = temp_dir / "output"
_init_project(output_path, "basic", overwrite=True)
def test_cli_boolean_flags_defaults_and_negation(self, temp_dir: Path) -> None:
"""init() should honor boolean defaults and negation when called directly."""
with patch("quickbot_cli.cli.TEMPLATES_DIR", temp_dir / "templates"):
template_dir = temp_dir / "templates" / "basic"
template_dir.mkdir(parents=True)
# Minimal spec and files
(template_dir / "__template__.yaml").write_text(
"variables:\n project_name:\n prompt: P\n default: test_project\n"
)
(template_dir / "app").mkdir()
(template_dir / "app" / "main.py.j2").write_text("ok")
(template_dir / "alembic").mkdir()
(template_dir / "alembic" / "alembic.ini.j2").write_text("a")
(template_dir / "locales").mkdir()
(template_dir / "locales" / "en").mkdir(parents=True, exist_ok=True)
(template_dir / "scripts").mkdir()
(template_dir / "scripts" / "babel_init.sh.j2").write_text("b")
# Default (both enabled)
out1 = temp_dir / "out1"
init(output=out1, template="basic")
assert (out1 / "alembic").exists()
assert (out1 / "locales").exists()
# Disable alembic
out2 = temp_dir / "out2"
init(output=out2, template="basic", include_alembic=False)
assert not (out2 / "alembic").exists()
assert (out2 / "locales").exists()
# Disable i18n
out3 = temp_dir / "out3"
init(output=out3, template="basic", include_i18n=False)
assert (out3 / "alembic").exists()
assert not (out3 / "locales").exists()
class TestCLIHelp:
"""Test CLI help and argument parsing."""
def test_cli_help(self, cli_runner: CliRunner) -> None:
"""Test that CLI shows help information."""
# Test the actual CLI interface
result = cli_runner.invoke(app, ["--help"])
assert result.exit_code == 0
# Check for the actual help text that appears
assert "init [OPTIONS] OUTPUT" in result.output
def test_init_command_help(self, cli_runner: CliRunner) -> None:
"""Test that init command shows help information."""
# Test the actual CLI interface
result = cli_runner.invoke(app, ["init", "--help"])
assert result.exit_code == 0
# Check for the actual help text that appears
assert "OUTPUT" in result.output
assert "PATH" in result.output
def test_init_command_arguments(self, cli_runner: CliRunner) -> None:
"""Test that init command accepts required arguments."""
# Test the actual CLI interface
result = cli_runner.invoke(app, ["init", "--help"])
assert result.exit_code == 0
assert "OUTPUT" in result.output
def test_cli_wrapper_function(self) -> None:
"""Test that the CLI wrapper function exists and is callable."""
# Verify the function exists and is callable
assert callable(init)
# Check that it has the expected signature
sig = inspect.signature(init)
assert "output" in sig.parameters
assert "template" in sig.parameters
def test_main_function(self) -> None:
"""Test that the main function exists and is callable."""
assert callable(main)
def test_cli_command_execution(self) -> None:
"""Test that the CLI wrapper function has the correct signature and behavior."""
# Test that the function exists and has the right signature
assert callable(init)
# Check the function signature
sig = inspect.signature(init)
# Verify all expected parameters are present
expected_params = [
"output",
"template",
"project_name",
"description",
"author",
"license_name",
"include_alembic",
"include_i18n",
"overwrite",
]
for param in expected_params:
assert param in sig.parameters
# Test that the function is properly decorated as a Typer command
# We can't easily test the full execution due to Typer decorators,
# but we can verify the function structure
assert hasattr(init, "__name__")
assert init.__name__ == "init"
class TestCLIOverwriteParsing:
"""Test overwrite string parsing through the init function (covers conversion)."""
def test_overwrite_true_converted_to_bool(self, tmp_path: Path) -> None:
"""Test that overwrite True is passed to _init_project."""
output_dir = tmp_path / "output"
with patch("quickbot_cli.cli._init_project") as mock_init:
# Call the function directly to exercise conversion logic
init(
output=output_dir,
template="basic",
overwrite=True,
)
mock_init.assert_called_once()
kwargs = mock_init.call_args.kwargs
assert kwargs["overwrite"] is True
def test_overwrite_false_converted_to_bool(self, tmp_path: Path) -> None:
"""Test that overwrite False is passed to _init_project."""
output_dir = tmp_path / "output"
with patch("quickbot_cli.cli._init_project") as mock_init:
init(
output=output_dir,
template="basic",
overwrite=False,
)
kwargs = mock_init.call_args.kwargs
assert kwargs["overwrite"] is False

458
tests/test_edge_cases.py Normal file

@@ -0,0 +1,458 @@
# SPDX-FileCopyrightText: 2025 Alexander Kalinovsky <a@k8y.ru>
#
# SPDX-License-Identifier: MIT
"""Tests for edge cases and error handling in the CLI."""
import sys
from importlib.metadata import PackageNotFoundError
from pathlib import Path
from subprocess import CalledProcessError
from typing import Any
from unittest.mock import MagicMock, patch
import pytest
import typer
sys.path.insert(0, str(Path(__file__).parent.parent / "src"))
import jinja2
from jinja2 import Environment, FileSystemLoader, StrictUndefined
from quickbot_cli.cli import (
apply_optionals,
ask_variables,
load_template_spec,
render_tree,
run_post_tasks,
)


class TestEdgeCases:
"""Test edge cases and error handling."""
def test_load_template_spec_with_empty_file(self, temp_dir: Path) -> None:
"""Test loading template spec from an empty file."""
spec_file = temp_dir / "__template__.yaml"
spec_file.write_text("")
result = load_template_spec(temp_dir)
assert result == {"variables": {}, "post_tasks": []}
def test_load_template_spec_with_malformed_yaml(self, temp_dir: Path) -> None:
"""Test loading template spec with malformed YAML."""
spec_file = temp_dir / "__template__.yaml"
spec_file.write_text("variables:\n - invalid: list: structure:")
with pytest.raises((typer.Exit, Exception)):
load_template_spec(temp_dir)
def test_ask_variables_with_empty_spec(self, mock_typer_prompt: MagicMock) -> None:
"""Test asking variables with empty specification."""
spec: dict[str, Any] = {"variables": {}}
non_interactive: dict[str, Any] = {}
result = ask_variables(spec, non_interactive)
assert result == {"package_name": "app"}
mock_typer_prompt.assert_not_called()
def test_ask_variables_with_none_values(self, mock_typer_prompt: MagicMock) -> None:
"""Test asking variables with None values in non_interactive."""
spec: dict[str, Any] = {"variables": {"project_name": {"prompt": "Project name", "default": "default"}}}
non_interactive: dict[str, Any] = {"project_name": None}
mock_typer_prompt.return_value = "prompted_value"
result = ask_variables(spec, non_interactive)
assert result["project_name"] == "prompted_value"
mock_typer_prompt.assert_called_once()
def test_ask_variables_with_empty_string_choices(self, mock_typer_prompt: MagicMock) -> None:
"""Test asking variables with empty string choices."""
spec: dict[str, Any] = {
"variables": {
"choice_var": {"prompt": "Choose option", "choices": ["", "option1", "option2"], "default": "option1"}
}
}
non_interactive: dict[str, Any] = {}
# Test empty string choice
mock_typer_prompt.return_value = ""
result = ask_variables(spec, non_interactive)
assert result["choice_var"] == ""
def test_ask_variables_with_complex_regex_validation(self, mock_typer_prompt: MagicMock) -> None:
"""Test asking variables with complex regex validation."""
spec: dict[str, Any] = {
"variables": {
"email": {
"prompt": "Email",
"default": "test@example.com",
"validate": r"^[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}$",
}
}
}
non_interactive: dict[str, Any] = {}
# Test valid email
mock_typer_prompt.return_value = "user@domain.com"
result = ask_variables(spec, non_interactive)
assert result["email"] == "user@domain.com"
# Test invalid email
mock_typer_prompt.return_value = "invalid-email"
with pytest.raises((SystemExit, Exception)):
ask_variables(spec, non_interactive)
def test_render_tree_with_empty_template(self, temp_dir: Path) -> None:
"""Test rendering tree with empty template directory."""
template_root = temp_dir / "template"
template_root.mkdir()
output_dir = temp_dir / "output"
context: dict[str, Any] = {"project_name": "test"}
env = Environment(loader=FileSystemLoader(str(template_root)), undefined=StrictUndefined, autoescape=True)
# Should not raise any errors
render_tree(env, template_root, output_dir, context, overwrite=False)
        # Output directory should exist and contain no rendered files
assert output_dir.exists()
assert len(list(output_dir.iterdir())) == 0
def test_render_tree_with_hidden_files(self, temp_dir: Path) -> None:
"""Test rendering tree with hidden files."""
template_root = temp_dir / "template"
template_root.mkdir()
# Create hidden files
(template_root / ".gitignore.j2").write_text("*.pyc\n__pycache__/")
(template_root / ".env.j2").write_text("DEBUG={{ debug_mode }}")
output_dir = temp_dir / "output"
context: dict[str, Any] = {"debug_mode": "true"}
env = Environment(loader=FileSystemLoader(str(template_root)), undefined=StrictUndefined, autoescape=True)
render_tree(env, template_root, output_dir, context, overwrite=False)
# Hidden files should be rendered
assert (output_dir / ".gitignore").exists()
assert (output_dir / ".env").exists()
assert "DEBUG=true" in (output_dir / ".env").read_text()
def test_render_tree_with_nested_directories(self, temp_dir: Path) -> None:
"""Test rendering tree with deeply nested directory structure."""
template_root = temp_dir / "template"
template_root.mkdir()
# Create deeply nested structure
(template_root / "app" / "models" / "database" / "schemas").mkdir(parents=True)
(template_root / "app" / "models" / "database" / "schemas" / "user.py.j2").write_text(
"class User:\n name = '{{ project_name }}_user'"
)
output_dir = temp_dir / "output"
context: dict[str, Any] = {"project_name": "deep_nest"}
env = Environment(loader=FileSystemLoader(str(template_root)), undefined=StrictUndefined, autoescape=True)
render_tree(env, template_root, output_dir, context, overwrite=False)
# Deep structure should be preserved
output_file = output_dir / "app" / "models" / "database" / "schemas" / "user.py"
assert output_file.exists()
assert "class User:" in output_file.read_text()
assert "deep_nest_user" in output_file.read_text()
def test_render_tree_with_binary_file_extension_case(self, temp_dir: Path) -> None:
"""Test rendering tree with case-sensitive binary file extensions."""
template_root = temp_dir / "template"
template_root.mkdir()
# Create files with different case extensions
(template_root / "image.PNG").write_bytes(b"fake_png_data")
(template_root / "document.PDF").write_bytes(b"fake_pdf_data")
(template_root / "archive.ZIP").write_bytes(b"fake_zip_data")
output_dir = temp_dir / "output"
context: dict[str, Any] = {"project_name": "case_test"}
env = Environment(loader=FileSystemLoader(str(template_root)), undefined=StrictUndefined, autoescape=True)
render_tree(env, template_root, output_dir, context, overwrite=False)
# Case-sensitive extensions should be copied
assert (output_dir / "image.PNG").exists()
assert (output_dir / "document.PDF").exists()
assert (output_dir / "archive.ZIP").exists()
def test_run_post_tasks_with_empty_list(self, temp_dir: Path) -> None:
"""Test running post tasks with empty list."""
spec: dict[str, Any] = {"post_tasks": []}
context: dict[str, Any] = {}
cwd = temp_dir / "test_cwd"
run_post_tasks(spec, context, cwd)
def test_run_post_tasks_with_none(self, temp_dir: Path) -> None:
"""Test running post tasks with None value."""
spec: dict[str, Any] = {"post_tasks": None}
context: dict[str, Any] = {}
cwd = temp_dir / "test_cwd"
run_post_tasks(spec, context, cwd)
def test_run_post_tasks_with_missing_run_key(self, temp_dir: Path) -> None:
"""Test running post tasks with missing run key."""
spec: dict[str, Any] = {"post_tasks": [{"when": "{{ true }}", "description": "Task without run key"}]}
context: dict[str, Any] = {"true": True}
cwd = temp_dir / "test_cwd"
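        # No command to execute, so run_post_tasks is expected to return without raising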
run_post_tasks(spec, context, cwd)
def test_run_post_tasks_with_complex_condition(self, temp_dir: Path, mock_subprocess_run: MagicMock) -> None:
"""Test running post tasks with complex conditional logic."""
spec: dict[str, Any] = {
"post_tasks": [
{
"when": "{{ include_alembic == 'yes' and database_type == 'postgresql' }}",
"run": ["alembic", "init", "postgresql"],
}
]
}
context: dict[str, Any] = {"include_alembic": "yes", "database_type": "postgresql"}
cwd = temp_dir / "test_cwd"
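        # Both conditions are true for this context, so the command should run exactly once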
run_post_tasks(spec, context, cwd)
mock_subprocess_run.assert_called_once_with(["alembic", "init", "postgresql"], cwd=cwd, check=True)
def test_run_post_tasks_with_false_condition(self, temp_dir: Path, mock_subprocess_run: MagicMock) -> None:
"""Test running post tasks with false condition."""
spec: dict[str, Any] = {"post_tasks": [{"when": "{{ include_alembic == 'yes' }}", "run": ["alembic", "init"]}]}
context: dict[str, Any] = {"include_alembic": "no"}
cwd = temp_dir / "test_cwd"
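        # The condition renders falsy for this context, so no command should be executed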
run_post_tasks(spec, context, cwd)
mock_subprocess_run.assert_not_called()
def test_apply_optionals_with_missing_directories(self, temp_dir: Path) -> None:
"""Test apply_optionals with missing directories."""
# Don't create any directories, just test the function
apply_optionals(temp_dir, include_alembic=False, include_i18n=False)
# Should not raise any errors
assert True
def test_apply_optionals_with_partial_structure(self, temp_dir: Path) -> None:
"""Test apply_optionals with partial directory structure."""
# Create only some of the expected directories
(temp_dir / "alembic").mkdir()
(temp_dir / "scripts").mkdir()
(temp_dir / "scripts" / "migrations_generate.sh").write_text("script")
# Don't create locales or babel scripts
apply_optionals(temp_dir, include_alembic=False, include_i18n=True)
# Alembic should be removed
assert not (temp_dir / "alembic").exists()
assert not (temp_dir / "scripts" / "migrations_generate.sh").exists()
def test_apply_optionals_with_files_instead_of_directories(self, temp_dir: Path) -> None:
"""Test apply_optionals with files instead of directories."""
# Create files with names that match expected directories
(temp_dir / "alembic").write_text("not a directory")
(temp_dir / "locales").write_text("not a directory")
apply_optionals(temp_dir, include_alembic=False, include_i18n=False)
# Files should be removed
assert not (temp_dir / "alembic").exists()
assert not (temp_dir / "locales").exists()


class TestErrorHandling:
"""Test error handling scenarios."""
def test_ask_variables_with_invalid_choice_raises_system_exit(self, mock_typer_prompt: MagicMock) -> None:
"""Test that invalid choice raises SystemExit."""
spec: dict[str, Any] = {
"variables": {"choice_var": {"prompt": "Choose option", "choices": ["yes", "no"], "default": "yes"}}
}
non_interactive: dict[str, Any] = {}
mock_typer_prompt.return_value = "maybe"
with pytest.raises((SystemExit, Exception)):
ask_variables(spec, non_interactive)
def test_ask_variables_with_invalid_regex_raises_system_exit(self, mock_typer_prompt: MagicMock) -> None:
"""Test that invalid regex validation raises SystemExit."""
spec: dict[str, Any] = {
"variables": {
"project_name": {"prompt": "Project name", "default": "test", "validate": r"^[a-z_][a-z0-9_]*$"}
}
}
non_interactive: dict[str, Any] = {}
mock_typer_prompt.return_value = "Invalid-Name"
with pytest.raises((SystemExit, Exception)):
ask_variables(spec, non_interactive)
def test_render_tree_with_file_exists_error(self, temp_dir: Path) -> None:
"""Test that render_tree raises FileExistsError when overwrite is disabled."""
template_root = temp_dir / "template"
template_root.mkdir()
template_file = template_root / "main.py.j2"
template_file.write_text("app = FastAPI(title='{{ project_name }}')")
output_dir = temp_dir / "output"
output_dir.mkdir()
# Create existing file
existing_file = output_dir / "main.py"
existing_file.write_text("existing content")
context: dict[str, Any] = {"project_name": "test"}
        env = Environment(loader=FileSystemLoader(str(template_root)), undefined=StrictUndefined, autoescape=True)
with pytest.raises(FileExistsError):
render_tree(env, template_root, output_dir, context, overwrite=False)
def test_render_tree_with_jinja_render_error(self, temp_dir: Path, mock_typer_secho: MagicMock) -> None:
"""Test that render_tree logs and re-raises when Jinja rendering fails."""
template_root = temp_dir / "template"
template_root.mkdir()
# Invalid Jinja to force a render error
template_file = template_root / "broken.py.j2"
template_file.write_text("{{ undefined_var | unknown_filter }}")
output_dir = temp_dir / "output"
context: dict[str, Any] = {}
env = Environment(loader=FileSystemLoader(str(template_root)), undefined=StrictUndefined, autoescape=True)
with pytest.raises((jinja2.exceptions.UndefinedError, jinja2.exceptions.TemplateError)):
render_tree(env, template_root, output_dir, context, overwrite=False)
# Ensure error was logged
assert mock_typer_secho.call_count >= 1
def test_run_post_tasks_with_subprocess_error_continues(
self, temp_dir: Path, mock_subprocess_run: MagicMock
) -> None:
"""Test that post task errors don't stop execution."""
mock_subprocess_run.side_effect = [
CalledProcessError(1, ["echo", "error1"]),
MagicMock(returncode=0),
]
spec = {"post_tasks": [{"run": ["echo", "error1"]}, {"run": ["echo", "success"]}]}
context: dict[str, Any] = {}
cwd = temp_dir / "test_cwd"
# Should not raise exception
run_post_tasks(spec, context, cwd)
# Both tasks should be attempted
expected_task_count = 2
assert mock_subprocess_run.call_count == expected_task_count


class TestBoundaryConditions:
"""Test boundary conditions and limits."""
def test_ask_variables_with_very_long_input(self, mock_typer_prompt: MagicMock) -> None:
"""Test asking variables with very long input values."""
long_string_length = 10000 # 10KB string
long_value = "a" * long_string_length
spec = {"variables": {"long_var": {"prompt": "Long variable", "default": "default"}}}
non_interactive: dict[str, Any] = {}
mock_typer_prompt.return_value = long_value
result = ask_variables(spec, non_interactive)
assert result["long_var"] == long_value
assert len(result["long_var"]) == long_string_length
def test_render_tree_with_very_deep_nesting(self, temp_dir: Path) -> None:
"""Test rendering tree with very deep directory nesting."""
template_root = temp_dir / "template"
template_root.mkdir()
# Create deep nesting (10 levels to avoid filesystem limits)
current = template_root
for i in range(10):
current = current / f"level_{i}"
current.mkdir()
# Add a file at the deepest level
(current / "deep_file.py.j2").write_text("print('{{ project_name }}')")
output_dir = temp_dir / "output"
context = {"project_name": "deep_test"}
env = Environment(loader=FileSystemLoader(str(template_root)), undefined=StrictUndefined, autoescape=True)
# Should not raise any errors
render_tree(env, template_root, output_dir, context, overwrite=False)
# Deep structure should be preserved
deep_file = (
output_dir
/ "level_0"
/ "level_1"
/ "level_2"
/ "level_3"
/ "level_4"
/ "level_5"
/ "level_6"
/ "level_7"
/ "level_8"
/ "level_9"
/ "deep_file.py"
)
assert deep_file.exists()
def test_apply_optionals_with_many_files(self, temp_dir: Path) -> None:
"""Test apply_optionals with many files to process."""
# Create many files
for i in range(100):
(temp_dir / f"file_{i}.txt").write_text(f"content {i}")
# Create the expected structure
(temp_dir / "alembic").mkdir()
(temp_dir / "locales").mkdir()
# Should not raise any errors
apply_optionals(temp_dir, include_alembic=False, include_i18n=False)
# Only the specific optional modules should be removed, not the random files
assert not (temp_dir / "alembic").exists()
assert not (temp_dir / "locales").exists()
# Random files should still exist
assert (temp_dir / "file_0.txt").exists()
assert (temp_dir / "file_50.txt").exists()


class TestPackageVersion:
"""Test package version handling."""
def test_package_version_fallback(self) -> None:
"""Test that package version falls back to '0.0.0' when package not found."""
# Test the fallback logic by patching the version function
with patch("importlib.metadata.version") as mock_version:
mock_version.side_effect = PackageNotFoundError("Package not found")
# Import the module fresh to see the effect
if "quickbot_cli" in sys.modules:
del sys.modules["quickbot_cli"]
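            # The fresh import calls version() again, which now raises, so __version__ falls back to "0.0.0"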
# Now check the version
import quickbot_cli # noqa: PLC0415
assert quickbot_cli.__version__ == "0.0.0"

318
tests/test_integration.py Normal file

@@ -0,0 +1,318 @@
# SPDX-FileCopyrightText: 2025 Alexander Kalinovsky <a@k8y.ru>
#
# SPDX-License-Identifier: MIT
"""Integration tests for the CLI functionality."""
import sys
import tempfile
from pathlib import Path
from typing import Any
from unittest.mock import MagicMock, patch
import pytest
# Add src to path before importing the package under test
sys.path.insert(0, str(Path(__file__).parent.parent / "src"))

from quickbot_cli.cli import _init_project


class TestCLIIntegration:
"""Integration tests for the CLI."""
def _create_template_spec(self) -> dict[str, Any]:
"""Create a comprehensive template specification."""
return {
"variables": {
"project_name": {"prompt": "Project name", "default": "test_bot"},
"description": {"prompt": "Description", "default": "A test bot"},
"author": {"prompt": "Author", "default": "Test Author"},
"license": {"prompt": "License", "default": "MIT"},
"include_alembic": {"prompt": "Include Alembic?", "choices": ["yes", "no"], "default": "yes"},
"include_i18n": {"prompt": "Include i18n?", "choices": ["yes", "no"], "default": "yes"},
},
"post_tasks": [
{"when": "{{ include_alembic }}", "run": ["echo", "alembic_initialized"]},
{"when": "{{ include_i18n }}", "run": ["echo", "babel_initialized"]},
],
}
def _create_template_files(self, template_dir: Path) -> None:
"""Create template files in the template directory."""
# Create app files
(template_dir / "app").mkdir()
(template_dir / "app" / "main.py.j2").write_text(
"from fastapi import FastAPI\n\n"
"app = FastAPI(title='{{ project_name }}', description='{{ description }}')\n\n"
"if __name__ == '__main__':\n"
" import uvicorn\n"
" uvicorn.run(app, host='0.0.0.0', port=8000)\n"
)
(template_dir / "app" / "config.py.j2").write_text(
"PROJECT_NAME = '{{ project_name }}'\n"
"DESCRIPTION = '{{ description }}'\n"
"AUTHOR = '{{ author }}'\n"
"LICENSE = '{{ license }}'\n"
)
# Create root files
(template_dir / "README.md.j2").write_text(
"# {{ project_name }}\n\n{{ description }}\n\n## Author\n{{ author }}\n\n## License\n{{ license }}\n"
)
(template_dir / "pyproject.toml.j2").write_text(
"[project]\n"
"name = '{{ project_name }}'\n"
"description = '{{ description }}'\n"
"authors = [{name = '{{ author }}'}]\n"
"license = {text = '{{ license }}'}\n"
)
def _create_optional_modules(self, template_dir: Path) -> None:
"""Create optional module files."""
# Create Alembic files
(template_dir / "alembic").mkdir()
(template_dir / "alembic" / "alembic.ini.j2").write_text(
"[alembic]\n"
"script_location = alembic\n"
"sqlalchemy.url = postgresql://user:pass@localhost/{{ project_name }}\n"
)
# Create Babel files
(template_dir / "locales").mkdir()
(template_dir / "locales" / "en").mkdir()
(template_dir / "locales" / "en" / "LC_MESSAGES").mkdir()
(template_dir / "locales" / "en" / "LC_MESSAGES" / "messages.po.j2").write_text(
'msgid ""\n'
'msgstr ""\n'
'"Project-Id-Version: {{ project_name }}\\n"\n'
'"Report-Msgid-Bugs-To: \\n"\n'
'"POT-Creation-Date: 2024-01-01 00:00+0000\\n"\n'
'"PO-Revision-Date: 2024-01-01 00:00+0000\\n"\n'
'"Last-Translator: {{ author }}\\n"\n'
'"Language: en\\n"\n'
'"MIME-Version: 1.0\\n"\n'
'"Content-Type: text/plain; charset=UTF-8\\n"\n'
'"Content-Transfer-Encoding: 8bit\\n"\n'
)
# Create scripts
(template_dir / "scripts").mkdir()
(template_dir / "scripts" / "migrations_generate.sh.j2").write_text(
"#!/bin/bash\n"
"echo 'Generate migrations for {{ project_name }}'\n"
"alembic revision --autogenerate -m 'Auto-generated migration'\n"
)
(template_dir / "scripts" / "babel_init.sh.j2").write_text(
"#!/bin/bash\n"
"echo 'Initialize Babel for {{ project_name }}'\n"
"pybabel init -i messages.pot -d locales -l en\n"
)
def _verify_output_structure(self, output_dir: Path) -> None:
"""Verify the output directory structure and content."""
# Check main app files
assert (output_dir / "app" / "main.py").exists()
assert (output_dir / "app" / "config.py").exists()
assert (output_dir / "README.md").exists()
assert (output_dir / "pyproject.toml").exists()
# Check rendered content
main_py = (output_dir / "app" / "main.py").read_text()
assert "app = FastAPI(title='my_awesome_bot'" in main_py
assert "description='My awesome bot description'" in main_py
config_py = (output_dir / "app" / "config.py").read_text()
assert "PROJECT_NAME = 'my_awesome_bot'" in config_py
assert "AUTHOR = 'John Doe'" in config_py
assert "LICENSE = 'Apache-2.0'" in config_py
readme = (output_dir / "README.md").read_text()
assert "# my_awesome_bot" in readme
assert "My awesome bot description" in readme
assert "John Doe" in readme
assert "Apache-2.0" in readme
# Check optional modules (should be included)
assert (output_dir / "alembic" / "alembic.ini").exists()
assert (output_dir / "locales" / "en" / "LC_MESSAGES" / "messages.po").exists()
assert (output_dir / "scripts" / "migrations_generate.sh").exists()
assert (output_dir / "scripts" / "babel_init.sh").exists()
def test_full_project_generation(self) -> None:
"""Test full project generation with all components."""
with tempfile.TemporaryDirectory() as tmp_dir:
tmp_path = Path(tmp_dir)
# Mock the templates directory
with patch("quickbot_cli.cli.TEMPLATES_DIR", tmp_path / "templates"):
template_dir = tmp_path / "templates" / "basic"
template_dir.mkdir(parents=True)
# Create template spec and files
spec_content = self._create_template_spec()
spec_file = template_dir / "__template__.yaml"
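                # The dict's repr is valid YAML flow-mapping syntax here, so the spec loader can parse it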
spec_file.write_text(str(spec_content))
self._create_template_files(template_dir)
self._create_optional_modules(template_dir)
# Mock subprocess.run to avoid actual command execution
with patch("quickbot_cli.cli.subprocess.run") as mock_run:
mock_run.return_value = MagicMock(returncode=0)
output_path = tmp_path / "output"
# Call init function directly with options
_init_project(
output_path,
"basic",
project_name="my_awesome_bot",
description="My awesome bot description",
author="John Doe",
license_name="Apache-2.0",
include_alembic=True,
include_i18n=True,
)
# Verify output
output_dir = tmp_path / "output"
assert output_dir.exists()
self._verify_output_structure(output_dir)
# Check that post tasks were called
expected_post_tasks_count = 2
assert mock_run.call_count == expected_post_tasks_count
mock_run.assert_any_call(["echo", "alembic_initialized"], cwd=output_dir, check=True)
mock_run.assert_any_call(["echo", "babel_initialized"], cwd=output_dir, check=True)
def test_project_generation_with_disabled_modules(self) -> None:
"""Test project generation with optional modules disabled."""
with tempfile.TemporaryDirectory() as tmp_dir:
tmp_path = Path(tmp_dir)
# Mock the templates directory
with patch("quickbot_cli.cli.TEMPLATES_DIR", tmp_path / "templates"):
template_dir = tmp_path / "templates" / "basic"
template_dir.mkdir(parents=True)
# Create template spec
spec_content = {
"variables": {
"project_name": {"prompt": "Project name", "default": "simple_bot"},
"include_alembic": {"prompt": "Include Alembic?", "choices": ["yes", "no"], "default": "no"},
"include_i18n": {"prompt": "Include i18n?", "choices": ["yes", "no"], "default": "no"},
}
}
spec_file = template_dir / "__template__.yaml"
spec_file.write_text(str(spec_content))
# Create template files
(template_dir / "app").mkdir()
(template_dir / "app" / "main.py.j2").write_text(
"from fastapi import FastAPI\n\napp = FastAPI(title='{{ project_name }}')\n"
)
# Create optional modules (should be removed)
(template_dir / "alembic").mkdir()
(template_dir / "alembic" / "alembic.ini.j2").write_text("alembic config")
(template_dir / "locales").mkdir()
(template_dir / "locales" / "en").mkdir()
(template_dir / "scripts").mkdir()
(template_dir / "scripts" / "migrations_generate.sh.j2").write_text("migration script")
(template_dir / "scripts" / "babel_init.sh.j2").write_text("babel script")
output_path = tmp_path / "output"
# Call init function directly with options
_init_project(
output_path, "basic", project_name="simple_bot", include_alembic=False, include_i18n=False
)
# Verify output directory structure
output_dir = tmp_path / "output"
assert output_dir.exists()
# Check main app files
assert (output_dir / "app" / "main.py").exists()
# Check that optional modules were removed
assert not (output_dir / "alembic").exists()
assert not (output_dir / "locales").exists()
assert not (output_dir / "scripts" / "migrations_generate.sh").exists()
assert not (output_dir / "scripts" / "babel_init.sh").exists()
def test_project_generation_with_overwrite(self) -> None:
"""Test project generation with overwrite enabled."""
with tempfile.TemporaryDirectory() as tmp_dir:
tmp_path = Path(tmp_dir)
# Mock the templates directory
with patch("quickbot_cli.cli.TEMPLATES_DIR", tmp_path / "templates"):
template_dir = tmp_path / "templates" / "basic"
template_dir.mkdir(parents=True)
# Create template spec
spec_content = {"variables": {"project_name": {"prompt": "Project name", "default": "overwrite_test"}}}
spec_file = template_dir / "__template__.yaml"
spec_file.write_text(str(spec_content))
# Create template file in app subdirectory
(template_dir / "app").mkdir()
(template_dir / "app" / "main.py.j2").write_text("app = FastAPI(title='{{ project_name }}')\n")
# Create output directory with existing file
output_dir = tmp_path / "output"
output_dir.mkdir()
existing_file = output_dir / "app" / "main.py"
existing_file.parent.mkdir(parents=True, exist_ok=True)
existing_file.write_text("existing content")
# Call init function directly with overwrite
_init_project(output_dir, "basic", project_name="overwrite_test", overwrite=True)
# Check that file was overwritten
assert (output_dir / "app" / "main.py").exists()
assert "app = FastAPI(title='overwrite_test')" in (output_dir / "app" / "main.py").read_text()
assert "existing content" not in (output_dir / "app" / "main.py").read_text()
def test_project_generation_without_overwrite_fails(self) -> None:
"""Test that project generation fails without overwrite when files exist."""
with tempfile.TemporaryDirectory() as tmp_dir:
tmp_path = Path(tmp_dir)
# Mock the templates directory
with patch("quickbot_cli.cli.TEMPLATES_DIR", tmp_path / "templates"):
template_dir = tmp_path / "templates" / "basic"
template_dir.mkdir(parents=True)
# Create template spec
spec_content = {"variables": {"project_name": {"prompt": "Project name", "default": "overwrite_test"}}}
spec_file = template_dir / "__template__.yaml"
spec_file.write_text(str(spec_content))
# Create template file in app subdirectory
(template_dir / "app").mkdir()
(template_dir / "app" / "main.py.j2").write_text("app = FastAPI(title='{{ project_name }}')\n")
# Create output directory with existing file
output_dir = tmp_path / "output"
output_dir.mkdir()
existing_file = output_dir / "app" / "main.py"
existing_file.parent.mkdir(parents=True, exist_ok=True)
existing_file.write_text("existing content")
# Should fail with FileExistsError when overwrite is False
with pytest.raises(FileExistsError):
_init_project(output_dir, "basic", project_name="overwrite_test")
# Check that file was not overwritten
assert "existing content" in (output_dir / "app" / "main.py").read_text()