Create App Fastapi Backend - iff133/first GitHub Wiki
Install Fastapi
pip install "fastapi[all]"
- Create folders:
- api
- crud
- models
- schemas
- routers
- tests
- api
Install Alembic
- cd into the root of the directory and run
alembic init alembic
- this command generates:
- an
alembic.ini
file in the root directory
- an
alembic
folder in the root directory
- Inside the alembic folder create a folder called
versions
- Create an
env.py
file inside the alembic folder
from logging.config import fileConfig
from sqlalchemy import create_engine
from sqlalchemy import pool
from alembic import context
from os import getenv
from api.models.database import Base
from api.models.models import Users, Files
# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config
# Interpret the config file for Python logging.
# This line sets up loggers basically.
fileConfig(config.config_file_name)
# add your model's MetaData object here
# for 'autogenerate' support
# from myapp import mymodel
# target_metadata = mymodel.Base.metadata
# Point autogenerate at this project's declarative Base (api.models.database);
# the Users/Files models imported above register themselves on this metadata.
target_metadata = Base.metadata
# other values from the config, defined by the needs of env.py,
# can be acquired:
# my_important_option = config.get_main_option("my_important_option")
# ... etc.
def run_migrations_offline():
    """Run migrations in 'offline' mode.

    This configures the context with just a URL
    and not an Engine, though an Engine is acceptable
    here as well. By skipping the Engine creation
    we don't even need a DBAPI to be available.

    Calls to context.execute() here emit the given string to the
    script output.
    """
    from urllib.parse import quote_plus

    pguser = getenv("PG_USER")
    pgpass = getenv("PG_PASSWORD")
    pghost = getenv("PG_HOST")
    # URL-escape the credentials: characters such as '@', ':' or '/' in the
    # password would otherwise corrupt the DSN. `or ""` keeps a missing env
    # var from raising inside quote_plus (the URL is invalid either way).
    url = f"postgresql://{quote_plus(pguser or '')}:{quote_plus(pgpass or '')}@{pghost}/postgres"
    # url = config.get_main_option("sqlalchemy.url")
    context.configure(
        url=url,
        target_metadata=target_metadata,
        literal_binds=True,
        dialect_opts={"paramstyle": "named"},
    )
    with context.begin_transaction():
        context.run_migrations()
def run_migrations_online():
    """Run migrations in 'online' mode.

    In this scenario we need to create an Engine
    and associate a connection with the context.
    """
    from urllib.parse import quote_plus

    # connectable = engine_from_config(
    #     config.get_section(config.config_ini_section),
    #     prefix="sqlalchemy.",
    #     poolclass=pool.NullPool,
    # )
    pguser = getenv("PG_USER")
    pgpass = getenv("PG_PASSWORD")
    pghost = getenv("PG_HOST")
    # URL-escape the credentials: characters such as '@', ':' or '/' in the
    # password would otherwise corrupt the DSN. `or ""` keeps a missing env
    # var from raising inside quote_plus (the URL is invalid either way).
    url = f"postgresql://{quote_plus(pguser or '')}:{quote_plus(pgpass or '')}@{pghost}/postgres"
    # NullPool matches the stock Alembic template (commented out above): a
    # migration run is one-shot, so there is no point keeping pooled
    # connections alive. This also puts the `pool` import to use.
    connectable = create_engine(url, poolclass=pool.NullPool)
    with connectable.connect() as connection:
        context.configure(connection=connection, target_metadata=target_metadata)
        with context.begin_transaction():
            context.run_migrations()
# Alembic executes this env.py directly; dispatch on how it was invoked:
# offline mode emits SQL to stdout, online mode connects and applies.
if context.is_offline_mode():
    run_migrations_offline()
else:
    run_migrations_online()
- it is very important to import the models and the database Base
from api.models.models import Users, Files
from api.models.database import Base
-
Create an
alembic_version
table in postgres:

-- Table: public.alembic_version
-- DROP TABLE public.alembic_version;
CREATE TABLE public.alembic_version (
    version_num character varying(32) COLLATE pg_catalog."default" NOT NULL,
    CONSTRAINT alembic_version_pkc PRIMARY KEY (version_num)
)
TABLESPACE pg_default;

ALTER TABLE public.alembic_version OWNER to postgres;
-
Add models to the models module and then run:
alembic revision --autogenerate -m "A commit message"
-
Check the migration script in
alembic/versions
if everything is ok run: alembic upgrade head
Create Files In Root Directory
-
Dockerfile.backend
# For more information, please refer to https://aka.ms/vscode-docker-python

# --- Stage 1: build wheels for all Python dependencies ---
FROM python:3.8-slim-buster as builder
EXPOSE 8000

# Keeps Python from generating .pyc files in the container
ENV PYTHONDONTWRITEBYTECODE=1
# Turns off buffering for easier container logging
ENV PYTHONUNBUFFERED=1

WORKDIR /usr/src/app

#
# Install Tesseract dependencies
RUN apt-get update && \
    apt-get install -y g++
# RUN apt-get update && \
#     apt-get install -y g++ libtesseract-dev pkg-config poppler-utils libsm-dev libxrender-dev libxext-dev libtesseract4

# Install pip requirements
COPY ./requirements.txt .
RUN python -m pip install --upgrade pip && \
    python -m pip install wheel && \
    python -m pip wheel --no-cache-dir --no-deps --wheel-dir /usr/src/app/wheels -r requirements.txt

# --- Stage 2: runtime image ---
FROM python:3.8-buster
ENV LC_ALL=C

# Install dependencies
RUN apt-get update && \
    apt-get install -y zlib1g-dev libjpeg-dev python3-pythonmagick inkscape xvfb poppler-utils libfile-mimeinfo-perl qpdf libimage-exiftool-perl ufraw-batch ffmpeg locales
# RUN apt-get update && \
#     apt-get install -y zlib1g-dev libjpeg-dev python3-pythonmagick inkscape xvfb poppler-utils libfile-mimeinfo-perl qpdf libimage-exiftool-perl ufraw-batch ffmpeg \
#     g++ libtesseract-dev tesseract-ocr pkg-config poppler-utils libsm-dev libxrender-dev libxext-dev default-jre locales

RUN sed -i '/en_US.UTF-8/s/^#//g' /etc/locale.gen
RUN locale-gen
ENV TZ=Europe/London

# Set up the app dir, copy over the code
WORKDIR /app
COPY . /app

# Install the wheels from the builder image
COPY --from=builder /usr/src/app/wheels /wheels
COPY --from=builder /usr/src/app/requirements.txt .
RUN python -m pip install wheel && \
    python -m pip install --upgrade pip && \
    python -m pip install --no-cache /wheels/*

# Creates a non-root user and adds permission to access the /app folder
RUN useradd -m appuser && chown -R appuser /app
USER appuser

# Install the nltk stopwords dataset
RUN python -m nltk.downloader stopwords

CMD ["uvicorn", "--host", "0.0.0.0", "--reload", "--reload-dir", "/app/api/", "api.main:app"]
-
Dockerfile.celery
FROM gitlab.demarq.com:5050/dwp/fed/backend:latest CMD ["watchmedo", "auto-restart", "--directory=api/", "--pattern=*.py", "--recursive", "--", "celery", "-A", "api.celery", "worker", "--loglevel=INFO"]
-
docker-compose.yml
version: "2.8" services: fedbackend: # image: gitlab.demarq.com:5050/dwp/fed/backend/fedbackend:latest build: context: . dockerfile: ./Dockerfile.backend ports: - 8000:8000 command: [ "uvicorn", "--host", "0.0.0.0", "--reload", "--reload-dir", "/app/api/", "api.main:app", ] env_file: - ./.env depends_on: - postgres volumes: - ".:/app" mem_limit: 1g celeryworker: # image: gitlab.demarq.com:5050/dwp/fed/backend/celeryrunner:latest build: context: . dockerfile: ./Dockerfile.backend command: [ "watchmedo", "auto-restart", "--directory=api/", "--pattern=*.py", "--recursive", "--", "celery", "-A", "api.celery", "worker", "--loglevel=INFO", ] env_file: - ./.env depends_on: - redis - rabbitmq volumes: - ".:/app" mem_limit: 1g deploy: replicas: ${CELERY_WORKER_REPLICA_COUNT:-1} restart_policy: condition: on-failure postgres: image: postgres:13 environment: POSTGRES_PASSWORD: ${PG_PASSWORD} volumes: - pgdata:/var/lib/postgresql/data mem_limit: 500m cpus: 1 ports: - 5432:5432 rabbitmq: image: bitnami/rabbitmq:3.7 ports: - "4369:4369" - "5672:5672" - "25672:25672" - "15672:15672" volumes: - "rabbitmq_data:/rabbit_mq" mem_limit: 500m cpus: 0.5 redis: image: "bitnami/redis:5.0.4" environment: - REDIS_PASSWORD=${REDIS_PASSWORD} ports: - 6379:6379 volumes: - "redis_data:/redis" mem_limit: 1g cpus: 0.5 volumes: pgdata: rabbitmq_data: driver: local redis_data: driver: local
-
backend_logging.yml
version: 1 disable_existing_loggers: false formatters: standard: format: "%(asctime)s - %(levelname)s - %(message)s" handlers: console: class: logging.StreamHandler formatter: standard stream: ext://sys.stdout loggers: uvicorn: error: propagate: true root: level: INFO handlers: [console] propagate: no
-
.pre-commit-config.yaml
repos: - repo: local hooks: - id: autoflake name: Remove unused variables and imports entry: bash -c 'autoflake "$@"; git add -u' -- language: python args: [ "--in-place", "--remove-all-unused-imports", "--remove-unused-variables", "--expand-star-imports", "--ignore-init-module-imports", ] files: \.py$ - id: isort name: Sorting import statements entry: bash -c 'isort "$@"; git add -u' -- language: python args: ["--filter-files"] files: \.py$ - id: black name: Black Python code formatting entry: bash -c 'black "$@"; git add -u' -- language: python types: [python] args: ["--line-length=88"]
-
logging.conf
[loggers] keys=root,api [handlers] keys=consoleHandler,detailedConsoleHandler [formatters] keys=normalFormatter,detailedFormatter [logger_root] level=INFO handlers=consoleHandler [logger_api] level=DEBUG handlers=detailedConsoleHandler qualname=api propagate=0 [handler_consoleHandler] class=StreamHandler level=DEBUG formatter=normalFormatter args=(sys.stdout,) [handler_detailedConsoleHandler] class=StreamHandler level=DEBUG formatter=detailedFormatter args=(sys.stdout,) [formatter_normalFormatter] format=%(asctime)s loglevel=%(levelname)-6s logger=%(name)s %(funcName)s() L%(lineno)-4d %(message)s [formatter_detailedFormatter] format=%(asctime)s loglevel=%(levelname)-6s logger=%(name)s %(funcName)s() L%(lineno)-4d %(message)s call_trace=%(pathname)s L%(lineno)-4d
-
requirements.txt
# bcrypt==3.2.0 fastapi[all]==0.63.0 uvicorn[standard]==0.13.4 gunicorn==20.0.4 SQLAlchemy==1.4.1 psycopg2-binary==2.8.6 minio==7.0.2 celery==5.0.5 redis==3.5.3 asyncio-redis==0.16.0 Pillow==8.2.0 preview-generator==0.23 pdf2image==1.14.0 python-dotenv==0.15.0 aiofiles==0.5.0 dateparser==1.0.0 fuzzywuzzy==0.18.0 itsdangerous==1.1.0 numpy==1.20.3 pandas==1.2.4 python-jose==3.3.0 PyPDF2==1.26.0 passlib==1.7.4 python-multipart==0.0.5 pytz==2021.1 PyYAML==5.4.1 # # We only need the CPU version of torch, the full version above has all the GPU gubbins in and is huge https://download.pytorch.org/whl/cpu/torch-1.6.0%2Bcpu-cp38-cp38-linux_x86_64.whl websockets==8.1 watchdog boto3==1.17.106 openpyxl==3.0.7 xlsxwriter==1.4.4 scipy==1.7.0 nltk==3.6.2 bcrypt==3.2.0 stripe==2.58.0 python-Levenshtein==0.12.2 alembic==1.5.7 scikit-learn==1.0.1 scipy==1.7.0 pikepdf==4.0.1.post1
-
.gitignore
# Custom bits .env .env.dev .env.normal .env.test /temp/ /test_files/ /aws/credentials # Auto generated bits # Created by https://www.toptal.com/developers/gitignore/api/python,vscode # Edit at https://www.toptal.com/developers/gitignore?templates=python,vscode ### Python ### # Byte-compiled / optimized / DLL files __pycache__/ *.py[cod] *$py.class # C extensions *.so # identifier files *.Identifier # Distribution / packaging .Python build/ develop-eggs/ dist/ downloads/ eggs/ .eggs/ parts/ sdist/ var/ wheels/ pip-wheel-metadata/ share/python-wheels/ *.egg-info/ .installed.cfg *.egg MANIFEST # PyInstaller # Usually these files are written by a python script from a template # before PyInstaller builds the exe, so as to inject date/other infos into it. *.manifest *.spec # Installer logs pip-log.txt pip-delete-this-directory.txt # Unit test / coverage reports htmlcov/ .tox/ .nox/ .coverage .coverage.* .cache nosetests.xml coverage.xml *.cover *.py,cover .hypothesis/ .pytest_cache/ pytestdebug.log # Translations *.mo *.pot # Django stuff: *.log local_settings.py db.sqlite3 db.sqlite3-journal # Flask stuff: instance/ .webassets-cache # Scrapy stuff: .scrapy # Sphinx documentation docs/_build/ doc/_build/ # PyBuilder target/ # Jupyter Notebook .ipynb_checkpoints # IPython profile_default/ ipython_config.py # pyenv .python-version # pipenv # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. # However, in case of collaboration, if having platform-specific dependencies or dependencies # having no cross-platform support, pipenv may install dependencies that don't work, or not # install all needed dependencies. #Pipfile.lock # poetry #poetry.lock # PEP 582; used by e.g. 
github.com/David-OConnor/pyflow __pypackages__/ # Celery stuff celerybeat-schedule celerybeat.pid # SageMath parsed files *.sage.py # Environments # .env .env/ .venv/ env/ venv/ ENV/ env.bak/ venv.bak/ pythonenv* # Spyder project settings .spyderproject .spyproject # Rope project settings .ropeproject # mkdocs documentation /site # mypy .mypy_cache/ .dmypy.json dmypy.json # Pyre type checker .pyre/ # pytype static type analyzer .pytype/ # operating system-related files *.DS_Store #file properties cache/storage on macOS Thumbs.db #thumbnail cache on Windows # profiling data .prof ### vscode ### .vscode/* .vscode/settings.json .vscode/tasks.json .vscode/launch.json .vscode/extensions.json *.code-workspace *test.db