Added migrations

Michail Kostochka 2024-12-12 22:12:00 +03:00
parent 62cb52a8ae
commit 6c22f2b560
15 changed files with 527 additions and 19 deletions

1
.gitignore vendored

@ -1 +1,2 @@
.idea
.env

11
batcher/Dockerfile Normal file

@ -0,0 +1,11 @@
FROM python:3.12
WORKDIR /batcher
COPY ./requirements.txt /batcher/requirements.txt
RUN pip install --no-cache-dir --upgrade -r /batcher/requirements.txt
COPY ./app /batcher/app
# Shell form so $HTTP_PORT expands at runtime; the exec (JSON-array) form does no
# variable substitution. Note: `fastapi run` also needs the fastapi-cli and uvicorn
# packages, which requirements.txt does not currently pin.
CMD fastapi run app/main.py --port "$HTTP_PORT"

9
batcher/app/migrate.py Normal file

@ -0,0 +1,9 @@
import sys
import asyncio

from .src.db.pg import migrate

if __name__ == '__main__':
    if len(sys.argv) < 2:
        raise RuntimeError('you need to specify target revision')
    asyncio.run(migrate(sys.argv[1]))
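
Note: because migrate.py uses a package-relative import (from .src.db.pg import migrate), it has to be launched as a module from the image's /batcher workdir, i.e. python -m app.migrate <target_revision>; running the file directly would raise "attempted relative import with no known parent package". migrate.sh further down does exactly that.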

19
(deleted file; original path not captured in this render)

@ -1,19 +0,0 @@
import asyncpg
import asyncio

from ..config import PG_HOST, PG_PORT, PG_USER, PG_PASSWORD, PG_DB

DB_URL = f'postgresql://{PG_USER}:{str(PG_PASSWORD)}@{PG_HOST}:{PG_PORT}/{PG_DB}'


async def connect_db() -> asyncpg.Pool:
    return await asyncpg.create_pool(DB_URL)


pool = asyncio.run(connect_db())


async def get_pg() -> asyncpg.Connection:
    async with pool.acquire() as conn:
        yield conn

1
batcher/app/src/db/__init__.py Normal file

@ -0,0 +1 @@
from .pg import get_pg, migrate

2
batcher/app/src/db/migrations/… (down migration) Normal file

@ -0,0 +1,2 @@
DROP INDEX clicks_user_id_time_idx;
DROP TABLE clicks;

8
batcher/app/src/db/migrations/… (up migration) Normal file

@ -0,0 +1,8 @@
CREATE TABLE IF NOT EXISTS clicks(
    id BIGSERIAL PRIMARY KEY,
    user_id BIGINT,
    time TIMESTAMP,
    value DECIMAL(100, 2),
    expiry_info JSONB
);
CREATE INDEX IF NOT EXISTS clicks_user_id_time_idx ON clicks(user_id, time);
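
For context, a minimal sketch of writing one row into this table through asyncpg; the helper name and call site are assumptions, not part of this commit:

# Hypothetical helper: insert one click and return its id.
from datetime import datetime, timezone
from decimal import Decimal

import asyncpg


async def record_click(conn: asyncpg.Connection, user_id: int,
                       value: Decimal, expiry_info: str) -> int:
    # `time` is TIMESTAMP without time zone, so pass a naive UTC datetime.
    # asyncpg encodes JSONB from a JSON string unless a custom codec is registered.
    row = await conn.fetchrow(
        '''
        INSERT INTO clicks (user_id, time, value, expiry_info)
        VALUES ($1, $2, $3, $4)
        RETURNING id
        ''',
        user_id,
        datetime.now(timezone.utc).replace(tzinfo=None),
        value,
        expiry_info,
    )
    return row['id']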

32
batcher/app/src/db/pg.py Normal file

@ -0,0 +1,32 @@
from pathlib import Path

import asyncpg
from asyncpg_trek import plan, execute, Direction
from asyncpg_trek.asyncpg import AsyncpgBackend

# Relative import: inside the image the code ships as the `app` package, so an
# absolute `batcher.app.src.config` path would not resolve.
from ..config import PG_HOST, PG_PORT, PG_USER, PG_PASSWORD, PG_DB

DB_URL = f'postgresql://{PG_USER}:{PG_PASSWORD}@{PG_HOST}:{PG_PORT}/{PG_DB}'
# Path(__file__) alone points at pg.py itself; .parent is the db package directory.
MIGRATIONS_DIR = Path(__file__).parent / "migrations"

pool: asyncpg.Pool | None = None


async def get_pool() -> asyncpg.Pool:
    # Create the pool lazily on the running event loop. Building it with
    # asyncio.run() at import time binds it to a loop that is closed right away,
    # which breaks every later acquire().
    global pool
    if pool is None:
        pool = await asyncpg.create_pool(DB_URL)
    return pool


async def get_pg() -> asyncpg.Connection:
    async with (await get_pool()).acquire() as conn:
        yield conn


async def migrate(
    target_revision: str,
) -> None:
    db_pool = await get_pool()
    async with db_pool.acquire() as conn:
        backend = AsyncpgBackend(conn)
        async with backend.connect() as conn:
            planned = await plan(conn, backend, MIGRATIONS_DIR,
                                 target_revision=target_revision, direction=Direction.up)
            await execute(conn, backend, planned)
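
A sketch of how get_pg is meant to plug into a FastAPI route as a yield dependency; the route path and query are illustrative assumptions, not part of this commit:

# Illustrative only.
import asyncpg
from fastapi import Depends, FastAPI

from .src.db import get_pg

app = FastAPI()


@app.get('/api/v1/click/count')
async def click_count(conn: asyncpg.Connection = Depends(get_pg)) -> int:
    # get_pg yields a pooled connection scoped to this request
    return await conn.fetchval('SELECT count(*) FROM clicks')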

1
batcher/migrate.sh Normal file

@ -0,0 +1 @@
docker exec -it "$1" python -m app.migrate "$2"

31
batcher/requirements.txt Normal file

@ -0,0 +1,31 @@
aio-pika==9.4.3
aiohappyeyeballs==2.4.3
aiohttp==3.10.10
aiormq==6.8.1
aiosignal==1.3.1
amqp==5.2.0
annotated-types==0.7.0
anyio==4.6.2.post1
async-timeout==4.0.3
asyncpg==0.29.0
asyncpg-trek==0.4.0
attrs==24.2.0
certifi==2024.8.30
charset-normalizer==3.4.0
fastapi==0.115.2
frozenlist==1.4.1
idna==3.10
multidict==6.1.0
pamqp==3.3.0
propcache==0.2.0
pydantic==2.9.2
pydantic_core==2.23.4
redis==5.1.1
requests==2.32.3
sniffio==1.3.1
starlette==0.40.0
typing_extensions==4.12.2
tzdata==2024.2
urllib3==2.2.3
vine==5.1.0
yarl==1.15.5

143
docker-compose-prod.yml Normal file

@ -0,0 +1,143 @@
version: '3.9'

volumes:
  db_data: {}
  batcher_db_data: {}
  redis_data: {}

services:
  backend:
    build:
      context: ./backend
    volumes:
      - ./backend:/app
    command: /gunicorn.sh
    entrypoint: /entrypoint.sh
    restart: on-failure
    depends_on:
      - postgres
      - rabbitmq
    env_file:
      - .env/prod/pg
      - .env/prod/back
      - .env/prod/rmq
      - .env/prod/bot

  bot:
    build:
      context: ./clicker_bot
    depends_on:
      - backend
    volumes:
      - ./clicker_bot:/app
    environment:
      PROD: 1
    env_file:
      - .env/prod/bot
    command:
      - /gunicorn.sh
    restart: on-failure

#  memcached:
#    container_name: memcached
#    image: memcached:latest

  postgres:
    image: postgres:14.5-alpine
    volumes:
      - db_data:/var/lib/postgresql/data
    env_file:
      - .env/prod/pg

  nginx:
    build:
      context: .
      dockerfile: nginx/Dockerfile
    ports:
      - '80:80'
      - '443:443'
    depends_on:
      - backend
      - bot
      - rabbitmq
      - batcher
    volumes:
      - ./backend/static/:/static/
      - ./nginx/certbot/conf:/etc/letsencrypt
      - ./nginx/certbot/www:/var/www/certbot
    restart: unless-stopped
    command: '/bin/sh -c ''while :; do sleep 6h & wait $${!}; nginx -s reload; done & nginx -g "daemon off;"'''

  certbot:
    container_name: certbot
    image: certbot/certbot
    volumes:
      - ./nginx/certbot/conf:/etc/letsencrypt
      - ./nginx/certbot/www:/var/www/certbot
    restart: unless-stopped
    entrypoint: "/bin/sh -c 'trap exit TERM; while :; do certbot renew; sleep 12h & wait $${!}; done;'"

  celery:
    build: ./backend
    command: /start_celery.sh
    volumes:
      - ./backend:/app
    env_file:
      - .env/prod/back
      - .env/prod/rmq
      - .env/prod/pg
      - .env/prod/bot
    depends_on:
      - backend
      - rabbitmq

  celery-beat:
    build: ./backend
    command: celery -A clicker beat -l info
    volumes:
      - ./backend:/app
    env_file:
      - .env/prod/back
      - .env/prod/rmq
      - .env/prod/pg
      - .env/prod/bot
    depends_on:
      - backend
      - rabbitmq

  rabbitmq:
    container_name: 'rabbitmq'
    image: 'rabbitmq:3-management-alpine'
    env_file:
      - .env/prod/rmq
    ports:
      - '15672:15672'

  redis:
    env_file:
      - .env/prod/redis
    image: redis
    command: bash -c "redis-server --appendonly yes --requirepass $${REDIS_PASSWORD}"
    volumes:
      - redis_data:/data

  batcher:
    build:
      context: ./batcher
    depends_on:
      - redis
      - batcher-postgres
      - rabbitmq
    env_file:
      - .env/prod/rmq
      - .env/prod/redis
      - .env/prod/batcher-pg
      - .env/prod/batcher
      - .env/prod/bot

  batcher-postgres:
    image: postgres:14.5-alpine
    volumes:
      - batcher_db_data:/var/lib/postgresql/data
    env_file:
      - .env/prod/batcher-pg
110
docker-compose.yml Normal file

@ -0,0 +1,110 @@
version: '3.9'

volumes:
  db_data: {}
  batcher_db_data: {}
  redis_data: {}

services:
  backend:
    build:
      context: ./backend
    depends_on:
      - postgres
      - rabbitmq
    volumes:
      - ./backend:/app
    command: /start.sh
    entrypoint: /entrypoint.sh
    restart: on-failure
    env_file:
      - .env/dev/pg
      - .env/dev/back
      - .env/dev/rmq
      - .env/dev/bot
      - .env/dev/web
    ports:
      - '8000:8000'

  postgres:
    image: postgres:14.5-alpine
    volumes:
      - db_data:/var/lib/postgresql/data
    env_file:
      - .env/dev/pg
    ports:
      - '5432:5432'

  celery:
    build: ./backend
    command: celery -A clicker worker -l info
    volumes:
      - ./backend:/app
    env_file:
      - .env/dev/back
      - .env/dev/rmq
      - .env/dev/pg
      - .env/dev/bot
      - .env/dev/web
    depends_on:
      - backend
      - rabbitmq

  celery-beat:
    build: ./backend
    command: celery -A clicker beat -l info
    volumes:
      - ./backend:/app
    env_file:
      - .env/dev/back
      - .env/dev/rmq
      - .env/dev/pg
      - .env/dev/bot
      - .env/dev/web
    depends_on:
      - backend
      - rabbitmq

  rabbitmq:
    container_name: 'rabbitmq'
    image: 'rabbitmq:3-management-alpine'
    env_file:
      - .env/dev/rmq
    ports:
      - '5672:5672'
      - '15672:15672'

  redis:
    env_file:
      - .env/dev/redis
    image: redis
    command: bash -c "redis-server --appendonly yes --requirepass $${REDIS_PASSWORD}"
    ports:
      - '6379:6379'
    volumes:
      - redis_data:/data

  batcher:
    build:
      context: ./batcher
    depends_on:
      - redis
      - batcher-postgres
    env_file:
      - .env/dev/rmq
      - .env/dev/redis
      - .env/dev/batcher-pg
      - .env/dev/batcher
      - .env/dev/bot
      - .env/dev/web
    ports:
      - '8080:8080'

  batcher-postgres:
    image: postgres:14.5-alpine
    volumes:
      - batcher_db_data:/var/lib/postgresql/data
    env_file:
      - .env/dev/batcher-pg
    ports:
      - '5433:5432'

20
nginx/Dockerfile Normal file

@ -0,0 +1,20 @@
# stage 1 - build frontend
FROM node:16.20.0 as build-deps
WORKDIR /app
COPY ./frontend/package.json /app/
COPY ./frontend/package-lock.json /app/
RUN npm install
COPY ./frontend /app/
RUN npm run build
# stage 2 - nginx
FROM nginx:stable
COPY nginx/nginx.conf /etc/nginx/nginx.conf
COPY --from=build-deps /app/dist/ /dist/
CMD ["nginx", "-g", "daemon off;"]

158
nginx/nginx.conf Normal file

@ -0,0 +1,158 @@
user nginx;
worker_processes 1;

events {
    worker_connections 1024;
}

http {
    log_format upstreamlog '[$time_local] $remote_addr - $remote_user - $server_name $host to: $upstream_addr: $request $status upstream_response_time $upstream_response_time msec $msec request_time $request_time';

    include /etc/nginx/mime.types;

    client_max_body_size 100m;
    proxy_read_timeout 300;
    proxy_connect_timeout 300;
    proxy_send_timeout 300;
    proxy_buffer_size 8m;
    proxy_busy_buffers_size 12m;
    proxy_buffers 16 1m;
    uwsgi_buffers 16 1m;
    uwsgi_buffer_size 8m;
    uwsgi_busy_buffers_size 12m;

    map $http_upgrade $connection_upgrade {
        default upgrade;
        '' close;
    }

    upstream backend {
        server backend:8000;
    }

    upstream batcher {
        server batcher:8080;
    }

    upstream rabbitmq {
        server rabbitmq:15672;
    }

    upstream bot {
        server bot:7313;
    }

    server {
        access_log /var/log/nginx/access.log upstreamlog;
        error_log /var/log/nginx/error.log;

        listen 80;
        listen 443 ssl http2;
        charset utf-8;
        server_name crowngame.ru www.crowngame.ru;

        root /dist/;
        index index.html;

        ssl_certificate /etc/letsencrypt/live/crowngame.ru/fullchain.pem;
        ssl_certificate_key /etc/letsencrypt/live/crowngame.ru/privkey.pem;
        include /etc/letsencrypt/options-ssl-nginx.conf;
        ssl_dhparam /etc/letsencrypt/ssl-dhparams.pem;

        # initialise to avoid "uninitialized variable" warnings when neither if matches
        set $https_redirect 0;
        if ($server_port = 80) {
            set $https_redirect 1;
        }
        if ($host ~ '^www\.') {
            set $https_redirect 1;
        }
        if ($https_redirect = 1) {
            return 301 https://crowngame.ru$request_uri;
        }

        location /.well-known/acme-challenge/ {
            root /var/www/certbot;
        }

        # frontend
        location / {
            try_files $uri $uri/ @rewrites;
        }

        location @rewrites {
            rewrite ^(.+)$ /index.html last;
        }

        # batcher
        location ~ ^/api/v1/(batch\-click|click|energy|coefficient)(/(.*))? {
            proxy_pass http://batcher;
            proxy_pass_header Authorization;
        }

        location ^~ /api/internal {
            deny all;
        }

        # backend
        location ~ ^/(admin|api) {
            proxy_http_version 1.1;
            proxy_pass http://backend;
            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
            proxy_set_header X-Forwarded-Proto $scheme;
            proxy_set_header Host $host;
            proxy_set_header X-Forwarded-Host $host;
            proxy_set_header X-Forwarded-Port $server_port;
            proxy_set_header X-Real-IP $remote_addr;
            proxy_set_header Connection $connection_upgrade;
            proxy_set_header Upgrade $http_upgrade;
            proxy_cache_bypass $http_upgrade;

            if ($uri ~* \.(?:ico|js|css|gif|jpe?g|png|webp)/?$) {
                expires max;
                add_header Pragma public;
                add_header Cache-Control "public, must-revalidate, proxy-revalidate";
            }
        }

        # bot
        location ~ ^/bot {
            proxy_http_version 1.1;
            proxy_pass http://bot;
            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
            proxy_set_header X-Forwarded-Proto $scheme;
            proxy_set_header Host $host;
            proxy_set_header X-Forwarded-Host $host;
            proxy_set_header X-Forwarded-Port $server_port;
            proxy_set_header X-Real-IP $remote_addr;
            proxy_set_header Connection $connection_upgrade;
            proxy_set_header Upgrade $http_upgrade;
            proxy_cache_bypass $http_upgrade;
        }

        # backend static
        location ~ ^/(static)/(.*)$ {
            alias /$1/$2;
        }

        # Some basic cache-control for static files to be sent to the browser
        location ~* \.(?:ico|css|js|gif|jpe?g|png|webp)$ {
            expires max;
            add_header Pragma public;
            add_header Cache-Control "public, must-revalidate, proxy-revalidate";
        }

        location ~ ^/rabbitmq/api/(.*?)/(.*) {
            proxy_pass http://rabbitmq/api/$1/%2F/$2?$query_string;
            proxy_buffering off;
            proxy_set_header Host $http_host;
            proxy_set_header X-Real-IP $remote_addr;
            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
            proxy_set_header X-Forwarded-Proto $scheme;
        }

        location ~ ^/rabbitmq/(.*) {
            rewrite ^/rabbitmq/(.*)$ /$1 break;
            proxy_pass http://rabbitmq;
            proxy_buffering off;
            proxy_set_header Host $http_host;
            proxy_set_header X-Real-IP $remote_addr;
            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
            proxy_set_header X-Forwarded-Proto $scheme;
        }
    }
}
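
A quick way to sanity-check the routing split above once the stack is up; the endpoints here are assumptions, only the path prefixes come from the config (requests is already pinned in batcher/requirements.txt):

# Hypothetical smoke test for the nginx location blocks.
import requests

BASE = 'https://crowngame.ru'

# Matches the batcher block: ^/api/v1/(batch-click|click|energy|coefficient)
print(requests.get(f'{BASE}/api/v1/click').status_code)

# /api/internal is `deny all`, so nginx should answer 403.
assert requests.get(f'{BASE}/api/internal/anything').status_code == 403

# Anything else under /api falls through to the Django backend.
print(requests.get(f'{BASE}/api/health').status_code)  # endpoint name assumed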