Alembic sqlalchemy 迁移助手不跟踪现有表
Alembic sqlalchemy migration assistant not tracking existing tables
我正在尝试设置 alembic 以自动生成迁移。目前当我 运行 这行代码时: alembic revision --autogenerate -m 'add column'
alembic 检查 spatial_ref_sys 中的更改,我已告诉它忽略这些更改,然后创建一个包含空升级和降级定义的版本文件。它无法识别我对数据库中其他表所做的更改。我认为我定义 target_metadata 的方式有问题。我尝试打印 Base.metadata.tables 并得到 immutabledict({})
,而当我在我的应用程序中打印相同的内容时,我从我创建的所有表格中打印出很长的内容。
alembic.ini
# A generic, single database configuration.
[alembic]
# path to migration scripts
script_location = alembic
# template used to generate migration files
file_template = %%(rev)s_%%(slug)s
# timezone to use when rendering the date
# within the migration file as well as the filename.
# string value is passed to dateutil.tz.gettz()
# leave blank for localtime
timezone = UTC
# max length of characters to apply to the
# "slug" field
truncate_slug_length = 40
# set to 'true' to run the environment during
# the 'revision' command, regardless of autogenerate
revision_environment = false
# set to 'true' to allow .pyc and .pyo files without
# a source .py file to be detected as revisions in the
# versions/ directory
sourceless = false
# version location specification; this defaults
# to alembic/versions. When using multiple version
# directories, initial revisions must be specified with --version-path
# version_locations = %(here)s/bar %(here)s/bat alembic/versions
# the output encoding used when revision files
# are written from script.py.mako
output_encoding = utf-8
[alembic:exclude]
tables = spatial_ref_sys
# Logging configuration
[loggers]
keys = root,sqlalchemy,alembic
[handlers]
keys = console
[formatters]
keys = generic
[logger_root]
level = WARN
handlers = console
qualname =
[logger_sqlalchemy]
level = WARN
handlers =
qualname = sqlalchemy.engine
[logger_alembic]
level = INFO
handlers =
qualname = alembic
[handler_console]
class = StreamHandler
args = (sys.stderr,)
level = NOTSET
formatter = generic
[formatter_generic]
format = %(levelname)-5.5s [%(name)s] %(message)s
datefmt = %H:%M:%S
env.py
from __future__ import with_statement
import os
import sys
from alembic import context
from sqlalchemy import engine_from_config, pool
from logging.config import fileConfig
parent_dir = os.path.abspath(os.path.join(os.getcwd(), "./app"))
sys.path.append(parent_dir)
from frameworks.database import Base, engine, MetaData
print Base.metadata.tables
# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config
# Interpret the config file for Python logging.
# This line sets up loggers basically.
fileConfig(config.config_file_name)
# add your model's MetaData object here
# for 'autogenerate' support
# from myapp import mymodel
# target_metadata = mymodel.Base.metadata
target_metadata = Base.metadata
# other values from the config, defined by the needs of env.py,
# can be acquired:
# my_important_option = config.get_main_option("my_important_option")
# ... etc.
exclude_tables = config.get_section('alembic:exclude').get('tables', '').split(',')
def include_object(object, name, type_, reflected, compare_to):
    """Autogenerate filter hook.

    Returns False for tables named in [alembic:exclude] so alembic
    ignores them; everything else is included.
    """
    is_excluded_table = type_ == "table" and name in exclude_tables
    return not is_excluded_table
def get_url():
    """Return the database URL, preferring the DATABASE_URL env var."""
    default_url = "postgres://localhost/public"
    return os.environ.get('DATABASE_URL', default_url)
def run_migrations_offline():
    """Run migrations in 'offline' mode.

    This configures the context with just a URL
    and not an Engine, though an Engine is acceptable
    here as well. By skipping the Engine creation
    we don't even need a DBAPI to be available.

    Calls to context.execute() here emit the given string to the
    script output.
    """
    # Reuse get_url() so the connection string is defined in one place
    # instead of duplicating the literal here.
    url = get_url()
    context.configure(
        url=url, target_metadata=target_metadata, literal_binds=True,
        include_object=include_object)
    with context.begin_transaction():
        context.run_migrations()
def run_migrations_online():
    """Run migrations in 'online' mode.

    In this scenario we need to create an Engine
    and associate a connection with the context.
    """
    # Debug aid: an empty dict here means the models were never imported
    # and autogenerate will emit empty revisions.
    # (print() call form works on both Python 2 and 3.)
    print(target_metadata.tables)
    connectable = engine
    with connectable.connect() as connection:
        context.configure(
            connection=connection,
            target_metadata=target_metadata,
            include_object=include_object,
            # Inspect all schemas on the database side; without this,
            # tables outside the default search path are not compared
            # and autogenerate produces empty revisions.
            include_schemas=True,
        )
        with context.begin_transaction():
            context.run_migrations()
# Entry point: alembic sets offline mode for SQL-script generation
# (e.g. `alembic upgrade --sql`); otherwise run against a live connection.
if context.is_offline_mode():
    run_migrations_offline()
else:
    run_migrations_online()
alembic 版本文件
"""add column
Revision ID: 04806a82f806
Revises: 8d27a8fcd1ec
Create Date: 2018-01-29 18:25:19.911399+00:00
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '04806a82f806'
down_revision = '8d27a8fcd1ec'
branch_labels = None
depends_on = None
def upgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    # Empty body: autogenerate found no differences because target_metadata
    # had no tables registered (the model modules were never imported).
    pass
    # ### end Alembic commands ###
def downgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    # Empty for the same reason as upgrade(): no diffs were detected.
    pass
    # ### end Alembic commands ###
我最终通过设置 include_schemas=True（让 alembic 检查数据库端的所有模式），并在 env.py 中导入模型（让 alembic 能够看到 SQLAlchemy 端的更新）解决了这个问题。
我正在尝试设置 alembic 以自动生成迁移。目前当我 运行 这行代码时: alembic revision --autogenerate -m 'add column'
alembic 检查 spatial_ref_sys 中的更改,我已告诉它忽略这些更改,然后创建一个包含空升级和降级定义的版本文件。它无法识别我对数据库中其他表所做的更改。我认为我定义 target_metadata 的方式有问题。我尝试打印 Base.metadata.tables 并得到 immutabledict({})
,而当我在我的应用程序中打印相同的内容时,我从我创建的所有表格中打印出很长的内容。
alembic.ini
# A generic, single database configuration.
[alembic]
# path to migration scripts
script_location = alembic
# template used to generate migration files
file_template = %%(rev)s_%%(slug)s
# timezone to use when rendering the date
# within the migration file as well as the filename.
# string value is passed to dateutil.tz.gettz()
# leave blank for localtime
timezone = UTC
# max length of characters to apply to the
# "slug" field
truncate_slug_length = 40
# set to 'true' to run the environment during
# the 'revision' command, regardless of autogenerate
revision_environment = false
# set to 'true' to allow .pyc and .pyo files without
# a source .py file to be detected as revisions in the
# versions/ directory
sourceless = false
# version location specification; this defaults
# to alembic/versions. When using multiple version
# directories, initial revisions must be specified with --version-path
# version_locations = %(here)s/bar %(here)s/bat alembic/versions
# the output encoding used when revision files
# are written from script.py.mako
output_encoding = utf-8
[alembic:exclude]
tables = spatial_ref_sys
# Logging configuration
[loggers]
keys = root,sqlalchemy,alembic
[handlers]
keys = console
[formatters]
keys = generic
[logger_root]
level = WARN
handlers = console
qualname =
[logger_sqlalchemy]
level = WARN
handlers =
qualname = sqlalchemy.engine
[logger_alembic]
level = INFO
handlers =
qualname = alembic
[handler_console]
class = StreamHandler
args = (sys.stderr,)
level = NOTSET
formatter = generic
[formatter_generic]
format = %(levelname)-5.5s [%(name)s] %(message)s
datefmt = %H:%M:%S
env.py
from __future__ import with_statement
import os
import sys
from alembic import context
from sqlalchemy import engine_from_config, pool
from logging.config import fileConfig
parent_dir = os.path.abspath(os.path.join(os.getcwd(), "./app"))
sys.path.append(parent_dir)
from frameworks.database import Base, engine, MetaData
print Base.metadata.tables
# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config
# Interpret the config file for Python logging.
# This line sets up loggers basically.
fileConfig(config.config_file_name)
# add your model's MetaData object here
# for 'autogenerate' support
# from myapp import mymodel
# target_metadata = mymodel.Base.metadata
target_metadata = Base.metadata
# other values from the config, defined by the needs of env.py,
# can be acquired:
# my_important_option = config.get_main_option("my_important_option")
# ... etc.
exclude_tables = config.get_section('alembic:exclude').get('tables', '').split(',')
def include_object(object, name, type_, reflected, compare_to):
    """Autogenerate filter hook.

    Returns False for tables named in [alembic:exclude] so alembic
    ignores them; everything else is included.
    """
    is_excluded_table = type_ == "table" and name in exclude_tables
    return not is_excluded_table
def get_url():
    """Return the database URL, preferring the DATABASE_URL env var."""
    default_url = "postgres://localhost/public"
    return os.environ.get('DATABASE_URL', default_url)
def run_migrations_offline():
    """Run migrations in 'offline' mode.

    This configures the context with just a URL
    and not an Engine, though an Engine is acceptable
    here as well. By skipping the Engine creation
    we don't even need a DBAPI to be available.

    Calls to context.execute() here emit the given string to the
    script output.
    """
    # Reuse get_url() so the connection string is defined in one place
    # instead of duplicating the literal here.
    url = get_url()
    context.configure(
        url=url, target_metadata=target_metadata, literal_binds=True,
        include_object=include_object)
    with context.begin_transaction():
        context.run_migrations()
def run_migrations_online():
    """Run migrations in 'online' mode.

    In this scenario we need to create an Engine
    and associate a connection with the context.
    """
    # Debug aid: an empty dict here means the models were never imported
    # and autogenerate will emit empty revisions.
    # (print() call form works on both Python 2 and 3.)
    print(target_metadata.tables)
    connectable = engine
    with connectable.connect() as connection:
        context.configure(
            connection=connection,
            target_metadata=target_metadata,
            include_object=include_object,
            # Inspect all schemas on the database side; without this,
            # tables outside the default search path are not compared
            # and autogenerate produces empty revisions.
            include_schemas=True,
        )
        with context.begin_transaction():
            context.run_migrations()
# Entry point: alembic sets offline mode for SQL-script generation
# (e.g. `alembic upgrade --sql`); otherwise run against a live connection.
if context.is_offline_mode():
    run_migrations_offline()
else:
    run_migrations_online()
alembic 版本文件
"""add column
Revision ID: 04806a82f806
Revises: 8d27a8fcd1ec
Create Date: 2018-01-29 18:25:19.911399+00:00
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '04806a82f806'
down_revision = '8d27a8fcd1ec'
branch_labels = None
depends_on = None
def upgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    # Empty body: autogenerate found no differences because target_metadata
    # had no tables registered (the model modules were never imported).
    pass
    # ### end Alembic commands ###
def downgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    # Empty for the same reason as upgrade(): no diffs were detected.
    pass
    # ### end Alembic commands ###
我最终通过设置 include_schemas=True（让 alembic 检查数据库端的所有模式），并在 env.py 中导入模型（让 alembic 能够看到 SQLAlchemy 端的更新）解决了这个问题。