Compare commits

..

3 Commits

Author SHA1 Message Date
OpenStack Proposal Bot
a9dc3794a6 Imported Translations from Zanata
For more information about this automatic import see:
https://docs.openstack.org/i18n/latest/reviewing-translation-import.html

Change-Id: I2b2afb0c0e590b737871bf4c43293df2ed88e534
2024-06-01 02:47:52 +00:00
Takashi Kajinami
d6f169197e SQLAlchemy 2.0: Omnibus fixes patch
This was originally five patches, but they are all needed to pass
any of the test jobs now, so they have been squashed into one:

Co-Authored-By: Dan Smith <dms@danplanet.com>

First:

The autoload argument was removed[1] in SQLAlchemy and only
the autoload_with argument should be passed.

The autoload argument is set according to the autoload_with argument
automatically even in SQLAlchemy 1.x[2] so is not at all needed.

[1] c932123bac
[2] ad8f921e96

Second:

Remove _warn_on_bytestring for newer SA. AFAICT, this flag has been
removed from SQLAlchemy and that is why watcher-db-manage fails to
initialize the DB for me on jammy. This migration was passing the
default value (=False) anyway, so I assume this is the right "fix".

Third:

Fix joinedload passing string attribute names

Fourth:

Fix engine.select pattern to use begin() per the migration guide.

Fifth:

Override the apscheduler get_next_run_time() which appears to be
trivially not compatible with SQLAlchemy 2.0 because of a return type
from scalar().

Change-Id: I000e5e78f97f82ed4ea64d42f1c38354c3252e08
2024-05-29 06:49:32 -07:00
James Page
bc5922c684 Fix oslo.db >= 15.0.0 compatibility
Minimal refactor of SQLAlchemy api module to be compatible with
oslo.db >= 15.0.0 where autocommit behaviour was dropped.

Closes-Bug: #2056181
Change-Id: I33be53f647faae2aad30a43c10980df950d5d7c2
2024-03-27 09:41:23 +00:00
4 changed files with 59 additions and 36 deletions

View File

@@ -3,15 +3,16 @@
# Andi Chandler <andi@gowling.com>, 2020. #zanata # Andi Chandler <andi@gowling.com>, 2020. #zanata
# Andi Chandler <andi@gowling.com>, 2022. #zanata # Andi Chandler <andi@gowling.com>, 2022. #zanata
# Andi Chandler <andi@gowling.com>, 2023. #zanata # Andi Chandler <andi@gowling.com>, 2023. #zanata
# Andi Chandler <andi@gowling.com>, 2024. #zanata
msgid "" msgid ""
msgstr "" msgstr ""
"Project-Id-Version: python-watcher\n" "Project-Id-Version: python-watcher\n"
"Report-Msgid-Bugs-To: \n" "Report-Msgid-Bugs-To: \n"
"POT-Creation-Date: 2023-08-14 03:05+0000\n" "POT-Creation-Date: 2024-05-31 14:40+0000\n"
"MIME-Version: 1.0\n" "MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n" "Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n" "Content-Transfer-Encoding: 8bit\n"
"PO-Revision-Date: 2023-06-21 07:54+0000\n" "PO-Revision-Date: 2024-04-18 12:21+0000\n"
"Last-Translator: Andi Chandler <andi@gowling.com>\n" "Last-Translator: Andi Chandler <andi@gowling.com>\n"
"Language-Team: English (United Kingdom)\n" "Language-Team: English (United Kingdom)\n"
"Language: en_GB\n" "Language: en_GB\n"
@@ -63,6 +64,9 @@ msgstr "2.0.0"
msgid "2023.1 Series Release Notes" msgid "2023.1 Series Release Notes"
msgstr "2023.1 Series Release Notes" msgstr "2023.1 Series Release Notes"
msgid "2023.2 Series Release Notes"
msgstr "2023.2 Series Release Notes"
msgid "3.0.0" msgid "3.0.0"
msgstr "3.0.0" msgstr "3.0.0"

View File

@@ -28,7 +28,7 @@ def upgrade():
op.create_table( op.create_table(
'apscheduler_jobs', 'apscheduler_jobs',
sa.Column('id', sa.Unicode(191, _warn_on_bytestring=False), sa.Column('id', sa.Unicode(191),
nullable=False), nullable=False),
sa.Column('next_run_time', sa.Float(25), index=True), sa.Column('next_run_time', sa.Float(25), index=True),
sa.Column('job_state', sa.LargeBinary, nullable=False), sa.Column('job_state', sa.LargeBinary, nullable=False),

View File

@@ -44,11 +44,7 @@ _FACADE = None
def _create_facade_lazily(): def _create_facade_lazily():
global _FACADE global _FACADE
if _FACADE is None: if _FACADE is None:
# FIXME(amoralej): Remove autocommit=True (and ideally use of _FACADE = db_session.EngineFacade.from_config(CONF)
# LegacyEngineFacade) asap since it's not compatible with SQLAlchemy
# 2.0.
_FACADE = db_session.EngineFacade.from_config(CONF,
autocommit=True)
return _FACADE return _FACADE
@@ -248,30 +244,36 @@ class Connection(api.BaseConnection):
for relationship in relationships: for relationship in relationships:
if not relationship.uselist: if not relationship.uselist:
# We have a One-to-X relationship # We have a One-to-X relationship
query = query.options(joinedload(relationship.key)) query = query.options(joinedload(
getattr(model, relationship.key)))
return query return query
def _create(self, model, values): def _create(self, model, values):
obj = model() session = get_session()
cleaned_values = {k: v for k, v in values.items() with session.begin():
if k not in self._get_relationships(model)} obj = model()
obj.update(cleaned_values) cleaned_values = {k: v for k, v in values.items()
obj.save() if k not in self._get_relationships(model)}
obj.update(cleaned_values)
obj.save(session=session)
session.commit()
return obj return obj
def _get(self, context, model, fieldname, value, eager): def _get(self, context, model, fieldname, value, eager):
query = model_query(model) session = get_session()
if eager: with session.begin():
query = self._set_eager_options(model, query) query = model_query(model, session=session)
if eager:
query = self._set_eager_options(model, query)
query = query.filter(getattr(model, fieldname) == value) query = query.filter(getattr(model, fieldname) == value)
if not context.show_deleted: if not context.show_deleted:
query = query.filter(model.deleted_at.is_(None)) query = query.filter(model.deleted_at.is_(None))
try: try:
obj = query.one() obj = query.one()
except exc.NoResultFound: except exc.NoResultFound:
raise exception.ResourceNotFound(name=model.__name__, id=value) raise exception.ResourceNotFound(name=model.__name__, id=value)
return obj return obj

View File

@@ -22,6 +22,7 @@ from apscheduler.jobstores.base import ConflictingIdError
from apscheduler.jobstores import sqlalchemy from apscheduler.jobstores import sqlalchemy
from apscheduler.util import datetime_to_utc_timestamp from apscheduler.util import datetime_to_utc_timestamp
from apscheduler.util import maybe_ref from apscheduler.util import maybe_ref
from apscheduler.util import utc_timestamp_to_datetime
from watcher.common import context from watcher.common import context
from watcher.common import service from watcher.common import service
@@ -32,7 +33,7 @@ try:
except ImportError: # pragma: nocover except ImportError: # pragma: nocover
import pickle import pickle
from sqlalchemy import Table, MetaData, select, and_ from sqlalchemy import Table, MetaData, select, and_, null
from sqlalchemy.exc import IntegrityError from sqlalchemy.exc import IntegrityError
@@ -58,8 +59,7 @@ class WatcherJobStore(sqlalchemy.SQLAlchemyJobStore):
super(WatcherJobStore, self).__init__(url, engine, tablename, super(WatcherJobStore, self).__init__(url, engine, tablename,
metadata, pickle_protocol) metadata, pickle_protocol)
metadata = maybe_ref(metadata) or MetaData() metadata = maybe_ref(metadata) or MetaData()
self.jobs_t = Table(tablename, metadata, autoload=True, self.jobs_t = Table(tablename, metadata, autoload_with=engine)
autoload_with=engine)
service_ident = service.ServiceHeartbeat.get_service_name() service_ident = service.ServiceHeartbeat.get_service_name()
self.tag = tag or {'host': service_ident[0], 'name': service_ident[1]} self.tag = tag or {'host': service_ident[0], 'name': service_ident[1]}
self.service_id = objects.Service.list(context=context.make_context(), self.service_id = objects.Service.list(context=context.make_context(),
@@ -79,7 +79,8 @@ class WatcherJobStore(sqlalchemy.SQLAlchemyJobStore):
'tag': jsonutils.dumps(self.tag) 'tag': jsonutils.dumps(self.tag)
}) })
try: try:
self.engine.execute(insert) with self.engine.begin() as conn:
conn.execute(insert)
except IntegrityError: except IntegrityError:
raise ConflictingIdError(job.id) raise ConflictingIdError(job.id)
@@ -88,20 +89,36 @@ class WatcherJobStore(sqlalchemy.SQLAlchemyJobStore):
self._fix_paused_jobs_sorting(jobs) self._fix_paused_jobs_sorting(jobs)
return jobs return jobs
def get_next_run_time(self):
selectable = select(self.jobs_t.c.next_run_time).\
where(self.jobs_t.c.next_run_time != null()).\
order_by(self.jobs_t.c.next_run_time).limit(1)
with self.engine.begin() as connection:
# NOTE(danms): The apscheduler implementation of this gets a
# decimal.Decimal back from scalar() which causes
# utc_timestamp_to_datetime() to choke since it is expecting a
# python float. Assume this is SQLAlchemy 2.0 stuff, so just
# coerce to a float here.
next_run_time = connection.execute(selectable).scalar()
return utc_timestamp_to_datetime(float(next_run_time)
if next_run_time is not None
else None)
def _get_jobs(self, *conditions): def _get_jobs(self, *conditions):
jobs = [] jobs = []
conditions += (self.jobs_t.c.service_id == self.service_id,) conditions += (self.jobs_t.c.service_id == self.service_id,)
selectable = select( selectable = select(
[self.jobs_t.c.id, self.jobs_t.c.job_state, self.jobs_t.c.tag] self.jobs_t.c.id, self.jobs_t.c.job_state, self.jobs_t.c.tag
).order_by(self.jobs_t.c.next_run_time).where(and_(*conditions)) ).order_by(self.jobs_t.c.next_run_time).where(and_(*conditions))
failed_job_ids = set() failed_job_ids = set()
for row in self.engine.execute(selectable): with self.engine.begin() as conn:
try: for row in conn.execute(selectable):
jobs.append(self._reconstitute_job(row.job_state)) try:
except Exception: jobs.append(self._reconstitute_job(row.job_state))
self._logger.exception( except Exception:
'Unable to restore job "%s" -- removing it', row.id) self._logger.exception(
failed_job_ids.add(row.id) 'Unable to restore job "%s" -- removing it', row.id)
failed_job_ids.add(row.id)
# Remove all the jobs we failed to restore # Remove all the jobs we failed to restore
if failed_job_ids: if failed_job_ids: