diff --git a/dbzero/setup.py b/dbzero/setup.py index e37567b6..cd047733 100644 --- a/dbzero/setup.py +++ b/dbzero/setup.py @@ -10,7 +10,7 @@ setup( name='dbzero', - version='0.2.3', + version='0.2.4', description='DBZero community edition', packages=['dbzero'], python_requires='>=3.9', diff --git a/pyproject.toml b/pyproject.toml index 5288d19a..c37bb747 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,7 +4,7 @@ requires = ['meson-python'] [project] name = 'dbzero' -version = '0.2.3' +version = '0.2.4' description = 'A state management system for Python 3.x that unifies your applications business logic, data persistence, and caching into a single, efficient layer.' readme = 'README.md' requires-python = '>=3.9' diff --git a/python_tests/test_issues_15.py b/python_tests/test_issues_15.py index e691ac1a..c0554f0f 100644 --- a/python_tests/test_issues_15.py +++ b/python_tests/test_issues_15.py @@ -57,3 +57,9 @@ def test_find_inside_memo_init_after_tagging_self(db0_rw_fixture): """db0.find() inside a memo __init__ must not segfault after db0.tags(self).add().""" obj = TaggerAndFinder() assert obj._results == [] + +def test_find_memo_init_after_tagging_in_init(db0_rw_fixture): + """Tags added via db0.tags(self).add() during __init__ must be applied after postInit, so db0.find() sees every object tagged that way.""" + obj = TaggerAndFinder() + obj2 = TaggerAndFinder() + assert len(list(db0.find(TaggerAndFinder, "my-tag"))) == 2 diff --git a/src/dbzero/bindings/python/Memo.cpp b/src/dbzero/bindings/python/Memo.cpp index 351c81fa..866d377b 100644 --- a/src/dbzero/bindings/python/Memo.cpp +++ b/src/dbzero/bindings/python/Memo.cpp @@ -280,9 +280,9 @@ namespace db0::python // invoke post-init on associated dbzero object auto &object = self->modifyExt(); - db0::FixtureLock fixture(object.getFixture()); + db0::FixtureLock fixture(object.getFixture()); object.postInit(fixture); - + // need to call modifyExt again after postInit because the instance has just been created // and potentially needs to be included in the 
AtomicContext self->modifyExt(); @@ -290,13 +290,18 @@ namespace db0::python if (!class_ptr || !class_ptr->isNoCache()) { fixture->getLangCache().add(object.getAddress(), self); } - - // finally, unless opted-out, assign the type tag(s) of the entire type hierarchy - if (class_ptr && class_ptr->assignDefaultTags()) { + + { auto &tag_index = fixture->get(); - while (class_ptr) { - tag_index.addTag(self, class_ptr->getAddress(), true); - class_ptr = class_ptr->getBaseClassPtr(); + // Flush any user-tags that were deferred because db0.tags(self).add() was + // called during __init__ before postInit() assigned the dbzero address. + tag_index.applyDeferredTags(self); + // finally, unless opted-out, assign the type tag(s) of the entire type hierarchy + if (class_ptr && class_ptr->assignDefaultTags()) { + while (class_ptr) { + tag_index.addTag(self, class_ptr->getAddress(), true); + class_ptr = class_ptr->getBaseClassPtr(); + } } } } diff --git a/src/dbzero/object_model/tags/TagIndex.cpp b/src/dbzero/object_model/tags/TagIndex.cpp index 8aa67cf2..f4b07534 100644 --- a/src/dbzero/object_model/tags/TagIndex.cpp +++ b/src/dbzero/object_model/tags/TagIndex.cpp @@ -169,12 +169,30 @@ namespace db0::object_model } void TagIndex::addTags(ObjectPtr memo_ptr, ObjectPtr const *args, std::size_t nargs) - { + { using TypeId = db0::bindings::TypeId; if (nargs == 0) { return; } + // If the tagged object has not yet completed postInit() (i.e. we are inside its + // __init__ before postInit assigns a dbzero address), the normal placeholder + // mechanism would have the tags dropped by a flush() triggered by a db0.find() + // call in the same __init__. Instead, store the tag Python objects and apply + // them once postInit() has completed (see applyDeferredTags / Memo.cpp). 
+ { + auto &memo = LangToolkit::getTypeManager().extractAnyObject(memo_ptr); + if (!memo.hasInstance() && !memo.isDefunct()) { + auto &entry = m_deferred_user_tags[memo_ptr]; + for (std::size_t i = 0; i < nargs; ++i) { + entry.tags.emplace_back(args[i]); + } + m_deferred_pre_cache.emplace(memo_ptr, ObjectSharedExtPtr(memo_ptr)); + m_mutation_log->onDirty(); + return; + } + } + using IterableSequence = TagMakerSequence; ActiveValueT active_key = { UniqueAddress(), nullptr }; auto &batch_op_short = getBatchOperationShort(memo_ptr, active_key, false); @@ -302,6 +320,28 @@ namespace db0::object_model } } + void TagIndex::applyDeferredTags(ObjectPtr memo_ptr) + { + auto it = m_deferred_user_tags.find(memo_ptr); + if (it == m_deferred_user_tags.end()) { + return; + } + + auto &entry = it->second; + if (!entry.tags.empty()) { + std::vector tag_ptrs; + tag_ptrs.reserve(entry.tags.size()); + for (auto &tag : entry.tags) { + tag_ptrs.push_back(tag.get()); + } + // hasInstance() is now true (postInit has run), so addTags() takes the normal path. 
+ addTags(memo_ptr, tag_ptrs.data(), tag_ptrs.size()); + } + + m_deferred_user_tags.erase(it); + m_deferred_pre_cache.erase(memo_ptr); + } + void TagIndex::rollback() { // Reject any pending updates @@ -322,6 +362,8 @@ namespace db0::object_model m_object_cache.clear(); m_active_cache.clear(); m_active_pre_cache.clear(); + m_deferred_user_tags.clear(); + m_deferred_pre_cache.clear(); } void TagIndex::clear() @@ -346,6 +388,8 @@ namespace db0::object_model m_active_cache.clear(); m_active_pre_cache.clear(); m_inc_refed_tags.clear(); + m_deferred_user_tags.clear(); + m_deferred_pre_cache.clear(); } void TagIndex::tryTagIncRef(ShortTagT tag_addr) const diff --git a/src/dbzero/object_model/tags/TagIndex.hpp b/src/dbzero/object_model/tags/TagIndex.hpp index 6600b959..657688e6 100644 --- a/src/dbzero/object_model/tags/TagIndex.hpp +++ b/src/dbzero/object_model/tags/TagIndex.hpp @@ -112,6 +112,10 @@ DB0_PACKED_END // add a defunct object (failed on __init__) void addDefunct(ObjectPtr memo_ptr) const; + + // Apply user-tags that were deferred because the object was in __init__ (pre-postInit) + // when db0.tags(self).add() was first called. Must be called after postInit(). + void applyDeferredTags(ObjectPtr memo_ptr); void clear(); @@ -153,10 +157,17 @@ DB0_PACKED_END // and to handle callbacks from the full-text index // NOTE: cache must hold "shared external" references to the objects mutable std::unordered_map m_object_cache; - // A cache for incomplete objects (not yet fully initialized) + // A cache for incomplete objects (not yet fully initialized) mutable std::unordered_map m_active_cache; // Additional buffer to preserve / release ownership for active-cache objects mutable std::unordered_set m_active_pre_cache; + // User tags added via db0.tags(self).add() while self is still in __init__ (before + // postInit assigns a dbzero address). Stored as raw Python objects so they can be + // re-submitted via addTags() once postInit has completed. 
+ struct DeferredUserTagsEntry { std::vector tags; }; + mutable std::unordered_map m_deferred_user_tags; + // Extended Python references keeping pre-init objects alive until applyDeferredTags() + mutable std::unordered_map m_deferred_pre_cache; db0::weak_swine_ptr m_fixture; // the associated fixture UUID (for validation purposes) const std::uint64_t m_fixture_uuid;