diff --git a/fs_attachment_s3_migration/README.rst b/fs_attachment_s3_migration/README.rst
new file mode 100644
index 0000000000..02b45f4c15
--- /dev/null
+++ b/fs_attachment_s3_migration/README.rst
@@ -0,0 +1,94 @@
+=======================
+Attachment S3 Migration
+=======================
+
+..
+   !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+   !! This file is generated by oca-gen-addon-readme !!
+   !! changes will be overwritten.                   !!
+   !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+   !! source digest: sha256:d960aff14147c786815a9ef2f0b78a6d07809a632d627986d30435e07b4c60da
+   !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+
+.. |badge1| image:: https://img.shields.io/badge/maturity-Beta-yellow.png
+    :target: https://odoo-community.org/page/development-status
+    :alt: Beta
+.. |badge2| image:: https://img.shields.io/badge/licence-AGPL--3-blue.png
+    :target: http://www.gnu.org/licenses/agpl-3.0-standalone.html
+    :alt: License: AGPL-3
+.. |badge3| image:: https://img.shields.io/badge/github-OCA%2Fstorage-lightgray.png?logo=github
+    :target: https://github.com/OCA/storage/tree/16.0/fs_attachment_s3_migration
+    :alt: OCA/storage
+.. |badge4| image:: https://img.shields.io/badge/weblate-Translate%20me-F47D42.png
+    :target: https://translation.odoo-community.org/projects/storage-16-0/storage-16-0-fs_attachment_s3_migration
+    :alt: Translate me on Weblate
+.. |badge5| image:: https://img.shields.io/badge/runboat-Try%20me-875A7B.png
+    :target: https://runboat.odoo-community.org/builds?repo=OCA/storage&target_branch=16.0
+    :alt: Try me on Runboat
+
+|badge1| |badge2| |badge3| |badge4| |badge5|
+
+This module lets users move existing ``ir.attachment`` files from the
+standard filestore or database into an Amazon S3-backed ``fs.storage``,
+using a wizard directly on the storage form.
+
+Migrations are run in background batches, skipping attachments that are
+already stored in S3 or that must remain in PostgreSQL. This allows the
+process to be run repeatedly without creating duplicates.
+
+**Table of contents**
+
+.. contents::
+   :local:
+
+Usage
+=====
+
+1. Open the target S3 ``fs.storage`` record and click *Move existing
+   attachments to S3* in the header.
+2. In the migration wizard, keep or adjust the storage code, batch size,
+   queue channel, and optional *Max Batches* value, then confirm to
+   enqueue jobs.
+
+Bug Tracker
+===========
+
+Bugs are tracked on `GitHub Issues <https://github.com/OCA/storage/issues>`_.
+In case of trouble, please check there if your issue has already been reported.
+If you spotted it first, help us to smash it by providing a detailed and welcomed
+`feedback <https://github.com/OCA/storage/issues/new?body=module:%20fs_attachment_s3_migration%0Aversion:%2016.0%0A%0A**Steps%20to%20reproduce**%0A-%20...%0A%0A**Current%20behavior**%0A%0A**Expected%20behavior**>`_.
+
+Do not contact contributors directly about support or help with technical issues.
+
+Credits
+=======
+
+Authors
+-------
+
+* Cetmix
+
+Contributors
+------------
+
+Cetmix (cetmix.com)
+
+- Ivan Sokolov
+- George Smirnov
+
+Maintainers
+-----------
+
+This module is maintained by the OCA.
+
+.. image:: https://odoo-community.org/logo.png
+   :alt: Odoo Community Association
+   :target: https://odoo-community.org
+
+OCA, or the Odoo Community Association, is a nonprofit organization whose
+mission is to support the collaborative development of Odoo features and
+promote its widespread use.
+
+This module is part of the `OCA/storage <https://github.com/OCA/storage/tree/16.0/fs_attachment_s3_migration>`_ project on GitHub.
+
+You are welcome to contribute. To learn how please visit https://odoo-community.org/page/Contribute.
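Reviewer note: the same migration the wizard triggers can be enqueued from an Odoo shell using the public method added below in `models/ir_attachment.py`. A minimal sketch, assuming an S3-backed `fs.storage` already configured by `fs_attachment_s3` with code `my_s3` (the code and numbers are illustrative):

```python
# Enqueue migration jobs from an Odoo shell (odoo shell -d <db>).
# "my_s3" is an assumed storage code; batch_size/channel mirror the defaults.
total = env["ir.attachment"].s3_enqueue_migration(
    "my_s3",
    batch_size=500,
    max_batches=None,  # None enqueues everything in one pass
    channel="root.s3_migration",
)
print(f"{total} attachments enqueued")
env.cr.commit()  # queue_job workers only pick up committed jobs
```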
diff --git a/fs_attachment_s3_migration/__init__.py b/fs_attachment_s3_migration/__init__.py
new file mode 100644
index 0000000000..9b4296142f
--- /dev/null
+++ b/fs_attachment_s3_migration/__init__.py
@@ -0,0 +1,2 @@
+from . import models
+from . import wizard
diff --git a/fs_attachment_s3_migration/__manifest__.py b/fs_attachment_s3_migration/__manifest__.py
new file mode 100644
index 0000000000..273b20fe23
--- /dev/null
+++ b/fs_attachment_s3_migration/__manifest__.py
@@ -0,0 +1,21 @@
+# Copyright 2025 Cetmix OU
+# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl).
+{
+    "name": "Attachment S3 Migration",
+    "summary": "Async migration of attachments from the local filestore to S3",
+    "version": "16.0.1.0.0",
+    "license": "AGPL-3",
+    "author": "Odoo Community Association (OCA), Cetmix",
+    "website": "https://github.com/OCA/storage",
+    "depends": [
+        "queue_job",
+        "fs_attachment_s3",
+    ],
+    "data": [
+        "security/ir.model.access.csv",
+        "data/queue_job_channel_data.xml",
+        "views/fs_storage_view.xml",
+        "views/migration_wizard_views.xml",
+    ],
+    "installable": True,
+}
diff --git a/fs_attachment_s3_migration/data/queue_job_channel_data.xml b/fs_attachment_s3_migration/data/queue_job_channel_data.xml
new file mode 100644
index 0000000000..634483371a
--- /dev/null
+++ b/fs_attachment_s3_migration/data/queue_job_channel_data.xml
@@ -0,0 +1,19 @@
+<?xml version="1.0" encoding="utf-8" ?>
+<!-- Copyright 2025 Cetmix OU
+     License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl). -->
+<odoo noupdate="1">
+    <record id="queue_channel_s3_migration" model="queue.job.channel">
+        <field name="name">s3_migration</field>
+        <field name="parent_id" ref="queue_job.channel_root" />
+    </record>
+</odoo>
diff --git a/fs_attachment_s3_migration/models/__init__.py b/fs_attachment_s3_migration/models/__init__.py
new file mode 100644
index 0000000000..fe02a6365b
--- /dev/null
+++ b/fs_attachment_s3_migration/models/__init__.py
@@ -0,0 +1,5 @@
+# Copyright 2025 Cetmix OU
+# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl).
+
+from . import ir_attachment
+from . import fs_storage
diff --git a/fs_attachment_s3_migration/models/fs_storage.py b/fs_attachment_s3_migration/models/fs_storage.py
new file mode 100644
index 0000000000..ace7c8d563
--- /dev/null
+++ b/fs_attachment_s3_migration/models/fs_storage.py
@@ -0,0 +1,38 @@
+# Copyright 2025 Cetmix OU
+# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl).
+
+from odoo import _, fields, models
+from odoo.exceptions import UserError
+
+
+class FsStorage(models.Model):
+    _inherit = "fs.storage"
+
+    migration_batch_size = fields.Integer(
+        default=500,
+        help="Number of attachments per background job batch.",
+    )
+    migration_channel = fields.Char(
+        string="Queue Channel",
+        default="root.s3_migration",
+        help="queue_job channel to use for migration jobs.",
+    )
+
+    def action_open_migration_wizard(self):
+        """Open the S3 migration wizard for this storage."""
+        self.ensure_one()
+        if not self.code:
+            raise UserError(_("Storage must have a code to run migration."))
+        return {
+            "type": "ir.actions.act_window",
+            "res_model": "s3.migration.wizard",
+            "view_mode": "form",
+            "target": "new",
+            "context": {
+                "default_storage_id": self.id,
+                "default_storage_code": self.code,
+                "default_batch_size": self.migration_batch_size,
+                "default_channel": self.migration_channel,
+            },
+        }
diff --git a/fs_attachment_s3_migration/models/ir_attachment.py b/fs_attachment_s3_migration/models/ir_attachment.py
new file mode 100644
index 0000000000..5a1c5c41ac
--- /dev/null
+++ b/fs_attachment_s3_migration/models/ir_attachment.py
@@ -0,0 +1,253 @@
+# Copyright 2025 Cetmix OU
+# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl).
+
+import logging
+
+from odoo import api, models
+from odoo.osv.expression import AND, OR
+
+_logger = logging.getLogger(__name__)
+
+# Required tautology for ir.attachment searches to include field-linked attachments.
+# Odoo's ir.attachment._search automatically adds ('res_field', '=', False) when the
+# domain doesn't contain 'res_field', which would exclude all field-linked attachments.
+# See: odoo/addons/base/models/ir_attachment.py _search method
+RES_FIELD_DOMAIN = ["|", ("res_field", "=", False), ("res_field", "!=", False)]
+
+
+class IrAttachment(models.Model):
+    _inherit = "ir.attachment"
+
+    @api.model
+    def _s3_migration_domain(self, storage_code):
+        """Build domain for attachments to migrate, excluding force-DB files.
+
+        Respects force_db_for_default_attachment_rules to keep assets and
+        small images in the database for performance.
+        """
+        base = [
+            ("store_fname", "not like", f"{storage_code}://%"),
+        ] + RES_FIELD_DOMAIN
+
+        force_db_domain = self._s3_get_force_db_domain(storage_code)
+        if force_db_domain:
+            return AND([base, ["!"] + force_db_domain])
+        return base
+
+    @api.model
+    def _s3_get_force_db_domain(self, storage_code):
+        """Get domain for attachments that must stay in DB for target storage.
+
+        Returns an Odoo domain combining MIME-type prefixes and optional
+        file_size limits. Each rule is normalized with AND() before OR'ing
+        with the accumulated domain to ensure correct grouping.
+        """
+        fs_storage = self.env["fs.storage"]
+        force_db_rules = fs_storage.get_force_db_for_default_attachment_rules(
+            storage_code
+        )
+        if not force_db_rules:
+            return []
+
+        domain = None
+        for mimetype_key, size_limit in force_db_rules.items():
+            rule_domain = [("mimetype", "=like", f"{mimetype_key}%")]
+            if size_limit:
+                rule_domain = AND([rule_domain, [("file_size", "<=", size_limit)]])
+            domain = OR([domain, rule_domain]) if domain else rule_domain
+
+        return domain or []
+
+    # ----------------------------------------------
+    # Migration helpers
+    # ----------------------------------------------
+    def _s3_resolve_migration_bytes(self, attachment, target_storage_code):
+        """Return non-empty base64 bytes for an attachment or None.
+
+        Prefer a donor already migrated to the target storage, then a donor
+        stored in DB. A donor is any attachment with the same checksum whose
+        ``datas`` can be read non-empty at this moment.
+        """
+        checksum = attachment.checksum
+        if not checksum:
+            return None
+
+        # 1) Donor already on the target storage (reads from S3)
+        domain_target = AND(
+            [
+                [
+                    ("checksum", "=", checksum),
+                    ("store_fname", "=like", f"{target_storage_code}://%"),
+                ],
+                RES_FIELD_DOMAIN,
+            ]
+        )
+        donor = self.search(domain_target, limit=1)
+        if donor:
+            donor_data = donor.with_context(prefetch_fields=False).datas
+            if donor_data:
+                return donor_data
+
+        # 2) Donor kept in DB (fast and independent from the filestore)
+        domain_db = AND(
+            [[("checksum", "=", checksum), ("db_datas", "!=", False)], RES_FIELD_DOMAIN]
+        )
+        donor = self.search(domain_db, limit=1)
+        if donor:
+            donor_data = donor.with_context(prefetch_fields=False).datas
+            # Only return if readable; otherwise fall through to the fallback
+            if donor_data:
+                return donor_data
+
+        # 3) Fallback: any readable same-checksum record (avoid mass prefetch)
+        domain_any = AND([[("checksum", "=", checksum)], RES_FIELD_DOMAIN])
+        candidates = self.with_context(prefetch_fields=False).search(
+            domain_any, limit=10
+        )
+        for candidate in candidates:
+            donor_data = candidate.with_context(prefetch_fields=False).datas
+            if donor_data:
+                return donor_data
+        return None
+
+    @api.model
+    def s3_enqueue_migration(
+        self,
+        storage_code,
+        batch_size=500,
+        max_batches=None,
+        channel="root.s3_migration",
+        max_retries=None,
+    ):
+        """Enqueue migration jobs for attachments using cursor pagination.
+
+        Returns the number of attachments enqueued for migration.
+        """
+        domain = self._s3_migration_domain(storage_code)
+        total_enqueued = 0
+        batches = 0
+        last_id = 0
+
+        fs_storage = self.env["fs.storage"]
+        force_db_config = fs_storage.get_force_db_for_default_attachment_rules(
+            storage_code
+        )
+        if force_db_config:
+            _logger.info(
+                "Migration will exclude force-DB files per storage rules: %s",
+                force_db_config,
+            )
+
+        _logger.info(
+            "Starting migration enqueue for storage %s (batch_size=%d, max_batches=%s)",
+            storage_code,
+            batch_size,
+            max_batches or "unlimited",
+        )
+
+        while True:
+            cursor_domain = domain + [("id", ">", last_id)]
+            ids = (
+                self.with_context(prefetch_fields=False)
+                .search(cursor_domain, limit=batch_size, order="id ASC")
+                .ids
+            )
+            if not ids:
+                break
+
+            rs = self.browse(ids)
+            rs.with_delay(channel=channel, max_retries=max_retries).s3_migrate_batch(
+                storage_code
+            )
+
+            total_enqueued += len(ids)
+            batches += 1
+            last_id = ids[-1]  # Move cursor to last processed ID
+
+            # Progress logging every 10 batches
+            if batches % 10 == 0:
+                _logger.info(
+                    "Migration enqueue progress: %d attachments in %d batches",
+                    total_enqueued,
+                    batches,
+                )
+
+            if max_batches and batches >= max_batches:
+                break
+
+        _logger.info(
+            "Completed migration enqueue: %d attachments in %d batches for storage %s",
+            total_enqueued,
+            batches,
+            storage_code,
+        )
+        return total_enqueued
+
+    def s3_migrate_batch(self, storage_code):
+        """Migrate a batch of attachments to the target storage."""
+        rs = self.with_context(prefetch_fields=False)
+        env = self.env
+        total = len(rs)
+        processed = 0
+        skipped = 0
+
+        _logger.info(
+            "Starting batch migration: %d attachments to storage %s",
+            total,
+            storage_code,
+        )
+
+        for attachment in rs:
+            env.clear()
+
+            try:
+                file_data = attachment.datas
+                mimetype = attachment.mimetype
+                name = attachment.name
+            except OSError as e:
+                # File missing (deleted by a parallel worker) or already migrated
+                _logger.debug(
+                    "Skipping attachment %s (id=%s): %s",
+                    attachment.name,
+                    attachment.id,
+                    str(e),
+                )
+                skipped += 1
+                processed += 1
+                continue
+
+            # Avoid empty writes if the source is temporarily unreadable
+            if attachment.file_size and not file_data:
+                resolved = self._s3_resolve_migration_bytes(attachment, storage_code)
+                if not resolved:
+                    _logger.warning(
+                        "Skipping migration for id=%s (checksum=%s): "
+                        "source bytes missing",
+                        attachment.id,
+                        attachment.checksum,
+                    )
+                    skipped += 1
+                    processed += 1
+                    continue
+                file_data = resolved
+
+            att = attachment.with_context(storage_location=storage_code)
+            att.write({"datas": file_data, "mimetype": mimetype, "name": name})
+
+            processed += 1
+            if processed % 50 == 0 or processed % max(1, total // 10) == 0:
+                _logger.info(
+                    "Migration batch progress: %d/%d (%.1f%%) - storage: %s",
+                    processed,
+                    total,
+                    (processed / total) * 100,
+                    storage_code,
+                )
+
+        _logger.info(
+            "Completed batch migration: %d/%d attachments to storage %s (%d skipped). "
+            "Old files will be cleaned by the garbage collector.",
+            processed - skipped,
+            total,
+            storage_code,
+            skipped,
+        )
+        return True
diff --git a/fs_attachment_s3_migration/readme/CONTRIBUTORS.md b/fs_attachment_s3_migration/readme/CONTRIBUTORS.md
new file mode 100644
index 0000000000..a81661ec9b
--- /dev/null
+++ b/fs_attachment_s3_migration/readme/CONTRIBUTORS.md
@@ -0,0 +1,5 @@
+Cetmix (cetmix.com)
+
+* Ivan Sokolov
+* George Smirnov
+
diff --git a/fs_attachment_s3_migration/readme/DESCRIPTION.md b/fs_attachment_s3_migration/readme/DESCRIPTION.md
new file mode 100644
index 0000000000..2b21de4187
--- /dev/null
+++ b/fs_attachment_s3_migration/readme/DESCRIPTION.md
@@ -0,0 +1,8 @@
+This module lets users move existing `ir.attachment` files from the
+standard filestore or database into an Amazon S3-backed `fs.storage`, using a
+wizard directly on the storage form.
+
+Migrations are run in background batches, skipping attachments that are already
+stored in S3 or that must remain in PostgreSQL. This allows the process to be
+run repeatedly without creating duplicates.
+
diff --git a/fs_attachment_s3_migration/readme/USAGE.md b/fs_attachment_s3_migration/readme/USAGE.md
new file mode 100644
index 0000000000..4b7a8d2a28
--- /dev/null
+++ b/fs_attachment_s3_migration/readme/USAGE.md
@@ -0,0 +1,5 @@
+1. Open the target S3 `fs.storage` record and click *Move existing attachments
+   to S3* in the header.
+2. In the migration wizard, keep or adjust the storage code, batch size, queue
+   channel, and optional *Max Batches* value, then confirm to enqueue jobs.
+
diff --git a/fs_attachment_s3_migration/security/ir.model.access.csv b/fs_attachment_s3_migration/security/ir.model.access.csv
new file mode 100644
index 0000000000..a0e9278c07
--- /dev/null
+++ b/fs_attachment_s3_migration/security/ir.model.access.csv
@@ -0,0 +1,2 @@
+id,name,model_id:id,group_id:id,perm_read,perm_write,perm_create,perm_unlink
+access_s3_migration_wizard,access.s3.migration.wizard,model_s3_migration_wizard,base.group_system,1,1,1,1
diff --git a/fs_attachment_s3_migration/static/description/index.html b/fs_attachment_s3_migration/static/description/index.html
new file mode 100644
index 0000000000..0864df9cdd
--- /dev/null
+++ b/fs_attachment_s3_migration/static/description/index.html
@@ -0,0 +1,441 @@
[... 441 lines of generated HTML omitted: oca-gen-addon-readme output whose text mirrors README.rst above ...]
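Reviewer note: because `_s3_migration_domain()` above is a plain helper, the pending workload can be previewed before enqueuing anything. A sketch from an Odoo shell, using the same illustrative storage code as before:

```python
# Count attachments the next enqueue pass would pick up for "my_s3"
# (storage code assumed). The domain already excludes already-migrated
# and force-DB attachments and carries the res_field tautology.
Attachment = env["ir.attachment"]
domain = Attachment._s3_migration_domain("my_s3")
print(Attachment.search_count(domain), "attachments left to migrate")
```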
diff --git a/fs_attachment_s3_migration/tests/__init__.py b/fs_attachment_s3_migration/tests/__init__.py
new file mode 100644
index 0000000000..d4b1afa8b2
--- /dev/null
+++ b/fs_attachment_s3_migration/tests/__init__.py
@@ -0,0 +1,4 @@
+# Copyright 2025 Cetmix OU
+# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl).
+
+from . import test_attachment_migration
diff --git a/fs_attachment_s3_migration/tests/test_attachment_migration.py b/fs_attachment_s3_migration/tests/test_attachment_migration.py
new file mode 100644
index 0000000000..5c0ff8dbdf
--- /dev/null
+++ b/fs_attachment_s3_migration/tests/test_attachment_migration.py
@@ -0,0 +1,93 @@
+# Copyright 2025 Cetmix OU
+# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl).
+"""Tests for S3 attachment migration functionality."""
+
+from unittest.mock import MagicMock, patch
+
+from odoo.tests.common import TransactionCase
+
+
+class TestAttachmentS3Migration(TransactionCase):
+    """Test suite for the fs_attachment_s3_migration module."""
+
+    @classmethod
+    def setUpClass(cls):
+        super().setUpClass()
+        cls.Attachment = cls.env["ir.attachment"]
+
+    def test_migration_domain_excludes_migrated_files(self):
+        """Test that migration domain correctly excludes already-migrated files."""
+        domain = self.Attachment._s3_migration_domain("test_s3")
+
+        # Should be a list with an exclusion pattern
+        self.assertIsInstance(domain, list)
+        # Should contain the storage code pattern
+        domain_str = str(domain)
+        self.assertIn("test_s3", domain_str)
+        self.assertIn("store_fname", domain_str)
+
+    def test_enqueue_migration_returns_count(self):
+        """Test that enqueue_migration returns correct attachment count."""
+        # Create test attachments (stored in DB, no file operations)
+        self.Attachment.create(
+            [
+                {
+                    "name": f"test{i}.txt",
+                    "raw": b"test content",
+                }
+                for i in range(5)
+            ]
+        )
+
+        # Mock DelayableRecordset to prevent actual queue_job creation
+        with patch("odoo.addons.queue_job.delay.DelayableRecordset") as mock_delayable:
+            mock_instance = MagicMock()
+            mock_delayable.return_value = mock_instance
+
+            total = self.Attachment.s3_enqueue_migration(
+                "test_s3",
+                batch_size=10,
+            )
+
+        # Should return count >= 5 (our attachments + any existing ones)
+        self.assertGreaterEqual(total, 5)
+
+    def test_enqueue_migration_respects_max_batches(self):
+        """Test that max_batches parameter limits the number of batches."""
+        # Create 30 attachments
+        self.Attachment.create(
+            [
+                {
+                    "name": f"batch_test{i}.txt",
+                    "raw": b"content",
+                }
+                for i in range(30)
+            ]
+        )
+
+        with patch("odoo.addons.queue_job.delay.DelayableRecordset") as mock_delayable:
+            mock_delayable.return_value = MagicMock()
+
+            # Limit to 2 batches of 10
+            total = self.Attachment.s3_enqueue_migration(
+                "test_s3",
+                batch_size=10,
+                max_batches=2,
+            )
+
+        # Should stop at 20 (2 batches × 10)
+        self.assertEqual(total, 20)
+
+    def test_migrate_batch_handles_empty_recordset(self):
+        """Test that an empty recordset doesn't crash."""
+        empty = self.Attachment.browse([])
+
+        # Should return True without error
+        result = empty.s3_migrate_batch("test_s3")
+        self.assertTrue(result)
+
+    def test_migrate_batch_method_exists(self):
+        """Test that s3_migrate_batch method is callable."""
+        # Just verify the method exists and is callable
+        self.assertTrue(hasattr(self.Attachment, "s3_migrate_batch"))
+        self.assertTrue(callable(self.Attachment.s3_migrate_batch))
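Reviewer note: the tests above mock `DelayableRecordset` directly; OCA queue_job also ships a `trap_jobs()` helper that could express the same assertion more idiomatically. A sketch, assuming the `trap_jobs` API from `odoo.addons.queue_job.tests.common` (available in recent queue_job versions; not part of this PR):

```python
from odoo.addons.queue_job.tests.common import trap_jobs

# Hypothetical variant of test_enqueue_migration_respects_max_batches,
# written as a test-method body (self.Attachment as in the class above).
with trap_jobs() as trap:
    self.Attachment.s3_enqueue_migration("test_s3", batch_size=10, max_batches=2)
    trap.assert_jobs_count(2)  # one job per enqueued batch
    # trap.perform_enqueued_jobs() would then run the batches synchronously
```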
diff --git a/fs_attachment_s3_migration/views/fs_storage_view.xml b/fs_attachment_s3_migration/views/fs_storage_view.xml
new file mode 100644
index 0000000000..d47ca3bb0f
--- /dev/null
+++ b/fs_attachment_s3_migration/views/fs_storage_view.xml
@@ -0,0 +1,27 @@
+<?xml version="1.0" encoding="utf-8" ?>
+<!-- Copyright 2025 Cetmix OU
+     License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl). -->
+<odoo>
+    <record id="fs_storage_view_form_inherit_s3_migration" model="ir.ui.view">
+        <field name="name">fs.storage.form.inherit.s3.migration</field>
+        <field name="model">fs.storage</field>
+        <field name="inherit_id" ref="fs_storage.fs_storage_view_form" />
+        <field name="arch" type="xml">
+            <sheet position="before">
+                <header>
+                    <button
+                        name="action_open_migration_wizard"
+                        type="object"
+                        string="Move existing attachments to S3"
+                        class="oe_highlight"
+                    />
+                </header>
+            </sheet>
+            <field name="code" position="after">
+                <field name="migration_batch_size" />
+                <field name="migration_channel" />
+            </field>
+        </field>
+    </record>
+</odoo>
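Reviewer note: the two settings exposed on this form map straight onto the `fs.storage` fields added in `models/fs_storage.py`, so they can also be set in code. A sketch (storage code and values assumed):

```python
# Tune per-storage migration defaults, then fetch the wizard action dict.
storage = env["fs.storage"].search([("code", "=", "my_s3")], limit=1)
storage.write(
    {
        "migration_batch_size": 1000,  # larger batches, fewer jobs
        "migration_channel": "root.s3_migration",
    }
)
action = storage.action_open_migration_wizard()
# action["context"] now carries the defaults the wizard form will show
```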
diff --git a/fs_attachment_s3_migration/views/migration_wizard_views.xml b/fs_attachment_s3_migration/views/migration_wizard_views.xml
new file mode 100644
index 0000000000..b6a18d9db3
--- /dev/null
+++ b/fs_attachment_s3_migration/views/migration_wizard_views.xml
@@ -0,0 +1,27 @@
+<?xml version="1.0" encoding="utf-8" ?>
+<!-- Copyright 2025 Cetmix OU
+     License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl). -->
+<odoo>
+    <record id="s3_migration_wizard_view_form" model="ir.ui.view">
+        <field name="name">s3.migration.wizard.form</field>
+        <field name="model">s3.migration.wizard</field>
+        <field name="arch" type="xml">
+            <form>
+                <group>
+                    <field name="storage_id" />
+                    <field name="storage_code" />
+                    <field name="batch_size" />
+                    <field name="channel" />
+                    <field name="max_batches" />
+                </group>
+                <footer>
+                    <button
+                        name="action_confirm"
+                        type="object"
+                        string="Enqueue Migration"
+                        class="btn-primary"
+                    />
+                    <button special="cancel" string="Cancel" class="btn-secondary" />
+                </footer>
+            </form>
+        </field>
+    </record>
+</odoo>
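Reviewer note: because this form only feeds the `s3.migration.wizard` model defined below, the wizard can also be exercised without the UI, e.g. from a server action or a migration script. A sketch (values assumed):

```python
# Create and confirm the wizard programmatically; the computed fields
# pull storage_code/batch_size/channel from the selected storage record.
wizard = env["s3.migration.wizard"].create(
    {
        "storage_id": storage.id,  # an S3-backed fs.storage record
        "max_batches": 5,          # enqueue at most 5 batches this run
    }
)
wizard.action_confirm()  # returns the display_notification action
```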
diff --git a/fs_attachment_s3_migration/wizard/__init__.py b/fs_attachment_s3_migration/wizard/__init__.py
new file mode 100644
index 0000000000..0395a5a24e
--- /dev/null
+++ b/fs_attachment_s3_migration/wizard/__init__.py
@@ -0,0 +1,4 @@
+# Copyright 2025 Cetmix OU
+# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl).
+
+from . import migration_wizard
diff --git a/fs_attachment_s3_migration/wizard/migration_wizard.py b/fs_attachment_s3_migration/wizard/migration_wizard.py
new file mode 100644
index 0000000000..94bcedfa93
--- /dev/null
+++ b/fs_attachment_s3_migration/wizard/migration_wizard.py
@@ -0,0 +1,94 @@
+# Copyright 2025 Cetmix OU
+# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl).
+
+from odoo import _, api, fields, models
+from odoo.exceptions import UserError
+
+MIGRATION_CHANNEL_XMLID = "fs_attachment_s3_migration.queue_channel_s3_migration"
+
+
+class S3MigrationWizard(models.TransientModel):
+    _name = "s3.migration.wizard"
+    _description = "Migrate existing attachments to S3"
+
+    storage_id = fields.Many2one(
+        "fs.storage",
+        string="Target Storage",
+        required=True,
+    )
+    storage_code = fields.Char(
+        compute="_compute_storage_code",
+        store=True,
+        readonly=False,
+        required=True,
+    )
+    batch_size = fields.Integer(
+        compute="_compute_batch_size",
+        store=True,
+        readonly=False,
+    )
+    channel = fields.Char(
+        string="Queue Channel",
+        compute="_compute_channel",
+        store=True,
+        readonly=False,
+    )
+    max_batches = fields.Integer(
+        string="Max Batches (per click)",
+        help="Limit the number of batches to enqueue now. Leave 0 for unlimited.",
+        default=0,
+    )
+
+    @api.model
+    def _get_default_channel(self):
+        """Get the default channel name from the XML record."""
+        channel = self.env.ref(MIGRATION_CHANNEL_XMLID, raise_if_not_found=False)
+        return channel.complete_name if channel else "root.s3_migration"
+
+    @api.depends("storage_id")
+    def _compute_storage_code(self):
+        for wizard in self:
+            wizard.storage_code = wizard.storage_id.code if wizard.storage_id else False
+
+    @api.depends("storage_id")
+    def _compute_batch_size(self):
+        for wizard in self:
+            if wizard.storage_id and wizard.storage_id.migration_batch_size:
+                wizard.batch_size = wizard.storage_id.migration_batch_size
+            elif not wizard.batch_size:
+                wizard.batch_size = 500
+
+    @api.depends("storage_id")
+    def _compute_channel(self):
+        for wizard in self:
+            if wizard.storage_id and wizard.storage_id.migration_channel:
+                wizard.channel = wizard.storage_id.migration_channel
+            elif not wizard.channel:
+                wizard.channel = wizard._get_default_channel()
+
+    def action_confirm(self):
+        self.ensure_one()
+        if not self.storage_code:
+            raise UserError(_("Storage code is required."))
+        max_batches = self.max_batches or None
+        total = self.env["ir.attachment"].s3_enqueue_migration(
+            self.storage_code,
+            batch_size=self.batch_size,
+            max_batches=max_batches,
+            channel=self.channel,
+        )
+        # A button handler must return a single action dict; close the dialog
+        # through the notification's "next" action instead of returning a list.
+        return {
+            "type": "ir.actions.client",
+            "tag": "display_notification",
+            "params": {
+                "title": _("Migration Enqueued"),
+                "message": _(
+                    "%(count)s attachments enqueued for migration.",
+                    count=total,
+                ),
+                "sticky": False,
+                "next": {"type": "ir.actions.act_window_close"},
+            },
+        }
diff --git a/setup/fs_attachment_s3_migration/odoo/addons/fs_attachment_s3_migration b/setup/fs_attachment_s3_migration/odoo/addons/fs_attachment_s3_migration
new file mode 120000
index 0000000000..2071cfe7b5
--- /dev/null
+++ b/setup/fs_attachment_s3_migration/odoo/addons/fs_attachment_s3_migration
@@ -0,0 +1 @@
+../../../../fs_attachment_s3_migration
\ No newline at end of file
diff --git a/setup/fs_attachment_s3_migration/setup.py b/setup/fs_attachment_s3_migration/setup.py
new file mode 100644
index 0000000000..28c57bb640
--- /dev/null
+++ b/setup/fs_attachment_s3_migration/setup.py
@@ -0,0 +1,6 @@
+import setuptools
+
+setuptools.setup(
+    setup_requires=['setuptools-odoo'],
+    odoo_addon=True,
+)
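Reviewer note: once workers have drained the channel, the result can be spot-checked from a shell; the `store_fname` prefix convention comes from `fs_attachment`. The migration channel can also be throttled in `odoo.conf` via queue_job's `channels` option, e.g. `channels = root:4,root.s3_migration:2`. A sketch (storage code assumed):

```python
# Count attachments now served from the S3 storage "my_s3".
migrated = env["ir.attachment"].search_count(
    [
        ("store_fname", "=like", "my_s3://%"),
        # include field-linked attachments (same tautology as the module)
        "|", ("res_field", "=", False), ("res_field", "!=", False),
    ]
)
print(migrated, "attachments stored in S3")
```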