diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index fc4242cbc3baf..15b84cc28513f 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -229,7 +229,7 @@ jobs: cache: false - name: Cache - uses: actions/cache@v4 + uses: actions/cache@v5 with: path: | ~/go/pkg/mod diff --git a/.github/workflows/build_publish_docker_image.yml b/.github/workflows/build_publish_docker_image.yml index 082281e35339a..261e9de488bb0 100644 --- a/.github/workflows/build_publish_docker_image.yml +++ b/.github/workflows/build_publish_docker_image.yml @@ -129,7 +129,7 @@ jobs: - name: Load Go Build Cache for Docker id: go-cache - uses: actions/cache@v4 + uses: actions/cache@v5 with: key: ${{ runner.os }}-${{ steps.imageos.outputs.result }}-go-${{ env.CACHE_NAME }}-${{ env.PLATFORM }}-${{ hashFiles('**/go.mod') }}-${{ hashFiles('**/go.sum') }} restore-keys: | @@ -183,7 +183,7 @@ jobs: touch "/tmp/digests/${digest#sha256:}" - name: Upload Image Digest - uses: actions/upload-artifact@v5 + uses: actions/upload-artifact@v6 with: name: digests-${{ env.PLATFORM }} path: /tmp/digests/* @@ -198,7 +198,7 @@ jobs: steps: - name: Download Image Digests - uses: actions/download-artifact@v6 + uses: actions/download-artifact@v7 with: path: /tmp/digests pattern: digests-* diff --git a/.golangci.yml b/.golangci.yml index ae9bbb24cccba..5f33d618329d5 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -17,6 +17,14 @@ linters: #- prealloc # TODO - revive - unconvert + exclusions: + rules: + - linters: + - revive + text: 'var-naming: avoid meaningless package names' + - linters: + - revive + text: 'var-naming: avoid package names that conflict with Go standard library package names' # Configure checks. Mostly using defaults but with some commented exceptions. settings: govet: @@ -136,6 +144,7 @@ linters: - name: var-naming disabled: false + formatters: enable: - goimports diff --git a/.tool-versions b/.tool-versions new file mode 100644 index 0000000000000..805bef4a12d6c --- /dev/null +++ b/.tool-versions @@ -0,0 +1,2 @@ +golang 1.23.0 +global golang 1.23.0 diff --git a/README.md b/README.md index bb133237aee6d..f1108bf962e4d 100644 --- a/README.md +++ b/README.md @@ -31,21 +31,25 @@ directories to and from different cloud storage providers. 
- Alibaba Cloud (Aliyun) Object Storage System (OSS) [:page_facing_up:](https://rclone.org/s3/#alibaba-oss) - Amazon S3 [:page_facing_up:](https://rclone.org/s3/) - ArvanCloud Object Storage (AOS) [:page_facing_up:](https://rclone.org/s3/#arvan-cloud-object-storage-aos) +- Bizfly Cloud Simple Storage [:page_facing_up:](https://rclone.org/s3/#bizflycloud) - Backblaze B2 [:page_facing_up:](https://rclone.org/b2/) - Box [:page_facing_up:](https://rclone.org/box/) - Ceph [:page_facing_up:](https://rclone.org/s3/#ceph) - China Mobile Ecloud Elastic Object Storage (EOS) [:page_facing_up:](https://rclone.org/s3/#china-mobile-ecloud-eos) -- Cloudflare R2 [:page_facing_up:](https://rclone.org/s3/#cloudflare-r2) - Citrix ShareFile [:page_facing_up:](https://rclone.org/sharefile/) +- Cloudflare R2 [:page_facing_up:](https://rclone.org/s3/#cloudflare-r2) +- Cloudinary [:page_facing_up:](https://rclone.org/cloudinary/) - Cubbit DS3 [:page_facing_up:](https://rclone.org/s3/#Cubbit) - DigitalOcean Spaces [:page_facing_up:](https://rclone.org/s3/#digitalocean-spaces) - Digi Storage [:page_facing_up:](https://rclone.org/koofr/#digi-storage) - Dreamhost [:page_facing_up:](https://rclone.org/s3/#dreamhost) +- Drime [:page_facing_up:](https://rclone.org/s3/#drime) - Dropbox [:page_facing_up:](https://rclone.org/dropbox/) - Enterprise File Fabric [:page_facing_up:](https://rclone.org/filefabric/) - Exaba [:page_facing_up:](https://rclone.org/s3/#exaba) - Fastmail Files [:page_facing_up:](https://rclone.org/webdav/#fastmail-files) - FileLu [:page_facing_up:](https://rclone.org/filelu/) +- Filen [:page_facing_up:](https://rclone.org/filen/) - Files.com [:page_facing_up:](https://rclone.org/filescom/) - FlashBlade [:page_facing_up:](https://rclone.org/s3/#pure-storage-flashblade) - FTP [:page_facing_up:](https://rclone.org/ftp/) @@ -113,6 +117,7 @@ directories to and from different cloud storage providers. 
- Selectel Object Storage [:page_facing_up:](https://rclone.org/s3/#selectel) - Servercore Object Storage [:page_facing_up:](https://rclone.org/s3/#servercore) - SFTP [:page_facing_up:](https://rclone.org/sftp/) +- Shade [:page_facing_up:](https://rclone.org/shade/) - SMB / CIFS [:page_facing_up:](https://rclone.org/smb/) - Spectra Logic [:page_facing_up:](https://rclone.org/s3/#spectralogic) - StackPath [:page_facing_up:](https://rclone.org/s3/#stackpath) diff --git a/backend/all/all.go b/backend/all/all.go index 8d3272497c4cb..a6c1d0f9a73a0 100644 --- a/backend/all/all.go +++ b/backend/all/all.go @@ -16,11 +16,13 @@ import ( _ "github.com/rclone/rclone/backend/compress" _ "github.com/rclone/rclone/backend/crypt" _ "github.com/rclone/rclone/backend/doi" + _ "github.com/rclone/rclone/backend/drime" _ "github.com/rclone/rclone/backend/drive" _ "github.com/rclone/rclone/backend/dropbox" _ "github.com/rclone/rclone/backend/fichier" _ "github.com/rclone/rclone/backend/filefabric" _ "github.com/rclone/rclone/backend/filelu" + _ "github.com/rclone/rclone/backend/filen" _ "github.com/rclone/rclone/backend/filescom" _ "github.com/rclone/rclone/backend/ftp" _ "github.com/rclone/rclone/backend/gofile" @@ -56,6 +58,7 @@ import ( _ "github.com/rclone/rclone/backend/s3" _ "github.com/rclone/rclone/backend/seafile" _ "github.com/rclone/rclone/backend/sftp" + _ "github.com/rclone/rclone/backend/shade" _ "github.com/rclone/rclone/backend/sharefile" _ "github.com/rclone/rclone/backend/sia" _ "github.com/rclone/rclone/backend/smb" @@ -64,7 +67,6 @@ import ( _ "github.com/rclone/rclone/backend/swift" _ "github.com/rclone/rclone/backend/ulozto" _ "github.com/rclone/rclone/backend/union" - _ "github.com/rclone/rclone/backend/uptobox" _ "github.com/rclone/rclone/backend/webdav" _ "github.com/rclone/rclone/backend/yandex" _ "github.com/rclone/rclone/backend/zoho" diff --git a/backend/azureblob/azureblob.go b/backend/azureblob/azureblob.go index 28df222061325..6b72fc11b97b3 100644 --- a/backend/azureblob/azureblob.go +++ b/backend/azureblob/azureblob.go @@ -86,12 +86,56 @@ var ( metadataMu sync.Mutex ) +// system metadata keys which this backend owns +var systemMetadataInfo = map[string]fs.MetadataHelp{ + "cache-control": { + Help: "Cache-Control header", + Type: "string", + Example: "no-cache", + }, + "content-disposition": { + Help: "Content-Disposition header", + Type: "string", + Example: "inline", + }, + "content-encoding": { + Help: "Content-Encoding header", + Type: "string", + Example: "gzip", + }, + "content-language": { + Help: "Content-Language header", + Type: "string", + Example: "en-US", + }, + "content-type": { + Help: "Content-Type header", + Type: "string", + Example: "text/plain", + }, + "tier": { + Help: "Tier of the object", + Type: "string", + Example: "Hot", + ReadOnly: true, + }, + "mtime": { + Help: "Time of last modification, read from rclone metadata", + Type: "RFC 3339", + Example: "2006-01-02T15:04:05.999999999Z07:00", + }, +} + // Register with Fs func init() { fs.Register(&fs.RegInfo{ Name: "azureblob", Description: "Microsoft Azure Blob Storage", NewFs: NewFs, + MetadataInfo: &fs.MetadataInfo{ + System: systemMetadataInfo, + Help: `User metadata is stored as x-ms-meta- keys. Azure metadata keys are case insensitive and are always returned in lower case.`, + }, Options: []fs.Option{{ Name: "account", Help: `Azure Storage Account Name. 
@@ -810,6 +854,9 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e f.features = (&fs.Features{ ReadMimeType: true, WriteMimeType: true, + ReadMetadata: true, + WriteMetadata: true, + UserMetadata: true, BucketBased: true, BucketBasedRootOK: true, SetTier: true, @@ -1157,6 +1204,289 @@ func (o *Object) updateMetadataWithModTime(modTime time.Time) { o.meta[modTimeKey] = modTime.Format(timeFormatOut) } +// parseXMsTags parses the value of the x-ms-tags header into a map. +// It expects comma-separated key=value pairs. Whitespace around keys and +// values is trimmed. Empty pairs and empty keys are rejected. +func parseXMsTags(s string) (map[string]string, error) { + if strings.TrimSpace(s) == "" { + return map[string]string{}, nil + } + out := make(map[string]string) + parts := strings.Split(s, ",") + for _, p := range parts { + p = strings.TrimSpace(p) + if p == "" { + continue + } + kv := strings.SplitN(p, "=", 2) + if len(kv) != 2 { + return nil, fmt.Errorf("invalid tag %q", p) + } + k := strings.TrimSpace(kv[0]) + v := strings.TrimSpace(kv[1]) + if k == "" { + return nil, fmt.Errorf("invalid tag key in %q", p) + } + out[k] = v + } + return out, nil +} + +// mapMetadataToAzure maps a generic metadata map to Azure HTTP headers, +// user metadata, tags and optional modTime override. +// Reserved x-ms-* keys (except x-ms-tags) are ignored for user metadata. +// +// Pass a logger to surface non-fatal parsing issues (e.g. bad mtime). +func mapMetadataToAzure(meta map[string]string, logf func(string, ...any)) (headers blob.HTTPHeaders, userMeta map[string]*string, tags map[string]string, modTime *time.Time, err error) { + if meta == nil { + return headers, nil, nil, nil, nil + } + tmp := make(map[string]string) + for k, v := range meta { + lowerKey := strings.ToLower(k) + switch lowerKey { + case "cache-control": + headers.BlobCacheControl = pString(v) + case "content-disposition": + headers.BlobContentDisposition = pString(v) + case "content-encoding": + headers.BlobContentEncoding = pString(v) + case "content-language": + headers.BlobContentLanguage = pString(v) + case "content-type": + headers.BlobContentType = pString(v) + case "x-ms-tags": + parsed, perr := parseXMsTags(v) + if perr != nil { + return headers, nil, nil, nil, perr + } + // allocate only if there are tags + if len(parsed) > 0 { + tags = parsed + } + case "mtime": + // Accept multiple layouts for tolerance + var parsed time.Time + var pErr error + for _, layout := range []string{time.RFC3339Nano, time.RFC3339, timeFormatOut} { + parsed, pErr = time.Parse(layout, v) + if pErr == nil { + modTime = &parsed + break + } + } + // Log and ignore if unparseable + if modTime == nil && logf != nil { + logf("metadata: couldn't parse mtime %q: %v", v, pErr) + } + case "tier": + // ignore - handled elsewhere + default: + // Filter out other reserved headers so they don't end up as user metadata + if strings.HasPrefix(lowerKey, "x-ms-") { + continue + } + tmp[lowerKey] = v + } + } + userMeta = toAzureMetaPtr(tmp) + return headers, userMeta, tags, modTime, nil +} + +// toAzureMetaPtr converts a map[string]string to map[string]*string as used by Azure SDK +func toAzureMetaPtr(in map[string]string) map[string]*string { + if len(in) == 0 { + return nil + } + out := make(map[string]*string, len(in)) + for k, v := range in { + vv := v + out[k] = &vv + } + return out +} + +// assembleCopyParams prepares headers, metadata and tags for copy operations. 
+//
+// It starts from the source properties, optionally overlays mapped metadata
+// from rclone's metadata options, ensures mtime presence when mapping is
+// enabled, and returns whether mapping was actually requested (hadMapping).
+//
+// If includeBaseMeta is true, start user metadata from the source's metadata
+// and overlay mapped values. This matches multipart copy commit behavior.
+// If false, only include mapped user metadata (no source baseline), which
+// matches the previous singlepart StartCopyFromURL semantics.
+func assembleCopyParams(ctx context.Context, f *Fs, src fs.Object, srcProps *blob.GetPropertiesResponse, includeBaseMeta bool) (headers blob.HTTPHeaders, meta map[string]*string, tags map[string]string, hadMapping bool, err error) {
+	// Start from source properties
+	headers = blob.HTTPHeaders{
+		BlobCacheControl:       srcProps.CacheControl,
+		BlobContentDisposition: srcProps.ContentDisposition,
+		BlobContentEncoding:    srcProps.ContentEncoding,
+		BlobContentLanguage:    srcProps.ContentLanguage,
+		BlobContentMD5:         srcProps.ContentMD5,
+		BlobContentType:        srcProps.ContentType,
+	}
+	// Optionally deep copy user metadata pointers from source. Normalise keys to
+	// lower-case to avoid duplicate x-ms-meta headers when we later inject/overlay
+	// metadata (Azure treats keys case-insensitively but Go's http.Header will
+	// join duplicate keys into a comma separated list, which breaks shared-key
+	// signing).
+	if includeBaseMeta && len(srcProps.Metadata) > 0 {
+		meta = make(map[string]*string, len(srcProps.Metadata))
+		for k, v := range srcProps.Metadata {
+			if v != nil {
+				vv := *v
+				meta[strings.ToLower(k)] = &vv
+			}
+		}
+	}
+
+	// Only consider mapping if metadata pipeline is enabled
+	if fs.GetConfig(ctx).Metadata {
+		mapped, mapErr := fs.GetMetadataOptions(ctx, f, src, fs.MetadataAsOpenOptions(ctx))
+		if mapErr != nil {
+			return headers, meta, nil, false, fmt.Errorf("failed to map metadata: %w", mapErr)
+		}
+		if mapped != nil {
+			// Map rclone metadata to Azure shapes
+			mappedHeaders, userMeta, mappedTags, mappedModTime, herr := mapMetadataToAzure(mapped, func(format string, args ...any) { fs.Debugf(f, format, args...)
}) + if herr != nil { + return headers, meta, nil, false, fmt.Errorf("metadata mapping: %w", herr) + } + hadMapping = true + // Overlay headers (only non-nil) + if mappedHeaders.BlobCacheControl != nil { + headers.BlobCacheControl = mappedHeaders.BlobCacheControl + } + if mappedHeaders.BlobContentDisposition != nil { + headers.BlobContentDisposition = mappedHeaders.BlobContentDisposition + } + if mappedHeaders.BlobContentEncoding != nil { + headers.BlobContentEncoding = mappedHeaders.BlobContentEncoding + } + if mappedHeaders.BlobContentLanguage != nil { + headers.BlobContentLanguage = mappedHeaders.BlobContentLanguage + } + if mappedHeaders.BlobContentType != nil { + headers.BlobContentType = mappedHeaders.BlobContentType + } + // Overlay user metadata + if len(userMeta) > 0 { + if meta == nil { + meta = make(map[string]*string, len(userMeta)) + } + for k, v := range userMeta { + meta[k] = v + } + } + // Apply tags if any + if len(mappedTags) > 0 { + tags = mappedTags + } + // Ensure mtime present using mapped or source time + if _, ok := meta[modTimeKey]; !ok { + when := src.ModTime(ctx) + if mappedModTime != nil { + when = *mappedModTime + } + val := when.Format(time.RFC3339Nano) + if meta == nil { + meta = make(map[string]*string, 1) + } + meta[modTimeKey] = &val + } + // Ensure content-type fallback to source if not set by mapper + if headers.BlobContentType == nil { + headers.BlobContentType = srcProps.ContentType + } + } else { + // Mapping enabled but not provided: ensure mtime present based on source ModTime + if _, ok := meta[modTimeKey]; !ok { + when := src.ModTime(ctx) + val := when.Format(time.RFC3339Nano) + if meta == nil { + meta = make(map[string]*string, 1) + } + meta[modTimeKey] = &val + } + } + } + + return headers, meta, tags, hadMapping, nil +} + +// applyMappedMetadata applies mapped metadata and headers to the object state for uploads. +// +// It reads `--metadata`, `--metadata-set`, and `--metadata-mapper` outputs via fs.GetMetadataOptions +// and updates o.meta, o.tags and ui.httpHeaders accordingly. +func (o *Object) applyMappedMetadata(ctx context.Context, src fs.ObjectInfo, ui *uploadInfo, options []fs.OpenOption) (modTime time.Time, err error) { + // Start from the source modtime; may be overridden by metadata + modTime = src.ModTime(ctx) + + // Fetch mapped metadata if --metadata is enabled + meta, err := fs.GetMetadataOptions(ctx, o.fs, src, options) + if err != nil { + return modTime, err + } + if meta == nil { + // No metadata processing requested + return modTime, nil + } + + // Map metadata using common helper + headers, userMeta, tags, mappedModTime, err := mapMetadataToAzure(meta, func(format string, args ...any) { fs.Debugf(o, format, args...) 
}) + if err != nil { + return modTime, err + } + // Merge headers into ui + if headers.BlobCacheControl != nil { + ui.httpHeaders.BlobCacheControl = headers.BlobCacheControl + } + if headers.BlobContentDisposition != nil { + ui.httpHeaders.BlobContentDisposition = headers.BlobContentDisposition + } + if headers.BlobContentEncoding != nil { + ui.httpHeaders.BlobContentEncoding = headers.BlobContentEncoding + } + if headers.BlobContentLanguage != nil { + ui.httpHeaders.BlobContentLanguage = headers.BlobContentLanguage + } + if headers.BlobContentType != nil { + ui.httpHeaders.BlobContentType = headers.BlobContentType + } + + // Apply user metadata to o.meta with a single critical section + if len(userMeta) > 0 { + metadataMu.Lock() + if o.meta == nil { + o.meta = make(map[string]string, len(userMeta)) + } + for k, v := range userMeta { + if v != nil { + o.meta[k] = *v + } + } + metadataMu.Unlock() + } + + // Apply tags + if len(tags) > 0 { + if o.tags == nil { + o.tags = make(map[string]string, len(tags)) + } + for k, v := range tags { + o.tags[k] = v + } + } + + if mappedModTime != nil { + modTime = *mappedModTime + } + + return modTime, nil +} + // Returns whether file is a directory marker or not func isDirectoryMarker(size int64, metadata map[string]*string, remote string) bool { // Directory markers are 0 length @@ -1951,18 +2281,19 @@ func (f *Fs) copyMultipart(ctx context.Context, remote, dstContainer, dstPath st return nil, err } - // Convert metadata from source object + // Prepare metadata/headers/tags for destination + // For multipart commit, include base metadata from source then overlay mapped + commitHeaders, commitMeta, commitTags, _, err := assembleCopyParams(ctx, f, src, srcProperties, true) + if err != nil { + return nil, fmt.Errorf("multipart copy: %w", err) + } + + // Convert metadata from source or mapper options := blockblob.CommitBlockListOptions{ - Metadata: srcProperties.Metadata, - Tier: parseTier(f.opt.AccessTier), - HTTPHeaders: &blob.HTTPHeaders{ - BlobCacheControl: srcProperties.CacheControl, - BlobContentDisposition: srcProperties.ContentDisposition, - BlobContentEncoding: srcProperties.ContentEncoding, - BlobContentLanguage: srcProperties.ContentLanguage, - BlobContentMD5: srcProperties.ContentMD5, - BlobContentType: srcProperties.ContentType, - }, + Metadata: commitMeta, + Tags: commitTags, + Tier: parseTier(f.opt.AccessTier), + HTTPHeaders: &commitHeaders, } // Finalise the upload session @@ -1993,10 +2324,36 @@ func (f *Fs) copySinglepart(ctx context.Context, remote, dstContainer, dstPath s return nil, fmt.Errorf("single part copy: source auth: %w", err) } - // Start the copy + // Prepare mapped metadata/tags/headers if requested options := blob.StartCopyFromURLOptions{ Tier: parseTier(f.opt.AccessTier), } + var postHeaders *blob.HTTPHeaders + // Read source properties and assemble params; this also handles the case when mapping is disabled + srcProps, err := src.readMetaDataAlways(ctx) + if err != nil { + return nil, fmt.Errorf("single part copy: read source properties: %w", err) + } + // For singlepart copy, do not include base metadata from source in StartCopyFromURL + headers, meta, tags, hadMapping, aerr := assembleCopyParams(ctx, f, src, srcProps, false) + if aerr != nil { + return nil, fmt.Errorf("single part copy: %w", aerr) + } + // Apply tags and post-copy headers only when mapping requested changes + if len(tags) > 0 { + options.BlobTags = make(map[string]string, len(tags)) + for k, v := range tags { + options.BlobTags[k] = v + } + } + if 
hadMapping { + // Only set metadata explicitly when mapping was requested; otherwise + // let the service copy source metadata (including mtime) automatically. + if len(meta) > 0 { + options.Metadata = meta + } + postHeaders = &headers + } var startCopy blob.StartCopyFromURLResponse err = f.pacer.Call(func() (bool, error) { startCopy, err = dstBlobSVC.StartCopyFromURL(ctx, srcURL, &options) @@ -2026,6 +2383,16 @@ func (f *Fs) copySinglepart(ctx context.Context, remote, dstContainer, dstPath s pollTime = min(2*pollTime, time.Second) } + // If mapper requested header changes, set them post-copy + if postHeaders != nil { + blb := f.getBlobSVC(dstContainer, dstPath) + _, setErr := blb.SetHTTPHeaders(ctx, *postHeaders, nil) + if setErr != nil { + return nil, fmt.Errorf("single part copy: failed to set headers: %w", setErr) + } + } + // Metadata (when requested) is set via StartCopyFromURL options.Metadata + return f.NewObject(ctx, remote) } @@ -2157,6 +2524,35 @@ func (o *Object) getMetadata() (metadata map[string]*string) { return metadata } +// Metadata returns metadata for an object +// +// It returns a combined view of system and user metadata. +func (o *Object) Metadata(ctx context.Context) (fs.Metadata, error) { + // Ensure metadata is loaded + if err := o.readMetaData(ctx); err != nil { + return nil, err + } + + m := fs.Metadata{} + + // System metadata we expose + if !o.modTime.IsZero() { + m["mtime"] = o.modTime.Format(time.RFC3339Nano) + } + if o.accessTier != "" { + m["tier"] = string(o.accessTier) + } + + // Merge user metadata (already lower-cased keys) + metadataMu.Lock() + for k, v := range o.meta { + m[k] = v + } + metadataMu.Unlock() + + return m, nil +} + // decodeMetaDataFromPropertiesResponse sets the metadata from the data passed in // // Sets @@ -2995,17 +3391,19 @@ func (o *Object) prepareUpload(ctx context.Context, src fs.ObjectInfo, options [ // containerPath = containerPath[:len(containerPath)-1] // } - // Update Mod time - o.updateMetadataWithModTime(src.ModTime(ctx)) - if err != nil { - return ui, err - } - - // Create the HTTP headers for the upload + // Start with default content-type based on source ui.httpHeaders = blob.HTTPHeaders{ BlobContentType: pString(fs.MimeType(ctx, src)), } + // Apply mapped metadata/headers/tags if requested + modTime, err := o.applyMappedMetadata(ctx, src, &ui, options) + if err != nil { + return ui, err + } + // Ensure mtime is set in metadata based on possibly overridden modTime + o.updateMetadataWithModTime(modTime) + // Compute the Content-MD5 of the file. 
As we stream all uploads it // will be set in PutBlockList API call using the 'x-ms-blob-content-md5' header if !o.fs.opt.DisableCheckSum { diff --git a/backend/azureblob/azureblob_internal_test.go b/backend/azureblob/azureblob_internal_test.go index 67669775dbd78..b2fc0f7d5f614 100644 --- a/backend/azureblob/azureblob_internal_test.go +++ b/backend/azureblob/azureblob_internal_test.go @@ -5,11 +5,16 @@ package azureblob import ( "context" "encoding/base64" + "fmt" + "net/http" "strings" "testing" + "time" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob" "github.com/rclone/rclone/fs" + "github.com/rclone/rclone/fs/object" "github.com/rclone/rclone/fstest" "github.com/rclone/rclone/fstest/fstests" "github.com/rclone/rclone/lib/random" @@ -148,4 +153,417 @@ func (f *Fs) testWriteUncommittedBlocks(t *testing.T) { func (f *Fs) InternalTest(t *testing.T) { t.Run("Features", f.testFeatures) t.Run("WriteUncommittedBlocks", f.testWriteUncommittedBlocks) + t.Run("Metadata", f.testMetadataPaths) +} + +// helper to read blob properties for an object +func getProps(ctx context.Context, t *testing.T, o fs.Object) *blob.GetPropertiesResponse { + ao := o.(*Object) + props, err := ao.readMetaDataAlways(ctx) + require.NoError(t, err) + return props +} + +// helper to assert select headers and user metadata +func assertHeadersAndMetadata(t *testing.T, props *blob.GetPropertiesResponse, want map[string]string, wantUserMeta map[string]string) { + // Headers + get := func(p *string) string { + if p == nil { + return "" + } + return *p + } + if v, ok := want["content-type"]; ok { + assert.Equal(t, v, get(props.ContentType), "content-type") + } + if v, ok := want["cache-control"]; ok { + assert.Equal(t, v, get(props.CacheControl), "cache-control") + } + if v, ok := want["content-disposition"]; ok { + assert.Equal(t, v, get(props.ContentDisposition), "content-disposition") + } + if v, ok := want["content-encoding"]; ok { + assert.Equal(t, v, get(props.ContentEncoding), "content-encoding") + } + if v, ok := want["content-language"]; ok { + assert.Equal(t, v, get(props.ContentLanguage), "content-language") + } + // User metadata (case-insensitive keys from service) + norm := make(map[string]*string, len(props.Metadata)) + for kk, vv := range props.Metadata { + norm[strings.ToLower(kk)] = vv + } + for k, v := range wantUserMeta { + pv, ok := norm[strings.ToLower(k)] + if assert.True(t, ok, fmt.Sprintf("missing user metadata key %q", k)) { + if pv == nil { + assert.Equal(t, v, "", k) + } else { + assert.Equal(t, v, *pv, k) + } + } else { + // Log available keys for diagnostics + keys := make([]string, 0, len(props.Metadata)) + for kk := range props.Metadata { + keys = append(keys, kk) + } + t.Logf("available user metadata keys: %v", keys) + } + } +} + +// helper to read blob tags for an object +func getTagsMap(ctx context.Context, t *testing.T, o fs.Object) map[string]string { + ao := o.(*Object) + blb := ao.getBlobSVC() + resp, err := blb.GetTags(ctx, nil) + require.NoError(t, err) + out := make(map[string]string) + for _, tag := range resp.BlobTagSet { + if tag.Key != nil { + k := *tag.Key + v := "" + if tag.Value != nil { + v = *tag.Value + } + out[k] = v + } + } + return out +} + +// Test metadata across different write paths +func (f *Fs) testMetadataPaths(t *testing.T) { + ctx := context.Background() + if testing.Short() { + t.Skip("skipping in short mode") + } + + // Common expected metadata and headers + baseMeta := fs.Metadata{ + 
"cache-control": "no-cache", + "content-disposition": "inline", + "content-language": "en-US", + // Note: Don't set content-encoding here to avoid download decoding differences + // We will set a custom user metadata key + "potato": "royal", + // and modtime + "mtime": fstest.Time("2009-05-06T04:05:06.499999999Z").Format(time.RFC3339Nano), + } + + // Singlepart upload + t.Run("PutSinglepart", func(t *testing.T) { + // size less than chunk size + contents := random.String(int(f.opt.ChunkSize / 2)) + item := fstest.NewItem("meta-single.txt", contents, fstest.Time("2001-05-06T04:05:06.499999999Z")) + // override content-type via metadata mapping + meta := fs.Metadata{} + meta.Merge(baseMeta) + meta["content-type"] = "text/plain" + obj := fstests.PutTestContentsMetadata(ctx, t, f, &item, true, contents, true, "text/html", meta) + defer func() { _ = obj.Remove(ctx) }() + + props := getProps(ctx, t, obj) + assertHeadersAndMetadata(t, props, map[string]string{ + "content-type": "text/plain", + "cache-control": "no-cache", + "content-disposition": "inline", + "content-language": "en-US", + }, map[string]string{ + "potato": "royal", + }) + _ = http.StatusOK // keep import for parity but don't inspect RawResponse + }) + + // Multipart upload + t.Run("PutMultipart", func(t *testing.T) { + // size greater than chunk size to force multipart + contents := random.String(int(f.opt.ChunkSize + 1024)) + item := fstest.NewItem("meta-multipart.txt", contents, fstest.Time("2001-05-06T04:05:06.499999999Z")) + meta := fs.Metadata{} + meta.Merge(baseMeta) + meta["content-type"] = "application/json" + obj := fstests.PutTestContentsMetadata(ctx, t, f, &item, true, contents, true, "text/html", meta) + defer func() { _ = obj.Remove(ctx) }() + + props := getProps(ctx, t, obj) + assertHeadersAndMetadata(t, props, map[string]string{ + "content-type": "application/json", + "cache-control": "no-cache", + "content-disposition": "inline", + "content-language": "en-US", + }, map[string]string{ + "potato": "royal", + }) + + // Tags: Singlepart upload + t.Run("PutSinglepartTags", func(t *testing.T) { + contents := random.String(int(f.opt.ChunkSize / 2)) + item := fstest.NewItem("tags-single.txt", contents, fstest.Time("2001-05-06T04:05:06.499999999Z")) + meta := fs.Metadata{ + "x-ms-tags": "env=dev,team=sync", + } + obj := fstests.PutTestContentsMetadata(ctx, t, f, &item, true, contents, true, "text/plain", meta) + defer func() { _ = obj.Remove(ctx) }() + + tags := getTagsMap(ctx, t, obj) + assert.Equal(t, "dev", tags["env"]) + assert.Equal(t, "sync", tags["team"]) + }) + + // Tags: Multipart upload + t.Run("PutMultipartTags", func(t *testing.T) { + contents := random.String(int(f.opt.ChunkSize + 2048)) + item := fstest.NewItem("tags-multipart.txt", contents, fstest.Time("2001-05-06T04:05:06.499999999Z")) + meta := fs.Metadata{ + "x-ms-tags": "project=alpha,release=2025-08", + } + obj := fstests.PutTestContentsMetadata(ctx, t, f, &item, true, contents, true, "application/octet-stream", meta) + defer func() { _ = obj.Remove(ctx) }() + + tags := getTagsMap(ctx, t, obj) + assert.Equal(t, "alpha", tags["project"]) + assert.Equal(t, "2025-08", tags["release"]) + }) + }) + + // Singlepart copy with metadata-set mapping; omit content-type to exercise fallback + t.Run("CopySinglepart", func(t *testing.T) { + // create small source + contents := random.String(int(f.opt.ChunkSize / 2)) + srcItem := fstest.NewItem("meta-copy-single-src.txt", contents, fstest.Time("2001-05-06T04:05:06.499999999Z")) + srcObj := 
fstests.PutTestContentsMetadata(ctx, t, f, &srcItem, true, contents, true, "text/plain", nil) + defer func() { _ = srcObj.Remove(ctx) }() + + // set mapping via MetadataSet + ctx2, ci := fs.AddConfig(ctx) + ci.Metadata = true + ci.MetadataSet = fs.Metadata{ + "cache-control": "private, max-age=60", + "content-disposition": "attachment; filename=foo.txt", + "content-language": "fr", + // no content-type: should fallback to source + "potato": "maris", + } + + // do copy + dstName := "meta-copy-single-dst.txt" + dst, err := f.Copy(ctx2, srcObj, dstName) + require.NoError(t, err) + defer func() { _ = dst.Remove(ctx2) }() + + props := getProps(ctx2, t, dst) + // content-type should fallback to source (text/plain) + assertHeadersAndMetadata(t, props, map[string]string{ + "content-type": "text/plain", + "cache-control": "private, max-age=60", + "content-disposition": "attachment; filename=foo.txt", + "content-language": "fr", + }, map[string]string{ + "potato": "maris", + }) + // mtime should be populated on copy when --metadata is used + // and should equal the source ModTime (RFC3339Nano) + // Read user metadata (case-insensitive) + m := props.Metadata + var gotMtime string + for k, v := range m { + if strings.EqualFold(k, "mtime") && v != nil { + gotMtime = *v + break + } + } + if assert.NotEmpty(t, gotMtime, "mtime not set on destination metadata") { + // parse and compare times ignoring formatting differences + parsed, err := time.Parse(time.RFC3339Nano, gotMtime) + require.NoError(t, err) + assert.True(t, srcObj.ModTime(ctx2).Equal(parsed), "dst mtime should equal src ModTime") + } + }) + + // CopySinglepart with only --metadata (no MetadataSet) must inject mtime and preserve src content-type + t.Run("CopySinglepart_MetadataOnly", func(t *testing.T) { + contents := random.String(int(f.opt.ChunkSize / 2)) + srcItem := fstest.NewItem("meta-copy-single-only-src.txt", contents, fstest.Time("2001-05-06T04:05:06.499999999Z")) + srcObj := fstests.PutTestContentsMetadata(ctx, t, f, &srcItem, true, contents, true, "text/plain", nil) + defer func() { _ = srcObj.Remove(ctx) }() + + ctx2, ci := fs.AddConfig(ctx) + ci.Metadata = true + + dstName := "meta-copy-single-only-dst.txt" + dst, err := f.Copy(ctx2, srcObj, dstName) + require.NoError(t, err) + defer func() { _ = dst.Remove(ctx2) }() + + props := getProps(ctx2, t, dst) + assertHeadersAndMetadata(t, props, map[string]string{ + "content-type": "text/plain", + }, map[string]string{}) + // Assert mtime injected + m := props.Metadata + var gotMtime string + for k, v := range m { + if strings.EqualFold(k, "mtime") && v != nil { + gotMtime = *v + break + } + } + if assert.NotEmpty(t, gotMtime, "mtime not set on destination metadata") { + parsed, err := time.Parse(time.RFC3339Nano, gotMtime) + require.NoError(t, err) + assert.True(t, srcObj.ModTime(ctx2).Equal(parsed), "dst mtime should equal src ModTime") + } + }) + + // Multipart copy with metadata-set mapping; omit content-type to exercise fallback + t.Run("CopyMultipart", func(t *testing.T) { + // create large source to force multipart + contents := random.String(int(f.opt.CopyCutoff + 1024)) + srcItem := fstest.NewItem("meta-copy-multi-src.txt", contents, fstest.Time("2001-05-06T04:05:06.499999999Z")) + srcObj := fstests.PutTestContentsMetadata(ctx, t, f, &srcItem, true, contents, true, "application/octet-stream", nil) + defer func() { _ = srcObj.Remove(ctx) }() + + // set mapping via MetadataSet + ctx2, ci := fs.AddConfig(ctx) + ci.Metadata = true + ci.MetadataSet = fs.Metadata{ + "cache-control": 
"max-age=0, no-cache", + // omit content-type to trigger fallback + "content-language": "de", + "potato": "desiree", + } + + dstName := "meta-copy-multi-dst.txt" + dst, err := f.Copy(ctx2, srcObj, dstName) + require.NoError(t, err) + defer func() { _ = dst.Remove(ctx2) }() + + props := getProps(ctx2, t, dst) + // content-type should fallback to source (application/octet-stream) + assertHeadersAndMetadata(t, props, map[string]string{ + "content-type": "application/octet-stream", + "cache-control": "max-age=0, no-cache", + "content-language": "de", + }, map[string]string{ + "potato": "desiree", + }) + // mtime should be populated on copy when --metadata is used + m := props.Metadata + var gotMtime string + for k, v := range m { + if strings.EqualFold(k, "mtime") && v != nil { + gotMtime = *v + break + } + } + if assert.NotEmpty(t, gotMtime, "mtime not set on destination metadata") { + parsed, err := time.Parse(time.RFC3339Nano, gotMtime) + require.NoError(t, err) + assert.True(t, srcObj.ModTime(ctx2).Equal(parsed), "dst mtime should equal src ModTime") + } + }) + + // CopyMultipart with only --metadata must inject mtime and preserve src content-type + t.Run("CopyMultipart_MetadataOnly", func(t *testing.T) { + contents := random.String(int(f.opt.CopyCutoff + 2048)) + srcItem := fstest.NewItem("meta-copy-multi-only-src.txt", contents, fstest.Time("2001-05-06T04:05:06.499999999Z")) + srcObj := fstests.PutTestContentsMetadata(ctx, t, f, &srcItem, true, contents, true, "application/octet-stream", nil) + defer func() { _ = srcObj.Remove(ctx) }() + + ctx2, ci := fs.AddConfig(ctx) + ci.Metadata = true + + dstName := "meta-copy-multi-only-dst.txt" + dst, err := f.Copy(ctx2, srcObj, dstName) + require.NoError(t, err) + defer func() { _ = dst.Remove(ctx2) }() + + props := getProps(ctx2, t, dst) + assertHeadersAndMetadata(t, props, map[string]string{ + "content-type": "application/octet-stream", + }, map[string]string{}) + m := props.Metadata + var gotMtime string + for k, v := range m { + if strings.EqualFold(k, "mtime") && v != nil { + gotMtime = *v + break + } + } + if assert.NotEmpty(t, gotMtime, "mtime not set on destination metadata") { + parsed, err := time.Parse(time.RFC3339Nano, gotMtime) + require.NoError(t, err) + assert.True(t, srcObj.ModTime(ctx2).Equal(parsed), "dst mtime should equal src ModTime") + } + }) + + // Tags: Singlepart copy + t.Run("CopySinglepartTags", func(t *testing.T) { + // create small source + contents := random.String(int(f.opt.ChunkSize / 2)) + srcItem := fstest.NewItem("tags-copy-single-src.txt", contents, fstest.Time("2001-05-06T04:05:06.499999999Z")) + srcObj := fstests.PutTestContentsMetadata(ctx, t, f, &srcItem, true, contents, true, "text/plain", nil) + defer func() { _ = srcObj.Remove(ctx) }() + + // set mapping via MetadataSet including tags + ctx2, ci := fs.AddConfig(ctx) + ci.Metadata = true + ci.MetadataSet = fs.Metadata{ + "x-ms-tags": "copy=single,mode=test", + } + + dstName := "tags-copy-single-dst.txt" + dst, err := f.Copy(ctx2, srcObj, dstName) + require.NoError(t, err) + defer func() { _ = dst.Remove(ctx2) }() + + tags := getTagsMap(ctx2, t, dst) + assert.Equal(t, "single", tags["copy"]) + assert.Equal(t, "test", tags["mode"]) + }) + + // Tags: Multipart copy + t.Run("CopyMultipartTags", func(t *testing.T) { + // create large source to force multipart + contents := random.String(int(f.opt.CopyCutoff + 4096)) + srcItem := fstest.NewItem("tags-copy-multi-src.txt", contents, fstest.Time("2001-05-06T04:05:06.499999999Z")) + srcObj := 
fstests.PutTestContentsMetadata(ctx, t, f, &srcItem, true, contents, true, "application/octet-stream", nil) + defer func() { _ = srcObj.Remove(ctx) }() + + ctx2, ci := fs.AddConfig(ctx) + ci.Metadata = true + ci.MetadataSet = fs.Metadata{ + "x-ms-tags": "copy=multi,mode=test", + } + + dstName := "tags-copy-multi-dst.txt" + dst, err := f.Copy(ctx2, srcObj, dstName) + require.NoError(t, err) + defer func() { _ = dst.Remove(ctx2) }() + + tags := getTagsMap(ctx2, t, dst) + assert.Equal(t, "multi", tags["copy"]) + assert.Equal(t, "test", tags["mode"]) + }) + + // Negative: invalid x-ms-tags must error + t.Run("InvalidXMsTags", func(t *testing.T) { + contents := random.String(32) + item := fstest.NewItem("tags-invalid.txt", contents, fstest.Time("2001-05-06T04:05:06.499999999Z")) + // construct ObjectInfo with invalid x-ms-tags + buf := strings.NewReader(contents) + // Build obj info with metadata + meta := fs.Metadata{ + "x-ms-tags": "badpair-without-equals", + } + // force metadata on + ctx2, ci := fs.AddConfig(ctx) + ci.Metadata = true + obji := object.NewStaticObjectInfo(item.Path, item.ModTime, int64(len(contents)), true, nil, nil) + obji = obji.WithMetadata(meta).WithMimeType("text/plain") + _, err := f.Put(ctx2, buf, obji) + require.Error(t, err) + assert.Contains(t, err.Error(), "invalid tag") + }) } diff --git a/backend/b2/b2.go b/backend/b2/b2.go index a21d63dacb296..22c5e079a8484 100644 --- a/backend/b2/b2.go +++ b/backend/b2/b2.go @@ -1081,21 +1081,10 @@ type listBucketFn func(*api.Bucket) error func (f *Fs) listBucketsToFn(ctx context.Context, bucketName string, fn listBucketFn) error { responses := make([]api.ListBucketsResponse, len(f.info.APIs.Storage.Allowed.Buckets))[:0] - for i := range f.info.APIs.Storage.Allowed.Buckets { - b := &f.info.APIs.Storage.Allowed.Buckets[i] - // Empty names indicate a bucket that no longer exists, this is non-fatal - // for multi-bucket API keys. - if b.Name == "" { - continue - } - // When requesting a specific bucket skip over non-matching names - if bucketName != "" && b.Name != bucketName { - continue - } - + call := func(id string) error { var account = api.ListBucketsRequest{ AccountID: f.info.AccountID, - BucketID: b.ID, + BucketID: id, } if bucketName != "" && account.BucketID == "" { account.BucketName = f.opt.Enc.FromStandardName(bucketName) @@ -1114,6 +1103,32 @@ func (f *Fs) listBucketsToFn(ctx context.Context, bucketName string, fn listBuck return err } responses = append(responses, response) + return nil + } + + for i := range f.info.APIs.Storage.Allowed.Buckets { + b := &f.info.APIs.Storage.Allowed.Buckets[i] + // Empty names indicate a bucket that no longer exists, this is non-fatal + // for multi-bucket API keys. + if b.Name == "" { + continue + } + // When requesting a specific bucket skip over non-matching names + if bucketName != "" && b.Name != bucketName { + continue + } + + err := call(b.ID) + if err != nil { + return err + } + } + + if len(f.info.APIs.Storage.Allowed.Buckets) == 0 { + err := call("") + if err != nil { + return err + } } f.bucketIDMutex.Lock() diff --git a/backend/doi/doi.go b/backend/doi/doi.go index 2811df559a8b6..3bb898381be1d 100644 --- a/backend/doi/doi.go +++ b/backend/doi/doi.go @@ -77,7 +77,7 @@ The DOI provider can be set when rclone does not automatically recognize a suppo Name: "doi_resolver_api_url", Help: `The URL of the DOI resolver API to use. -The DOI resolver can be set for testing or for cases when the the canonical DOI resolver API cannot be used. 
+The DOI resolver can be set for testing or for cases when the canonical DOI resolver API cannot be used. Defaults to "https://doi.org/api".`, Required: false, diff --git a/backend/drime/api/types.go b/backend/drime/api/types.go new file mode 100644 index 0000000000000..0a1a8bab27176 --- /dev/null +++ b/backend/drime/api/types.go @@ -0,0 +1,237 @@ +// Package api has type definitions for drime +// +// Converted from the API docs with help from https://mholt.github.io/json-to-go/ +package api + +import ( + "encoding/json" + "fmt" + "time" +) + +// Types of things in Item +const ( + ItemTypeFolder = "folder" +) + +// User information +type User struct { + Email string `json:"email"` + ID json.Number `json:"id"` + Avatar string `json:"avatar"` + ModelType string `json:"model_type"` + OwnsEntry bool `json:"owns_entry"` + EntryPermissions []any `json:"entry_permissions"` + DisplayName string `json:"display_name"` +} + +// Permissions for a file +type Permissions struct { + FilesUpdate bool `json:"files.update"` + FilesCreate bool `json:"files.create"` + FilesDownload bool `json:"files.download"` + FilesDelete bool `json:"files.delete"` +} + +// Item describes a folder or a file as returned by /drive/file-entries +type Item struct { + ID json.Number `json:"id"` + Name string `json:"name"` + Description any `json:"description"` + FileName string `json:"file_name"` + Mime string `json:"mime"` + Color any `json:"color"` + Backup bool `json:"backup"` + Tracked int `json:"tracked"` + FileSize int64 `json:"file_size"` + UserID json.Number `json:"user_id"` + ParentID json.Number `json:"parent_id"` + CreatedAt time.Time `json:"created_at"` + UpdatedAt time.Time `json:"updated_at"` + DeletedAt any `json:"deleted_at"` + IsDeleted int `json:"is_deleted"` + Path string `json:"path"` + DiskPrefix any `json:"disk_prefix"` + Type string `json:"type"` + Extension any `json:"extension"` + FileHash any `json:"file_hash"` + Public bool `json:"public"` + Thumbnail bool `json:"thumbnail"` + MuxStatus any `json:"mux_status"` + ThumbnailURL any `json:"thumbnail_url"` + WorkspaceID int `json:"workspace_id"` + IsEncrypted int `json:"is_encrypted"` + Iv any `json:"iv"` + VaultID any `json:"vault_id"` + OwnerID int `json:"owner_id"` + Hash string `json:"hash"` + URL string `json:"url"` + Users []User `json:"users"` + Tags []any `json:"tags"` + Permissions Permissions `json:"permissions"` +} + +// Listing response +type Listing struct { + CurrentPage int `json:"current_page"` + Data []Item `json:"data"` + From int `json:"from"` + LastPage int `json:"last_page"` + NextPage int `json:"next_page"` + PerPage int `json:"per_page"` + PrevPage int `json:"prev_page"` + To int `json:"to"` + Total int `json:"total"` +} + +// UploadResponse for a file +type UploadResponse struct { + Status string `json:"status"` + FileEntry Item `json:"fileEntry"` +} + +// CreateFolderRequest for a folder +type CreateFolderRequest struct { + Name string `json:"name"` + ParentID json.Number `json:"parentId,omitempty"` +} + +// CreateFolderResponse for a folder +type CreateFolderResponse struct { + Status string `json:"status"` + Folder Item `json:"folder"` +} + +// Error is returned from drime when things go wrong +type Error struct { + Message string `json:"message"` +} + +// Error returns a string for the error and satisfies the error interface +func (e Error) Error() string { + out := fmt.Sprintf("Error %q", e.Message) + return out +} + +// Check Error satisfies the error interface +var _ error = (*Error)(nil) + +// DeleteRequest is the input to 
DELETE /file-entries
+type DeleteRequest struct {
+	EntryIDs      []string `json:"entryIds"`
+	DeleteForever bool     `json:"deleteForever"`
+}
+
+// DeleteResponse is returned by DELETE /file-entries
+type DeleteResponse struct {
+	Status  string            `json:"status"`
+	Message string            `json:"message"`
+	Errors  map[string]string `json:"errors"`
+}
+
+// UpdateItemRequest describes the updates to be done to an item for PUT /file-entries/{id}/
+type UpdateItemRequest struct {
+	Name        string `json:"name,omitempty"`
+	Description string `json:"description,omitempty"`
+}
+
+// UpdateItemResponse is returned by PUT /file-entries/{id}/
+type UpdateItemResponse struct {
+	Status    string `json:"status"`
+	FileEntry Item   `json:"fileEntry"`
+}
+
+// MoveRequest is the input to /file-entries/move
+type MoveRequest struct {
+	EntryIDs      []string `json:"entryIds"`
+	DestinationID string   `json:"destinationId"`
+}
+
+// MoveResponse is returned by POST /file-entries/move
+type MoveResponse struct {
+	Status  string `json:"status"`
+	Entries []Item `json:"entries"`
+}
+
+// CopyRequest is the input to /file-entries/duplicate
+type CopyRequest struct {
+	EntryIDs      []string `json:"entryIds"`
+	DestinationID string   `json:"destinationId"`
+}
+
+// CopyResponse is returned by POST /file-entries/duplicate
+type CopyResponse struct {
+	Status  string `json:"status"`
+	Entries []Item `json:"entries"`
+}
+
+// MultiPartCreateRequest is the input of POST /s3/multipart/create
+type MultiPartCreateRequest struct {
+	Filename     string      `json:"filename"`
+	Mime         string      `json:"mime"`
+	Size         int64       `json:"size"`
+	Extension    string      `json:"extension"`
+	ParentID     json.Number `json:"parent_id"`
+	RelativePath string      `json:"relativePath"`
+}
+
+// MultiPartCreateResponse is returned by POST /s3/multipart/create
+type MultiPartCreateResponse struct {
+	UploadID string `json:"uploadId"`
+	Key      string `json:"key"`
+}
+
+// CompletedPart is the type for completed parts when making a multipart upload.
+type CompletedPart struct { + ETag string `json:"ETag"` + PartNumber int32 `json:"PartNumber"` +} + +// MultiPartGetURLsRequest is the input of POST /s3/multipart/batch-sign-part-urls +type MultiPartGetURLsRequest struct { + UploadID string `json:"uploadId"` + Key string `json:"key"` + PartNumbers []int `json:"partNumbers"` +} + +// MultiPartGetURLsResponse is the result of POST /s3/multipart/batch-sign-part-urls +type MultiPartGetURLsResponse struct { + URLs []struct { + URL string `json:"url"` + PartNumber int32 `json:"partNumber"` + } `json:"urls"` +} + +// MultiPartCompleteRequest is the input to POST /s3/multipart/complete +type MultiPartCompleteRequest struct { + UploadID string `json:"uploadId"` + Key string `json:"key"` + Parts []CompletedPart `json:"parts"` +} + +// MultiPartCompleteResponse is the result of POST /s3/multipart/complete +type MultiPartCompleteResponse struct { + Location string `json:"location"` +} + +// MultiPartEntriesRequest is the input to POST /s3/entries +type MultiPartEntriesRequest struct { + ClientMime string `json:"clientMime"` + ClientName string `json:"clientName"` + Filename string `json:"filename"` + Size int64 `json:"size"` + ClientExtension string `json:"clientExtension"` + ParentID json.Number `json:"parent_id"` + RelativePath string `json:"relativePath"` +} + +// MultiPartEntriesResponse is the result of POST /s3/entries +type MultiPartEntriesResponse struct { + FileEntry Item `json:"fileEntry"` +} + +// MultiPartAbort is the input of POST /s3/multipart/abort +type MultiPartAbort struct { + UploadID string `json:"uploadId"` + Key string `json:"key"` +} diff --git a/backend/drime/drime.go b/backend/drime/drime.go new file mode 100644 index 0000000000000..8a199ab11b5e1 --- /dev/null +++ b/backend/drime/drime.go @@ -0,0 +1,1563 @@ +// Package drime provides an interface to the Drime +// object storage system. +package drime + +/* +Return results give + + X-Ratelimit-Limit: 2000 + X-Ratelimit-Remaining: 1999 + +The rate limit headers indicate the number of allowed API requests per +minute. The limit is two thousand requests per minute, and rclone +should stay under that. 
+*/ + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "path" + "sort" + "strconv" + "strings" + "sync" + "sync/atomic" + "time" + + "github.com/rclone/rclone/backend/drime/api" + "github.com/rclone/rclone/fs" + "github.com/rclone/rclone/fs/chunksize" + "github.com/rclone/rclone/fs/config" + "github.com/rclone/rclone/fs/config/configmap" + "github.com/rclone/rclone/fs/config/configstruct" + "github.com/rclone/rclone/fs/fserrors" + "github.com/rclone/rclone/fs/fshttp" + "github.com/rclone/rclone/fs/hash" + "github.com/rclone/rclone/lib/dircache" + "github.com/rclone/rclone/lib/encoder" + "github.com/rclone/rclone/lib/multipart" + "github.com/rclone/rclone/lib/pacer" + "github.com/rclone/rclone/lib/random" + "github.com/rclone/rclone/lib/rest" +) + +const ( + minSleep = 10 * time.Millisecond + maxSleep = 20 * time.Second + decayConstant = 1 // bigger for slower decay, exponential + baseURL = "https://app.drime.cloud/" + rootURL = baseURL + "api/v1" + maxUploadParts = 10000 // maximum allowed number of parts in a multi-part upload + minChunkSize = fs.SizeSuffix(1024 * 1024 * 5) + defaultUploadCutoff = fs.SizeSuffix(200 * 1024 * 1024) +) + +// Register with Fs +func init() { + fs.Register(&fs.RegInfo{ + Name: "drime", + Description: "Drime", + NewFs: NewFs, + Options: []fs.Option{{ + Name: "access_token", + Help: `API Access token + +You can get this from the web control panel.`, + Sensitive: true, + }, { + Name: "root_folder_id", + Help: `ID of the root folder + +Leave this blank normally, rclone will fill it in automatically. + +If you want rclone to be restricted to a particular folder you can +fill it in - see the docs for more info. +`, + Default: "", + Advanced: true, + Sensitive: true, + }, { + Name: "workspace_id", + Help: `Account ID + +Leave this blank normally unless you wish to specify a Workspace ID. +`, + Default: "", + Advanced: true, + Sensitive: true, + }, { + Name: "list_chunk", + Help: `Number of items to list in each call`, + Default: 1000, + Advanced: true, + }, { + Name: "hard_delete", + Help: "Delete files permanently rather than putting them into the trash.", + Default: false, + Advanced: true, + }, { + Name: "upload_cutoff", + Help: `Cutoff for switching to chunked upload. + +Any files larger than this will be uploaded in chunks of chunk_size. +The minimum is 0 and the maximum is 5 GiB.`, + Default: defaultUploadCutoff, + Advanced: true, + }, { + Name: "chunk_size", + Help: `Chunk size to use for uploading. + +When uploading files larger than upload_cutoff or files with unknown +size (e.g. from "rclone rcat" or uploaded with "rclone mount" or google +photos or google docs) they will be uploaded as multipart uploads +using this chunk size. + +Note that "--drime-upload-concurrency" chunks of this size are buffered +in memory per transfer. + +If you are transferring large files over high-speed links and you have +enough memory, then increasing this will speed up the transfers. + +Rclone will automatically increase the chunk size when uploading a +large file of known size to stay below the 10,000 chunks limit. + +Files of unknown size are uploaded with the configured +chunk_size. Since the default chunk size is 5 MiB and there can be at +most 10,000 chunks, this means that by default the maximum size of +a file you can stream upload is 48 GiB. If you wish to stream upload +larger files then you will need to increase chunk_size. 
+`,
+			Default:  minChunkSize,
+			Advanced: true,
+		}, {
+			Name: "upload_concurrency",
+			Help: `Concurrency for multipart uploads and copies.
+
+This is the number of chunks of the same file that are uploaded
+concurrently for multipart uploads and copies.
+
+If you are uploading small numbers of large files over high-speed links
+and these uploads do not fully utilize your bandwidth, then increasing
+this may help to speed up the transfers.`,
+			Default:  4,
+			Advanced: true,
+		}, {
+			Name:     config.ConfigEncoding,
+			Help:     config.ConfigEncodingHelp,
+			Advanced: true,
+			Default: (encoder.Display | // Slash Control Delete Dot
+				encoder.EncodeLeftSpace |
+				encoder.EncodeBackSlash |
+				encoder.EncodeRightSpace |
+				encoder.EncodeInvalidUtf8),
+		}},
+	})
+}
+
+/*
+TestDrime{sb0-v}
+stringNeedsEscaping = []rune{
+	'/', '\\', '\a', '\b', '\f', '\n', '\r', '\t', '\v', '\x00', '\x01', '\x02', '\x03', '\x04', '\x05', '\x06', '\x0e', '\x0f', '\x10', '\x11', '\x12', '\x13', '\x14',
+	'\x15', '\x16', '\x17', '\x18', '\x19', '\x1a', '\x1b', '\x1c', '\x1d', '\x1e', '\x1f', '\x7f', '\xbf', '\xfe'
+}
+maxFileLength = 255 // for 1 byte unicode characters
+maxFileLength = 127 // for 2 byte unicode characters
+maxFileLength = 85 // for 3 byte unicode characters
+maxFileLength = 63 // for 4 byte unicode characters
+canWriteUnnormalized = true
+canReadUnnormalized = true
+canReadRenormalized = false
+canStream = true
+base32768isOK = true // make sure maxFileLength for 2 byte unicode chars is the same as for 1 byte characters
+*/
+
+// Options defines the configuration for this backend
+type Options struct {
+	AccessToken       string               `config:"access_token"`
+	RootFolderID      string               `config:"root_folder_id"`
+	WorkspaceID       string               `config:"workspace_id"`
+	UploadConcurrency int                  `config:"upload_concurrency"`
+	ChunkSize         fs.SizeSuffix        `config:"chunk_size"`
+	HardDelete        bool                 `config:"hard_delete"`
+	UploadCutoff      fs.SizeSuffix        `config:"upload_cutoff"`
+	ListChunk         int                  `config:"list_chunk"`
+	Enc               encoder.MultiEncoder `config:"encoding"`
+}
+
+// Fs represents a remote drime
+type Fs struct {
+	name     string             // name of this remote
+	root     string             // the path we are working on
+	opt      Options            // parsed options
+	features *fs.Features       // optional features
+	srv      *rest.Client       // the connection to the server
+	dirCache *dircache.DirCache // Map of directory path to directory id
+	pacer    *fs.Pacer          // pacer for API calls
+}
+
+// Object describes a drime object
+//
+// The full set of metadata will always be present
+type Object struct {
+	fs       *Fs       // what this object is part of
+	remote   string    // The remote path
+	size     int64     // size of the object
+	modTime  time.Time // modification time of the object
+	id       string    // ID of the object
+	dirID    string    // ID of the object's directory
+	mimeType string    // mime type of the object
+	url      string    // where to download this object
+}
+
+// ------------------------------------------------------------
+
+func checkUploadChunkSize(cs fs.SizeSuffix) error {
+	if cs < minChunkSize {
+		return fmt.Errorf("%s is less than %s", cs, minChunkSize)
+	}
+	return nil
+}
+
+func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
+	err = checkUploadChunkSize(cs)
+	if err == nil {
+		old, f.opt.ChunkSize = f.opt.ChunkSize, cs
+	}
+	return
+}
+
+func (f *Fs)
setUploadCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) { + old, f.opt.UploadCutoff = f.opt.UploadCutoff, cs + return +} + +// Name of the remote (as passed into NewFs) +func (f *Fs) Name() string { + return f.name +} + +// Root of the remote (as passed into NewFs) +func (f *Fs) Root() string { + return f.root +} + +// String converts this Fs to a string +func (f *Fs) String() string { + return fmt.Sprintf("drime root '%s'", f.root) +} + +// Features returns the optional features of this Fs +func (f *Fs) Features() *fs.Features { + return f.features +} + +// parsePath parses a drime 'url' +func parsePath(path string) (root string) { + root = strings.Trim(path, "/") + return +} + +// retryErrorCodes is a slice of error codes that we will retry +var retryErrorCodes = []int{ + 429, // Too Many Requests. + 500, // Internal Server Error + 502, // Bad Gateway + 503, // Service Unavailable + 504, // Gateway Timeout + 509, // Bandwidth Limit Exceeded +} + +// shouldRetry returns a boolean as to whether this resp and err +// deserve to be retried. It returns the err as a convenience +func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) { + if fserrors.ContextError(ctx, &err) { + return false, err + } + return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err +} + +// readMetaDataForPath reads the metadata from the path +func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.Item, err error) { + leaf, directoryID, err := f.dirCache.FindPath(ctx, path, false) + if err != nil { + if err == fs.ErrorDirNotFound { + return nil, fs.ErrorObjectNotFound + } + return nil, err + } + + found, err := f.listAll(ctx, directoryID, false, true, leaf, func(item *api.Item) bool { + if item.Name == leaf { + info = item + return true + } + return false + }) + if err != nil { + return nil, err + } + if !found { + return nil, fs.ErrorObjectNotFound + } + return info, nil +} + +// getItem reads item for ID given +func (f *Fs) getItem(ctx context.Context, id string, dirID string, leaf string) (info *api.Item, err error) { + found, err := f.listAll(ctx, dirID, false, true, leaf, func(item *api.Item) bool { + if item.ID.String() == id { + info = item + return true + } + return false + }) + if !found { + return nil, fs.ErrorObjectNotFound + } + return info, err +} + +// errorHandler parses a non 2xx error response into an error +func errorHandler(resp *http.Response) error { + body, err := rest.ReadBody(resp) + if err != nil { + fs.Debugf(nil, "Couldn't read error out of body: %v", err) + body = nil + } + // Decode error response if there was one - they can be blank + var errResponse api.Error + if len(body) > 0 { + err = json.Unmarshal(body, &errResponse) + if err != nil { + fs.Debugf(nil, "Couldn't decode error response: %v", err) + } + } + if errResponse.Message == "" { + errResponse.Message = fmt.Sprintf("%s (%d): %s", resp.Status, resp.StatusCode, string(body)) + } + return errResponse +} + +// NewFs constructs an Fs from the path, container:path +func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) { + // Parse config into Options struct + opt := new(Options) + err := configstruct.Set(m, opt) + if err != nil { + return nil, err + } + + err = checkUploadChunkSize(opt.ChunkSize) + if err != nil { + return nil, fmt.Errorf("drime: chunk size: %w", err) + } + + root = parsePath(root) + + client := fshttp.NewClient(ctx) + + f := &Fs{ + name: name, + root: root, + opt: *opt, + srv: 
rest.NewClient(client).SetRoot(rootURL), + pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))), + } + f.features = (&fs.Features{ + CanHaveEmptyDirectories: true, + ReadMimeType: true, + WriteMimeType: true, + }).Fill(ctx, f) + f.srv.SetErrorHandler(errorHandler) + f.srv.SetHeader("Authorization", "Bearer "+f.opt.AccessToken) + f.srv.SetHeader("Accept", "application/json") + + // Get rootFolderID + rootID := f.opt.RootFolderID + f.dirCache = dircache.New(root, rootID, f) + + // Find the current root + err = f.dirCache.FindRoot(ctx, false) + if err != nil { + // Assume it is a file + newRoot, remote := dircache.SplitPath(root) + tempF := *f + tempF.dirCache = dircache.New(newRoot, rootID, &tempF) + tempF.root = newRoot + // Make new Fs which is the parent + err = tempF.dirCache.FindRoot(ctx, false) + if err != nil { + // No root so return old f + return f, nil + } + _, err := tempF.newObjectWithInfo(ctx, remote, nil) + if err != nil { + if err == fs.ErrorObjectNotFound { + // File doesn't exist so return old f + return f, nil + } + return nil, err + } + f.features.Fill(ctx, &tempF) + // XXX: update the old f here instead of returning tempF, since + // `features` were already filled with functions having *f as a receiver. + // See https://github.com/rclone/rclone/issues/2182 + f.dirCache = tempF.dirCache + f.root = tempF.root + // return an error with an fs which points to the parent + return f, fs.ErrorIsFile + } + return f, nil +} + +// rootSlash returns root with a slash on if it is empty, otherwise empty string +func (f *Fs) rootSlash() string { + if f.root == "" { + return f.root + } + return f.root + "/" +} + +// Return an Object from a path +// +// If it can't be found it returns the error fs.ErrorObjectNotFound. +func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *api.Item) (fs.Object, error) { + o := &Object{ + fs: f, + remote: remote, + } + var err error + if info != nil { + // Set info + err = o.setMetaData(info) + } else { + err = o.readMetaData(ctx) // reads info and meta, returning an error + } + if err != nil { + return nil, err + } + return o, nil +} + +// NewObject finds the Object at remote. If it can't be found +// it returns the error fs.ErrorObjectNotFound. 
+func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) { + return f.newObjectWithInfo(ctx, remote, nil) +} + +// FindLeaf finds a directory of name leaf in the folder with ID pathID +func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut string, found bool, err error) { + // Find the leaf in pathID + found, err = f.listAll(ctx, pathID, true, false, leaf, func(item *api.Item) bool { + if item.Name == leaf { + pathIDOut = item.ID.String() + return true + } + return false + }) + return pathIDOut, found, err +} + +// createDir makes a directory with pathID as parent and name leaf and modTime +func (f *Fs) createDir(ctx context.Context, pathID, leaf string, modTime time.Time) (item *api.Item, err error) { + var resp *http.Response + var result api.CreateFolderResponse + opts := rest.Opts{ + Method: "POST", + Path: "/folders", + } + mkdir := api.CreateFolderRequest{ + Name: f.opt.Enc.FromStandardName(leaf), + ParentID: json.Number(pathID), + } + err = f.pacer.Call(func() (bool, error) { + resp, err = f.srv.CallJSON(ctx, &opts, &mkdir, &result) + return shouldRetry(ctx, resp, err) + }) + if err != nil { + return nil, fmt.Errorf("failed to create folder: %w", err) + } + return &result.Folder, nil +} + +// CreateDir makes a directory with pathID as parent and name leaf +func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string, err error) { + item, err := f.createDir(ctx, pathID, leaf, time.Now()) + if err != nil { + return "", err + } + return item.ID.String(), nil +} + +// list the objects into the function supplied +// +// If directories is set it only sends directories +// User function to process a File item from listAll +// +// Should return true to finish processing +type listAllFn func(*api.Item) bool + +// Lists the directory required calling the user function on each item found +// +// If name is set then the server will limit the returned items to those +// with that name. 
+// +// If the user fn ever returns true then it early exits with found = true +func (f *Fs) listAll(ctx context.Context, dirID string, directoriesOnly bool, filesOnly bool, name string, fn listAllFn) (found bool, err error) { + opts := rest.Opts{ + Method: "GET", + Path: "/drive/file-entries", + Parameters: url.Values{}, + } + if dirID != "" { + opts.Parameters.Add("parentIds", dirID) + } + if directoriesOnly { + opts.Parameters.Add("type", api.ItemTypeFolder) + } + if f.opt.WorkspaceID != "" { + opts.Parameters.Set("workspaceId", f.opt.WorkspaceID) + } + opts.Parameters.Set("perPage", strconv.Itoa(f.opt.ListChunk)) + page := 1 +OUTER: + for { + opts.Parameters.Set("page", strconv.Itoa(page)) + var result api.Listing + var resp *http.Response + err = f.pacer.Call(func() (bool, error) { + resp, err = f.srv.CallJSON(ctx, &opts, nil, &result) + return shouldRetry(ctx, resp, err) + }) + if err != nil { + return found, fmt.Errorf("couldn't list files: %w", err) + } + for _, item := range result.Data { + if item.Type == api.ItemTypeFolder { + if filesOnly { + continue + } + } else { + if directoriesOnly { + continue + } + } + item.Name = f.opt.Enc.ToStandardName(item.Name) + if fn(&item) { + found = true + break OUTER + } + } + if result.NextPage == 0 { + break + } + page = result.NextPage + } + return found, err +} + +// Convert a list item into a DirEntry +func (f *Fs) itemToDirEntry(ctx context.Context, remote string, info *api.Item) (entry fs.DirEntry, err error) { + if info.Type == api.ItemTypeFolder { + // cache the directory ID for later lookups + f.dirCache.Put(remote, info.ID.String()) + entry = fs.NewDir(remote, info.UpdatedAt). + SetSize(info.FileSize). + SetID(info.ID.String()). + SetParentID(info.ParentID.String()) + } else { + entry, err = f.newObjectWithInfo(ctx, remote, info) + if err != nil { + return nil, err + } + } + return entry, nil +} + +// List the objects and directories in dir into entries. The +// entries can be returned in any order but should be for a +// complete directory. +// +// dir should be "" to list the root, and should not have +// trailing slashes. +// +// This should return ErrDirNotFound if the directory isn't +// found. +func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) { + directoryID, err := f.dirCache.FindDir(ctx, dir, false) + if err != nil { + return nil, err + } + var iErr error + _, err = f.listAll(ctx, directoryID, false, false, "", func(info *api.Item) bool { + remote := path.Join(dir, info.Name) + entry, err := f.itemToDirEntry(ctx, remote, info) + if err != nil { + iErr = err + return true + } + entries = append(entries, entry) + return false + }) + if err != nil { + return nil, err + } + if iErr != nil { + return nil, iErr + } + return entries, nil +} + +// Creates from the parameters passed in a half finished Object which +// must have setMetaData called on it +// +// Returns the object, leaf, directoryID and error. +// +// Used to create new objects +func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time, size int64) (o *Object, leaf string, directoryID string, err error) { + // Create the directory for the object if it doesn't exist + leaf, directoryID, err = f.dirCache.FindPath(ctx, remote, true) + if err != nil { + return + } + // Temporary Object under construction + o = &Object{ + fs: f, + remote: remote, + } + return o, leaf, directoryID, nil +} + +// Put the object +// +// Copy the reader in to the new object which is returned. 
+// +// The new object may have been created if an error is returned +func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { + existingObj, err := f.NewObject(ctx, src.Remote()) + switch err { + case nil: + return existingObj, existingObj.Update(ctx, in, src, options...) + case fs.ErrorObjectNotFound: + // Not found so create it + return f.PutUnchecked(ctx, in, src, options...) + default: + return nil, err + } +} + +// PutStream uploads to the remote path with the modTime given of indeterminate size +func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { + return f.Put(ctx, in, src, options...) +} + +// PutUnchecked the object into the container +// +// This will produce a duplicate if the object already exists. +// +// Copy the reader in to the new object which is returned. +// +// The new object may have been created if an error is returned +func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { + remote := src.Remote() + size := src.Size() + modTime := src.ModTime(ctx) + + o, _, _, err := f.createObject(ctx, remote, modTime, size) + if err != nil { + return nil, err + } + return o, o.Update(ctx, in, src, options...) +} + +// Mkdir creates the container if it doesn't exist +func (f *Fs) Mkdir(ctx context.Context, dir string) error { + _, err := f.dirCache.FindDir(ctx, dir, true) + return err +} + +// deleteObject removes an object by ID +func (f *Fs) deleteObject(ctx context.Context, id string) error { + opts := rest.Opts{ + Method: "POST", + Path: "/file-entries/delete", + } + request := api.DeleteRequest{ + EntryIDs: []string{id}, + DeleteForever: f.opt.HardDelete, + } + var result api.DeleteResponse + err := f.pacer.Call(func() (bool, error) { + resp, err := f.srv.CallJSON(ctx, &opts, &request, &result) + return shouldRetry(ctx, resp, err) + }) + if err != nil { + return fmt.Errorf("failed to delete item: %w", err) + } + // Check the individual result codes also + for name, errstring := range result.Errors { + return fmt.Errorf("failed to delete item %q: %s", name, errstring) + } + return nil +} + +// purgeCheck removes the root directory, if check is set then it +// refuses to do so if it has anything in +func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error { + root := path.Join(f.root, dir) + if root == "" { + return errors.New("can't purge root directory") + } + dc := f.dirCache + rootID, err := dc.FindDir(ctx, dir, false) + if err != nil { + return err + } + + // Check to see if there is contents in the directory + if check { + found, err := f.listAll(ctx, rootID, false, false, "", func(item *api.Item) bool { + return true + }) + if err != nil { + return err + } + if found { + return fs.ErrorDirectoryNotEmpty + } + } + + // Delete the directory + err = f.deleteObject(ctx, rootID) + if err != nil { + return err + } + + f.dirCache.FlushDir(dir) + return nil +} + +// Rmdir deletes the root folder +// +// Returns an error if it isn't empty +func (f *Fs) Rmdir(ctx context.Context, dir string) error { + return f.purgeCheck(ctx, dir, true) +} + +// Precision return the precision of this Fs +func (f *Fs) Precision() time.Duration { + return fs.ModTimeNotSupported +} + +// Purge deletes all the files and the container +// +// Optional interface: Only implement this if you have a way of +// deleting all the files quicker than just running Remove() on the +// result of 
List() +func (f *Fs) Purge(ctx context.Context, dir string) error { + return f.purgeCheck(ctx, dir, false) +} + +// patch an attribute on an object to value +func (f *Fs) patch(ctx context.Context, id, attribute string, value string) (item *api.Item, err error) { + var resp *http.Response + var request = api.UpdateItemRequest{ + Name: value, + } + var result api.UpdateItemResponse + opts := rest.Opts{ + Method: "PUT", + Path: "/file-entries/" + id, + } + err = f.pacer.Call(func() (bool, error) { + resp, err = f.srv.CallJSON(ctx, &opts, &request, &result) + return shouldRetry(ctx, resp, err) + }) + if err != nil { + return nil, fmt.Errorf("failed to patch item %q to %v: %w", attribute, value, err) + } + return &result.FileEntry, nil +} + +// rename a file or a folder +func (f *Fs) rename(ctx context.Context, id, newLeaf string) (item *api.Item, err error) { + return f.patch(ctx, id, "name", f.opt.Enc.FromStandardName(newLeaf)) +} + +// move a file or a folder to a new directory +// func (f *Fs) move(ctx context.Context, id, newDirID string, dstLeaf string) (item *api.Item, err error) { +func (f *Fs) move(ctx context.Context, id, newDirID string) (err error) { + var resp *http.Response + var request = api.MoveRequest{ + EntryIDs: []string{id}, + DestinationID: newDirID, + } + var result api.MoveResponse + opts := rest.Opts{ + Method: "POST", + Path: "/file-entries/move", + } + err = f.pacer.Call(func() (bool, error) { + resp, err = f.srv.CallJSON(ctx, &opts, &request, &result) + return shouldRetry(ctx, resp, err) + }) + if err != nil { + return fmt.Errorf("failed to move item: %w", err) + } + + return nil +} + +// move and rename a file or folder to directoryID with leaf +func (f *Fs) moveTo(ctx context.Context, id, srcLeaf, dstLeaf, srcDirectoryID, dstDirectoryID string) (info *api.Item, err error) { + newLeaf := f.opt.Enc.FromStandardName(dstLeaf) + oldLeaf := f.opt.Enc.FromStandardName(srcLeaf) + doRenameLeaf := oldLeaf != newLeaf + doMove := srcDirectoryID != dstDirectoryID + + // Now rename the leaf to a temporary name if we are moving to + // another directory to make sure we don't overwrite something + // in the destination directory by accident + if doRenameLeaf && doMove { + tmpLeaf := newLeaf + "." + random.String(8) + info, err = f.rename(ctx, id, tmpLeaf) + if err != nil { + return nil, fmt.Errorf("Move rename leaf: %w", err) + } + } + + // Move the object to a new directory (with the existing name) + // if required + if doMove { + err = f.move(ctx, id, dstDirectoryID) + if err != nil { + return nil, err + } + } + + // Rename the leaf to its final name if required + if doRenameLeaf { + info, err = f.rename(ctx, id, newLeaf) + if err != nil { + return nil, fmt.Errorf("Move rename leaf: %w", err) + } + } + + if info == nil { + info, err = f.getItem(ctx, id, dstDirectoryID, dstLeaf) + if err != nil { + return nil, err + } + } + + return info, nil +} + +// Move src to this remote using server-side move operations. +// +// This is stored with the remote path given. +// +// It returns the destination Object and a possible error. 
+// +// Will only be called if src.Fs().Name() == f.Name() +// +// If it isn't possible then return fs.ErrorCantMove +func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) { + srcObj, ok := src.(*Object) + if !ok { + fs.Debugf(src, "Can't move - not same remote type") + return nil, fs.ErrorCantMove + } + + // Find existing object + srcLeaf, srcDirectoryID, err := srcObj.fs.dirCache.FindPath(ctx, srcObj.remote, false) + if err != nil { + return nil, err + } + + // Create temporary object + dstObj, dstLeaf, dstDirectoryID, err := f.createObject(ctx, remote, srcObj.modTime, srcObj.size) + if err != nil { + return nil, err + } + + // Do the move + info, err := f.moveTo(ctx, srcObj.id, srcLeaf, dstLeaf, srcDirectoryID, dstDirectoryID) + if err != nil { + return nil, err + } + + err = dstObj.setMetaData(info) + if err != nil { + return nil, err + } + return dstObj, nil +} + +// DirMove moves src, srcRemote to this remote at dstRemote +// using server-side move operations. +// +// Will only be called if src.Fs().Name() == f.Name() +// +// If it isn't possible then return fs.ErrorCantDirMove +// +// If destination exists then return fs.ErrorDirExists +func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error { + srcFs, ok := src.(*Fs) + if !ok { + fs.Debugf(srcFs, "Can't move directory - not same remote type") + return fs.ErrorCantDirMove + } + + srcID, srcDirectoryID, srcLeaf, dstDirectoryID, dstLeaf, err := f.dirCache.DirMove(ctx, srcFs.dirCache, srcFs.root, srcRemote, f.root, dstRemote) + if err != nil { + return err + } + + // Do the move + _, err = f.moveTo(ctx, srcID, srcLeaf, dstLeaf, srcDirectoryID, dstDirectoryID) + if err != nil { + return err + } + srcFs.dirCache.FlushDir(srcRemote) + return nil +} + +// copy a file or a folder to a new directory +func (f *Fs) copy(ctx context.Context, id, newDirID string) (item *api.Item, err error) { + var resp *http.Response + var request = api.CopyRequest{ + EntryIDs: []string{id}, + DestinationID: newDirID, + } + var result api.CopyResponse + opts := rest.Opts{ + Method: "POST", + Path: "/file-entries/duplicate", + } + err = f.pacer.Call(func() (bool, error) { + resp, err = f.srv.CallJSON(ctx, &opts, &request, &result) + return shouldRetry(ctx, resp, err) + }) + if err != nil { + return nil, fmt.Errorf("failed to copy item: %w", err) + } + itemResult := result.Entries[0] + return &itemResult, nil +} + +// copy and rename a file or folder to directoryID with leaf +func (f *Fs) copyTo(ctx context.Context, srcID, srcLeaf, dstLeaf, dstDirectoryID string) (info *api.Item, err error) { + // Can have duplicates so don't have to be careful here + + // Copy to dstDirectoryID first + info, err = f.copy(ctx, srcID, dstDirectoryID) + if err != nil { + return nil, err + } + + // Rename if required + if srcLeaf != dstLeaf { + info, err = f.rename(ctx, info.ID.String(), dstLeaf) + if err != nil { + return nil, err + } + } + return info, nil +} + +// Copy src to this remote using server-side copy operations. +// +// This is stored with the remote path given. +// +// It returns the destination Object and a possible error. 
+// +// Will only be called if src.Fs().Name() == f.Name() +// +// If it isn't possible then return fs.ErrorCantCopy +func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (dst fs.Object, err error) { + srcObj, ok := src.(*Object) + if !ok { + fs.Debugf(src, "Can't copy - not same remote type") + return nil, fs.ErrorCantCopy + } + srcLeaf := path.Base(srcObj.remote) + + srcPath := srcObj.fs.rootSlash() + srcObj.remote + dstPath := f.rootSlash() + remote + if srcPath == dstPath { + return nil, fmt.Errorf("can't copy %q -> %q as are same name", srcPath, dstPath) + } + + // Find existing object + existingObj, err := f.NewObject(ctx, remote) + if err == nil { + defer func() { + // Don't remove existing object if returning an error + if err != nil { + return + } + fs.Debugf(existingObj, "Server side copy: removing existing object after successful copy") + err = existingObj.Remove(ctx) + }() + } + + // Create temporary object + dstObj, dstLeaf, dstDirectoryID, err := f.createObject(ctx, remote, srcObj.modTime, srcObj.size) + if err != nil { + return nil, err + } + + // Copy the object + info, err := f.copyTo(ctx, srcObj.id, srcLeaf, dstLeaf, dstDirectoryID) + if err != nil { + return nil, err + } + err = dstObj.setMetaData(info) + if err != nil { + return nil, err + } + + return dstObj, nil +} + +// DirCacheFlush resets the directory cache - used in testing as an +// optional interface +func (f *Fs) DirCacheFlush() { + f.dirCache.ResetRoot() +} + +// Hashes returns the supported hash sets. +func (f *Fs) Hashes() hash.Set { + return hash.Set(hash.None) +} + +var warnStreamUpload sync.Once + +// Status of open chunked upload +type drimeChunkWriter struct { + uploadID string + key string + chunkSize int64 + size int64 + f *Fs + o *Object + written atomic.Int64 + fileEntry api.Item + + uploadName string + leaf string + mime string + extension string + parentID json.Number + relativePath string + + completedPartsMu sync.Mutex + completedParts []api.CompletedPart +} + +// OpenChunkWriter returns the chunk size and a ChunkWriter +// +// Pass in the remote and the src object +// You can also use options to hint at the desired chunk size +func (f *Fs) OpenChunkWriter(ctx context.Context, remote string, src fs.ObjectInfo, options ...fs.OpenOption) (info fs.ChunkWriterInfo, writer fs.ChunkWriter, err error) { + // Create the directory for the object if it doesn't exist + leaf, directoryID, err := f.dirCache.FindPath(ctx, remote, true) + if err != nil { + return info, nil, err + } + + // Temporary Object under construction + o := &Object{ + fs: f, + remote: remote, + } + + size := src.Size() + fs.FixRangeOption(options, size) + + // calculate size of parts + chunkSize := f.opt.ChunkSize + + // size can be -1 here meaning we don't know the size of the incoming file. We use ChunkSize + // buffers here (default 5 MB). With a maximum number of parts (10,000) this will be a file of + // 48 GB. 
+ if size == -1 { + warnStreamUpload.Do(func() { + fs.Logf(f, "Streaming uploads using chunk size %v will have maximum file size of %v", + chunkSize, fs.SizeSuffix(int64(chunkSize)*int64(maxUploadParts))) + }) + } else { + chunkSize = chunksize.Calculator(src, size, maxUploadParts, chunkSize) + } + + createSize := max(0, size) + + // Initiate multipart upload + req := api.MultiPartCreateRequest{ + Filename: leaf, + Mime: fs.MimeType(ctx, src), + Size: createSize, + Extension: strings.TrimPrefix(path.Ext(leaf), `.`), + ParentID: json.Number(directoryID), + RelativePath: f.opt.Enc.FromStandardPath(path.Join(f.root, remote)), + } + + var resp api.MultiPartCreateResponse + + opts := rest.Opts{ + Method: "POST", + Path: "/s3/multipart/create", + Options: options, + } + + err = o.fs.pacer.Call(func() (bool, error) { + res, err := o.fs.srv.CallJSON(ctx, &opts, req, &resp) + return shouldRetry(ctx, res, err) + }) + + if err != nil { + return info, nil, fmt.Errorf("failed to initiate multipart upload: %w", err) + } + + mime := fs.MimeType(ctx, src) + ext := strings.TrimPrefix(path.Ext(leaf), ".") + // must have file extension for multipart upload + if ext == "" { + ext = "bin" + } + rel := f.opt.Enc.FromStandardPath(path.Join(f.root, remote)) + + chunkWriter := &drimeChunkWriter{ + uploadID: resp.UploadID, + key: resp.Key, + chunkSize: int64(chunkSize), + size: size, + f: f, + o: o, + uploadName: path.Base(resp.Key), + leaf: leaf, + mime: mime, + extension: ext, + parentID: json.Number(directoryID), + relativePath: rel, + } + info = fs.ChunkWriterInfo{ + ChunkSize: int64(chunkSize), + Concurrency: f.opt.UploadConcurrency, + LeavePartsOnError: false, + } + return info, chunkWriter, err +} + +// WriteChunk will write chunk number with reader bytes, where chunk number >= 0 +func (s *drimeChunkWriter) WriteChunk(ctx context.Context, chunkNumber int, reader io.ReadSeeker) (bytesWritten int64, err error) { + // chunk numbers between 1 and 100000 + chunkNumber++ + + // find size of chunk + chunkSize, err := reader.Seek(0, io.SeekEnd) + if err != nil { + return 0, fmt.Errorf("failed to seek chunk: %w", err) + } + + if chunkSize == 0 && chunkNumber != 1 { + return 0, nil + } + + partOpts := rest.Opts{ + Method: "POST", + Path: "/s3/multipart/batch-sign-part-urls", + } + + req := api.MultiPartGetURLsRequest{ + UploadID: s.uploadID, + Key: s.key, + PartNumbers: []int{ + chunkNumber, + }, + } + + var resp api.MultiPartGetURLsResponse + + err = s.f.pacer.Call(func() (bool, error) { + res, err := s.f.srv.CallJSON(ctx, &partOpts, req, &resp) + return shouldRetry(ctx, res, err) + }) + + if err != nil { + return 0, fmt.Errorf("failed to get part URL: %w", err) + } + + if len(resp.URLs) != 1 { + return 0, fmt.Errorf("expecting 1 URL but got %d", len(resp.URLs)) + } + partURL := resp.URLs[0].URL + + opts := rest.Opts{ + Method: "PUT", + RootURL: partURL, + Body: reader, + ContentType: "application/octet-stream", + ContentLength: &chunkSize, + NoResponse: true, + ExtraHeaders: map[string]string{ + "Authorization": "", // clear the default auth + }, + } + + var uploadRes *http.Response + + err = s.f.pacer.Call(func() (bool, error) { + _, err = reader.Seek(0, io.SeekStart) + if err != nil { + return false, fmt.Errorf("failed to seek chunk: %w", err) + } + uploadRes, err = s.f.srv.Call(ctx, &opts) + return shouldRetry(ctx, uploadRes, err) + }) + + if err != nil { + return 0, fmt.Errorf("failed to upload part %d: %w", chunkNumber, err) + } + + // Get ETag from response + etag := uploadRes.Header.Get("ETag") + 
fs.CheckClose(uploadRes.Body, &err) + + s.completedPartsMu.Lock() + defer s.completedPartsMu.Unlock() + s.completedParts = append(s.completedParts, api.CompletedPart{ + PartNumber: int32(chunkNumber), + ETag: etag, + }) + + // Count size written for unknown file sizes + s.written.Add(chunkSize) + + return chunkSize, nil +} + +// Close complete chunked writer finalising the file. +func (s *drimeChunkWriter) Close(ctx context.Context) error { + s.completedPartsMu.Lock() + defer s.completedPartsMu.Unlock() + + // Complete multipart upload + sort.Slice(s.completedParts, func(i, j int) bool { + return s.completedParts[i].PartNumber < s.completedParts[j].PartNumber + }) + + completeBody := api.MultiPartCompleteRequest{ + UploadID: s.uploadID, + Key: s.key, + Parts: s.completedParts, + } + + completeOpts := rest.Opts{ + Method: "POST", + Path: "/s3/multipart/complete", + } + + var response api.MultiPartCompleteResponse + + err := s.f.pacer.Call(func() (bool, error) { + res, err := s.f.srv.CallJSON(ctx, &completeOpts, completeBody, &response) + return shouldRetry(ctx, res, err) + }) + + if err != nil { + return fmt.Errorf("failed to complete multipart upload: %w", err) + } + + finalSize := s.size + if finalSize < 0 { + finalSize = s.written.Load() + } + + // s3/entries request to create drime object from multipart upload + req := api.MultiPartEntriesRequest{ + ClientMime: s.mime, + ClientName: s.leaf, + Filename: s.uploadName, + Size: finalSize, + ClientExtension: s.extension, + ParentID: s.parentID, + RelativePath: s.relativePath, + } + + entriesOpts := rest.Opts{ + Method: "POST", + Path: "/s3/entries", + } + + var res api.MultiPartEntriesResponse + err = s.f.pacer.Call(func() (bool, error) { + res, err := s.f.srv.CallJSON(ctx, &entriesOpts, req, &res) + return shouldRetry(ctx, res, err) + }) + if err != nil { + return fmt.Errorf("failed to create entry after multipart upload: %w", err) + } + s.fileEntry = res.FileEntry + + return nil +} + +// Abort chunk write +// +// You can and should call Abort without calling Close. 
+func (s *drimeChunkWriter) Abort(ctx context.Context) error { + opts := rest.Opts{ + Method: "POST", + Path: "/s3/multipart/abort", + NoResponse: true, + } + + req := api.MultiPartAbort{ + UploadID: s.uploadID, + Key: s.key, + } + + err := s.f.pacer.Call(func() (bool, error) { + res, err := s.f.srv.CallJSON(ctx, &opts, req, nil) + return shouldRetry(ctx, res, err) + }) + + if err != nil { + return fmt.Errorf("failed to abort multipart upload: %w", err) + } + + return nil +} + +// ------------------------------------------------------------ + +// Fs returns the parent Fs +func (o *Object) Fs() fs.Info { + return o.fs +} + +// String returns a string version +func (o *Object) String() string { + if o == nil { + return "" + } + return o.remote +} + +// Remote returns the remote path +func (o *Object) Remote() string { + return o.remote +} + +// Hash returns the hash of an object returning a lowercase hex string +func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) { + return "", hash.ErrUnsupported +} + +// Size returns the size of an object in bytes +func (o *Object) Size() int64 { + return o.size +} + +// setMetaDataAny sets the metadata from info but doesn't check the type +func (o *Object) setMetaDataAny(info *api.Item) { + o.size = info.FileSize + o.modTime = info.UpdatedAt + o.id = info.ID.String() + o.dirID = info.ParentID.String() + o.mimeType = info.Mime + o.url = info.URL +} + +// setMetaData sets the metadata from info +func (o *Object) setMetaData(info *api.Item) (err error) { + if info.Type == api.ItemTypeFolder { + return fs.ErrorIsDir + } + if info.ID == "" { + return fmt.Errorf("ID not found in response") + } + o.setMetaDataAny(info) + return nil +} + +// readMetaData gets the metadata unconditionally as we expect Object +// to always have the full set of metadata +func (o *Object) readMetaData(ctx context.Context) (err error) { + var info *api.Item + info, err = o.fs.readMetaDataForPath(ctx, o.remote) + if err != nil { + return err + } + return o.setMetaData(info) +} + +// ModTime returns the modification time of the object +// +// It attempts to read the objects mtime and if that isn't present the +// LastModified returned in the http headers +func (o *Object) ModTime(ctx context.Context) time.Time { + return o.modTime +} + +// SetModTime sets the modification time of the local fs object +func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error { + return fs.ErrorCantSetModTime +} + +// Storable returns a boolean showing whether this object storable +func (o *Object) Storable() bool { + return true +} + +// Open an object for read +func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) { + if o.id == "" { + return nil, errors.New("can't download - no id") + } + if o.url == "" { + // On upload an Object is returned with no url, so fetch it here if needed + err = o.readMetaData(ctx) + if err != nil { + return nil, fmt.Errorf("read metadata: %w", err) + } + } + fs.FixRangeOption(options, o.size) + var resp *http.Response + opts := rest.Opts{ + Method: "GET", + RootURL: baseURL + o.url, + Options: options, + } + + err = o.fs.pacer.Call(func() (bool, error) { + resp, err = o.fs.srv.Call(ctx, &opts) + return shouldRetry(ctx, resp, err) + }) + if err != nil { + return nil, err + } + return resp.Body, err +} + +// Update the object with the contents of the io.Reader, modTime and size +// +// If existing is set then it updates the object rather than creating a new one. 
+// +// The new object may have been created if an error is returned. +func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) { + remote := o.Remote() + size := src.Size() + + // Create the directory for the object if it doesn't exist + leaf, directoryID, err := o.fs.dirCache.FindPath(ctx, remote, true) + if err != nil { + return err + } + + // If the file exists, delete it after a successful upload + if o.id != "" { + id := o.id + o.id = "" + defer func() { + if err != nil { + return + } + fs.Debugf(o, "Removing old object on successful upload") + deleteErr := o.fs.deleteObject(ctx, id) + if deleteErr != nil { + err = fmt.Errorf("failed to delete existing object: %w", deleteErr) + } + }() + } + + if size < 0 || size > int64(o.fs.opt.UploadCutoff) { + chunkWriter, err := multipart.UploadMultipart(ctx, src, in, multipart.UploadMultipartOptions{ + Open: o.fs, + OpenOptions: options, + }) + if err != nil { + return err + } + s := chunkWriter.(*drimeChunkWriter) + + return o.setMetaData(&s.fileEntry) + } + + // Do the upload + var resp *http.Response + var result api.UploadResponse + var encodedLeaf = o.fs.opt.Enc.FromStandardName(leaf) + opts := rest.Opts{ + Method: "POST", + Body: in, + MultipartParams: url.Values{ + "parentId": {directoryID}, + "relativePath": {encodedLeaf}, + }, + MultipartContentName: "file", + MultipartFileName: encodedLeaf, + MultipartContentType: fs.MimeType(ctx, src), + Path: "/uploads", + Options: options, + } + err = o.fs.pacer.CallNoRetry(func() (bool, error) { + resp, err = o.fs.srv.CallJSON(ctx, &opts, nil, &result) + return shouldRetry(ctx, resp, err) + }) + if err != nil { + return fmt.Errorf("failed to upload file: %w", err) + } + return o.setMetaData(&result.FileEntry) +} + +// Remove an object +func (o *Object) Remove(ctx context.Context) error { + return o.fs.deleteObject(ctx, o.id) +} + +// ID returns the ID of the Object if known, or "" if not +func (o *Object) ID() string { + return o.id +} + +// MimeType returns the content type of the Object if known, or "" if not +func (o *Object) MimeType(ctx context.Context) string { + return o.mimeType +} + +// ParentID returns the ID of the Object parent if known, or "" if not +func (o *Object) ParentID() string { + return o.dirID +} + +// Check the interfaces are satisfied +var ( + _ fs.Fs = (*Fs)(nil) + _ fs.Purger = (*Fs)(nil) + _ fs.PutStreamer = (*Fs)(nil) + _ fs.Copier = (*Fs)(nil) + _ fs.Mover = (*Fs)(nil) + _ fs.DirMover = (*Fs)(nil) + _ fs.DirCacheFlusher = (*Fs)(nil) + _ fs.OpenChunkWriter = (*Fs)(nil) + _ fs.Object = (*Object)(nil) + _ fs.IDer = (*Object)(nil) + _ fs.ParentIDer = (*Object)(nil) + _ fs.MimeTyper = (*Object)(nil) +) diff --git a/backend/drime/drime_test.go b/backend/drime/drime_test.go new file mode 100644 index 0000000000000..37b72d6c7fd0d --- /dev/null +++ b/backend/drime/drime_test.go @@ -0,0 +1,33 @@ +// Drime filesystem interface +package drime + +import ( + "testing" + + "github.com/rclone/rclone/fs" + "github.com/rclone/rclone/fstest/fstests" +) + +// TestIntegration runs integration tests against the remote +func TestIntegration(t *testing.T) { + fstests.Run(t, &fstests.Opt{ + RemoteName: "TestDrime:", + NilObject: (*Object)(nil), + ChunkedUpload: fstests.ChunkedUploadConfig{ + MinChunkSize: minChunkSize, + }, + }) +} + +func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) { + return f.setUploadChunkSize(cs) +} + +func (f *Fs) SetUploadCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) { + return 
f.setUploadCutoff(cs)
+}
+
+var (
+	_ fstests.SetUploadChunkSizer = (*Fs)(nil)
+	_ fstests.SetUploadCutoffer = (*Fs)(nil)
+)
diff --git a/backend/filen/filen.go b/backend/filen/filen.go
new file mode 100644
index 0000000000000..2a3d8cb169145
--- /dev/null
+++ b/backend/filen/filen.go
@@ -0,0 +1,1178 @@
+// Package filen provides an interface to Filen cloud storage.
+package filen
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"io"
+	pathModule "path"
+	"strings"
+	"sync"
+	"time"
+
+	sdk "github.com/FilenCloudDienste/filen-sdk-go/filen"
+	"github.com/FilenCloudDienste/filen-sdk-go/filen/client"
+	"github.com/FilenCloudDienste/filen-sdk-go/filen/types"
+
+	"github.com/google/uuid"
+	"github.com/rclone/rclone/fs"
+	"github.com/rclone/rclone/fs/config"
+	"github.com/rclone/rclone/fs/config/configmap"
+	"github.com/rclone/rclone/fs/config/configstruct"
+	"github.com/rclone/rclone/fs/config/obscure"
+	"github.com/rclone/rclone/fs/hash"
+	"github.com/rclone/rclone/fs/list"
+	"github.com/rclone/rclone/lib/encoder"
+	"golang.org/x/sync/errgroup"
+)
+
+func init() {
+	fs.Register(&fs.RegInfo{
+		Name: "filen",
+		Description: "Filen",
+		NewFs: NewFs,
+		Options: []fs.Option{
+			{
+				Name: "email",
+				Help: "Email of your Filen account",
+				Required: true,
+			},
+			{
+				Name: "password",
+				Help: "Password of your Filen account",
+				Required: true,
+				IsPassword: true,
+				Sensitive: true,
+			},
+			{
+				Name: "api_key",
+				Help: `API Key for your Filen account
+
+Get this using the Filen CLI export-api-key command
+You can download the Filen CLI from https://github.com/FilenCloudDienste/filen-cli`,
+				Required: true,
+				IsPassword: true,
+				Sensitive: true,
+			},
+			{
+				Name: "upload_concurrency",
+				Help: `Concurrency for chunked uploads.
+
+This is the upper limit for how many transfers for the same file are running concurrently.
+Setting this to a value smaller than 1 will cause uploads to deadlock.
+ +If you are uploading small numbers of large files over high-speed links +and these uploads do not fully utilize your bandwidth, then increasing +this may help to speed up the transfers.`, + Default: 16, + Advanced: true, + }, + { + Name: config.ConfigEncoding, + Help: config.ConfigEncodingHelp, + Advanced: true, + Default: encoder.Standard | encoder.EncodeInvalidUtf8, + }, + { + Name: "master_keys", + Help: "Master Keys (internal use only)", + Sensitive: true, + Advanced: true, + }, { + Name: "private_key", + Help: "Private RSA Key (internal use only)", + Sensitive: true, + Advanced: true, + }, { + Name: "public_key", + Help: "Public RSA Key (internal use only)", + Sensitive: true, + Advanced: true, + }, { + Name: "auth_version", + Help: "Authentication Version (internal use only)", + Advanced: true, + }, { + Name: "base_folder_uuid", + Help: "UUID of Account Root Directory (internal use only)", + Sensitive: true, + Advanced: true, + }, + }, + }) +} + +// NewFs constructs a Fs at the path root +func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) { + opt := new(Options) + err := configstruct.Set(m, opt) + if err != nil { + return nil, err + } + + root = opt.Encoder.FromStandardPath(root) + password, err := obscure.Reveal(opt.Password) + if err != nil { + return nil, fmt.Errorf("failed to reveal password: %w", err) + } + apiKey, err := obscure.Reveal(opt.APIKey) + if err != nil { + return nil, fmt.Errorf("failed to reveal api key: %w", err) + } + + var filen *sdk.Filen + if password == "INTERNAL" { + tsconfig := sdk.TSConfig{ + Email: opt.Email, + MasterKeys: strings.Split(opt.MasterKeys, "|"), + APIKey: apiKey, + PublicKey: opt.PublicKey, + PrivateKey: opt.PrivateKey, + AuthVersion: opt.AuthVersion, + BaseFolderUUID: opt.BaseFolderUUID, + } + filen, err = sdk.NewFromTSConfig(tsconfig) + if err != nil { + return nil, err + } + } else { + filen, err = sdk.NewWithAPIKey(ctx, opt.Email, password, apiKey) + if err != nil { + return nil, err + } + } + + maybeRootDir, err := filen.FindDirectory(ctx, root) + if errors.Is(err, fs.ErrorIsFile) { // FsIsFile special case + var err2 error + root = pathModule.Dir(root) + maybeRootDir, err2 = filen.FindDirectory(ctx, root) + if err2 != nil { + return nil, err2 + } + } else if err != nil { + return nil, err + } + + fileSystem := &Fs{ + name: name, + root: Directory{}, + filen: filen, + Enc: opt.Encoder, + concurrency: opt.UploadConcurrency, + } + + fileSystem.features = (&fs.Features{ + ReadMimeType: true, + WriteMimeType: true, + CanHaveEmptyDirectories: true, + ChunkWriterDoesntSeek: true, + }).Fill(ctx, fileSystem) + + fileSystem.root = Directory{ + fs: fileSystem, + directory: maybeRootDir, // could be null at this point + path: root, + } + + // must return the error from FindDirectory (see FsIsFile) + return fileSystem, err +} + +// Options defines the configuration for this backend +type Options struct { + Email string `config:"email"` + Password string `config:"password"` + APIKey string `config:"api_key"` + Encoder encoder.MultiEncoder `config:"encoding"` + MasterKeys string `config:"master_keys"` + PrivateKey string `config:"private_key"` + PublicKey string `config:"public_key"` + AuthVersion int `config:"auth_version"` + BaseFolderUUID string `config:"base_folder_uuid"` + UploadConcurrency int `config:"upload_concurrency"` +} + +// Fs represents a virtual filesystem mounted on a specific root folder +type Fs struct { + name string + root Directory + filen *sdk.Filen + Enc encoder.MultiEncoder + features 
*fs.Features + concurrency int +} + +// Name of the remote (as passed into NewFs) +func (f *Fs) Name() string { + return f.name +} + +// Root of the remote (as passed into NewFs) +func (f *Fs) Root() string { + return f.root.path +} + +// String converts this Fs to a string +func (f *Fs) String() string { + return fmt.Sprintf("Filen %s at /%s", f.filen.Email, f.root.String()) +} + +// Precision return the precision of this Fs +func (f *Fs) Precision() time.Duration { + return time.Millisecond +} + +// Hashes returns the supported hash sets. +func (f *Fs) Hashes() hash.Set { + return hash.Set(hash.SHA512) +} + +// Features returns the optional features of this Fs +func (f *Fs) Features() *fs.Features { + return f.features +} + +// List the objects and directories in dir into entries. The +// entries can be returned in any order but should be for a +// complete directory. +// +// dir should be "" to list the root, and should not have +// trailing slashes. +// +// This should return ErrDirNotFound if the directory isn't +// found. +func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) { + dir = f.Enc.FromStandardPath(dir) + // find directory uuid + directory, err := f.filen.FindDirectory(ctx, f.resolvePath(dir)) + if err != nil { + return nil, err + } + + if directory == nil { + return nil, fs.ErrorDirNotFound + } + + // read directory content + files, directories, err := f.filen.ReadDirectory(ctx, directory) + if err != nil { + return nil, err + } + entries = make(fs.DirEntries, 0, len(files)+len(directories)) + + for _, directory := range directories { + entries = append(entries, &Directory{ + fs: f, + path: pathModule.Join(dir, directory.Name), + directory: directory, + }) + } + for _, file := range files { + file := &Object{ + fs: f, + path: pathModule.Join(dir, file.Name), + file: file, + } + entries = append(entries, file) + } + return entries, nil +} + +// NewObject finds the Object at remote. If it can't be found +// it returns the error ErrorObjectNotFound. +// +// If remote points to a directory then it should return +// ErrorIsDir if possible without doing any extra work, +// otherwise ErrorObjectNotFound. +func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) { + remote = f.Enc.FromStandardPath(remote) + file, err := f.filen.FindFile(ctx, f.resolvePath(remote)) + if err != nil { + return nil, err + } + if file == nil { + return nil, fs.ErrorObjectNotFound + } + return &Object{ + fs: f, + path: remote, + file: file, + }, nil +} + +// Put in to the remote path with the modTime given of the given size +// +// When called from outside an Fs by rclone, src.Size() will always be >= 0. +// But for unknown-sized objects (indicated by src.Size() == -1), Put should either +// return an error or upload it properly (rather than e.g. calling panic). 
+// +// May create the object even if it returns an error - if so +// will return the object and the error, otherwise will return +// nil and the error +func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { + for _, option := range options { + if option.Mandatory() { + fs.Logf(option, "Unsupported mandatory option: %v", option) + } + } + path := f.Enc.FromStandardPath(src.Remote()) + resolvedPath := f.resolvePath(path) + modTime := src.ModTime(ctx) + parent, err := f.filen.FindDirectoryOrCreate(ctx, pathModule.Dir(resolvedPath)) + if err != nil { + return nil, err + } + incompleteFile, err := types.NewIncompleteFile(f.filen.FileEncryptionVersion, pathModule.Base(resolvedPath), fs.MimeType(ctx, src), modTime, modTime, parent) + if err != nil { + return nil, err + } + uploadedFile, err := f.filen.UploadFile(ctx, incompleteFile, in) + if err != nil { + return nil, err + } + return &Object{ + fs: f, + path: path, + file: uploadedFile, + }, nil +} + +// PutStream uploads to the remote path with the modTime given of indeterminate size +// +// May create the object even if it returns an error - if so +// will return the object and the error, otherwise will return +// nil and the error +func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { + return f.Put(ctx, in, src, options...) +} + +type chunkWriter struct { + sdk.FileUpload + filen *sdk.Filen + bucketAndRegion chan client.V3UploadResponse + chunkSize int64 + + chunksLock sync.Mutex + knownChunks map[int][]byte // known chunks to be hashed + nextChunkToHash int + + sizeLock sync.Mutex + size int64 +} + +func (cw *chunkWriter) WriteChunk(ctx context.Context, chunkNumber int, reader io.ReadSeeker) (bytesWritten int64, err error) { + realChunkNumber := int(int64(chunkNumber) * (cw.chunkSize) / sdk.ChunkSize) + chunk := make([]byte, sdk.ChunkSize, sdk.ChunkSize+cw.EncryptionKey.Cipher.Overhead()) + + totalWritten := int64(0) + for sliceStart := 0; sliceStart < int(cw.chunkSize); sliceStart += sdk.ChunkSize { + chunk = chunk[:sdk.ChunkSize] + chunkRead := 0 + for { + read, err := reader.Read(chunk[chunkRead:]) + chunkRead += read + if err == io.EOF || chunkRead == sdk.ChunkSize { + break + } + if err != nil { + return 0, err + } + } + if chunkRead == 0 { + break + } + chunkReadSlice := chunk[:chunkRead] + err = func() error { + cw.chunksLock.Lock() + defer cw.chunksLock.Unlock() + if cw.nextChunkToHash == realChunkNumber { + _, err := cw.Hasher.Write(chunkReadSlice) + if err != nil { + return err + } + cw.nextChunkToHash++ + for ; ; cw.nextChunkToHash++ { + chunk := cw.knownChunks[cw.nextChunkToHash] + if chunk == nil { + break + } + _, err := cw.Hasher.Write(chunk) + if err != nil { + return err + } + delete(cw.knownChunks, cw.nextChunkToHash) + } + } else { + chunkCopy := make([]byte, len(chunkReadSlice)) + copy(chunkCopy, chunkReadSlice) + cw.knownChunks[realChunkNumber] = chunkCopy + } + return nil + }() + if err != nil { + return totalWritten, err + } + resp, err := cw.filen.UploadChunk(ctx, &cw.FileUpload, realChunkNumber, chunkReadSlice) + select { // only care about getting this once + case cw.bucketAndRegion <- *resp: + default: + } + if err != nil { + return totalWritten, err + } + totalWritten += int64(len(chunkReadSlice)) + realChunkNumber++ + } + + cw.sizeLock.Lock() + cw.size += totalWritten + cw.sizeLock.Unlock() + return totalWritten, nil +} + +func (cw *chunkWriter) Close(ctx context.Context) error { 
+ cw.chunksLock.Lock() + defer close(cw.bucketAndRegion) + defer cw.chunksLock.Unlock() + cw.sizeLock.Lock() + size := cw.size + cw.sizeLock.Unlock() + if len(cw.knownChunks) != 0 { + return errors.New("not all chunks have been hashed") + } + _, err := cw.filen.CompleteFileUpload(ctx, &cw.FileUpload, cw.bucketAndRegion, size) + return err +} + +func (cw *chunkWriter) Abort(ctx context.Context) error { + return nil +} + +// OpenChunkWriter returns the chunk size and a ChunkWriter +// +// Pass in the remote and the src object +// You can also use options to hint at the desired chunk size +func (f *Fs) OpenChunkWriter(ctx context.Context, remote string, src fs.ObjectInfo, options ...fs.OpenOption) (info fs.ChunkWriterInfo, writer fs.ChunkWriter, err error) { + path := f.Enc.FromStandardPath(remote) + resolvedPath := f.resolvePath(path) + modTime := src.ModTime(ctx) + + chunkSize := int64(sdk.ChunkSize) + for _, option := range options { + switch x := option.(type) { + case *fs.ChunkOption: + chunkSize = x.ChunkSize + default: + if option.Mandatory() { + fs.Logf(option, "Unsupported mandatory option: %v", option) + } + } + } + + if chunkSize%sdk.ChunkSize != 0 { + return info, nil, errors.New("chunk size must be a multiple of 1MB") + } + + info = fs.ChunkWriterInfo{ + ChunkSize: chunkSize, + Concurrency: f.concurrency, + LeavePartsOnError: false, + } + + parent, err := f.filen.FindDirectoryOrCreate(ctx, pathModule.Dir(resolvedPath)) + if err != nil { + return info, nil, err + } + incompleteFile, err := types.NewIncompleteFile(f.filen.FileEncryptionVersion, pathModule.Base(resolvedPath), fs.MimeType(ctx, src), modTime, modTime, parent) + if err != nil { + return info, nil, err + } + // unused + fu := f.filen.NewFileUpload(incompleteFile) + return info, &chunkWriter{ + FileUpload: *fu, + filen: f.filen, + chunkSize: chunkSize, + bucketAndRegion: make(chan client.V3UploadResponse, 1), + knownChunks: make(map[int][]byte), + nextChunkToHash: 0, + size: 0, + }, nil +} + +// Mkdir makes the directory (container, bucket) +// +// Shouldn't return an error if it already exists +func (f *Fs) Mkdir(ctx context.Context, dir string) error { + dirObj, err := f.filen.FindDirectoryOrCreate(ctx, f.resolvePath(f.Enc.FromStandardPath(dir))) + if err != nil { + return err + } + if dir == f.root.path { + f.root.directory = dirObj + } + return nil +} + +// Rmdir removes the directory (container, bucket) if empty +// +// Return an error if it doesn't exist or isn't empty +func (f *Fs) Rmdir(ctx context.Context, dir string) error { + // find directory + resolvedPath := f.resolvePath(f.Enc.FromStandardPath(dir)) + //if resolvedPath == f.root.path { + // return fs.ErrorDirNotFound + //} + directory, err := f.filen.FindDirectory(ctx, resolvedPath) + if err != nil { + return err + } + if directory == nil { + return errors.New("directory not found") + } + + files, dirs, err := f.filen.ReadDirectory(ctx, directory) + if err != nil { + return err + } + if len(files) > 0 || len(dirs) > 0 { + return errors.New("directory is not empty") + } + + // trash directory + err = f.filen.TrashDirectory(ctx, directory) + if err != nil { + return err + } + return nil +} + +// Directory is Filen's directory type +type Directory struct { + fs *Fs + path string + directory types.DirectoryInterface +} + +// Fs returns read only access to the Fs that this object is part of +func (dir *Directory) Fs() fs.Info { + return dir.fs +} + +// String returns a description of the Object +func (dir *Directory) String() string { + if dir == nil { + return 
"" + } + return dir.Remote() +} + +// Remote returns the remote path +func (dir *Directory) Remote() string { + return dir.fs.Enc.ToStandardPath(dir.path) +} + +// ModTime returns the modification date of the file +// It should return a best guess if one isn't available +func (dir *Directory) ModTime(ctx context.Context) time.Time { + directory, ok := dir.directory.(*types.Directory) + if !ok { + return time.Time{} // todo add account creation time? + } + + if directory.Created.IsZero() { + obj, err := dir.fs.filen.FindDirectory(ctx, dir.fs.resolvePath(dir.path)) + newDir, ok := obj.(*types.Directory) + if err != nil || !ok { + return time.Now() + } + directory = newDir + dir.directory = newDir + } + return directory.Created +} + +// Size returns the size of the file +// +// filen doesn't have an efficient way to find the size of a directory +func (dir *Directory) Size() int64 { + return -1 +} + +// Items returns the count of items in this directory or this +// directory and subdirectories if known, -1 for unknown +func (dir *Directory) Items() int64 { + return -1 +} + +// ID returns the internal ID of this directory if known, or +// "" otherwise +func (dir *Directory) ID() string { + return dir.directory.GetUUID() +} + +// Object is Filen's normal file +type Object struct { + fs *Fs + path string + file *types.File + isMoved bool +} + +// Fs returns read only access to the Fs that this object is part of +func (o *Object) Fs() fs.Info { + return o.fs +} + +// String returns a description of the Object +func (o *Object) String() string { + if o == nil { + return "" + } + return o.Remote() +} + +// Remote returns the remote path +func (o *Object) Remote() string { + return o.fs.Enc.ToStandardPath(o.path) +} + +// ModTime returns the modification date of the file +// It should return a best guess if one isn't available +func (o *Object) ModTime(ctx context.Context) time.Time { + if o.file.LastModified.IsZero() { + newFile, err := o.fs.filen.FindFile(ctx, o.fs.resolvePath(o.path)) + if err == nil && newFile != nil { + o.file = newFile + } + } + return o.file.LastModified +} + +// Size returns the size of the file +func (o *Object) Size() int64 { + return o.file.Size +} + +// Hash returns the selected checksum of the file +// If no checksum is available it returns "" +func (o *Object) Hash(ctx context.Context, ty hash.Type) (string, error) { + if ty != hash.SHA512 { + return "", hash.ErrUnsupported + } + if o.file.Hash == "" { + foundFile, err := o.fs.filen.FindFile(ctx, o.fs.resolvePath(o.path)) + if err != nil { + return "", err + } + if foundFile == nil { + return "", fs.ErrorObjectNotFound + } + o.file = foundFile + } + return o.file.Hash, nil +} + +// Storable says whether this object can be stored +func (o *Object) Storable() bool { + return true +} + +// SetModTime sets the metadata on the object to set the modification date +func (o *Object) SetModTime(ctx context.Context, t time.Time) error { + o.file.LastModified = t + return o.fs.filen.UpdateMeta(ctx, o.file) +} + +// Open opens the file for read. 
Call Close() on the returned io.ReadCloser +func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadCloser, error) { + fs.FixRangeOption(options, o.Size()) + // Create variables to hold our options + var offset int64 + var limit int64 = -1 // -1 means no limit + + // Parse the options + for _, option := range options { + switch opt := option.(type) { + case *fs.RangeOption: + offset = opt.Start + limit = opt.End + 1 // +1 because End is inclusive + case *fs.SeekOption: + offset = opt.Offset + default: + if option.Mandatory() { + fs.Logf(option, "Unsupported mandatory option: %v", option) + } + } + } + + // Get the base reader + readCloser := o.fs.filen.GetDownloadReaderWithOffset(ctx, o.file, offset, limit) + return readCloser, nil +} + +// Update in to the object with the modTime given of the given size +// +// When called from outside an Fs by rclone, src.Size() will always be >= 0. +// But for unknown-sized objects (indicated by src.Size() == -1), Upload should either +// return an error or update the object properly (rather than e.g. calling panic). +func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error { + for _, option := range options { + if option.Mandatory() { + fs.Logf(option, "Unsupported mandatory option: %v", option) + } + } + newModTime := src.ModTime(ctx) + newIncomplete, err := o.file.NewFromBase(o.fs.filen.FileEncryptionVersion) + if err != nil { + return err + } + newIncomplete.LastModified = newModTime + newIncomplete.Created = newModTime + newIncomplete.SetMimeType(fs.MimeType(ctx, src)) + uploadedFile, err := o.fs.filen.UploadFile(ctx, newIncomplete, in) + if err != nil { + return err + } + o.file = uploadedFile + return nil +} + +// Remove this object +func (o *Object) Remove(ctx context.Context) error { + if o.isMoved { // doesn't exist at this path + return nil + } + err := o.fs.filen.TrashFile(ctx, *o.file) + if err != nil { + return err + } + return nil +} + +// MimeType returns the content type of the Object if +// known, or "" if not +func (o *Object) MimeType(_ context.Context) string { + return o.file.MimeType +} + +// ID returns the ID of the Object if known, or "" if not +func (o *Object) ID() string { + return o.file.GetUUID() +} + +// ParentID returns the ID of the parent directory if known or nil if not +func (o *Object) ParentID() string { + return o.file.GetParent() +} + +// Purge all files in the directory specified +// +// Implement this if you have a way of deleting all the files +// quicker than just running Remove() on the result of List() +// +// Return an error if it doesn't exist +func (f *Fs) Purge(ctx context.Context, dir string) error { + path := f.resolvePath(f.Enc.FromStandardPath(dir)) + foundDir, err := f.filen.FindDirectory(ctx, path) + if err != nil { + return err + } else if foundDir == nil { + return fs.ErrorDirNotFound + } + return f.filen.TrashDirectory(ctx, foundDir) +} + +// Move src to this remote using server-side move operations. 
+// +// # This is stored with the remote path given +// +// # It returns the destination Object and a possible error +// +// Will only be called if src.Fs().Name() == f.Name() +// +// If it isn't possible then return fs.ErrorCantMove +func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) { + obj, ok := src.(*Object) + if !ok { + return nil, fmt.Errorf("can't move %T: %w", src, fs.ErrorCantMove) + } + newRemote := f.Enc.FromStandardPath(remote) + oldPath, newPath := obj.fs.resolvePath(f.Enc.FromStandardPath(src.Remote())), f.resolvePath(newRemote) + oldParentPath, newParentPath := pathModule.Dir(oldPath), pathModule.Dir(newPath) + oldName, newName := pathModule.Base(oldPath), pathModule.Base(newPath) + if oldPath == newPath { + return nil, fs.ErrorCantMove + } + err := f.filen.Lock(ctx) + if err != nil { + return nil, err + } + defer f.filen.Unlock() + if oldParentPath == newParentPath { + err = f.rename(ctx, obj.file, newPath, newName) + } else if newName == oldName { + err = f.move(ctx, obj.file, newPath, newParentPath) + } else { + err = f.moveWithRename(ctx, obj.file, oldPath, oldName, newPath, newParentPath, newName) + } + if err != nil { + return nil, err + } + return moveFileObjIntoNewPath(obj, newRemote), nil +} + +// moveWithRename moves item to newPath +// using a more complex set of operations designed to handle the fact that +// Filen doesn't support a single moveRename operation +// which requires some annoying hackery to get around reliably +func (f *Fs) moveWithRename(ctx context.Context, item types.NonRootFileSystemObject, oldPath, oldName, newPath, newParentPath, newName string) error { + g, gCtx := errgroup.WithContext(ctx) + var ( + newParentDir types.DirectoryInterface + renamedToUUID bool + ) + + // rename to random UUID first + g.Go(func() error { + err := f.filen.Rename(gCtx, item, uuid.NewString()) + if err != nil { + return fmt.Errorf("failed to rename file: %w : %w", err, fs.ErrorCantMove) + } + renamedToUUID = true + return nil + }) + defer func() { + // safety to try and not leave the item in a bad state + if renamedToUUID { + err := f.filen.Rename(ctx, item, oldName) + if err != nil { + fmt.Printf("ERROR: FAILED TO REVERT UUID RENAME for file %s: %s", oldPath, err) + } + } + }() + + // find parent dir + g.Go(func() error { + var err error + newParentDir, err = f.filen.FindDirectoryOrCreate(gCtx, newParentPath) + return err + }) + + if err := g.Wait(); err != nil { + return err + } + + // move + oldParentUUID := item.GetParent() + err := f.filen.MoveItem(ctx, item, newParentDir.GetUUID(), true) + if err != nil { + return fmt.Errorf("failed to move file: %w : %w", err, fs.ErrorCantMove) + } + defer func() { + // safety to try and not leave the item in a bad state + if renamedToUUID { + err := f.filen.MoveItem(ctx, item, oldParentUUID, true) + if err != nil { + fmt.Printf("ERROR: FAILED TO REVERT MOVE for file %s: %s", oldPath, err) + } + } + }() + + // rename to final name + err = f.filen.Rename(ctx, item, newName) + if err != nil { + return fmt.Errorf("failed to rename file: %w : %w", err, fs.ErrorCantMove) + } + renamedToUUID = false + + return nil +} + +// move moves item to newPath +// by finding the parent and calling moveWithParentUUID +func (f *Fs) move(ctx context.Context, item types.NonRootFileSystemObject, newPath, newParentPath string) error { + newParentDir, err := f.filen.FindDirectoryOrCreate(ctx, newParentPath) + if err != nil { + return fmt.Errorf("failed to find or create directory: %w : %w", err, 
fs.ErrorCantMove) + } + return f.moveWithParentUUID(ctx, item, newParentDir.GetUUID()) +} + +// moveWithParentUUID moves item to newParentUUID +// using a simple filen.MoveItem operation +func (f *Fs) moveWithParentUUID(ctx context.Context, item types.NonRootFileSystemObject, newParentUUID string) error { + err := f.filen.MoveItem(ctx, item, newParentUUID, true) + if err != nil { + return fmt.Errorf("failed to move file: %w : %w", err, fs.ErrorCantMove) + } + + return nil +} + +// rename moves item to newPath +// using a simple Filen rename operation +func (f *Fs) rename(ctx context.Context, item types.NonRootFileSystemObject, newPath string, newName string) error { + err := f.filen.Rename(ctx, item, newName) + if err != nil { + return fmt.Errorf("failed to rename item: %w : %w", err, fs.ErrorCantMove) + } + return nil +} + +// moveFileObjIntoNewPath 'moves' an existing object into a new path +// invalidating the previous object +// and making a copy with the passed path +// +// this is to work around the fact that rclone expects to have to delete a file after moving +func moveFileObjIntoNewPath(o *Object, newPath string) *Object { + newFile := &Object{ + fs: o.fs, + path: newPath, + file: o.file, + } + o.isMoved = true + return newFile +} + +// DirMove moves src, srcRemote to this remote at dstRemote +// using server-side move operations. +// +// Will only be called if src.Fs().Name() == f.Name() +// +// If it isn't possible then return fs.ErrorCantDirMove +// +// If destination exists then return fs.ErrorDirExists +func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error { + + srcF, ok := src.(*Fs) + if !ok || srcF == nil { + return fs.ErrorCantDirMove + } + err := f.filen.Lock(ctx) + if err != nil { + return err + } + defer f.filen.Unlock() + g, gCtx := errgroup.WithContext(ctx) + var ( + srcDirInt types.DirectoryInterface + dstDir types.DirectoryInterface + srcPath = srcF.resolvePath(srcF.Enc.FromStandardPath(srcRemote)) + dstPath = f.resolvePath(f.Enc.FromStandardPath(dstRemote)) + ) + if srcPath == dstPath { + return fs.ErrorDirExists + } + + g.Go(func() error { + var err error + srcDirInt, err = srcF.filen.FindDirectory(gCtx, srcPath) + return err + }) + g.Go(func() error { + var err error + dstDir, err = f.filen.FindDirectory(gCtx, dstPath) + return err + }) + + if err := g.Wait(); err != nil { + return err + } + + if srcDirInt == nil { + return fs.ErrorDirNotFound + } + + if dstDir != nil { + return fs.ErrorDirExists + } + + srcDir, ok := srcDirInt.(*types.Directory) + if !ok { + return fs.ErrorCantDirMove + } + + return f.dirMoveEntireDir(ctx, srcDir, srcPath, dstPath) +} + +// dirMoveEntireDir moves srcDir to newPath +// used for the case where the target directory doesn't exist +func (f *Fs) dirMoveEntireDir(ctx context.Context, srcDir *types.Directory, oldPath string, newPath string) error { + oldParentPath, newParentPath := pathModule.Dir(oldPath), pathModule.Dir(newPath) + oldName, newName := pathModule.Base(oldPath), pathModule.Base(newPath) + var err error + if oldPath == newPath { + return fs.ErrorDirExists + } else if oldParentPath == newParentPath { + err = f.rename(ctx, srcDir, newPath, newName) + } else if newName == oldName { + err = f.move(ctx, srcDir, newPath, newParentPath) + } else { + err = f.moveWithRename(ctx, srcDir, oldPath, oldName, newPath, newParentPath, newName) + } + if err != nil { + return err + } + return err +} + +// ListR lists the objects and directories of the Fs starting +// from dir recursively into out. 
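+//
+// Filen's ListRecursive returns flat slices of files and directories, so full
+// paths are rebuilt locally from parent UUIDs via buildUUIDDirMaps and
+// getPathForUUID.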
+// +// dir should be "" to start from the root, and should not +// have trailing slashes. +// +// This should return ErrDirNotFound if the directory isn't +// found. +// +// It should call callback for each tranche of entries read. +// These need not be returned in any particular order. If +// callback returns an error then the listing will stop +// immediately. +// +// Don't implement this unless you have a more efficient way +// of listing recursively that doing a directory traversal. +func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) { + basePath := f.Enc.FromStandardPath(dir) + path := f.resolvePath(basePath) + foundDir, err := f.filen.FindDirectory(ctx, path) + if err != nil { + return err + } + if foundDir == nil { + return fs.ErrorDirNotFound + } + + files, dirs, err := f.filen.ListRecursive(ctx, foundDir) + if err != nil { + return err + } + listHelper := list.NewHelper(callback) + // have to build paths + uuidDirMap, uuidPathMap := buildUUIDDirMaps(basePath, foundDir, dirs) + + for _, dir := range dirs { + path, err := getPathForUUID(dir.GetUUID(), uuidPathMap, uuidDirMap) + if err != nil { + return err + } + err = listHelper.Add(&Directory{ + fs: f, + directory: dir, + path: path, + }) + if err != nil { + return err + } + } + + for _, file := range files { + parentPath, err := getPathForUUID(file.GetParent(), uuidPathMap, uuidDirMap) + if err != nil { + return err + } + err = listHelper.Add(&Object{ + fs: f, + file: file, + path: pathModule.Join(parentPath, file.GetName()), + }) + if err != nil { + return err + } + } + return listHelper.Flush() +} + +func buildUUIDDirMaps(rootPath string, rootDir types.DirectoryInterface, dirs []*types.Directory) (map[string]types.DirectoryInterface, map[string]string) { + uuidPathMap := make(map[string]string, len(dirs)+1) + uuidPathMap[rootDir.GetUUID()] = rootPath + + uuidDirMap := make(map[string]types.DirectoryInterface, len(dirs)+1) + uuidDirMap[rootDir.GetUUID()] = rootDir + for _, dir := range dirs { + uuidDirMap[dir.GetUUID()] = dir + } + return uuidDirMap, uuidPathMap +} + +func getPathForUUID(uuid string, uuidPathMap map[string]string, uuidDirMap map[string]types.DirectoryInterface) (string, error) { + if path, ok := uuidPathMap[uuid]; ok { + return path, nil + } + dir, ok := uuidDirMap[uuid] + if !ok { + return "", fs.ErrorDirNotFound + } + parentPath, err := getPathForUUID(dir.GetParent(), uuidPathMap, uuidDirMap) + if err != nil { + return "", err + } + path := pathModule.Join(parentPath, dir.GetName()) + uuidPathMap[uuid] = path + return path, nil +} + +// About gets quota information from the Fs +func (f *Fs) About(ctx context.Context) (*fs.Usage, error) { + userInfo, err := f.filen.GetUserInfo(ctx) + if err != nil { + return nil, err + } + + total := int64(userInfo.MaxStorage) + used := int64(userInfo.UsedStorage) + free := total - used + return &fs.Usage{ + Total: &total, + Used: &used, + Trashed: nil, + Other: nil, + Free: &free, + Objects: nil, + }, nil +} + +// CleanUp the trash in the Fs +func (f *Fs) CleanUp(ctx context.Context) error { + // not sure if this is implemented correctly, since this trashes ALL trash + // not just the trash in the currently mounted fs + // not currently wiping file versions because that feels dangerous + // especially since versioning can be toggled on/off + return f.filen.EmptyTrash(ctx) +} + +// helpers + +// resolvePath returns the absolute path specified by the input path, which is seen relative to the remote's root. 
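+//
+// For example, with a remote rooted at "base/dir", resolvePath("sub/file.txt")
+// returns "base/dir/sub/file.txt" via path.Join.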
+func (f *Fs) resolvePath(path string) string { + return pathModule.Join(f.root.path, path) +} + +// Check the interfaces are satisfied +var ( + _ fs.Fs = &Fs{} + _ fs.Mover = &Fs{} + _ fs.DirMover = &Fs{} + _ fs.Purger = &Fs{} + _ fs.PutStreamer = &Fs{} + _ fs.CleanUpper = &Fs{} + _ fs.ListRer = &Fs{} + _ fs.Abouter = &Fs{} + _ fs.OpenChunkWriter = &Fs{} + _ fs.Directory = &Directory{} + _ fs.Object = &Object{} + _ fs.MimeTyper = &Object{} + _ fs.IDer = &Object{} + _ fs.ParentIDer = &Object{} + _ fs.ChunkWriter = &chunkWriter{} +) + +// todo PublicLinker, +// we could technically implement ChangeNotifier, but +// 1) the current implementation on Filen's side isn't great, it's worth waiting until SSE +// 2) I'm not really clear that the benefits are so great +// a bunch of the information would get wasted, since the Filen does actually specify exact updates, +// whereas rclone seems to only accept a path and object type diff --git a/backend/filen/filen_test.go b/backend/filen/filen_test.go new file mode 100644 index 0000000000000..808456274278d --- /dev/null +++ b/backend/filen/filen_test.go @@ -0,0 +1,14 @@ +package filen + +import ( + "testing" + + "github.com/rclone/rclone/fstest/fstests" +) + +func TestIntegration(t *testing.T) { + fstests.Run(t, &fstests.Opt{ + RemoteName: "TestFilen:", + NilObject: (*Object)(nil), + }) +} diff --git a/backend/ftp/ftp.go b/backend/ftp/ftp.go index fe666fb710df4..5b73801b059ec 100644 --- a/backend/ftp/ftp.go +++ b/backend/ftp/ftp.go @@ -204,6 +204,12 @@ Example: Help: `URL for HTTP CONNECT proxy Set this to a URL for an HTTP proxy which supports the HTTP CONNECT verb. + +Supports the format http://user:pass@host:port, http://host:port, http://host. + +Example: + + http://myUser:myPass@proxyhostname.example.com:8000 `, Advanced: true, }, { @@ -892,7 +898,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e resultchan := make(chan []*ftp.Entry, 1) errchan := make(chan error, 1) - go func() { + go func(c *ftp.ServerConn) { result, err := c.List(f.dirFromStandardPath(path.Join(f.root, dir))) f.putFtpConnection(&c, err) if err != nil { @@ -900,7 +906,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e return } resultchan <- result - }() + }(c) // Wait for List for up to Timeout seconds timer := time.NewTimer(f.ci.TimeoutOrInfinite()) diff --git a/backend/googlecloudstorage/googlecloudstorage.go b/backend/googlecloudstorage/googlecloudstorage.go index 449e371bc019f..3c9062be775d0 100644 --- a/backend/googlecloudstorage/googlecloudstorage.go +++ b/backend/googlecloudstorage/googlecloudstorage.go @@ -346,9 +346,26 @@ can't check the size and hash but the file contents will be decompressed. Advanced: true, Default: false, }, { - Name: "endpoint", - Help: "Endpoint for the service.\n\nLeave blank normally.", + Name: "endpoint", + Help: `Custom endpoint for the storage API. Leave blank to use the provider default. + +When using a custom endpoint that includes a subpath (e.g. example.org/custom/endpoint), +the subpath will be ignored during upload operations due to a limitation in the +underlying Google API Go client library. +Download and listing operations will work correctly with the full endpoint path. 
+If you require subpath support for uploads, avoid using subpaths in your custom +endpoint configuration.`, Advanced: true, + Examples: []fs.OptionExample{{ + Value: "storage.example.org", + Help: "Specify a custom endpoint", + }, { + Value: "storage.example.org:4443", + Help: "Specifying a custom endpoint with port", + }, { + Value: "storage.example.org:4443/gcs/api", + Help: "Specifying a subpath, see the note, uploads won't use the custom path!", + }}, }, { Name: config.ConfigEncoding, Help: config.ConfigEncodingHelp, diff --git a/backend/imagekit/client/upload.go b/backend/imagekit/client/upload.go index 7964f8c460349..630f068c47e13 100644 --- a/backend/imagekit/client/upload.go +++ b/backend/imagekit/client/upload.go @@ -72,7 +72,7 @@ func (ik *ImageKit) Upload(ctx context.Context, file io.Reader, param UploadPara response := &UploadResult{} - formReader, contentType, _, err := rest.MultipartUpload(ctx, file, formParams, "file", param.FileName) + formReader, contentType, _, err := rest.MultipartUpload(ctx, file, formParams, "file", param.FileName, "application/octet-stream") if err != nil { return nil, nil, fmt.Errorf("failed to make multipart upload: %w", err) diff --git a/backend/internxt/auth.go b/backend/internxt/auth.go new file mode 100644 index 0000000000000..9ab3a64d2c384 --- /dev/null +++ b/backend/internxt/auth.go @@ -0,0 +1,319 @@ +// Authentication handling for Internxt +package internxt + +import ( + "context" + "crypto/sha256" + "encoding/base64" + "encoding/hex" + "errors" + "fmt" + "net" + "net/http" + "time" + + "github.com/golang-jwt/jwt/v5" + internxtauth "github.com/internxt/rclone-adapter/auth" + internxtconfig "github.com/internxt/rclone-adapter/config" + "github.com/rclone/rclone/fs" + "github.com/rclone/rclone/fs/config/configmap" + "github.com/rclone/rclone/fs/config/obscure" + "github.com/rclone/rclone/lib/oauthutil" + "github.com/tyler-smith/go-bip39" + "golang.org/x/oauth2" +) + +const ( + driveWebURL = "https://drive.internxt.com" + defaultLocalPort = "53682" + bindAddress = "127.0.0.1:" + defaultLocalPort + tokenExpiry2d = 48 * time.Hour +) + +// authResult holds the result from the SSO callback +type authResult struct { + mnemonic string + token string + err error +} + +// authServer handles the local HTTP callback for SSO login +type authServer struct { + listener net.Listener + server *http.Server + result chan authResult +} + +// newAuthServer creates a new local auth callback server +func newAuthServer() (*authServer, error) { + listener, err := net.Listen("tcp", bindAddress) + if err != nil { + return nil, fmt.Errorf("failed to start auth server on %s: %w", bindAddress, err) + } + + s := &authServer{ + listener: listener, + result: make(chan authResult, 1), + } + + mux := http.NewServeMux() + mux.HandleFunc("/", s.handleCallback) + s.server = &http.Server{Handler: mux} + + return s, nil +} + +// start begins serving requests in a goroutine +func (s *authServer) start() { + go func() { + err := s.server.Serve(s.listener) + if err != nil && err != http.ErrServerClosed { + s.result <- authResult{err: err} + } + }() +} + +// stop gracefully shuts down the server +func (s *authServer) stop() { + if s.server != nil { + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + _ = s.server.Shutdown(ctx) + } +} + +// handleCallback processes the SSO callback with mnemonic and token +func (s *authServer) handleCallback(w http.ResponseWriter, r *http.Request) { + query := r.URL.Query() + mnemonicB64 := query.Get("mnemonic") + 
tokenB64 := query.Get("newToken") + + // Helper to redirect and report error + redirectWithError := func(err error) { + http.Redirect(w, r, driveWebURL+"/auth-link-error", http.StatusFound) + s.result <- authResult{err: err} + } + + if mnemonicB64 == "" || tokenB64 == "" { + redirectWithError(errors.New("missing mnemonic or token in callback")) + return + } + + mnemonicBytes, err := base64.StdEncoding.DecodeString(mnemonicB64) + if err != nil { + redirectWithError(fmt.Errorf("failed to decode mnemonic: %w", err)) + return + } + + // Validate that the mnemonic is a valid BIP39 mnemonic + mnemonic := string(mnemonicBytes) + if !bip39.IsMnemonicValid(mnemonic) { + redirectWithError(errors.New("mnemonic is not a valid BIP39 mnemonic")) + return + } + + tokenBytes, err := base64.StdEncoding.DecodeString(tokenB64) + if err != nil { + redirectWithError(fmt.Errorf("failed to decode token: %w", err)) + return + } + + cfg := internxtconfig.NewDefaultToken(string(tokenBytes)) + resp, err := internxtauth.RefreshToken(r.Context(), cfg) + if err != nil { + redirectWithError(fmt.Errorf("failed to refresh token: %w", err)) + return + } + + if resp.NewToken == "" { + redirectWithError(errors.New("refresh response missing newToken")) + return + } + + http.Redirect(w, r, driveWebURL+"/auth-link-ok", http.StatusFound) + + s.result <- authResult{ + mnemonic: mnemonic, + token: resp.NewToken, + } +} + +// doAuth performs the interactive SSO authentication +func doAuth(ctx context.Context) (token, mnemonic string, err error) { + server, err := newAuthServer() + if err != nil { + return "", "", err + } + defer server.stop() + + server.start() + + callbackURL := "http://" + bindAddress + "/" + callbackB64 := base64.StdEncoding.EncodeToString([]byte(callbackURL)) + authURL := fmt.Sprintf("%s/login?universalLink=true&redirectUri=%s", driveWebURL, callbackB64) + + fs.Logf(nil, "") + fs.Logf(nil, "If your browser doesn't open automatically, visit this URL:") + fs.Logf(nil, "%s", authURL) + fs.Logf(nil, "") + fs.Logf(nil, "Log in and authorize rclone for access") + fs.Logf(nil, "Waiting for authentication...") + + if err = oauthutil.OpenURL(authURL); err != nil { + fs.Errorf(nil, "Failed to open browser: %v", err) + fs.Logf(nil, "Please manually open the URL above in your browser") + } + + select { + case result := <-server.result: + if result.err != nil { + return "", "", result.err + } + + fs.Logf(nil, "Authentication successful!") + return result.token, result.mnemonic, nil + + case <-ctx.Done(): + return "", "", fmt.Errorf("authentication cancelled: %w", ctx.Err()) + + case <-time.After(5 * time.Minute): + return "", "", errors.New("authentication timeout after 5 minutes") + } +} + +type userInfo struct { + RootFolderID string + Bucket string + BridgeUser string + UserID string +} + +type userInfoConfig struct { + Token string +} + +// getUserInfo fetches user metadata from the refresh endpoint +func getUserInfo(ctx context.Context, cfg *userInfoConfig) (*userInfo, error) { + // Call the refresh endpoint to get all user metadata + refreshCfg := internxtconfig.NewDefaultToken(cfg.Token) + resp, err := internxtauth.RefreshToken(ctx, refreshCfg) + if err != nil { + return nil, fmt.Errorf("failed to fetch user info: %w", err) + } + + if resp.User.Bucket == "" { + return nil, errors.New("API response missing user.bucket") + } + if resp.User.RootFolderID == "" { + return nil, errors.New("API response missing user.rootFolderId") + } + if resp.User.BridgeUser == "" { + return nil, errors.New("API response missing 
user.bridgeUser") + } + if resp.User.UserID == "" { + return nil, errors.New("API response missing user.userId") + } + + info := &userInfo{ + RootFolderID: resp.User.RootFolderID, + Bucket: resp.User.Bucket, + BridgeUser: resp.User.BridgeUser, + UserID: resp.User.UserID, + } + + fs.Debugf(nil, "User info: rootFolderId=%s, bucket=%s", + info.RootFolderID, info.Bucket) + + return info, nil +} + +// parseJWTExpiry extracts the expiry time from a JWT token string +func parseJWTExpiry(tokenString string) (time.Time, error) { + parser := jwt.NewParser(jwt.WithoutClaimsValidation()) + token, _, err := parser.ParseUnverified(tokenString, jwt.MapClaims{}) + if err != nil { + return time.Time{}, fmt.Errorf("failed to parse token: %w", err) + } + + claims, ok := token.Claims.(jwt.MapClaims) + if !ok { + return time.Time{}, errors.New("invalid token claims") + } + + exp, ok := claims["exp"].(float64) + if !ok { + return time.Time{}, errors.New("token missing expiration") + } + + return time.Unix(int64(exp), 0), nil +} + +// jwtToOAuth2Token converts a JWT string to an oauth2.Token with expiry +func jwtToOAuth2Token(jwtString string) (*oauth2.Token, error) { + expiry, err := parseJWTExpiry(jwtString) + if err != nil { + return nil, err + } + + return &oauth2.Token{ + AccessToken: jwtString, + TokenType: "Bearer", + Expiry: expiry, + }, nil +} + +// computeBasicAuthHeader creates the BasicAuthHeader for bucket operations +// Following the pattern from SDK's auth/access.go:96-102 +func computeBasicAuthHeader(bridgeUser, userID string) string { + sum := sha256.Sum256([]byte(userID)) + hexPass := hex.EncodeToString(sum[:]) + creds := fmt.Sprintf("%s:%s", bridgeUser, hexPass) + return "Basic " + base64.StdEncoding.EncodeToString([]byte(creds)) +} + +// refreshJWTToken refreshes the token using Internxt's refresh endpoint +func refreshJWTToken(ctx context.Context, name string, m configmap.Mapper) error { + currentToken, err := oauthutil.GetToken(name, m) + if err != nil { + return fmt.Errorf("failed to get current token: %w", err) + } + + mnemonic, ok := m.Get("mnemonic") + if !ok || mnemonic == "" { + return errors.New("mnemonic is missing from configuration") + } + + // Reveal the obscured mnemonic + mnemonic, err = obscure.Reveal(mnemonic) + if err != nil { + return fmt.Errorf("failed to reveal mnemonic: %w", err) + } + + cfg := internxtconfig.NewDefaultToken(currentToken.AccessToken) + resp, err := internxtauth.RefreshToken(ctx, cfg) + if err != nil { + return fmt.Errorf("refresh request failed: %w", err) + } + + if resp.NewToken == "" { + return errors.New("refresh response missing newToken") + } + + // Convert JWT to oauth2.Token format + token, err := jwtToOAuth2Token(resp.NewToken) + if err != nil { + return fmt.Errorf("failed to parse refreshed token: %w", err) + } + + err = oauthutil.PutToken(name, m, token, false) + if err != nil { + return fmt.Errorf("failed to save token: %w", err) + } + + if resp.User.Bucket != "" { + m.Set("bucket", resp.User.Bucket) + } + + fs.Debugf(name, "Token refreshed successfully, new expiry: %v", token.Expiry) + return nil +} diff --git a/backend/internxt/internxt.go b/backend/internxt/internxt.go index 4101e51c4483b..1387fb48b9129 100644 --- a/backend/internxt/internxt.go +++ b/backend/internxt/internxt.go @@ -1,67 +1,170 @@ +// Package internxt provides an interface to Internxt's Drive API package internxt import ( + "bytes" "context" "errors" "fmt" "io" + "net" "path" + "path/filepath" "strings" "time" - "github.com/StarHack/go-internxt-drive/auth" - 
"github.com/StarHack/go-internxt-drive/buckets" - config "github.com/StarHack/go-internxt-drive/config" - "github.com/StarHack/go-internxt-drive/files" - "github.com/StarHack/go-internxt-drive/folders" + "github.com/internxt/rclone-adapter/buckets" + config "github.com/internxt/rclone-adapter/config" + sdkerrors "github.com/internxt/rclone-adapter/errors" + "github.com/internxt/rclone-adapter/files" + "github.com/internxt/rclone-adapter/folders" + "github.com/internxt/rclone-adapter/users" "github.com/rclone/rclone/fs" + rclone_config "github.com/rclone/rclone/fs/config" "github.com/rclone/rclone/fs/config/configmap" "github.com/rclone/rclone/fs/config/configstruct" "github.com/rclone/rclone/fs/config/obscure" + "github.com/rclone/rclone/fs/fserrors" "github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/lib/dircache" + "github.com/rclone/rclone/lib/encoder" + "github.com/rclone/rclone/lib/oauthutil" + "github.com/rclone/rclone/lib/pacer" + "github.com/rclone/rclone/lib/random" ) +const ( + minSleep = 10 * time.Millisecond + maxSleep = 2 * time.Second + decayConstant = 2 // bigger for slower decay, exponential +) + +// shouldRetry determines if an error should be retried +func shouldRetry(ctx context.Context, err error) (bool, error) { + if fserrors.ContextError(ctx, &err) { + return false, err + } + var httpErr *sdkerrors.HTTPError + if errors.As(err, &httpErr) && httpErr.StatusCode() == 401 { + return true, err + } + + return fserrors.ShouldRetry(err), err +} + // Register with Fs func init() { fs.Register(&fs.RegInfo{ Name: "internxt", - Description: "internxt", + Description: "Internxt Drive", NewFs: NewFs, + Config: Config, Options: []fs.Option{ { - Name: "email", - Default: "", - Help: "The email of the user to operate as.", - Sensitive: true, + Name: "skipHashValidation", + Default: true, + Advanced: true, + Help: "Skip hash validation when downloading files.\n\nBy default, hash validation is disabled. 
Set this to false to enable validation.", }, { - Name: "password", - Default: "", - Help: "The password for the user.", - IsPassword: true, + Name: rclone_config.ConfigEncoding, + Help: rclone_config.ConfigEncodingHelp, + Advanced: true, + Default: encoder.EncodeInvalidUtf8 | + encoder.EncodeSlash | + encoder.EncodeBackSlash | + encoder.EncodeRightPeriod | + encoder.EncodeDot | + encoder.EncodeCrLf, }, - }}) + }}, + ) +} + +// Config implements the interactive configuration flow +func Config(ctx context.Context, name string, m configmap.Mapper, configIn fs.ConfigIn) (*fs.ConfigOut, error) { + _, tokenOK := m.Get("token") + mnemonic, mnemonicOK := m.Get("mnemonic") + + switch configIn.State { + case "": + // Check if we already have valid credentials + if tokenOK && mnemonicOK && mnemonic != "" { + // Get oauth2.Token from config + oauthToken, err := oauthutil.GetToken(name, m) + if err != nil { + fs.Errorf(nil, "Failed to get token: %v", err) + return fs.ConfigGoto("auth") + } + + if time.Until(oauthToken.Expiry) < tokenExpiry2d { + fs.Logf(nil, "Token expires soon, attempting refresh...") + err := refreshJWTToken(ctx, name, m) + if err != nil { + fs.Errorf(nil, "Failed to refresh token: %v", err) + return fs.ConfigGoto("auth") + } + fs.Logf(nil, "Token refreshed successfully") + return nil, nil + } + + // Token is valid - complete config without re-auth prompt + fs.Logf(nil, "Existing credentials are valid") + return nil, nil + } + + return fs.ConfigGoto("auth") + + case "auth": + newToken, newMnemonic, err := doAuth(ctx) + if err != nil { + return nil, fmt.Errorf("authentication failed: %w", err) + } + + // Store mnemonic (obscured) + m.Set("mnemonic", obscure.MustObscure(newMnemonic)) + + // Store token in oauth2 format + oauthToken, err := jwtToOAuth2Token(newToken) + if err != nil { + return nil, fmt.Errorf("failed to create oauth2 token: %w", err) + } + + err = oauthutil.PutToken(name, m, oauthToken, true) + if err != nil { + return nil, fmt.Errorf("failed to save token: %w", err) + } + + fs.Logf(nil, "") + fs.Logf(nil, "Success! 
Authentication complete.") + fs.Logf(nil, "") + + return nil, nil + } + + return nil, fmt.Errorf("unknown state %q", configIn.State) } // Options holds configuration options for this interface type Options struct { - Endpoint string `flag:"endpoint" help:"API endpoint"` - Email string `flag:"email" help:"Internxt account email"` - Password string `flag:"password" help:"Internxt account password"` + Token string `config:"token"` + Mnemonic string `config:"mnemonic"` + Encoding encoder.MultiEncoder `config:"encoding"` + SkipHashValidation bool `config:"skipHashValidation"` } // Fs represents an Internxt remote type Fs struct { - name string - root string - opt Options - dirCache *dircache.DirCache - cfg *config.Config - loginResponse *auth.LoginResponse - accessResponse *auth.AccessResponse - rootIsFile bool - rootFile *folders.File + name string + root string + opt Options + dirCache *dircache.DirCache + cfg *config.Config + features *fs.Features + pacer *fs.Pacer + tokenRenewer *oauthutil.Renew + bridgeUser string + userID string } // Object holds the data for a remote file object @@ -85,7 +188,7 @@ func (f *Fs) String() string { return f.name + ":" + f.root } // Features returns the optional features of this Fs func (f *Fs) Features() *fs.Features { - return &fs.Features{ReadMetadata: false, CanHaveEmptyDirectories: true} + return f.features } // Hashes returns type of hashes supported by Internxt @@ -93,8 +196,10 @@ func (f *Fs) Hashes() hash.Set { return hash.NewHashSet() } -// Precision returns the precision of mtime that the server responds -func (f *Fs) Precision() time.Duration { return time.Microsecond } +// Precision return the precision of this Fs +func (f *Fs) Precision() time.Duration { + return fs.ModTimeNotSupported +} // NewFs constructs an Fs from the path func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) { @@ -102,61 +207,105 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e if err := configstruct.Set(m, opt); err != nil { return nil, err } - clearPassword, err := obscure.Reveal(opt.Password) + + if opt.Mnemonic == "" { + return nil, errors.New("mnemonic is required - please run: rclone config reconnect " + name + ":") + } + + // Reveal the obscured mnemonic + var err error + opt.Mnemonic, err = obscure.Reveal(opt.Mnemonic) if err != nil { - return nil, err + return nil, fmt.Errorf("failed to reveal mnemonic: %w", err) } - cfg := config.NewDefault(opt.Email, clearPassword) - loginResponse, err := auth.Login(cfg) + + oauthToken, err := oauthutil.GetToken(name, m) if err != nil { - return nil, err + return nil, fmt.Errorf("failed to get token - please run: rclone config reconnect %s: - %w", name, err) } - accessResponse, err := auth.AccessLogin(cfg, loginResponse) + + oauthConfig := &oauthutil.Config{ + TokenURL: "https://gateway.internxt.com/drive/users/refresh", + } + + _, ts, err := oauthutil.NewClient(ctx, name, m, oauthConfig) if err != nil { - return nil, err + return nil, fmt.Errorf("failed to create oauth client: %w", err) } + cfg := config.NewDefaultToken(oauthToken.AccessToken) + cfg.Mnemonic = opt.Mnemonic + cfg.SkipHashValidation = opt.SkipHashValidation + + userInfo, err := getUserInfo(ctx, &userInfoConfig{Token: cfg.Token}) + if err != nil { + return nil, fmt.Errorf("failed to fetch user info: %w", err) + } + + cfg.RootFolderID = userInfo.RootFolderID + cfg.Bucket = userInfo.Bucket + cfg.BasicAuthHeader = computeBasicAuthHeader(userInfo.BridgeUser, userInfo.UserID) + f := &Fs{ - name: name, - 
root: root, - opt: *opt, - cfg: cfg, - loginResponse: loginResponse, - accessResponse: accessResponse, + name: name, + root: strings.Trim(root, "/"), + opt: *opt, + cfg: cfg, + bridgeUser: userInfo.BridgeUser, + userID: userInfo.UserID, } - f.dirCache = dircache.New("", cfg.RootFolderID, f) + f.pacer = fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))) - if root != "" { - parent, leaf := path.Split(root) - parent = strings.Trim(parent, "/") - dirID, err := f.dirCache.FindDir(ctx, parent, false) - if err != nil { - return nil, err - } - files, err := folders.ListFiles(f.cfg, dirID, folders.ListOptions{}) - if err != nil { - return nil, err - } - for _, e := range files { - name := e.PlainName - if len(e.Type) > 0 { - name += "." + e.Type + f.features = (&fs.Features{ + CanHaveEmptyDirectories: true, + }).Fill(ctx, f) + + if ts != nil { + f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error { + err := refreshJWTToken(ctx, name, m) + if err != nil { + return err } - if name == leaf { - f.rootIsFile = true - f.rootFile = &e - break + + newToken, err := oauthutil.GetToken(name, m) + if err != nil { + return fmt.Errorf("failed to get refreshed token: %w", err) } + f.cfg.Token = newToken.AccessToken + f.cfg.BasicAuthHeader = computeBasicAuthHeader(f.bridgeUser, f.userID) + + return nil + }) + f.tokenRenewer.Start() + } + + f.dirCache = dircache.New(f.root, cfg.RootFolderID, f) + + err = f.dirCache.FindRoot(ctx, false) + if err != nil { + // Assume it might be a file + newRoot, remote := dircache.SplitPath(f.root) + tempF := *f + tempF.dirCache = dircache.New(newRoot, f.cfg.RootFolderID, &tempF) + tempF.root = newRoot + + err = tempF.dirCache.FindRoot(ctx, false) + if err != nil { + return f, nil } - if !f.rootIsFile { - folderID, err := f.dirCache.FindDir(ctx, root, true) - if err != nil { - return nil, err + _, err := tempF.NewObject(ctx, remote) + if err != nil { + if err == fs.ErrorObjectNotFound { + return f, nil } - f.dirCache = dircache.New("", folderID, f) + return nil, err } + + f.dirCache = tempF.dirCache + f.root = tempF.root + return f, fs.ErrorIsFile } return f, nil @@ -164,27 +313,65 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e // Mkdir creates a new directory func (f *Fs) Mkdir(ctx context.Context, dir string) error { - _, err := f.dirCache.FindDir(ctx, dir, true) - if err != nil && strings.Contains(err.Error(), `"statusCode":400`) { - return nil + id, err := f.dirCache.FindDir(ctx, dir, true) + if err != nil { + return err } - return err + + f.dirCache.Put(dir, id) + + return nil } // Rmdir removes a directory +// Returns an error if it isn't empty func (f *Fs) Rmdir(ctx context.Context, dir string) error { + root := path.Join(f.root, dir) + if root == "" { + return errors.New("cannot remove root directory") + } + id, err := f.dirCache.FindDir(ctx, dir, false) if err != nil { - return err + return fs.ErrorDirNotFound } - fmt.Println(id) + // Check if directory is empty + var childFolders []folders.Folder + err = f.pacer.Call(func() (bool, error) { + var err error + childFolders, err = folders.ListAllFolders(ctx, f.cfg, id) + return shouldRetry(ctx, err) + }) + if err != nil { + return err + } + if len(childFolders) > 0 { + return fs.ErrorDirectoryNotEmpty + } - if id == f.cfg.RootFolderID { - return fs.ErrorDirNotFound + var childFiles []folders.File + err = f.pacer.Call(func() (bool, error) { + var err error + childFiles, err = folders.ListAllFiles(ctx, f.cfg, 
id) + return shouldRetry(ctx, err) + }) + if err != nil { + return err + } + if len(childFiles) > 0 { + return fs.ErrorDirectoryNotEmpty } - if err := folders.DeleteFolder(f.cfg, id); err != nil { + // Delete the directory + err = f.pacer.Call(func() (bool, error) { + err := folders.DeleteFolder(ctx, f.cfg, id) + if err != nil && strings.Contains(err.Error(), "404") { + return false, fs.ErrorDirNotFound + } + return shouldRetry(ctx, err) + }) + if err != nil { return err } @@ -195,12 +382,17 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error { // FindLeaf looks for a sub‑folder named `leaf` under the Internxt folder `pathID`. // If found, it returns its UUID and true. If not found, returns "", false. func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (string, bool, error) { - entries, err := folders.ListFolders(f.cfg, pathID, folders.ListOptions{}) + var entries []folders.Folder + err := f.pacer.Call(func() (bool, error) { + var err error + entries, err = folders.ListAllFolders(ctx, f.cfg, pathID) + return shouldRetry(ctx, err) + }) if err != nil { return "", false, err } for _, e := range entries { - if e.PlainName == leaf { + if f.opt.Encoding.ToStandardName(e.PlainName) == leaf { return e.UUID, true, nil } } @@ -209,45 +401,123 @@ func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (string, bool, e // CreateDir creates a new directory func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (string, error) { - resp, err := folders.CreateFolder(f.cfg, folders.CreateFolderRequest{ - PlainName: leaf, + request := folders.CreateFolderRequest{ + PlainName: f.opt.Encoding.FromStandardName(leaf), ParentFolderUUID: pathID, + ModificationTime: time.Now().UTC().Format(time.RFC3339), + } + + var resp *folders.Folder + err := f.pacer.CallNoRetry(func() (bool, error) { + var err error + resp, err = folders.CreateFolder(ctx, f.cfg, request) + return shouldRetry(ctx, err) }) if err != nil { - return "", err + // If folder already exists (409 conflict), try to find it + if strings.Contains(err.Error(), "409") || strings.Contains(err.Error(), "Conflict") { + existingID, found, findErr := f.FindLeaf(ctx, pathID, leaf) + if findErr == nil && found { + fs.Debugf(f, "Folder %q already exists in %q, using existing UUID: %s", leaf, pathID, existingID) + return existingID, nil + } + } + return "", fmt.Errorf("can't create folder, %w", err) } + return resp.UUID, nil } +// preUploadCheck checks if a file exists in the given directory +// Returns the file metadata if it exists, nil if not +func (f *Fs) preUploadCheck(ctx context.Context, leaf, directoryID string) (*folders.File, error) { + // Parse name and extension from the leaf + baseName := f.opt.Encoding.FromStandardName(leaf) + name := strings.TrimSuffix(baseName, filepath.Ext(baseName)) + ext := strings.TrimPrefix(filepath.Ext(baseName), ".") + + checkResult, err := files.CheckFilesExistence(ctx, f.cfg, directoryID, []files.FileExistenceCheck{ + { + PlainName: name, + Type: ext, + OriginalFile: struct{}{}, + }, + }) + + if err != nil { + // If existence check fails, assume file doesn't exist to allow upload to proceed + return nil, nil + } + + if len(checkResult.Files) > 0 && checkResult.Files[0].FileExists() { + existingUUID := checkResult.Files[0].UUID + if existingUUID != "" { + fileMeta, err := files.GetFileMeta(ctx, f.cfg, existingUUID) + if err == nil && fileMeta != nil { + return convertFileMetaToFile(fileMeta), nil + } + + if err != nil { + return nil, err + } + } + } + return nil, nil +} + +// 
convertFileMetaToFile converts files.FileMeta to folders.File +func convertFileMetaToFile(meta *files.FileMeta) *folders.File { + // FileMeta and folders.File have compatible structures + return &folders.File{ + ID: meta.ID, + UUID: meta.UUID, + FileID: meta.FileID, + PlainName: meta.PlainName, + Type: meta.Type, + Size: meta.Size, + Bucket: meta.Bucket, + FolderUUID: meta.FolderUUID, + EncryptVersion: meta.EncryptVersion, + ModificationTime: meta.ModificationTime, + } +} + // List lists a directory func (f *Fs) List(ctx context.Context, dir string) (fs.DirEntries, error) { - if f.rootIsFile && dir == "" { - return fs.DirEntries{newObjectWithFile(f, f.root, f.rootFile)}, nil - } dirID, err := f.dirCache.FindDir(ctx, dir, false) if err != nil { return nil, err } var out fs.DirEntries - foldersList, err := folders.ListFolders(f.cfg, dirID, folders.ListOptions{}) + + var foldersList []folders.Folder + err = f.pacer.Call(func() (bool, error) { + var err error + foldersList, err = folders.ListAllFolders(ctx, f.cfg, dirID) + return shouldRetry(ctx, err) + }) if err != nil { return nil, err } for _, e := range foldersList { - remote := path.Join(dir, e.PlainName) - f.dirCache.Put(remote, e.UUID) + remote := filepath.Join(dir, f.opt.Encoding.ToStandardName(e.PlainName)) out = append(out, fs.NewDir(remote, e.ModificationTime)) } - filesList, err := folders.ListFiles(f.cfg, dirID, folders.ListOptions{}) + var filesList []folders.File + err = f.pacer.Call(func() (bool, error) { + var err error + filesList, err = folders.ListAllFiles(ctx, f.cfg, dirID) + return shouldRetry(ctx, err) + }) if err != nil { return nil, err } for _, e := range filesList { - remote := path.Join(dir, e.PlainName) + remote := e.PlainName if len(e.Type) > 0 { remote += "." + e.Type } - f.dirCache.Put(remote, e.UUID) + remote = filepath.Join(dir, f.opt.Encoding.ToStandardName(remote)) out = append(out, newObjectWithFile(f, remote, &e)) } return out, nil @@ -257,23 +527,44 @@ func (f *Fs) List(ctx context.Context, dir string) (fs.DirEntries, error) { func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { remote := src.Remote() - parentDir, fileName := path.Split(remote) - parentDir = strings.Trim(parentDir, "/") - - folderUUID, err := f.dirCache.FindDir(ctx, parentDir, true) - + leaf, directoryID, err := f.dirCache.FindPath(ctx, remote, false) if err != nil { + if err == fs.ErrorDirNotFound { + o := &Object{ + f: f, + remote: remote, + size: src.Size(), + modTime: src.ModTime(ctx), + } + return o, o.Update(ctx, in, src, options...) + } return nil, err } - meta, err := buckets.UploadFileStream(f.cfg, folderUUID, fileName, in, src.Size()) + // Check if file already exists + existingFile, err := f.preUploadCheck(ctx, leaf, directoryID) if err != nil { return nil, err } - f.dirCache.Put(remote, meta.UUID) + // Create object - if file exists, populate it with existing metadata + o := &Object{ + f: f, + remote: remote, + size: src.Size(), + modTime: src.ModTime(ctx), + } + + if existingFile != nil { + // File exists - populate object with existing metadata + size, _ := existingFile.Size.Int64() + o.id = existingFile.FileID + o.uuid = existingFile.UUID + o.size = size + o.modTime = existingFile.ModificationTime + } - return newObjectWithMetaFile(f, remote, meta), nil + return o, o.Update(ctx, in, src, options...) 
} // Remove removes an object @@ -287,47 +578,41 @@ func (f *Fs) Remove(ctx context.Context, remote string) error { f.dirCache.FlushDir(parent) return nil } + dirID, err := f.dirCache.FindDir(ctx, remote, false) if err != nil { return err } - if err := folders.DeleteFolder(f.cfg, dirID); err != nil { + err = f.pacer.Call(func() (bool, error) { + err := folders.DeleteFolder(ctx, f.cfg, dirID) + return shouldRetry(ctx, err) + }) + if err != nil { return err } f.dirCache.FlushDir(remote) return nil } -// Move moves a directory (not implemented) -func (f *Fs) Move(ctx context.Context, src, dst fs.Object) error { - // return f.client.Rename(ctx, f.root+src.Remote(), f.root+dst.Remote()) - return nil -} - -// Copy copies a directory (not implemented) -func (f *Fs) Copy(ctx context.Context, src, dst fs.Object) error { - // return f.client.Copy(ctx, f.root+src.Remote(), f.root+dst.Remote()) - return nil -} - -// DirCacheFlush flushes the dir cache (not implemented) -func (f *Fs) DirCacheFlush(ctx context.Context) {} - // NewObject creates a new object func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) { - if f.rootIsFile { - leaf := path.Base(f.root) - if remote == "" || remote == leaf { - return newObjectWithFile(f, f.root, f.rootFile), nil - } + parentDir := filepath.Dir(remote) + + if parentDir == "." { + parentDir = "" } - parentDir, fileName := path.Split(remote) - parentDir = strings.Trim(parentDir, "/") + dirID, err := f.dirCache.FindDir(ctx, parentDir, false) if err != nil { - return nil, err + return nil, fs.ErrorObjectNotFound } - files, err := folders.ListFiles(f.cfg, dirID, folders.ListOptions{}) + + var files []folders.File + err = f.pacer.Call(func() (bool, error) { + var err error + files, err = folders.ListAllFiles(ctx, f.cfg, dirID) + return shouldRetry(ctx, err) + }) if err != nil { return nil, err } @@ -336,7 +621,11 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) { if len(e.Type) > 0 { name += "." 
+ e.Type } - if name == fileName { + decodedName := f.opt.Encoding.ToStandardName(name) + targetName := filepath.Base(remote) + match := decodedName == targetName + + if match { return newObjectWithFile(f, remote, &e), nil } } @@ -356,18 +645,6 @@ func newObjectWithFile(f *Fs, remote string, file *folders.File) fs.Object { } } -// newObjectWithMetaFile returns a new object by meta file info -func newObjectWithMetaFile(f *Fs, remote string, file *buckets.CreateMetaResp) fs.Object { - size, _ := file.Size.Int64() - return &Object{ - f: f, - remote: remote, - uuid: file.UUID, - size: size, - modTime: time.Now(), - } -} - // Fs returns the parent Fs func (o *Object) Fs() fs.Info { return o.f @@ -395,7 +672,7 @@ func (o *Object) ModTime(ctx context.Context) time.Time { // Hash returns the hash value (not implemented) func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) { - return "", errors.New("not implemented") + return "", hash.ErrUnsupported } // Storable returns if this object is storable @@ -405,38 +682,267 @@ func (o *Object) Storable() bool { // SetModTime sets the modified time func (o *Object) SetModTime(ctx context.Context, t time.Time) error { - return errors.New("not implemented") + return fs.ErrorCantSetModTime +} + +// About gets quota information +func (f *Fs) About(ctx context.Context) (*fs.Usage, error) { + var internxtLimit *users.LimitResponse + err := f.pacer.Call(func() (bool, error) { + var err error + internxtLimit, err = users.GetLimit(ctx, f.cfg) + return shouldRetry(ctx, err) + }) + if err != nil { + return nil, err + } + + var internxtUsage *users.UsageResponse + err = f.pacer.Call(func() (bool, error) { + var err error + internxtUsage, err = users.GetUsage(ctx, f.cfg) + return shouldRetry(ctx, err) + }) + if err != nil { + return nil, err + } + + usage := &fs.Usage{ + Used: fs.NewUsageValue(internxtUsage.Drive), + } + + usage.Total = fs.NewUsageValue(internxtLimit.MaxSpaceBytes) + usage.Free = fs.NewUsageValue(*usage.Total - *usage.Used) + + return usage, nil +} + +func (f *Fs) Shutdown(ctx context.Context) error { + buckets.WaitForPendingThumbnails() + + if f.tokenRenewer != nil { + f.tokenRenewer.Shutdown() + } + return nil } // Open opens a file for streaming func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadCloser, error) { - return buckets.DownloadFileStream(o.f.cfg, o.id) + fs.FixRangeOption(options, o.size) + rangeValue := "" + for _, option := range options { + switch option.(type) { + case *fs.RangeOption, *fs.SeekOption: + _, rangeValue = option.Header() + } + } + + if o.size == 0 { + return io.NopCloser(bytes.NewReader(nil)), nil + } + + var stream io.ReadCloser + err := o.f.pacer.Call(func() (bool, error) { + var err error + stream, err = buckets.DownloadFileStream(ctx, o.f.cfg, o.id, rangeValue) + return shouldRetry(ctx, err) + }) + if err != nil { + return nil, err + } + return stream, nil } -// Update updates an existing file +// Update updates an existing file or creates a new one func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error { - parentDir, _ := path.Split(o.remote) - parentDir = strings.Trim(parentDir, "/") - folderUUID, err := o.f.dirCache.FindDir(ctx, parentDir, false) + remote := o.remote + + origBaseName := filepath.Base(remote) + origName := strings.TrimSuffix(origBaseName, filepath.Ext(origBaseName)) + origType := strings.TrimPrefix(filepath.Ext(origBaseName), ".") + + // Create directory if it doesn't exist + _, dirID, err := 
o.f.dirCache.FindPath(ctx, remote, true) if err != nil { return err } - if err := files.DeleteFile(o.f.cfg, o.uuid); err != nil { - return err + // rename based rollback pattern + // old file is preserved until new upload succeeds + + var backupUUID string + var backupName, backupType string + oldUUID := o.uuid + + // Step 1: If file exists, rename to backup (preserves old file during upload) + if oldUUID != "" { + // Generate unique backup name + baseName := filepath.Base(remote) + name := strings.TrimSuffix(baseName, filepath.Ext(baseName)) + ext := strings.TrimPrefix(filepath.Ext(baseName), ".") + + backupSuffix := fmt.Sprintf(".rclone-backup-%s", random.String(8)) + backupName = o.f.opt.Encoding.FromStandardName(name + backupSuffix) + backupType = ext + + // Rename existing file to backup name + err = o.f.pacer.Call(func() (bool, error) { + err := files.RenameFile(ctx, o.f.cfg, oldUUID, backupName, backupType) + if err != nil { + // Handle 409 Conflict: Treat as success. + var httpErr *sdkerrors.HTTPError + if errors.As(err, &httpErr) && httpErr.StatusCode() == 409 { + return false, nil + } + } + return shouldRetry(ctx, err) + }) + if err != nil { + return fmt.Errorf("failed to rename existing file to backup: %w", err) + } + backupUUID = oldUUID + + fs.Debugf(o.f, "Renamed existing file %s to backup %s.%s (UUID: %s)", remote, backupName, backupType, backupUUID) + } + + var meta *buckets.CreateMetaResponse + err = o.f.pacer.CallNoRetry(func() (bool, error) { + var err error + meta, err = buckets.UploadFileStreamAuto(ctx, + o.f.cfg, + dirID, + o.f.opt.Encoding.FromStandardName(filepath.Base(remote)), + in, + src.Size(), + src.ModTime(ctx), + ) + return shouldRetry(ctx, err) + }) + + if err != nil { + meta, err = o.recoverFromTimeoutConflict(ctx, err, remote, dirID) } - meta, err := buckets.UploadFileStream(o.f.cfg, folderUUID, path.Base(o.remote), in, src.Size()) if err != nil { + o.restoreBackupFile(ctx, backupUUID, origName, origType) return err } + + // Update object metadata o.uuid = meta.UUID + o.id = meta.FileID o.size = src.Size() - o.modTime = time.Now() + o.remote = remote + + // Step 3: Upload succeeded - delete the backup file + if backupUUID != "" { + fs.Debugf(o.f, "Upload succeeded, deleting backup file %s.%s (UUID: %s)", backupName, backupType, backupUUID) + err := o.f.pacer.Call(func() (bool, error) { + err := files.DeleteFile(ctx, o.f.cfg, backupUUID) + if err != nil { + var httpErr *sdkerrors.HTTPError + if errors.As(err, &httpErr) { + // Treat 404 (Not Found) and 204 (No Content) as success + switch httpErr.StatusCode() { + case 404, 204: + return false, nil + } + } + } + return shouldRetry(ctx, err) + }) + if err != nil { + fs.Errorf(o.f, "Failed to delete backup file %s.%s (UUID: %s): %v. 
This may leave an orphaned backup file.", + backupName, backupType, backupUUID, err) + // Don't fail the upload just because backup deletion failed + } else { + fs.Debugf(o.f, "Successfully deleted backup file") + } + } + return nil } +// isTimeoutError checks if an error is a timeout using proper error type checking +func isTimeoutError(err error) bool { + if errors.Is(err, context.DeadlineExceeded) { + return true + } + var netErr net.Error + if errors.As(err, &netErr) && netErr.Timeout() { + return true + } + return false +} + +// isConflictError checks if an error indicates a file conflict (409) +func isConflictError(err error) bool { + errMsg := err.Error() + return strings.Contains(errMsg, "409") || + strings.Contains(errMsg, "Conflict") || + strings.Contains(errMsg, "already exists") +} + +// recoverFromTimeoutConflict attempts to recover from a timeout or conflict error +func (o *Object) recoverFromTimeoutConflict(ctx context.Context, uploadErr error, remote, dirID string) (*buckets.CreateMetaResponse, error) { + if !isTimeoutError(uploadErr) && !isConflictError(uploadErr) { + return nil, uploadErr + } + + baseName := filepath.Base(remote) + encodedName := o.f.opt.Encoding.FromStandardName(baseName) + + var meta *buckets.CreateMetaResponse + checkErr := o.f.pacer.Call(func() (bool, error) { + existingFile, err := o.f.preUploadCheck(ctx, encodedName, dirID) + if err != nil { + return shouldRetry(ctx, err) + } + if existingFile != nil { + name := strings.TrimSuffix(baseName, filepath.Ext(baseName)) + ext := strings.TrimPrefix(filepath.Ext(baseName), ".") + + meta = &buckets.CreateMetaResponse{ + UUID: existingFile.UUID, + FileID: existingFile.FileID, + Name: name, + PlainName: name, + Type: ext, + Size: existingFile.Size, + } + o.id = existingFile.FileID + } + return false, nil + }) + + if checkErr != nil { + return nil, uploadErr + } + + if meta != nil { + return meta, nil + } + + return nil, uploadErr +} + +// restoreBackupFile restores a backup file after upload failure +func (o *Object) restoreBackupFile(ctx context.Context, backupUUID, origName, origType string) { + if backupUUID == "" { + return + } + + o.f.pacer.Call(func() (bool, error) { + err := files.RenameFile(ctx, o.f.cfg, backupUUID, + o.f.opt.Encoding.FromStandardName(origName), origType) + return shouldRetry(ctx, err) + }) +} + // Remove deletes a file func (o *Object) Remove(ctx context.Context) error { - return files.DeleteFile(o.f.cfg, o.uuid) + return o.f.pacer.Call(func() (bool, error) { + err := files.DeleteFile(ctx, o.f.cfg, o.uuid) + return shouldRetry(ctx, err) + }) } diff --git a/backend/internxt/internxt_test.go b/backend/internxt/internxt_test.go index 4536d5f95a581..01e23b3b20606 100644 --- a/backend/internxt/internxt_test.go +++ b/backend/internxt/internxt_test.go @@ -1,47 +1,14 @@ -package internxt +package internxt_test import ( - "context" - "errors" "testing" - "github.com/rclone/rclone/fs" - "github.com/rclone/rclone/fs/operations" - "github.com/rclone/rclone/fstest" "github.com/rclone/rclone/fstest/fstests" - "github.com/stretchr/testify/require" ) +// TestIntegration runs integration tests against the remote func TestIntegration(t *testing.T) { fstests.Run(t, &fstests.Opt{ RemoteName: "TestInternxt:", - NilObject: (*Object)(nil), }) } - -// TestMakeDir verifies that basic operations (such as mkdir) can be performed -func TestMakeDir(t *testing.T) { - const ( - remoteName = "TestInternxt:" - ) - ctx := context.Background() - fstest.Initialise() - subRemoteName, _, err := 
fstest.RandomRemoteName(remoteName) - require.NoError(t, err) - f, err := fs.NewFs(ctx, subRemoteName) - if errors.Is(err, fs.ErrorNotFoundInConfigFile) { - t.Logf("Didn't find %q in config file - skipping tests", remoteName) - return - } - require.NoError(t, err) - - entr, err := f.List(ctx, "") - t.Log(entr) - require.NoError(t, err) - - err = f.Mkdir(ctx, "hello-integration-test") - require.NoError(t, err) - - // Tear down - require.NoError(t, operations.Purge(ctx, f, "")) -} diff --git a/backend/memory/memory.go b/backend/memory/memory.go index 92a79bcdc5142..b6e2683ed547d 100644 --- a/backend/memory/memory.go +++ b/backend/memory/memory.go @@ -6,6 +6,7 @@ import ( "context" "crypto/md5" "encoding/hex" + "errors" "fmt" "io" "path" @@ -24,7 +25,8 @@ import ( var ( hashType = hash.MD5 // the object storage is persistent - buckets = newBucketsInfo() + buckets = newBucketsInfo() + errWriteOnly = errors.New("can't read when using --memory-discard") ) // Register with Fs @@ -33,12 +35,32 @@ func init() { Name: "memory", Description: "In memory object storage system.", NewFs: NewFs, - Options: []fs.Option{}, + Options: []fs.Option{{ + Name: "discard", + Default: false, + Advanced: true, + Help: `If set all writes will be discarded and reads will return an error + +If set then when files are uploaded the contents not be saved. The +files will appear to have been uploaded but will give an error on +read. Files will have their MD5 sum calculated on upload which takes +very little CPU time and allows the transfers to be checked. + +This can be useful for testing performance. + +Probably most easily used by using the connection string syntax: + + :memory,discard:bucket + +`, + }}, }) } // Options defines the configuration for this backend -type Options struct{} +type Options struct { + Discard bool `config:"discard"` +} // Fs represents a remote memory server type Fs struct { @@ -164,6 +186,7 @@ type objectData struct { hash string mimeType string data []byte + size int64 } // Object describes a memory object @@ -558,7 +581,7 @@ func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) { if t != hashType { return "", hash.ErrUnsupported } - if o.od.hash == "" { + if o.od.hash == "" && !o.fs.opt.Discard { sum := md5.Sum(o.od.data) o.od.hash = hex.EncodeToString(sum[:]) } @@ -567,7 +590,7 @@ func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) { // Size returns the size of an object in bytes func (o *Object) Size() int64 { - return int64(len(o.od.data)) + return o.od.size } // ModTime returns the modification time of the object @@ -593,6 +616,9 @@ func (o *Object) Storable() bool { // Open an object for read func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) { + if o.fs.opt.Discard { + return nil, errWriteOnly + } var offset, limit int64 = 0, -1 for _, option := range options { switch x := option.(type) { @@ -624,13 +650,24 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read // The new object may have been created if an error is returned func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) { bucket, bucketPath := o.split() - data, err := io.ReadAll(in) + var data []byte + var size int64 + var hash string + if o.fs.opt.Discard { + h := md5.New() + size, err = io.Copy(h, in) + hash = hex.EncodeToString(h.Sum(nil)) + } else { + data, err = io.ReadAll(in) + size = int64(len(data)) + } if err != nil { return fmt.Errorf("failed to 
update memory object: %w", err) } o.od = &objectData{ data: data, - hash: "", + size: size, + hash: hash, modTime: src.ModTime(ctx), mimeType: fs.MimeType(ctx, src), } diff --git a/backend/onedrive/onedrive.go b/backend/onedrive/onedrive.go index 290c592202369..b9fe4509f0f4f 100644 --- a/backend/onedrive/onedrive.go +++ b/backend/onedrive/onedrive.go @@ -403,7 +403,7 @@ This is why this flag is not set as the default. As a rule of thumb if nearly all of your data is under rclone's root directory (the |root/directory| in |onedrive:root/directory|) then -using this flag will be be a big performance win. If your data is +using this flag will be a big performance win. If your data is mostly not under the root then using this flag will be a big performance loss. diff --git a/backend/oracleobjectstorage/waiter.go b/backend/oracleobjectstorage/waiter.go index d09f00033fd00..ec6dc7463a350 100644 --- a/backend/oracleobjectstorage/waiter.go +++ b/backend/oracleobjectstorage/waiter.go @@ -60,9 +60,6 @@ type StateChangeConf struct { func (conf *StateChangeConf) WaitForStateContext(ctx context.Context, entityType string) (any, error) { // fs.Debugf(entityType, "Waiting for state to become: %s", conf.Target) - notfoundTick := 0 - targetOccurrence := 0 - // Set a default for times to check for not found if conf.NotFoundChecks == 0 { conf.NotFoundChecks = 20 @@ -84,9 +81,11 @@ func (conf *StateChangeConf) WaitForStateContext(ctx context.Context, entityType // cancellation channel for the refresh loop cancelCh := make(chan struct{}) - result := Result{} - go func() { + notfoundTick := 0 + targetOccurrence := 0 + result := Result{} + defer close(resCh) select { diff --git a/backend/pcloud/api/types.go b/backend/pcloud/api/types.go index 6f2d6361bc13f..d567d49afae51 100644 --- a/backend/pcloud/api/types.go +++ b/backend/pcloud/api/types.go @@ -222,3 +222,11 @@ type UserInfo struct { } `json:"steps"` } `json:"journey"` } + +// DiffResult is the response from /diff +type DiffResult struct { + Result int `json:"result"` + DiffID int64 `json:"diffid"` + Entries []map[string]any `json:"entries"` + Error string `json:"error"` +} diff --git a/backend/pcloud/pcloud.go b/backend/pcloud/pcloud.go index ff7fa1a68215b..4b52e391f8f6a 100644 --- a/backend/pcloud/pcloud.go +++ b/backend/pcloud/pcloud.go @@ -171,6 +171,7 @@ type Fs struct { dirCache *dircache.DirCache // Map of directory path to directory id pacer *fs.Pacer // pacer for API calls tokenRenewer *oauthutil.Renew // renew the token on expiry + lastDiffID int64 // change tracking state for diff long-polling } // Object describes a pcloud object @@ -1033,6 +1034,137 @@ func (f *Fs) Shutdown(ctx context.Context) error { return nil } +// ChangeNotify implements fs.Features.ChangeNotify +func (f *Fs) ChangeNotify(ctx context.Context, notify func(string, fs.EntryType), ch <-chan time.Duration) { + // Start long-poll loop in background + go f.changeNotifyLoop(ctx, notify, ch) +} + +// changeNotifyLoop contains the blocking long-poll logic. 
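+//
+// It polls the pCloud /diff endpoint: the first request passes last=0 to
+// establish the current diff ID, and subsequent requests pass diffid with
+// block=1 so the server holds the connection until new events arrive (up to
+// the 90 second client timeout below).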
+func (f *Fs) changeNotifyLoop(ctx context.Context, notify func(string, fs.EntryType), ch <-chan time.Duration) { + // Standard polling interval + interval := 30 * time.Second + + // Start with diffID = 0 to get the current state + var diffID int64 + + // Helper to process changes from the diff API + handleChanges := func(entries []map[string]any) { + notifiedPaths := make(map[string]bool) + + for _, entry := range entries { + meta, ok := entry["metadata"].(map[string]any) + if !ok { + continue + } + + // Robust extraction of ParentFolderID + var pid int64 + if val, ok := meta["parentfolderid"]; ok { + switch v := val.(type) { + case float64: + pid = int64(v) + case int64: + pid = v + case int: + pid = int64(v) + } + } + + // Resolve the path using dirCache.GetInv + // pCloud uses "d" prefix for directory IDs in cache, but API returns numbers + dirID := fmt.Sprintf("d%d", pid) + parentPath, ok := f.dirCache.GetInv(dirID) + + if !ok { + // Parent not in cache, so we can ignore this change as it is outside + // of what the mount has seen or cares about. + continue + } + + name, _ := meta["name"].(string) + fullPath := path.Join(parentPath, name) + + // Determine EntryType (File or Directory) + entryType := fs.EntryObject + if isFolder, ok := meta["isfolder"].(bool); ok && isFolder { + entryType = fs.EntryDirectory + } + + // Deduplicate notifications for this batch + if !notifiedPaths[fullPath] { + fs.Debugf(f, "ChangeNotify: detected change in %q (type: %v)", fullPath, entryType) + notify(fullPath, entryType) + notifiedPaths[fullPath] = true + } + } + } + + for { + // Check context and channel + select { + case <-ctx.Done(): + return + case newInterval, ok := <-ch: + if !ok { + return + } + interval = newInterval + default: + } + + // Setup /diff Request + opts := rest.Opts{ + Method: "GET", + Path: "/diff", + Parameters: url.Values{}, + } + + if diffID != 0 { + opts.Parameters.Set("diffid", strconv.FormatInt(diffID, 10)) + opts.Parameters.Set("block", "1") + } else { + opts.Parameters.Set("last", "0") + } + + // Perform Long-Poll + // Timeout set to 90s (server usually blocks for 60s max) + reqCtx, cancel := context.WithTimeout(ctx, 90*time.Second) + var result api.DiffResult + + _, err := f.srv.CallJSON(reqCtx, &opts, nil, &result) + cancel() + + if err != nil { + if errors.Is(err, context.Canceled) { + return + } + // Ignore timeout errors as they are normal for long-polling + if !errors.Is(err, context.DeadlineExceeded) { + fs.Infof(f, "ChangeNotify: polling error: %v. Waiting %v.", err, interval) + time.Sleep(interval) + } + continue + } + + // If result is not 0, reset DiffID to resync + if result.Result != 0 { + diffID = 0 + time.Sleep(2 * time.Second) + continue + } + + if result.DiffID != 0 { + diffID = result.DiffID + f.lastDiffID = diffID + } + + if len(result.Entries) > 0 { + handleChanges(result.Entries) + } + } +} + // Hashes returns the supported hash sets. func (f *Fs) Hashes() hash.Set { // EU region supports SHA1 and SHA256 (but rclone doesn't @@ -1327,7 +1459,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op // opts.Body=0), so upload it as a multipart form POST with // Content-Length set. 
if size == 0 { - formReader, contentType, overhead, err := rest.MultipartUpload(ctx, in, opts.Parameters, "content", leaf) + formReader, contentType, overhead, err := rest.MultipartUpload(ctx, in, opts.Parameters, "content", leaf, opts.ContentType) if err != nil { return fmt.Errorf("failed to make multipart upload for 0 length file: %w", err) } @@ -1401,6 +1533,7 @@ var ( _ fs.ListPer = (*Fs)(nil) _ fs.Abouter = (*Fs)(nil) _ fs.Shutdowner = (*Fs)(nil) + _ fs.ChangeNotifier = (*Fs)(nil) _ fs.Object = (*Object)(nil) _ fs.IDer = (*Object)(nil) ) diff --git a/backend/pikpak/pikpak.go b/backend/pikpak/pikpak.go index 7cce6695e4722..142905475dee2 100644 --- a/backend/pikpak/pikpak.go +++ b/backend/pikpak/pikpak.go @@ -1384,7 +1384,7 @@ func (f *Fs) uploadByForm(ctx context.Context, in io.Reader, name string, size i for i := range iVal.NumField() { params.Set(iTyp.Field(i).Tag.Get("json"), iVal.Field(i).String()) } - formReader, contentType, overhead, err := rest.MultipartUpload(ctx, in, params, "file", name) + formReader, contentType, overhead, err := rest.MultipartUpload(ctx, in, params, "file", name, "application/octet-stream") if err != nil { return fmt.Errorf("failed to make multipart upload: %w", err) } diff --git a/backend/s3/provider/BizflyCloud.yaml b/backend/s3/provider/BizflyCloud.yaml new file mode 100644 index 0000000000000..42232d4603b31 --- /dev/null +++ b/backend/s3/provider/BizflyCloud.yaml @@ -0,0 +1,15 @@ +name: BizflyCloud +description: Bizfly Cloud Simple Storage +region: + hn: Ha Noi + hcm: Ho Chi Minh +endpoint: + hn.ss.bfcplatform.vn: Hanoi endpoint + hcm.ss.bfcplatform.vn: Ho Chi Minh endpoint +acl: {} +bucket_acl: true +quirks: + force_path_style: true + list_url_encode: false + use_multipart_etag: false + use_already_exists: false diff --git a/backend/s3/provider/Linode.yaml b/backend/s3/provider/Linode.yaml index 0923290d0d603..2e60541743894 100644 --- a/backend/s3/provider/Linode.yaml +++ b/backend/s3/provider/Linode.yaml @@ -1,26 +1,26 @@ name: Linode description: Linode Object Storage endpoint: - nl-ams-1.linodeobjects.com: Amsterdam (Netherlands), nl-ams-1 - us-southeast-1.linodeobjects.com: Atlanta, GA (USA), us-southeast-1 - in-maa-1.linodeobjects.com: Chennai (India), in-maa-1 - us-ord-1.linodeobjects.com: Chicago, IL (USA), us-ord-1 - eu-central-1.linodeobjects.com: Frankfurt (Germany), eu-central-1 - id-cgk-1.linodeobjects.com: Jakarta (Indonesia), id-cgk-1 - gb-lon-1.linodeobjects.com: London 2 (Great Britain), gb-lon-1 - us-lax-1.linodeobjects.com: Los Angeles, CA (USA), us-lax-1 - es-mad-1.linodeobjects.com: Madrid (Spain), es-mad-1 - au-mel-1.linodeobjects.com: Melbourne (Australia), au-mel-1 - us-mia-1.linodeobjects.com: Miami, FL (USA), us-mia-1 - it-mil-1.linodeobjects.com: Milan (Italy), it-mil-1 - us-east-1.linodeobjects.com: Newark, NJ (USA), us-east-1 - jp-osa-1.linodeobjects.com: Osaka (Japan), jp-osa-1 - fr-par-1.linodeobjects.com: Paris (France), fr-par-1 - br-gru-1.linodeobjects.com: São Paulo (Brazil), br-gru-1 - us-sea-1.linodeobjects.com: Seattle, WA (USA), us-sea-1 - ap-south-1.linodeobjects.com: Singapore, ap-south-1 - sg-sin-1.linodeobjects.com: Singapore 2, sg-sin-1 - se-sto-1.linodeobjects.com: Stockholm (Sweden), se-sto-1 - us-iad-1.linodeobjects.com: Washington, DC, (USA), us-iad-1 + nl-ams-1.linodeobjects.com: Amsterdam, NL (nl-ams-1) + us-southeast-1.linodeobjects.com: Atlanta, GA, US (us-southeast-1) + in-maa-1.linodeobjects.com: Chennai, IN (in-maa-1) + us-ord-1.linodeobjects.com: Chicago, IL, US (us-ord-1) + 
eu-central-1.linodeobjects.com: Frankfurt, DE (eu-central-1) + id-cgk-1.linodeobjects.com: Jakarta, ID (id-cgk-1) + gb-lon-1.linodeobjects.com: London 2, UK (gb-lon-1) + us-lax-1.linodeobjects.com: Los Angeles, CA, US (us-lax-1) + es-mad-1.linodeobjects.com: Madrid, ES (es-mad-1) + us-mia-1.linodeobjects.com: Miami, FL, US (us-mia-1) + it-mil-1.linodeobjects.com: Milan, IT (it-mil-1) + us-east-1.linodeobjects.com: Newark, NJ, US (us-east-1) + jp-osa-1.linodeobjects.com: Osaka, JP (jp-osa-1) + fr-par-1.linodeobjects.com: Paris, FR (fr-par-1) + br-gru-1.linodeobjects.com: Sao Paulo, BR (br-gru-1) + us-sea-1.linodeobjects.com: Seattle, WA, US (us-sea-1) + ap-south-1.linodeobjects.com: Singapore, SG (ap-south-1) + sg-sin-1.linodeobjects.com: Singapore 2, SG (sg-sin-1) + se-sto-1.linodeobjects.com: Stockholm, SE (se-sto-1) + jp-tyo-1.linodeobjects.com: Tokyo 3, JP (jp-tyo-1) + us-iad-10.linodeobjects.com: Washington, DC, US (us-iad-10) acl: {} bucket_acl: true diff --git a/backend/s3/provider/Selectel.yaml b/backend/s3/provider/Selectel.yaml index 70cc74bad89a3..29141ccc27474 100644 --- a/backend/s3/provider/Selectel.yaml +++ b/backend/s3/provider/Selectel.yaml @@ -2,7 +2,17 @@ name: Selectel description: Selectel Object Storage region: ru-1: St. Petersburg + ru-3: St. Petersburg + ru-7: Moscow + gis-1: Moscow + kz-1: Kazakhstan + uz-2: Uzbekistan endpoint: - s3.ru-1.storage.selcloud.ru: Saint Petersburg + s3.ru-1.storage.selcloud.ru: St. Petersburg + s3.ru-3.storage.selcloud.ru: St. Petersburg + s3.ru-7.storage.selcloud.ru: Moscow + s3.gis-1.storage.selcloud.ru: Moscow + s3.kz-1.storage.selcloud.ru: Kazakhstan + s3.uz-2.storage.selcloud.ru: Uzbekistan quirks: list_url_encode: false diff --git a/backend/s3/s3.go b/backend/s3/s3.go index 728594864080d..246077e3b3128 100644 --- a/backend/s3/s3.go +++ b/backend/s3/s3.go @@ -30,9 +30,11 @@ import ( v4signer "github.com/aws/aws-sdk-go-v2/aws/signer/v4" awsconfig "github.com/aws/aws-sdk-go-v2/config" "github.com/aws/aws-sdk-go-v2/credentials" + "github.com/aws/aws-sdk-go-v2/credentials/stscreds" "github.com/aws/aws-sdk-go-v2/feature/s3/manager" "github.com/aws/aws-sdk-go-v2/service/s3" "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/aws-sdk-go-v2/service/sts" "github.com/aws/smithy-go" "github.com/aws/smithy-go/logging" "github.com/aws/smithy-go/middleware" @@ -325,6 +327,30 @@ If empty it will default to the environment variable "AWS_PROFILE" or Help: "An AWS session token.", Advanced: true, Sensitive: true, + }, { + Name: "role_arn", + Help: `ARN of the IAM role to assume. + +Leave blank if not using assume role.`, + Advanced: true, + }, { + Name: "role_session_name", + Help: `Session name for assumed role. + +If empty, a session name will be generated automatically.`, + Advanced: true, + }, { + Name: "role_session_duration", + Help: `Session duration for assumed role. + +If empty, the default session duration will be used.`, + Advanced: true, + }, { + Name: "role_external_id", + Help: `External ID for assumed role. + +Leave blank if not using an external ID.`, + Advanced: true, }, { Name: "upload_concurrency", Help: `Concurrency for multipart uploads and copies. 
@@ -927,6 +953,10 @@ type Options struct { SharedCredentialsFile string `config:"shared_credentials_file"` Profile string `config:"profile"` SessionToken string `config:"session_token"` + RoleARN string `config:"role_arn"` + RoleSessionName string `config:"role_session_name"` + RoleSessionDuration fs.Duration `config:"role_session_duration"` + RoleExternalID string `config:"role_external_id"` UploadConcurrency int `config:"upload_concurrency"` ForcePathStyle bool `config:"force_path_style"` V2Auth bool `config:"v2_auth"` @@ -1290,6 +1320,34 @@ func s3Connection(ctx context.Context, opt *Options, client *http.Client) (s3Cli opt.Region = "us-east-1" } + // Handle assume role if RoleARN is specified + if opt.RoleARN != "" { + fs.Debugf(nil, "Using assume role with ARN: %s", opt.RoleARN) + + // Set region for the config before creating STS client + awsConfig.Region = opt.Region + + // Create STS client using the base credentials + stsClient := sts.NewFromConfig(awsConfig) + + // Configure AssumeRole options + assumeRoleOptions := func(aro *stscreds.AssumeRoleOptions) { + // Set session name if provided, otherwise use a default + if opt.RoleSessionName != "" { + aro.RoleSessionName = opt.RoleSessionName + } + if opt.RoleSessionDuration != 0 { + aro.Duration = time.Duration(opt.RoleSessionDuration) + } + if opt.RoleExternalID != "" { + aro.ExternalID = &opt.RoleExternalID + } + } + + // Create AssumeRole credentials provider + awsConfig.Credentials = stscreds.NewAssumeRoleProvider(stsClient, opt.RoleARN, assumeRoleOptions) + } + provider = loadProvider(opt.Provider) if provider == nil { fs.Logf("s3", "s3 provider %q not known - please set correctly", opt.Provider) @@ -2870,7 +2928,9 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, req := s3.CopyObjectInput{ MetadataDirective: types.MetadataDirectiveCopy, } - + if srcObj.storageClass != nil { + req.StorageClass = types.StorageClass(*srcObj.storageClass) + } // Build upload options including headers and metadata ci := fs.GetConfig(ctx) uploadOptions := fs.MetadataAsOpenOptions(ctx) @@ -4443,7 +4503,12 @@ func (o *Object) prepareUpload(ctx context.Context, src fs.ObjectInfo, options [ ACL: types.ObjectCannedACL(o.fs.opt.ACL), Key: &bucketPath, } - + if tierObj, ok := src.(fs.GetTierer); ok { + tier := tierObj.GetTier() + if tier != "" { + ui.req.StorageClass = types.StorageClass(strings.ToUpper(tier)) + } + } // Fetch metadata if --metadata is in use meta, err := fs.GetMetadataOptions(ctx, o.fs, src, options) if err != nil { diff --git a/backend/seafile/webapi.go b/backend/seafile/webapi.go index 967dbabf0e527..dc9da9e16dcec 100644 --- a/backend/seafile/webapi.go +++ b/backend/seafile/webapi.go @@ -688,7 +688,7 @@ func (f *Fs) upload(ctx context.Context, in io.Reader, uploadLink, filePath stri "need_idx_progress": {"true"}, "replace": {"1"}, } - formReader, contentType, _, err := rest.MultipartUpload(ctx, in, parameters, "file", f.opt.Enc.FromStandardName(filename)) + formReader, contentType, _, err := rest.MultipartUpload(ctx, in, parameters, "file", f.opt.Enc.FromStandardName(filename), "application/octet-stream") if err != nil { return nil, fmt.Errorf("failed to make multipart upload: %w", err) } diff --git a/backend/sftp/sftp.go b/backend/sftp/sftp.go index e3d5242fbc07b..79fa9c2a0cbb2 100644 --- a/backend/sftp/sftp.go +++ b/backend/sftp/sftp.go @@ -519,6 +519,12 @@ Example: Help: `URL for HTTP CONNECT proxy Set this to a URL for an HTTP proxy which supports the HTTP CONNECT verb. 
+ +Supports the format http://user:pass@host:port, http://host:port, http://host. + +Example: + + http://myUser:myPass@proxyhostname.example.com:8000 `, Advanced: true, }, { @@ -919,15 +925,8 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e opt.Port = "22" } - // get proxy URL if set - if opt.HTTPProxy != "" { - proxyURL, err := url.Parse(opt.HTTPProxy) - if err != nil { - return nil, fmt.Errorf("failed to parse HTTP Proxy URL: %w", err) - } - f.proxyURL = proxyURL - } - + // Set up sshConfig here from opt + // **NB** everything else should be setup in NewFsWithConnection sshConfig := &ssh.ClientConfig{ User: opt.User, Auth: []ssh.AuthMethod{}, @@ -1175,11 +1174,21 @@ func NewFsWithConnection(ctx context.Context, f *Fs, name string, root string, m f.mkdirLock = newStringLock() f.pacer = fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))) f.savedpswd = "" + // set the pool drainer timer going if f.opt.IdleTimeout > 0 { f.drain = time.AfterFunc(time.Duration(f.opt.IdleTimeout), func() { _ = f.drainPool(ctx) }) } + // get proxy URL if set + if opt.HTTPProxy != "" { + proxyURL, err := url.Parse(opt.HTTPProxy) + if err != nil { + return nil, fmt.Errorf("failed to parse HTTP Proxy URL: %w", err) + } + f.proxyURL = proxyURL + } + f.features = (&fs.Features{ CanHaveEmptyDirectories: true, SlowHash: true, @@ -1249,7 +1258,7 @@ func NewFsWithConnection(ctx context.Context, f *Fs, name string, root string, m fs.Debugf(f, "Failed to resolve path using RealPath: %v", err) cwd, err := c.sftpClient.Getwd() if err != nil { - fs.Debugf(f, "Failed to to read current directory - using relative paths: %v", err) + fs.Debugf(f, "Failed to read current directory - using relative paths: %v", err) } else { f.absRoot = path.Join(cwd, f.root) fs.Debugf(f, "Relative path joined with current directory to get absolute path %q", f.absRoot) diff --git a/backend/shade/api/types.go b/backend/shade/api/types.go new file mode 100644 index 0000000000000..d1be0c52990be --- /dev/null +++ b/backend/shade/api/types.go @@ -0,0 +1,27 @@ +// Package api has type definitions for shade +package api + +// ListDirResponse ------------------------------------------------- +// Format from shade api +type ListDirResponse struct { + Type string `json:"type"` // "file" or "tree" + Path string `json:"path"` // Full path including root + Ino int `json:"ino"` // inode number + Mtime int64 `json:"mtime"` // Modified time in milliseconds + Ctime int64 `json:"ctime"` // Created time in milliseconds + Size int64 `json:"size"` // Size in bytes + Hash string `json:"hash"` // MD5 hash + Draft bool `json:"draft"` // Whether this is a draft file +} + +// PartURL Type for multipart upload/download +type PartURL struct { + URL string `json:"url"` + Headers map[string]string `json:"headers,omitempty"` +} + +// CompletedPart Type for completed parts when making a multipart upload. +type CompletedPart struct { + ETag string + PartNumber int32 +} diff --git a/backend/shade/shade.go b/backend/shade/shade.go new file mode 100644 index 0000000000000..d06c66d51d8cf --- /dev/null +++ b/backend/shade/shade.go @@ -0,0 +1,1039 @@ +// Package shade provides an interface to the Shade storage system. 
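+//
+// The backend exchanges the configured API key for a short-lived ShadeFS JWT
+// and sends it as a Bearer token on every ShadeFS request. Listings and
+// metadata come from the /fs endpoints, downloads may be redirected to a
+// presigned URL, and uploads always go through the multipart upload endpoints.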
+package shade + +import ( + "bytes" + "context" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "path" + "path/filepath" + "strings" + "sync" + "time" + + "github.com/rclone/rclone/backend/shade/api" + "github.com/rclone/rclone/fs" + "github.com/rclone/rclone/fs/config" + "github.com/rclone/rclone/fs/config/configmap" + "github.com/rclone/rclone/fs/config/configstruct" + "github.com/rclone/rclone/fs/fshttp" + "github.com/rclone/rclone/fs/hash" + "github.com/rclone/rclone/fs/object" + "github.com/rclone/rclone/lib/encoder" + "github.com/rclone/rclone/lib/pacer" + "github.com/rclone/rclone/lib/rest" +) + +const ( + defaultEndpoint = "https://fs.shade.inc" // Default local development endpoint + apiEndpoint = "https://api.shade.inc" // API endpoint for getting tokens + minSleep = 10 * time.Millisecond // Minimum sleep time for the pacer + maxSleep = 5 * time.Minute // Maximum sleep time for the pacer + decayConstant = 1 // Bigger for slower decay, exponential + defaultChunkSize = int64(64 * 1024 * 1024) // Default chunk size (64MB) + minChunkSize = int64(5 * 1024 * 1024) // Minimum chunk size (5MB) - S3 requirement + maxChunkSize = int64(5 * 1024 * 1024 * 1024) // Maximum chunk size (5GB) + maxUploadParts = 10000 // maximum allowed number of parts in a multipart upload +) + +// Register with Fs +func init() { + fs.Register(&fs.RegInfo{ + Name: "shade", + Description: "Shade FS", + NewFs: NewFS, + Options: []fs.Option{{ + Name: "drive_id", + Help: "The ID of your drive, see this in the drive settings. Individual rclone configs must be made per drive.", + Required: true, + Sensitive: false, + }, { + Name: "api_key", + Help: "An API key for your account.", + Required: true, + Sensitive: true, + }, { + Name: "endpoint", + Help: "Endpoint for the service.\n\nLeave blank normally.", + Advanced: true, + }, { + Name: "chunk_size", + Help: "Chunk size to use for uploading.\n\nAny files larger than this will be uploaded in chunks of this size.\n\nNote that this is stored in memory per transfer, so increasing it will\nincrease memory usage.\n\nMinimum is 5MB, maximum is 5GB.", + Default: fs.SizeSuffix(defaultChunkSize), + Advanced: true, + }, { + Name: "upload_concurrency", + Help: `Concurrency for multipart uploads and copies. This is the number of chunks of the same file that are uploaded concurrently for multipart uploads and copies.`, + Default: 4, + Advanced: true, + }, { + Name: "max_upload_parts", + Help: "Maximum amount of parts in a multipart upload.", + Default: maxUploadParts, + Advanced: true, + }, { + Name: "token", + Help: "JWT Token for performing Shade FS operations. Don't set this value - rclone will set it automatically", + Default: "", + Advanced: true, + }, { + Name: "token_expiry", + Help: "JWT Token Expiration time. 
Don't set this value - rclone will set it automatically", + Default: "", + Advanced: true, + }, { + Name: config.ConfigEncoding, + Help: config.ConfigEncodingHelp, + Advanced: true, + Default: encoder.Display | + encoder.EncodeBackSlash | + encoder.EncodeInvalidUtf8, + }}, + }) +} + +// refreshJWTToken retrieves or refreshes the ShadeFS token +func (f *Fs) refreshJWTToken(ctx context.Context) (string, error) { + f.tokenMu.Lock() + defer f.tokenMu.Unlock() + // Return existing token if it's still valid + checkTime := f.tokenExp.Add(-2 * time.Minute) + //If the token expires in less than two minutes, just get a new one + if f.token != "" && time.Now().Before(checkTime) { + return f.token, nil + } + + // Token has expired or doesn't exist, get a new one + opts := rest.Opts{ + Method: "GET", + RootURL: apiEndpoint, + Path: fmt.Sprintf("/workspaces/drives/%s/shade-fs-token", f.drive), + ExtraHeaders: map[string]string{ + "Authorization": f.opt.APIKey, + }, + } + + var err error + var tokenStr string + + err = f.pacer.Call(func() (bool, error) { + res, err := f.apiSrv.Call(ctx, &opts) + if err != nil { + fs.Debugf(f, "Token request failed: %v", err) + return false, err + } + + defer fs.CheckClose(res.Body, &err) + + if res.StatusCode != http.StatusOK { + fs.Debugf(f, "Token request failed with code: %d", res.StatusCode) + return res.StatusCode == http.StatusTooManyRequests, fmt.Errorf("failed to get ShadeFS token, status: %d", res.StatusCode) + } + + // Read token directly as plain text + tokenBytes, err := io.ReadAll(res.Body) + if err != nil { + return false, err + } + + tokenStr = strings.TrimSpace(string(tokenBytes)) + return false, nil + }) + + if err != nil { + return "", err + } + + if tokenStr == "" { + return "", fmt.Errorf("empty token received from server") + } + + parts := strings.Split(tokenStr, ".") + if len(parts) < 2 { + return "", fmt.Errorf("invalid token received from server") + } + // Decode the payload (2nd part of the token) + payload, err := base64.RawURLEncoding.DecodeString(parts[1]) + if err != nil { + return "", fmt.Errorf("invalid token received from server") + } + var claims map[string]interface{} + if err := json.Unmarshal(payload, &claims); err != nil { + return "", err + } + var exp int64 + // Extract exp/ + if v, ok := claims["exp"].(float64); ok { + exp = int64(v) + } + + f.token = tokenStr + f.tokenExp = time.Unix(exp, 0) + + f.m.Set("token", f.token) + f.m.Set("token_expiry", f.tokenExp.Format(time.RFC3339)) + + return f.token, nil +} + +func (f *Fs) callAPI(ctx context.Context, method, path string, response interface{}) (*http.Response, error) { + token, err := f.refreshJWTToken(ctx) + if err != nil { + return nil, err + } + opts := rest.Opts{ + Method: method, + Path: path, + RootURL: f.endpoint, + ExtraHeaders: map[string]string{ + "Authorization": "Bearer " + token, + }, + } + var res *http.Response + err = f.pacer.Call(func() (bool, error) { + if response != nil { + res, err = f.srv.CallJSON(ctx, &opts, nil, response) + } else { + res, err = f.srv.Call(ctx, &opts) + } + if err != nil { + return res != nil && res.StatusCode == http.StatusTooManyRequests, err + } + return false, nil + }) + return res, err +} + +// Options defines the configuration for this backend +type Options struct { + Drive string `config:"drive_id"` + APIKey string `config:"api_key"` + Endpoint string `config:"endpoint"` + ChunkSize fs.SizeSuffix `config:"chunk_size"` + MaxUploadParts int `config:"max_upload_parts"` + Concurrency int `config:"upload_concurrency"` + Token string 
`config:"token"` + TokenExpiry string `config:"token_expiry"` + Encoding encoder.MultiEncoder +} + +// Fs represents a shade remote +type Fs struct { + name string // name of this remote + root string // the path we are working on + opt Options // parsed options + features *fs.Features // optional features + srv *rest.Client // REST client for ShadeFS API + apiSrv *rest.Client // REST client for Shade API + endpoint string // endpoint for ShadeFS + drive string // drive ID + pacer *fs.Pacer // pacer for API calls + token string // ShadeFS token + tokenExp time.Time // Token expiration time + tokenMu sync.Mutex + m configmap.Mapper //Config Mapper to store tokens for future use + recursive bool + createdDirs map[string]bool // Cache of directories we've created + createdDirMu sync.RWMutex // Mutex for createdDirs map +} + +// Object describes a ShadeFS object +type Object struct { + fs *Fs // what this object is part of + remote string // The remote path + mtime int64 // Modified time + size int64 // Size of the object + original string //Presigned download link +} + +// Directory describes a ShadeFS directory +type Directory struct { + fs *Fs // Reference to the filesystem + remote string // Path to the directory + mtime int64 // Modification time + size int64 // Size (typically 0 for directories) +} + +// Name of the remote (as passed into NewFs) +func (f *Fs) Name() string { + return f.name +} + +// Root of the remote (as passed into NewFs) +func (f *Fs) Root() string { + return f.root +} + +// String returns a description of the FS +func (f *Fs) String() string { + return fmt.Sprintf("Shade drive %s path %s", f.opt.Drive, f.root) +} + +// Precision returns the precision of the ModTimes +func (f *Fs) Precision() time.Duration { + return fs.ModTimeNotSupported +} + +// Move src to this remote using server-side move operations. +// +// This is stored with the remote path given. +// +// It returns the destination Object and a possible error. +// +// Will only be called if src.Fs().Name() == f.Name() +// +// If it isn't possible then return fs.ErrorCantMove +func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) { + srcObj, ok := src.(*Object) + if !ok { + fs.Debugf(src, "Can't move - not same remote type") + return nil, fs.ErrorCantMove + } + + token, err := f.refreshJWTToken(ctx) + if err != nil { + return nil, err + } + + //Need to make sure destination exists + err = f.ensureParentDirectories(ctx, remote) + if err != nil { + return nil, err + } + + // Create temporary object + o := &Object{ + fs: f, + remote: remote, + mtime: srcObj.mtime, + size: srcObj.size, + } + fromFullPath := path.Join(src.Fs().Root(), srcObj.remote) + toFullPath := path.Join(f.root, remote) + + // Build query parameters + params := url.Values{} + params.Set("path", remote) + params.Set("from", fromFullPath) + params.Set("to", toFullPath) + + opts := rest.Opts{ + Method: "POST", + Path: fmt.Sprintf("/%s/fs/move?%s", f.drive, params.Encode()), + ExtraHeaders: map[string]string{ + "Authorization": "Bearer " + token, + }, + } + + err = o.fs.pacer.Call(func() (bool, error) { + resp, err := f.srv.Call(ctx, &opts) + + if err != nil && resp.StatusCode == http.StatusBadRequest { + fs.Debugf(f, "Bad token from server: %v", token) + } + + return resp != nil && resp.StatusCode == http.StatusTooManyRequests, err + }) + if err != nil { + return nil, err + } + return o, nil +} + +// DirMove moves src, srcRemote to this remote at dstRemote +// using server-side move operations. 
+// +// Will only be called if src.Fs().Name() == f.Name() +// +// If it isn't possible then return fs.ErrorCantDirMove +// +// If destination exists then return fs.ErrorDirExists +func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error { + srcFs, ok := src.(*Fs) + if !ok { + fs.Debugf(srcFs, "Can't move directory - not same remote type") + return fs.ErrorCantDirMove + } + //Need to check if destination exists + fullPath := f.buildFullPath(dstRemote) + var response api.ListDirResponse + res, _ := f.callAPI(ctx, "GET", fmt.Sprintf("/%s/fs/attr?path=%s", f.drive, fullPath), &response) + + if res.StatusCode != http.StatusNotFound { + return fs.ErrorDirExists + } + + fullPathSrc := f.buildFullPath(srcRemote) + fullPathSrcUnencoded, err := url.QueryUnescape(fullPathSrc) + if err != nil { + return err + } + + fullPathDstUnencoded, err := url.QueryUnescape(fullPath) + if err != nil { + return err + } + + err = f.ensureParentDirectories(ctx, dstRemote) + if err != nil { + return err + } + + o := &Object{ + fs: srcFs, + remote: srcRemote, + } + + _, err = f.Move(ctx, o, dstRemote) + + if err == nil { + + f.createdDirMu.Lock() + f.createdDirs[fullPathSrcUnencoded] = false + f.createdDirs[fullPathDstUnencoded] = true + f.createdDirMu.Unlock() + } + + return err +} + +// Hashes returns the supported hash types +func (f *Fs) Hashes() hash.Set { + return hash.Set(hash.None) +} + +// Features returns the optional features of this Fs +func (f *Fs) Features() *fs.Features { + return f.features +} + +// NewFS constructs an FS from the path, container:path +func NewFS(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) { + // Parse config into Options struct + opt := new(Options) + err := configstruct.Set(m, opt) + if err != nil { + return nil, err + } + + fs.Debugf(nil, "Creating new ShadeFS backend with drive: %s", opt.Drive) + + f := &Fs{ + name: name, + root: root, + opt: *opt, + drive: opt.Drive, + m: m, + srv: rest.NewClient(fshttp.NewClient(ctx)).SetRoot(defaultEndpoint), + apiSrv: rest.NewClient(fshttp.NewClient(ctx)), + pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))), + recursive: true, + createdDirs: make(map[string]bool), + token: opt.Token, + } + + f.features = &fs.Features{ + // Initially set minimal features + // We'll expand this in a future iteration + CanHaveEmptyDirectories: true, + Move: f.Move, + DirMove: f.DirMove, + OpenChunkWriter: f.OpenChunkWriter, + } + + if opt.TokenExpiry != "" { + tokenExpiry, err := time.Parse(time.RFC3339, opt.TokenExpiry) + if err != nil { + fs.Errorf(nil, "Failed to parse token_expiry option: %v", err) + } else { + f.tokenExp = tokenExpiry + } + } + + // Set the endpoint + if opt.Endpoint == "" { + f.endpoint = defaultEndpoint + } else { + f.endpoint = opt.Endpoint + } + + // Validate and set chunk size + if opt.ChunkSize == 0 { + opt.ChunkSize = fs.SizeSuffix(defaultChunkSize) + } else if opt.ChunkSize < fs.SizeSuffix(minChunkSize) { + return nil, fmt.Errorf("chunk_size %d is less than minimum %d", opt.ChunkSize, minChunkSize) + } else if opt.ChunkSize > fs.SizeSuffix(maxChunkSize) { + return nil, fmt.Errorf("chunk_size %d is greater than maximum %d", opt.ChunkSize, maxChunkSize) + } + + // Ensure root doesn't have trailing slash + f.root = strings.Trim(f.root, "/") + + // Check that we can log in by getting a token + _, err = f.refreshJWTToken(ctx) + if err != nil { + return nil, fmt.Errorf("failed to get ShadeFS token: 
%w", err) + } + + var response api.ListDirResponse + _, _ = f.callAPI(ctx, "GET", fmt.Sprintf("/%s/fs/attr?path=%s", f.drive, url.QueryEscape(root)), &response) + + if response.Type == "file" { + //Specified a single file path, not a directory. + f.root = filepath.Dir(f.root) + return f, fs.ErrorIsFile + } + return f, nil +} + +// NewObject finds the Object at remote +func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) { + + fullPath := f.buildFullPath(remote) + + var response api.ListDirResponse + res, err := f.callAPI(ctx, "GET", fmt.Sprintf("/%s/fs/attr?path=%s", f.drive, fullPath), &response) + + if res != nil && res.StatusCode == http.StatusNotFound { + return nil, fs.ErrorObjectNotFound + } + + if err != nil { + return nil, err + } + + if res != nil && res.StatusCode != http.StatusOK { + return nil, fmt.Errorf("attr failed with status code: %d", res.StatusCode) + } + + if response.Type == "tree" { + return nil, fs.ErrorIsDir + } + + if response.Type != "file" { + return nil, fmt.Errorf("path is not a file: %s", remote) + } + + return &Object{ + fs: f, + remote: remote, + mtime: response.Mtime, + size: response.Size, + }, nil +} + +// Put uploads a file +func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { + // Create temporary object + o := &Object{ + fs: f, + remote: src.Remote(), + } + return o, o.Update(ctx, in, src, options...) +} + +// List the objects and directories in dir into entries +func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) { + + fullPath := f.buildFullPath(dir) + + var response []api.ListDirResponse + res, err := f.callAPI(ctx, "GET", fmt.Sprintf("/%s/fs/listdir?path=%s", f.drive, fullPath), &response) + if err != nil { + fs.Debugf(f, "Error from List call: %v", err) + return nil, fs.ErrorDirNotFound + } + + if res.StatusCode == http.StatusNotFound { + fs.Debugf(f, "Directory not found") + return nil, fs.ErrorDirNotFound + } + + if res.StatusCode != http.StatusOK { + return nil, fmt.Errorf("listdir failed with status code: %d", res.StatusCode) + } + + for _, r := range response { + if r.Draft { + continue + } + + // Make path relative to f.root + entryPath := strings.TrimPrefix(r.Path, "/") + if f.root != "" { + if !strings.HasPrefix(entryPath, f.root) { + continue + } + entryPath = strings.TrimPrefix(strings.TrimPrefix(entryPath, f.root), "/") + } + + if r.Type == "file" { + entries = append(entries, &Object{ + fs: f, + remote: entryPath, + mtime: r.Mtime, + size: r.Size, + }) + } else if r.Type == "tree" { + dirEntry := &Directory{ + fs: f, + remote: entryPath, + mtime: r.Mtime, + size: r.Size, // Typically 0 for directories + } + entries = append(entries, dirEntry) + } else { + fs.Debugf(f, "Unknown entry type: %s for path: %s", r.Type, entryPath) + } + } + + return entries, nil +} + +// ensureParentDirectories creates all parent directories for a given path +func (f *Fs) ensureParentDirectories(ctx context.Context, remotePath string) error { + // Build the full path including root + fullPath := remotePath + if f.root != "" { + fullPath = path.Join(f.root, remotePath) + } + + // Get the parent directory path + parentDir := path.Dir(fullPath) + + // If parent is root, empty, or current dir, nothing to create + if parentDir == "" || parentDir == "." 
|| parentDir == "/" { + return nil + } + + // Ensure the full parent directory path exists + return f.ensureDirectoryPath(ctx, parentDir) +} + +// ensureDirectoryPath creates all directories in a path +func (f *Fs) ensureDirectoryPath(ctx context.Context, dirPath string) error { + // Check cache first + f.createdDirMu.RLock() + if f.createdDirs[dirPath] { + f.createdDirMu.RUnlock() + return nil + } + f.createdDirMu.RUnlock() + + // Build list of all directories that need to be created + var dirsToCreate []string + currentPath := dirPath + + for currentPath != "" && currentPath != "." && currentPath != "/" { + // Check if this directory is already in cache + f.createdDirMu.RLock() + inCache := f.createdDirs[currentPath] + f.createdDirMu.RUnlock() + + if !inCache { + dirsToCreate = append([]string{currentPath}, dirsToCreate...) + } + currentPath = path.Dir(currentPath) + } + + // If all directories are cached, we're done + if len(dirsToCreate) == 0 { + return nil + } + + // Create each directory in order + for _, dir := range dirsToCreate { + + fullPath := url.QueryEscape(dir) + res, err := f.callAPI(ctx, "POST", fmt.Sprintf("/%s/fs/mkdir?path=%s", f.drive, fullPath), nil) + + // If directory already exists, that's fine + if err == nil && res != nil { + if res.StatusCode == http.StatusConflict || res.StatusCode == http.StatusUnprocessableEntity { + f.createdDirMu.Lock() + f.createdDirs[dir] = true + f.createdDirMu.Unlock() + } else if res.StatusCode != http.StatusOK && res.StatusCode != http.StatusCreated { + fs.Debugf(f, "Failed to create directory %s: status code %d", dir, res.StatusCode) + } else { + f.createdDirMu.Lock() + f.createdDirs[dir] = true + f.createdDirMu.Unlock() + } + + fs.CheckClose(res.Body, &err) + } else if err != nil { + fs.Debugf(f, "Error creating directory %s: %v", dir, err) + // Continue anyway + continue + } + } + + // Mark the full path as created in cache + f.createdDirMu.Lock() + f.createdDirs[dirPath] = true + f.createdDirMu.Unlock() + + return nil +} + +// Mkdir creates the container if it doesn't exist +func (f *Fs) Mkdir(ctx context.Context, dir string) error { + + // Build the full path for the directory + fullPath := dir + if dir == "" { + // If dir is empty, we're creating the root directory + if f.root != "" && f.root != "/" && f.root != "." 
{ + fullPath = f.root + } else { + // Nothing to create + return nil + } + } else if f.root != "" { + fullPath = path.Join(f.root, dir) + } + + // Ensure all parent directories exist first + if err := f.ensureDirectoryPath(ctx, fullPath); err != nil { + return fmt.Errorf("failed to create directory path: %w", err) + } + + // Add to cache + f.createdDirMu.Lock() + f.createdDirs[fullPath] = true + f.createdDirMu.Unlock() + + return nil +} + +// Rmdir deletes the root folder +// +// Returns an error if it isn't empty +func (f *Fs) Rmdir(ctx context.Context, dir string) error { + fullPath := f.buildFullPath(dir) + + if fullPath == "" { + return errors.New("cannot delete root directory") + } + + var response []api.ListDirResponse + res, err := f.callAPI(ctx, "GET", fmt.Sprintf("/%s/fs/listdir?path=%s", f.drive, fullPath), &response) + + if res != nil && res.StatusCode != http.StatusOK { + return err + } + + if len(response) > 0 { + return fs.ErrorDirectoryNotEmpty + } + + // Use the delete endpoint which handles both files and directories + res, err = f.callAPI(ctx, "POST", fmt.Sprintf("/%s/fs/delete?path=%s", f.drive, fullPath), nil) + if err != nil { + return err + } + defer fs.CheckClose(res.Body, &err) + + if res.StatusCode == http.StatusNotFound { + return fs.ErrorDirNotFound + } + + if res.StatusCode != http.StatusOK && res.StatusCode != http.StatusCreated { + return fmt.Errorf("rmdir failed with status code: %d", res.StatusCode) + } + + f.createdDirMu.Lock() + defer f.createdDirMu.Unlock() + unescapedPath, err := url.QueryUnescape(fullPath) + if err != nil { + return err + } + f.createdDirs[unescapedPath] = false + + return nil +} + +// Attempts to construct the full path for an object query-escaped +func (f *Fs) buildFullPath(remote string) string { + if f.root == "" { + return url.QueryEscape(remote) + } + return url.QueryEscape(path.Join(f.root, remote)) +} + +// ------------------------------------------------- +// Object implementation +// ------------------------------------------------- + +// Fs returns the parent Fs +func (o *Object) Fs() fs.Info { + return o.fs +} + +// String returns a description of the Object +func (o *Object) String() string { + if o == nil { + return "" + } + return o.remote +} + +// Remote returns the remote path +func (o *Object) Remote() string { + return o.remote +} + +// Hash returns the requested hash of the object content +func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) { + return "", hash.ErrUnsupported +} + +// Size returns the size of the object +func (o *Object) Size() int64 { + return o.size +} + +// ModTime returns the modification date of the object +func (o *Object) ModTime(context.Context) time.Time { + return time.Unix(0, o.mtime*int64(time.Millisecond)) +} + +// SetModTime sets the modification time of the object +func (o *Object) SetModTime(context.Context, time.Time) error { + // Not implemented for now + return fs.ErrorCantSetModTime +} + +// Storable returns whether this object is storable +func (o *Object) Storable() bool { + return true +} + +// Open an object for read +func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadCloser, error) { + + if o.size == 0 { + // Empty file: return an empty reader + return io.NopCloser(bytes.NewReader(nil)), nil + } + fs.FixRangeOption(options, o.size) + + token, err := o.fs.refreshJWTToken(ctx) + if err != nil { + return nil, err + } + + fullPath := o.fs.buildFullPath(o.remote) + // Construct the initial request URL + downloadURL := 
fmt.Sprintf("%s/%s/fs/download?path=%s", o.fs.endpoint, o.fs.drive, fullPath) + + // Create HTTP request manually + req, err := http.NewRequestWithContext(ctx, "GET", downloadURL, nil) + if err != nil { + fs.Debugf(o.fs, "Failed to create request: %v", err) + return nil, fmt.Errorf("failed to create request: %w", err) + } + req.Header.Set("Authorization", "Bearer "+token) + + // Use pacer to manage retries and rate limiting + var res *http.Response + err = o.fs.pacer.Call(func() (bool, error) { + + if res != nil { + err = res.Body.Close() + if err != nil { + return false, err + } + } + + client := http.Client{ + CheckRedirect: func(req *http.Request, via []*http.Request) error { + return http.ErrUseLastResponse // Don't follow redirects + }, + } + res, err = client.Do(req) + if err != nil { + return false, err + } + return res.StatusCode == http.StatusTooManyRequests, nil + }) + + if err != nil { + return nil, fmt.Errorf("download request failed: %w", err) + } + if res == nil { + return nil, fmt.Errorf("no response received from initial request") + } + + // Handle response based on status code + switch res.StatusCode { + case http.StatusOK: + return res.Body, nil + + case http.StatusTemporaryRedirect: + // Read the presigned URL from the body + bodyBytes, err := io.ReadAll(res.Body) + fs.CheckClose(res.Body, &err) // Close body after reading + if err != nil { + return nil, fmt.Errorf("failed to read redirect body: %w", err) + } + + presignedURL := strings.TrimSpace(string(bodyBytes)) + o.original = presignedURL //Save for later for hashing + + client := rest.NewClient(fshttp.NewClient(ctx)).SetRoot(presignedURL) + var downloadRes *http.Response + opts := rest.Opts{ + Method: "GET", + Path: "", + Options: options, + } + + err = o.fs.pacer.Call(func() (bool, error) { + downloadRes, err = client.Call(ctx, &opts) + if err != nil { + return false, err + } + if downloadRes == nil { + return false, fmt.Errorf("failed to fetch presigned URL") + } + return downloadRes.StatusCode == http.StatusTooManyRequests, nil + }) + + if err != nil { + return nil, fmt.Errorf("presigned URL request failed: %w", err) + } + if downloadRes == nil { + return nil, fmt.Errorf("no response received from presigned URL request") + } + + if downloadRes.StatusCode != http.StatusOK && downloadRes.StatusCode != http.StatusPartialContent { + body, _ := io.ReadAll(downloadRes.Body) + fs.CheckClose(downloadRes.Body, &err) + return nil, fmt.Errorf("presigned URL request failed with status %d: %q", downloadRes.StatusCode, string(body)) + } + + return downloadRes.Body, nil + + default: + body, _ := io.ReadAll(res.Body) + fs.CheckClose(res.Body, &err) + return nil, fmt.Errorf("download failed with status %d: %q", res.StatusCode, string(body)) + } +} + +// Update in to the object with the modTime given of the given size +// +// When called from outside an Fs by rclone, src.Size() will always be >= 0. +// But for unknown-sized objects (indicated by src.Size() == -1), Upload should either +// return an error or update the object properly (rather than e.g. calling panic). 
+func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error { + + //Need to ensure parent directories exist before updating + err := o.fs.ensureParentDirectories(ctx, o.remote) + if err != nil { + return err + } + + //If the source remote is different from this object's remote, as in we're updating a file with some other file's data, + //need to construct a new object info in order to correctly upload to THIS object, not the src one + var srcInfo fs.ObjectInfo + if o.remote != src.Remote() { + srcInfo = object.NewStaticObjectInfo(o.remote, src.ModTime(ctx), src.Size(), true, nil, o.Fs()) + } else { + srcInfo = src + } + + return o.uploadMultipart(ctx, srcInfo, in, options...) +} + +// Remove removes the object +func (o *Object) Remove(ctx context.Context) error { + + fullPath := o.fs.buildFullPath(o.remote) + + res, err := o.fs.callAPI(ctx, "POST", fmt.Sprintf("/%s/fs/delete?path=%s", o.fs.drive, fullPath), nil) + if err != nil { + return err + } + defer fs.CheckClose(res.Body, &err) // Ensure body is closed + + if res.StatusCode != http.StatusOK && res.StatusCode != http.StatusCreated { + return fmt.Errorf("object removal failed with status code: %d", res.StatusCode) + } + return nil +} + +// ------------------------------------------------- +// Directory implementation +// ------------------------------------------------- + +// Remote returns the remote path +func (d *Directory) Remote() string { + return d.remote +} + +// ModTime returns the modification time +func (d *Directory) ModTime(context.Context) time.Time { + return time.Unix(0, d.mtime*int64(time.Millisecond)) +} + +// Size returns the size (0 for directories) +func (d *Directory) Size() int64 { + return d.size +} + +// Fs returns the filesystem info +func (d *Directory) Fs() fs.Info { + return d.fs +} + +// Hash is unsupported for directories +func (d *Directory) Hash(context.Context, hash.Type) (string, error) { + return "", hash.ErrUnsupported +} + +// SetModTime is unsupported for directories +func (d *Directory) SetModTime(context.Context, time.Time) error { + return fs.ErrorCantSetModTime +} + +// Storable indicates directories aren’t storable as files +func (d *Directory) Storable() bool { + return false +} + +// Open returns an error for directories +func (d *Directory) Open() (io.ReadCloser, error) { + return nil, fs.ErrorIsDir +} + +// Items returns the number of items in the directory (-1 if unknown) +func (d *Directory) Items() int64 { + return -1 // Unknown +} + +// ID returns the directory ID (empty if not applicable) +func (d *Directory) ID() string { + return "" +} + +func (d *Directory) String() string { + return fmt.Sprintf("Directory: %s", d.remote) +} + +var ( + _ fs.Fs = &Fs{} + _ fs.Object = &Object{} + _ fs.Directory = &Directory{} + + _ fs.Mover = &Fs{} + _ fs.DirMover = &Fs{} +) diff --git a/backend/shade/shade_test.go b/backend/shade/shade_test.go new file mode 100644 index 0000000000000..82c4e13be9dce --- /dev/null +++ b/backend/shade/shade_test.go @@ -0,0 +1,21 @@ +package shade_test + +import ( + "testing" + + "github.com/rclone/rclone/backend/shade" + "github.com/rclone/rclone/fstest/fstests" +) + +// TestIntegration runs integration tests against the remote +func TestIntegration(t *testing.T) { + name := "TestShade" + fstests.Run(t, &fstests.Opt{ + RemoteName: name + ":", + NilObject: (*shade.Object)(nil), + SkipInvalidUTF8: true, + ExtraConfig: []fstests.ExtraConfigItem{ + {Name: name, Key: "eventually_consistent_delay", Value: "7"}, + }, + }) 
+} diff --git a/backend/shade/upload.go b/backend/shade/upload.go new file mode 100644 index 0000000000000..a4b1ecc04cbd8 --- /dev/null +++ b/backend/shade/upload.go @@ -0,0 +1,336 @@ +//multipart upload for shade + +package shade + +import ( + "bytes" + "context" + "fmt" + "io" + "net/http" + "net/url" + "path" + "sort" + "sync" + + "github.com/rclone/rclone/backend/shade/api" + "github.com/rclone/rclone/fs" + "github.com/rclone/rclone/fs/chunksize" + "github.com/rclone/rclone/lib/multipart" + "github.com/rclone/rclone/lib/rest" +) + +var warnStreamUpload sync.Once + +type shadeChunkWriter struct { + initToken string + chunkSize int64 + size int64 + f *Fs + o *Object + completedParts []api.CompletedPart + completedPartsMu sync.Mutex +} + +// uploadMultipart handles multipart upload for larger files +func (o *Object) uploadMultipart(ctx context.Context, src fs.ObjectInfo, in io.Reader, options ...fs.OpenOption) error { + + chunkWriter, err := multipart.UploadMultipart(ctx, src, in, multipart.UploadMultipartOptions{ + Open: o.fs, + OpenOptions: options, + }) + if err != nil { + return err + } + + var shadeWriter = chunkWriter.(*shadeChunkWriter) + o.size = shadeWriter.size + return nil +} + +// OpenChunkWriter returns the chunk size and a ChunkWriter +// +// Pass in the remote and the src object +// You can also use options to hint at the desired chunk size +func (f *Fs) OpenChunkWriter(ctx context.Context, remote string, src fs.ObjectInfo, options ...fs.OpenOption) (info fs.ChunkWriterInfo, writer fs.ChunkWriter, err error) { + // Temporary Object under construction + o := &Object{ + fs: f, + remote: remote, + } + + uploadParts := f.opt.MaxUploadParts + if uploadParts < 1 { + uploadParts = 1 + } else if uploadParts > maxUploadParts { + uploadParts = maxUploadParts + } + size := src.Size() + fs.FixRangeOption(options, size) + + // calculate size of parts + chunkSize := f.opt.ChunkSize + + // size can be -1 here meaning we don't know the size of the incoming file. We use ChunkSize + // buffers here (default 64 MB). With a maximum number of parts (10,000) this will be a file of + // 640 GB. 
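+	// For known sizes chunksize.Calculator may increase the chunk size so that
+	// the upload fits within the configured maximum number of parts.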
+ if size == -1 { + warnStreamUpload.Do(func() { + fs.Logf(f, "Streaming uploads using chunk size %v will have maximum file size of %v", + chunkSize, fs.SizeSuffix(int64(chunkSize)*int64(uploadParts))) + }) + } else { + chunkSize = chunksize.Calculator(src, size, uploadParts, chunkSize) + } + + token, err := o.fs.refreshJWTToken(ctx) + if err != nil { + return info, nil, fmt.Errorf("failed to get token: %w", err) + } + + err = f.ensureParentDirectories(ctx, remote) + if err != nil { + return info, nil, fmt.Errorf("failed to ensure parent directories: %w", err) + } + + fullPath := remote + if f.root != "" { + fullPath = path.Join(f.root, remote) + } + + // Initiate multipart upload + type initRequest struct { + Path string `json:"path"` + PartSize int64 `json:"partSize"` + } + reqBody := initRequest{ + Path: fullPath, + PartSize: int64(chunkSize), + } + + var initResp struct { + Token string `json:"token"` + } + + opts := rest.Opts{ + Method: "POST", + Path: fmt.Sprintf("/%s/upload/multipart", o.fs.drive), + RootURL: o.fs.endpoint, + ExtraHeaders: map[string]string{ + "Authorization": "Bearer " + token, + }, + Options: options, + } + + err = o.fs.pacer.Call(func() (bool, error) { + res, err := o.fs.srv.CallJSON(ctx, &opts, reqBody, &initResp) + if err != nil { + return res != nil && res.StatusCode == http.StatusTooManyRequests, err + } + return false, nil + }) + + if err != nil { + return info, nil, fmt.Errorf("failed to initiate multipart upload: %w", err) + } + + chunkWriter := &shadeChunkWriter{ + initToken: initResp.Token, + chunkSize: int64(chunkSize), + size: size, + f: f, + o: o, + } + info = fs.ChunkWriterInfo{ + ChunkSize: int64(chunkSize), + Concurrency: f.opt.Concurrency, + LeavePartsOnError: false, + } + return info, chunkWriter, err +} + +// WriteChunk will write chunk number with reader bytes, where chunk number >= 0 +func (s *shadeChunkWriter) WriteChunk(ctx context.Context, chunkNumber int, reader io.ReadSeeker) (bytesWritten int64, err error) { + + token, err := s.f.refreshJWTToken(ctx) + if err != nil { + return 0, err + } + + // Read chunk + var chunk bytes.Buffer + n, err := io.Copy(&chunk, reader) + + if n == 0 { + return 0, nil + } + + if err != nil { + return 0, fmt.Errorf("failed to read chunk: %w", err) + } + // Get presigned URL for this part + var partURL api.PartURL + + partOpts := rest.Opts{ + Method: "POST", + Path: fmt.Sprintf("/%s/upload/multipart/part/%d?token=%s", s.f.drive, chunkNumber+1, url.QueryEscape(s.initToken)), + RootURL: s.f.endpoint, + ExtraHeaders: map[string]string{ + "Authorization": "Bearer " + token, + }, + } + + err = s.f.pacer.Call(func() (bool, error) { + res, err := s.f.srv.CallJSON(ctx, &partOpts, nil, &partURL) + if err != nil { + return res != nil && res.StatusCode == http.StatusTooManyRequests, err + } + return false, nil + }) + + if err != nil { + return 0, fmt.Errorf("failed to get part URL: %w", err) + } + opts := rest.Opts{ + Method: "PUT", + RootURL: partURL.URL, + Body: &chunk, + ContentType: "", + ContentLength: &n, + } + + // Add headers + var uploadRes *http.Response + if len(partURL.Headers) > 0 { + opts.ExtraHeaders = make(map[string]string) + for k, v := range partURL.Headers { + opts.ExtraHeaders[k] = v + } + } + + err = s.f.pacer.Call(func() (bool, error) { + uploadRes, err = s.f.srv.Call(ctx, &opts) + if err != nil { + return uploadRes != nil && uploadRes.StatusCode == http.StatusTooManyRequests, err + } + return false, nil + }) + + if err != nil { + return 0, fmt.Errorf("failed to upload part %d: %w", chunk, err) + } 
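+	// Check the upload response: anything other than 200 or 201 fails the chunk,
+	// otherwise the returned ETag is recorded so Close can register this part
+	// when completing the multipart upload.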
+ + if uploadRes.StatusCode != http.StatusOK && uploadRes.StatusCode != http.StatusCreated { + body, _ := io.ReadAll(uploadRes.Body) + fs.CheckClose(uploadRes.Body, &err) + return 0, fmt.Errorf("part upload failed with status %d: %s", uploadRes.StatusCode, string(body)) + } + + // Get ETag from response + etag := uploadRes.Header.Get("ETag") + fs.CheckClose(uploadRes.Body, &err) + + s.completedPartsMu.Lock() + defer s.completedPartsMu.Unlock() + s.completedParts = append(s.completedParts, api.CompletedPart{ + PartNumber: int32(chunkNumber + 1), + ETag: etag, + }) + return n, nil +} + +// Close complete chunked writer finalising the file. +func (s *shadeChunkWriter) Close(ctx context.Context) error { + + // Complete multipart upload + sort.Slice(s.completedParts, func(i, j int) bool { + return s.completedParts[i].PartNumber < s.completedParts[j].PartNumber + }) + + type completeRequest struct { + Parts []api.CompletedPart `json:"parts"` + } + var completeBody completeRequest + + if s.completedParts == nil { + completeBody = completeRequest{Parts: []api.CompletedPart{}} + } else { + completeBody = completeRequest{Parts: s.completedParts} + } + + token, err := s.f.refreshJWTToken(ctx) + if err != nil { + return err + } + + completeOpts := rest.Opts{ + Method: "POST", + Path: fmt.Sprintf("/%s/upload/multipart/complete?token=%s", s.f.drive, url.QueryEscape(s.initToken)), + RootURL: s.f.endpoint, + ExtraHeaders: map[string]string{ + "Authorization": "Bearer " + token, + }, + } + + var response http.Response + + err = s.f.pacer.Call(func() (bool, error) { + res, err := s.f.srv.CallJSON(ctx, &completeOpts, completeBody, &response) + + if err != nil && res == nil { + return false, err + } + + if res.StatusCode == http.StatusTooManyRequests { + return true, err // Retry on 429 + } + + if res.StatusCode != http.StatusOK && res.StatusCode != http.StatusCreated { + body, _ := io.ReadAll(res.Body) + return false, fmt.Errorf("complete multipart failed with status %d: %s", res.StatusCode, string(body)) + } + + return false, nil + }) + + if err != nil { + return fmt.Errorf("failed to complete multipart upload: %w", err) + } + + return nil +} + +// Abort chunk write +// +// You can and should call Abort without calling Close. +func (s *shadeChunkWriter) Abort(ctx context.Context) error { + token, err := s.f.refreshJWTToken(ctx) + if err != nil { + return err + } + + opts := rest.Opts{ + Method: "POST", + Path: fmt.Sprintf("/%s/upload/abort/multipart?token=%s", s.f.drive, url.QueryEscape(s.initToken)), + RootURL: s.f.endpoint, + ExtraHeaders: map[string]string{ + "Authorization": "Bearer " + token, + }, + } + + err = s.f.pacer.Call(func() (bool, error) { + res, err := s.f.srv.Call(ctx, &opts) + if err != nil { + fs.Debugf(s.f, "Failed to abort multipart upload: %v", err) + return false, nil // Don't retry abort + } + if res.StatusCode != http.StatusOK && res.StatusCode != http.StatusCreated { + fs.Debugf(s.f, "Abort returned status %d", res.StatusCode) + } + return false, nil + }) + if err != nil { + return fmt.Errorf("failed to abort multipart upload: %w", err) + } + return nil +} diff --git a/backend/swift/swift.go b/backend/swift/swift.go index cf4ce9d37db7d..d15c3394412c3 100644 --- a/backend/swift/swift.go +++ b/backend/swift/swift.go @@ -54,7 +54,7 @@ var SharedOptions = []fs.Option{{ Name: "chunk_size", Help: strings.ReplaceAll(`Above this size files will be chunked. 
-Above this size files will be chunked into a a |`+segmentsContainerSuffix+`| container +Above this size files will be chunked into a |`+segmentsContainerSuffix+`| container or a |`+segmentsDirectory+`| directory. (See the |use_segments_container| option for more info). Default for this is 5 GiB which is its maximum value, which means only files above this size will be chunked. diff --git a/backend/uptobox/api/types.go b/backend/uptobox/api/types.go deleted file mode 100644 index 8cf1977546f44..0000000000000 --- a/backend/uptobox/api/types.go +++ /dev/null @@ -1,171 +0,0 @@ -// Package api provides types used by the Uptobox API. -package api - -import "fmt" - -// Error contains the error code and message returned by the API -type Error struct { - Success bool `json:"success,omitempty"` - StatusCode int `json:"statusCode,omitempty"` - Message string `json:"message,omitempty"` - Data string `json:"data,omitempty"` -} - -// Error returns a string for the error and satisfies the error interface -func (e Error) Error() string { - out := fmt.Sprintf("api error %d", e.StatusCode) - if e.Message != "" { - out += ": " + e.Message - } - if e.Data != "" { - out += ": " + e.Data - } - return out -} - -// FolderEntry represents a Uptobox subfolder when listing folder contents -type FolderEntry struct { - FolderID uint64 `json:"fld_id"` - Description string `json:"fld_descr"` - Password string `json:"fld_password"` - FullPath string `json:"fullPath"` - Path string `json:"fld_name"` - Name string `json:"name"` - Hash string `json:"hash"` -} - -// FolderInfo represents the current folder when listing folder contents -type FolderInfo struct { - FolderID uint64 `json:"fld_id"` - Hash string `json:"hash"` - FileCount uint64 `json:"fileCount"` - TotalFileSize int64 `json:"totalFileSize"` -} - -// FileInfo represents a file when listing folder contents -type FileInfo struct { - Name string `json:"file_name"` - Description string `json:"file_descr"` - Created string `json:"file_created"` - Size int64 `json:"file_size"` - Downloads uint64 `json:"file_downloads"` - Code string `json:"file_code"` - Password string `json:"file_password"` - Public int `json:"file_public"` - LastDownload string `json:"file_last_download"` - ID uint64 `json:"id"` -} - -// ReadMetadataResponse is the response when listing folder contents -type ReadMetadataResponse struct { - StatusCode int `json:"statusCode"` - Message string `json:"message"` - Data struct { - CurrentFolder FolderInfo `json:"currentFolder"` - Folders []FolderEntry `json:"folders"` - Files []FileInfo `json:"files"` - PageCount int `json:"pageCount"` - TotalFileCount int `json:"totalFileCount"` - TotalFileSize int64 `json:"totalFileSize"` - } `json:"data"` -} - -// UploadInfo is the response when initiating an upload -type UploadInfo struct { - StatusCode int `json:"statusCode"` - Message string `json:"message"` - Data struct { - UploadLink string `json:"uploadLink"` - MaxUpload string `json:"maxUpload"` - } `json:"data"` -} - -// UploadResponse is the response to a successful upload -type UploadResponse struct { - Files []struct { - Name string `json:"name"` - Size int64 `json:"size"` - URL string `json:"url"` - DeleteURL string `json:"deleteUrl"` - } `json:"files"` -} - -// UpdateResponse is a generic response to various action on files (rename/copy/move) -type UpdateResponse struct { - Message string `json:"message"` - StatusCode int `json:"statusCode"` -} - -// Download is the response when requesting a download link -type Download struct { - StatusCode int 
`json:"statusCode"` - Message string `json:"message"` - Data struct { - DownloadLink string `json:"dlLink"` - } `json:"data"` -} - -// MetadataRequestOptions represents all the options when listing folder contents -type MetadataRequestOptions struct { - Limit uint64 - Offset uint64 - SearchField string - Search string -} - -// CreateFolderRequest is used for creating a folder -type CreateFolderRequest struct { - Token string `json:"token"` - Path string `json:"path"` - Name string `json:"name"` -} - -// DeleteFolderRequest is used for deleting a folder -type DeleteFolderRequest struct { - Token string `json:"token"` - FolderID uint64 `json:"fld_id"` -} - -// CopyMoveFileRequest is used for moving/copying a file -type CopyMoveFileRequest struct { - Token string `json:"token"` - FileCodes string `json:"file_codes"` - DestinationFolderID uint64 `json:"destination_fld_id"` - Action string `json:"action"` -} - -// MoveFolderRequest is used for moving a folder -type MoveFolderRequest struct { - Token string `json:"token"` - FolderID uint64 `json:"fld_id"` - DestinationFolderID uint64 `json:"destination_fld_id"` - Action string `json:"action"` -} - -// RenameFolderRequest is used for renaming a folder -type RenameFolderRequest struct { - Token string `json:"token"` - FolderID uint64 `json:"fld_id"` - NewName string `json:"new_name"` -} - -// UpdateFileInformation is used for renaming a file -type UpdateFileInformation struct { - Token string `json:"token"` - FileCode string `json:"file_code"` - NewName string `json:"new_name,omitempty"` - Description string `json:"description,omitempty"` - Password string `json:"password,omitempty"` - Public string `json:"public,omitempty"` -} - -// RemoveFileRequest is used for deleting a file -type RemoveFileRequest struct { - Token string `json:"token"` - FileCodes string `json:"file_codes"` -} - -// Token represents the authentication token -type Token struct { - Token string `json:"token"` -} diff --git a/backend/uptobox/uptobox.go b/backend/uptobox/uptobox.go deleted file mode 100644 index ee13d0c672669..0000000000000 --- a/backend/uptobox/uptobox.go +++ /dev/null @@ -1,1087 +0,0 @@ -// Package uptobox provides an interface to the Uptobox storage system. 
-package uptobox - -import ( - "context" - "encoding/json" - "errors" - "fmt" - "io" - "net/http" - "net/url" - "path" - "regexp" - "strconv" - "strings" - "time" - - "github.com/rclone/rclone/backend/uptobox/api" - "github.com/rclone/rclone/fs" - "github.com/rclone/rclone/fs/config" - "github.com/rclone/rclone/fs/config/configmap" - "github.com/rclone/rclone/fs/config/configstruct" - "github.com/rclone/rclone/fs/fserrors" - "github.com/rclone/rclone/fs/fshttp" - "github.com/rclone/rclone/fs/hash" - "github.com/rclone/rclone/lib/encoder" - "github.com/rclone/rclone/lib/pacer" - "github.com/rclone/rclone/lib/random" - "github.com/rclone/rclone/lib/rest" -) - -const ( - apiBaseURL = "https://uptobox.com/api" - minSleep = 400 * time.Millisecond // api is extremely rate limited now - maxSleep = 5 * time.Second - decayConstant = 2 // bigger for slower decay, exponential - attackConstant = 0 // start with max sleep -) - -func init() { - fs.Register(&fs.RegInfo{ - Name: "uptobox", - Description: "Uptobox", - NewFs: NewFs, - Options: []fs.Option{{ - Help: "Your access token.\n\nGet it from https://uptobox.com/my_account.", - Name: "access_token", - Sensitive: true, - }, { - Help: "Set to make uploaded files private", - Name: "private", - Advanced: true, - Default: false, - }, { - Name: config.ConfigEncoding, - Help: config.ConfigEncodingHelp, - Advanced: true, - // maxFileLength = 255 - Default: (encoder.Display | - encoder.EncodeBackQuote | - encoder.EncodeDoubleQuote | - encoder.EncodeLtGt | - encoder.EncodeLeftSpace | - encoder.EncodeInvalidUtf8), - }}, - }) -} - -// Options defines the configuration for this backend -type Options struct { - AccessToken string `config:"access_token"` - Private bool `config:"private"` - Enc encoder.MultiEncoder `config:"encoding"` -} - -// Fs is the interface a cloud storage system must provide -type Fs struct { - root string - name string - opt Options - features *fs.Features - srv *rest.Client - pacer *fs.Pacer - IDRegexp *regexp.Regexp - public string // "0" to make objects private -} - -// Object represents an Uptobox object -type Object struct { - fs *Fs // what this object is part of - remote string // The remote path - hasMetaData bool // whether info below has been set - size int64 // Bytes in the object - // modTime time.Time // Modified time of the object - code string -} - -// Name of the remote (as passed into NewFs) -func (f *Fs) Name() string { - return f.name -} - -// Root of the remote (as passed into NewFs) -func (f *Fs) Root() string { - return f.root -} - -// String returns a description of the FS -func (f *Fs) String() string { - return fmt.Sprintf("Uptobox root '%s'", f.root) -} - -// Precision of the ModTimes in this Fs -func (f *Fs) Precision() time.Duration { - return fs.ModTimeNotSupported -} - -// Hashes returns the supported hash types of the filesystem -func (f *Fs) Hashes() hash.Set { - return hash.Set(hash.None) -} - -// Features returns the optional features of this Fs -func (f *Fs) Features() *fs.Features { - return f.features -} - -// retryErrorCodes is a slice of error codes that we will retry -var retryErrorCodes = []int{ - 429, // Too Many Requests. - 500, // Internal Server Error - 502, // Bad Gateway - 503, // Service Unavailable - 504, // Gateway Timeout - 509, // Bandwidth Limit Exceeded -} - -// shouldRetry returns a boolean as to whether this resp and err -// deserve to be retried. 
It returns the err as a convenience -func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) { - if fserrors.ContextError(ctx, &err) { - return false, err - } - return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err -} - -// dirPath returns an escaped file path (f.root, file) -func (f *Fs) dirPath(file string) string { - //return path.Join(f.diskRoot, file) - if file == "" || file == "." { - return "//" + f.root - } - return "//" + path.Join(f.root, file) -} - -// returns the full path based on root and the last element -func (f *Fs) splitPathFull(pth string) (string, string) { - fullPath := strings.Trim(path.Join(f.root, pth), "/") - - i := len(fullPath) - 1 - for i >= 0 && fullPath[i] != '/' { - i-- - } - - if i < 0 { - return "//" + fullPath[:i+1], fullPath[i+1:] - } - - // do not include the / at the split - return "//" + fullPath[:i], fullPath[i+1:] -} - -// splitPath is modified splitPath version that doesn't include the separator -// in the base path -func (f *Fs) splitPath(pth string) (string, string) { - // chop of any leading or trailing '/' - pth = strings.Trim(pth, "/") - - i := len(pth) - 1 - for i >= 0 && pth[i] != '/' { - i-- - } - - if i < 0 { - return pth[:i+1], pth[i+1:] - } - return pth[:i], pth[i+1:] -} - -// NewFs makes a new Fs object from the path -// -// The path is of the form remote:path -// -// Remotes are looked up in the config file. If the remote isn't -// found then NotFoundInConfigFile will be returned. -// -// On Windows avoid single character remote names as they can be mixed -// up with drive letters. -func NewFs(ctx context.Context, name string, root string, config configmap.Mapper) (fs.Fs, error) { - opt := new(Options) - err := configstruct.Set(config, opt) - if err != nil { - return nil, err - } - - f := &Fs{ - name: name, - root: root, - opt: *opt, - pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant), pacer.AttackConstant(attackConstant))), - } - if root == "/" || root == "." 
{ - f.root = "" - } else { - f.root = root - } - f.features = (&fs.Features{ - DuplicateFiles: true, - CanHaveEmptyDirectories: true, - ReadMimeType: false, - }).Fill(ctx, f) - if f.opt.Private { - f.public = "0" - } - - client := fshttp.NewClient(ctx) - f.srv = rest.NewClient(client).SetRoot(apiBaseURL) - f.IDRegexp = regexp.MustCompile(`^https://uptobox\.com/([a-zA-Z0-9]+)`) - - _, err = f.readMetaDataForPath(ctx, f.dirPath(""), &api.MetadataRequestOptions{Limit: 10}) - if err != nil { - if _, ok := err.(api.Error); !ok { - return nil, err - } - // assume it's a file than - oldRoot := f.root - rootDir, file := f.splitPath(root) - f.root = rootDir - _, err = f.NewObject(ctx, file) - if err == nil { - return f, fs.ErrorIsFile - } - f.root = oldRoot - } - - return f, nil -} - -func (f *Fs) decodeError(resp *http.Response, response any) (err error) { - defer fs.CheckClose(resp.Body, &err) - - body, err := io.ReadAll(resp.Body) - if err != nil { - return err - } - // try to unmarshal into correct structure - err = json.Unmarshal(body, response) - if err == nil { - return nil - } - // try to unmarshal into Error - var apiErr api.Error - err = json.Unmarshal(body, &apiErr) - if err != nil { - return err - } - return apiErr -} - -func (f *Fs) readMetaDataForPath(ctx context.Context, path string, options *api.MetadataRequestOptions) (*api.ReadMetadataResponse, error) { - opts := rest.Opts{ - Method: "GET", - Path: "/user/files", - Parameters: url.Values{ - "token": []string{f.opt.AccessToken}, - "path": []string{f.opt.Enc.FromStandardPath(path)}, - "limit": []string{strconv.FormatUint(options.Limit, 10)}, - }, - } - - if options.Offset != 0 { - opts.Parameters.Set("offset", strconv.FormatUint(options.Offset, 10)) - } - - var err error - var info api.ReadMetadataResponse - var resp *http.Response - err = f.pacer.Call(func() (bool, error) { - resp, err = f.srv.Call(ctx, &opts) - return shouldRetry(ctx, resp, err) - }) - if err != nil { - return nil, err - } - - err = f.decodeError(resp, &info) - if err != nil { - return nil, err - } - - if info.StatusCode != 0 { - return nil, errors.New(info.Message) - } - - return &info, nil -} - -// List the objects and directories in dir into entries. The -// entries can be returned in any order but should be for a -// complete directory. -// -// dir should be "" to list the root, and should not have -// trailing slashes. -// -// This should return ErrDirNotFound if the directory isn't -// found. -func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) { - root := f.dirPath(dir) - - var limit uint64 = 100 // max number of objects per request - 100 seems to be the maximum the api accepts - var page uint64 = 1 - var offset uint64 // for the next page of requests - - for { - opts := &api.MetadataRequestOptions{ - Limit: limit, - Offset: offset, - } - - info, err := f.readMetaDataForPath(ctx, root, opts) - if err != nil { - if apiErr, ok := err.(api.Error); ok { - // might indicate other errors but we can probably assume not found here - if apiErr.StatusCode == 1 { - return nil, fs.ErrorDirNotFound - } - } - return nil, err - } - - for _, item := range info.Data.Files { - remote := path.Join(dir, f.opt.Enc.ToStandardName(item.Name)) - o, err := f.newObjectWithInfo(ctx, remote, &item) - if err != nil { - continue - } - entries = append(entries, o) - } - - // folders are always listed entirely on every page grr. 
- if page == 1 { - for _, item := range info.Data.Folders { - remote := path.Join(dir, f.opt.Enc.ToStandardName(item.Name)) - d := fs.NewDir(remote, time.Time{}).SetID(strconv.FormatUint(item.FolderID, 10)) - entries = append(entries, d) - } - } - - //offset for the next page of items - page++ - offset += limit - //check if we reached end of list - if page > uint64(info.Data.PageCount) { - break - } - } - return entries, nil -} - -// Return an Object from a path -// -// If it can't be found it returns the error fs.ErrorObjectNotFound. -func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *api.FileInfo) (fs.Object, error) { - o := &Object{ - fs: f, - remote: remote, - size: info.Size, - code: info.Code, - hasMetaData: true, - } - return o, nil -} - -// NewObject finds the Object at remote. If it can't be found it -// returns the error fs.ErrorObjectNotFound. -func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) { - // no way to directly access an object by path so we have to list the parent dir - entries, err := f.List(ctx, path.Dir(remote)) - if err != nil { - // need to change error type - // if the parent dir doesn't exist the object doesn't exist either - if err == fs.ErrorDirNotFound { - return nil, fs.ErrorObjectNotFound - } - return nil, err - } - for _, entry := range entries { - if o, ok := entry.(fs.Object); ok { - if o.Remote() == remote { - return o, nil - } - } - } - return nil, fs.ErrorObjectNotFound -} - -func (f *Fs) uploadFile(ctx context.Context, in io.Reader, size int64, filename string, uploadURL string, options ...fs.OpenOption) (*api.UploadResponse, error) { - opts := rest.Opts{ - Method: "POST", - RootURL: "https:" + uploadURL, - Body: in, - ContentLength: &size, - Options: options, - MultipartContentName: "files", - MultipartFileName: filename, - } - - var err error - var resp *http.Response - var ul api.UploadResponse - err = f.pacer.CallNoRetry(func() (bool, error) { - resp, err = f.srv.CallJSON(ctx, &opts, nil, &ul) - return shouldRetry(ctx, resp, err) - }) - if err != nil { - return nil, fmt.Errorf("couldn't upload file: %w", err) - } - return &ul, nil -} - -// dstPath starts from root and includes // -func (f *Fs) move(ctx context.Context, dstPath string, fileID string) (err error) { - meta, err := f.readMetaDataForPath(ctx, dstPath, &api.MetadataRequestOptions{Limit: 10}) - if err != nil { - return err - } - - opts := rest.Opts{ - Method: "PATCH", - Path: "/user/files", - } - mv := api.CopyMoveFileRequest{ - Token: f.opt.AccessToken, - FileCodes: fileID, - DestinationFolderID: meta.Data.CurrentFolder.FolderID, - Action: "move", - } - - var resp *http.Response - var info api.UpdateResponse - err = f.pacer.Call(func() (bool, error) { - resp, err = f.srv.CallJSON(ctx, &opts, &mv, &info) - return shouldRetry(ctx, resp, err) - }) - if err != nil { - return fmt.Errorf("couldn't move file: %w", err) - } - if info.StatusCode != 0 { - return fmt.Errorf("move: api error: %d - %s", info.StatusCode, info.Message) - } - return err -} - -// updateFileInformation set's various file attributes most importantly it's name -func (f *Fs) updateFileInformation(ctx context.Context, update *api.UpdateFileInformation) (err error) { - opts := rest.Opts{ - Method: "PATCH", - Path: "/user/files", - } - - var resp *http.Response - var info api.UpdateResponse - err = f.pacer.Call(func() (bool, error) { - resp, err = f.srv.CallJSON(ctx, &opts, update, &info) - return shouldRetry(ctx, resp, err) - }) - if err != nil { - return fmt.Errorf("couldn't 
update file info: %w", err) - } - if info.StatusCode != 0 { - return fmt.Errorf("updateFileInfo: api error: %d - %s", info.StatusCode, info.Message) - } - return err -} - -func (f *Fs) putUnchecked(ctx context.Context, in io.Reader, remote string, size int64, options ...fs.OpenOption) error { - if size > int64(200e9) { // max size 200GB - return errors.New("file too big, can't upload") - } else if size == 0 { - return fs.ErrorCantUploadEmptyFiles - } - // yes it does take 4 requests if we're uploading to root and 6+ if we're uploading to any subdir :( - - // create upload request - opts := rest.Opts{ - Method: "GET", - Path: "/upload", - } - token := api.Token{ - Token: f.opt.AccessToken, - } - var info api.UploadInfo - err := f.pacer.Call(func() (bool, error) { - resp, err := f.srv.CallJSON(ctx, &opts, &token, &info) - return shouldRetry(ctx, resp, err) - }) - if err != nil { - return err - } - if info.StatusCode != 0 { - return fmt.Errorf("putUnchecked api error: %d - %s", info.StatusCode, info.Message) - } - // we need to have a safe name for the upload to work - tmpName := "rcloneTemp" + random.String(8) - upload, err := f.uploadFile(ctx, in, size, tmpName, info.Data.UploadLink, options...) - if err != nil { - return err - } - if len(upload.Files) != 1 { - return errors.New("upload unexpected response") - } - match := f.IDRegexp.FindStringSubmatch(upload.Files[0].URL) - - // move file to destination folder - base, leaf := f.splitPath(remote) - fullBase := f.dirPath(base) - - if fullBase != "//" { - // make all the parent folders - err = f.Mkdir(ctx, base) - if err != nil { - // this might need some more error handling. if any of the following requests fail - // we'll leave an orphaned temporary file floating around somewhere - // they rarely fail though - return err - } - - err = f.move(ctx, fullBase, match[1]) - if err != nil { - return err - } - } - - // rename file to final name - err = f.updateFileInformation(ctx, &api.UpdateFileInformation{ - Token: f.opt.AccessToken, - FileCode: match[1], - NewName: f.opt.Enc.FromStandardName(leaf), - Public: f.public, - }) - if err != nil { - return err - } - - return nil -} - -// Put in to the remote path with the modTime given of the given size -// -// When called from outside an Fs by rclone, src.Size() will always be >= 0. -// But for unknown-sized objects (indicated by src.Size() == -1), Put should either -// return an error or upload it properly (rather than e.g. calling panic). -// -// May create the object even if it returns an error - if so -// will return the object and the error, otherwise will return -// nil and the error -func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { - existingObj, err := f.NewObject(ctx, src.Remote()) - switch err { - case nil: - return existingObj, existingObj.Update(ctx, in, src, options...) - case fs.ErrorObjectNotFound: - // Not found so create it - return f.PutUnchecked(ctx, in, src, options...) - default: - return nil, err - } -} - -// PutUnchecked uploads the object -// -// This will create a duplicate if we upload a new file without -// checking to see if there is one already - use Put() for that. -func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { - err := f.putUnchecked(ctx, in, src.Remote(), src.Size(), options...) 
- if err != nil { - return nil, err - } - return f.NewObject(ctx, src.Remote()) -} - -// CreateDir dir creates a directory with the given parent path -// base starts from root and may or may not include // -func (f *Fs) CreateDir(ctx context.Context, base string, leaf string) (err error) { - base = "//" + strings.Trim(base, "/") - - var resp *http.Response - var apiErr api.Error - opts := rest.Opts{ - Method: "PUT", - Path: "/user/files", - } - mkdir := api.CreateFolderRequest{ - Name: f.opt.Enc.FromStandardName(leaf), - Path: f.opt.Enc.FromStandardPath(base), - Token: f.opt.AccessToken, - } - err = f.pacer.Call(func() (bool, error) { - resp, err = f.srv.CallJSON(ctx, &opts, &mkdir, &apiErr) - return shouldRetry(ctx, resp, err) - }) - if err != nil { - return err - } - // checking if the dir exists beforehand would be slower so we'll just ignore the error here - if apiErr.StatusCode != 0 && !strings.Contains(apiErr.Data, "already exists") { - return apiErr - } - return nil -} - -func (f *Fs) mkDirs(ctx context.Context, path string) (err error) { - // chop of any leading or trailing slashes - dirs := strings.Split(path, "/") - var base = "" - for _, element := range dirs { - // create every dir one by one - if element != "" { - err = f.CreateDir(ctx, base, element) - if err != nil { - return err - } - base += "/" + element - } - } - return nil -} - -// Mkdir makes the directory (container, bucket) -// -// Shouldn't return an error if it already exists -func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) { - if dir == "" || dir == "." { - return f.mkDirs(ctx, f.root) - } - return f.mkDirs(ctx, path.Join(f.root, dir)) -} - -// may or may not delete folders with contents? -func (f *Fs) purge(ctx context.Context, folderID uint64) (err error) { - var resp *http.Response - var apiErr api.Error - opts := rest.Opts{ - Method: "DELETE", - Path: "/user/files", - } - rm := api.DeleteFolderRequest{ - FolderID: folderID, - Token: f.opt.AccessToken, - } - err = f.pacer.Call(func() (bool, error) { - resp, err = f.srv.CallJSON(ctx, &opts, &rm, &apiErr) - return shouldRetry(ctx, resp, err) - }) - if err != nil { - return err - } - if apiErr.StatusCode != 0 { - return apiErr - } - return nil -} - -// Rmdir removes the directory (container, bucket) if empty -// -// Return an error if it doesn't exist or isn't empty -func (f *Fs) Rmdir(ctx context.Context, dir string) error { - info, err := f.readMetaDataForPath(ctx, f.dirPath(dir), &api.MetadataRequestOptions{Limit: 10}) - if err != nil { - return err - } - if len(info.Data.Folders) > 0 || len(info.Data.Files) > 0 { - return fs.ErrorDirectoryNotEmpty - } - - return f.purge(ctx, info.Data.CurrentFolder.FolderID) -} - -// Move src to this remote using server side move operations. 
-func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) { - srcObj, ok := src.(*Object) - if !ok { - fs.Debugf(src, "Can't move - not same remote type") - return nil, fs.ErrorCantMove - } - - srcBase, srcLeaf := srcObj.fs.splitPathFull(src.Remote()) - dstBase, dstLeaf := f.splitPathFull(remote) - - needRename := srcLeaf != dstLeaf - needMove := srcBase != dstBase - - // do the move if required - if needMove { - err := f.mkDirs(ctx, strings.Trim(dstBase, "/")) - if err != nil { - return nil, fmt.Errorf("move: failed to make destination dirs: %w", err) - } - - err = f.move(ctx, dstBase, srcObj.code) - if err != nil { - return nil, err - } - } - - // rename to final name if we need to - if needRename { - err := f.updateFileInformation(ctx, &api.UpdateFileInformation{ - Token: f.opt.AccessToken, - FileCode: srcObj.code, - NewName: f.opt.Enc.FromStandardName(dstLeaf), - Public: f.public, - }) - if err != nil { - return nil, fmt.Errorf("move: failed final rename: %w", err) - } - } - - // copy the old object and apply the changes - newObj := *srcObj - newObj.remote = remote - newObj.fs = f - return &newObj, nil -} - -// renameDir renames a directory -func (f *Fs) renameDir(ctx context.Context, folderID uint64, newName string) (err error) { - var resp *http.Response - var apiErr api.Error - opts := rest.Opts{ - Method: "PATCH", - Path: "/user/files", - } - rename := api.RenameFolderRequest{ - Token: f.opt.AccessToken, - FolderID: folderID, - NewName: newName, - } - err = f.pacer.Call(func() (bool, error) { - resp, err = f.srv.CallJSON(ctx, &opts, &rename, &apiErr) - return shouldRetry(ctx, resp, err) - }) - if err != nil { - return err - } - if apiErr.StatusCode != 0 { - return apiErr - } - return nil -} - -// DirMove moves src, srcRemote to this remote at dstRemote -// using server-side move operations. 
-// -// Will only be called if src.Fs().Name() == f.Name() -// -// If it isn't possible then return fs.ErrorCantDirMove -// -// If destination exists then return fs.ErrorDirExists -func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error { - srcFs, ok := src.(*Fs) - if !ok { - fs.Debugf(srcFs, "Can't move directory - not same remote type") - return fs.ErrorCantDirMove - } - - // find out source - srcPath := srcFs.dirPath(srcRemote) - srcInfo, err := f.readMetaDataForPath(ctx, srcPath, &api.MetadataRequestOptions{Limit: 1}) - if err != nil { - return fmt.Errorf("dirmove: source not found: %w", err) - } - // check if the destination already exists - dstPath := f.dirPath(dstRemote) - _, err = f.readMetaDataForPath(ctx, dstPath, &api.MetadataRequestOptions{Limit: 1}) - if err == nil { - return fs.ErrorDirExists - } - - // make the destination parent path - dstBase, dstName := f.splitPathFull(dstRemote) - err = f.mkDirs(ctx, strings.Trim(dstBase, "/")) - if err != nil { - return fmt.Errorf("dirmove: failed to create dirs: %w", err) - } - - // find the destination parent dir - dstInfo, err := f.readMetaDataForPath(ctx, dstBase, &api.MetadataRequestOptions{Limit: 1}) - if err != nil { - return fmt.Errorf("dirmove: failed to read destination: %w", err) - } - srcBase, srcName := srcFs.splitPathFull(srcRemote) - - needRename := srcName != dstName - needMove := srcBase != dstBase - - // if we have to rename we'll have to use a temporary name since - // there could already be a directory with the same name as the src directory - if needRename { - // rename to a temporary name - tmpName := "rcloneTemp" + random.String(8) - err = f.renameDir(ctx, srcInfo.Data.CurrentFolder.FolderID, tmpName) - if err != nil { - return fmt.Errorf("dirmove: failed initial rename: %w", err) - } - } - - // do the move - if needMove { - opts := rest.Opts{ - Method: "PATCH", - Path: "/user/files", - } - move := api.MoveFolderRequest{ - Token: f.opt.AccessToken, - FolderID: srcInfo.Data.CurrentFolder.FolderID, - DestinationFolderID: dstInfo.Data.CurrentFolder.FolderID, - Action: "move", - } - var resp *http.Response - var apiErr api.Error - err = f.pacer.Call(func() (bool, error) { - resp, err = f.srv.CallJSON(ctx, &opts, &move, &apiErr) - return shouldRetry(ctx, resp, err) - }) - if err != nil { - return fmt.Errorf("dirmove: failed to move: %w", err) - } - if apiErr.StatusCode != 0 { - return apiErr - } - } - - // rename to final name - if needRename { - err = f.renameDir(ctx, srcInfo.Data.CurrentFolder.FolderID, dstName) - if err != nil { - return fmt.Errorf("dirmove: failed final rename: %w", err) - } - } - return nil -} - -func (f *Fs) copy(ctx context.Context, dstPath string, fileID string) (err error) { - meta, err := f.readMetaDataForPath(ctx, dstPath, &api.MetadataRequestOptions{Limit: 10}) - if err != nil { - return err - } - - opts := rest.Opts{ - Method: "PATCH", - Path: "/user/files", - } - cp := api.CopyMoveFileRequest{ - Token: f.opt.AccessToken, - FileCodes: fileID, - DestinationFolderID: meta.Data.CurrentFolder.FolderID, - Action: "copy", - } - - var resp *http.Response - var info api.UpdateResponse - err = f.pacer.Call(func() (bool, error) { - resp, err = f.srv.CallJSON(ctx, &opts, &cp, &info) - return shouldRetry(ctx, resp, err) - }) - if err != nil { - return fmt.Errorf("couldn't copy file: %w", err) - } - if info.StatusCode != 0 { - return fmt.Errorf("copy: api error: %d - %s", info.StatusCode, info.Message) - } - return err -} - -// Copy src to this remote using server side 
move operations. -func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) { - srcObj, ok := src.(*Object) - if !ok { - fs.Debugf(src, "Can't copy - not same remote type") - return nil, fs.ErrorCantMove - } - - _, srcLeaf := f.splitPath(src.Remote()) - dstBase, dstLeaf := f.splitPath(remote) - - needRename := srcLeaf != dstLeaf - - err := f.mkDirs(ctx, path.Join(f.root, dstBase)) - if err != nil { - return nil, fmt.Errorf("copy: failed to make destination dirs: %w", err) - } - - err = f.copy(ctx, f.dirPath(dstBase), srcObj.code) - if err != nil { - return nil, err - } - - newObj, err := f.NewObject(ctx, path.Join(dstBase, srcLeaf)) - if err != nil { - return nil, fmt.Errorf("copy: couldn't find copied object: %w", err) - } - - if needRename { - err := f.updateFileInformation(ctx, &api.UpdateFileInformation{ - Token: f.opt.AccessToken, - FileCode: newObj.(*Object).code, - NewName: f.opt.Enc.FromStandardName(dstLeaf), - Public: f.public, - }) - if err != nil { - return nil, fmt.Errorf("copy: failed final rename: %w", err) - } - newObj.(*Object).remote = remote - } - - return newObj, nil -} - -// ------------------------------------------------------------ - -// Fs returns the parent Fs -func (o *Object) Fs() fs.Info { - return o.fs -} - -// Return a string version -func (o *Object) String() string { - if o == nil { - return "" - } - return o.remote -} - -// Remote returns the remote path -func (o *Object) Remote() string { - return o.remote -} - -// ModTime returns the modification time of the object -// -// It attempts to read the objects mtime and if that isn't present the -// LastModified returned in the http headers -func (o *Object) ModTime(ctx context.Context) time.Time { - ci := fs.GetConfig(ctx) - return time.Time(ci.DefaultTime) -} - -// Size returns the size of an object in bytes -func (o *Object) Size() int64 { - return o.size -} - -// Hash returns the Md5sum of an object returning a lowercase hex string -func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) { - return "", hash.ErrUnsupported -} - -// ID returns the ID of the Object if known, or "" if not -func (o *Object) ID() string { - return o.code -} - -// Storable returns whether this object is storable -func (o *Object) Storable() bool { - return true -} - -// SetModTime sets the modification time of the local fs object -func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error { - return fs.ErrorCantSetModTime -} - -// Open an object for read -func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) { - opts := rest.Opts{ - Method: "GET", - Path: "/link", - Parameters: url.Values{ - "token": []string{o.fs.opt.AccessToken}, - "file_code": []string{o.code}, - }, - } - var dl api.Download - var resp *http.Response - err = o.fs.pacer.Call(func() (bool, error) { - resp, err = o.fs.srv.CallJSON(ctx, &opts, nil, &dl) - return shouldRetry(ctx, resp, err) - }) - if err != nil { - return nil, fmt.Errorf("open: failed to get download link: %w", err) - } - - fs.FixRangeOption(options, o.size) - opts = rest.Opts{ - Method: "GET", - RootURL: dl.Data.DownloadLink, - Options: options, - } - - err = o.fs.pacer.Call(func() (bool, error) { - resp, err = o.fs.srv.Call(ctx, &opts) - return shouldRetry(ctx, resp, err) - }) - - if err != nil { - return nil, err - } - return resp.Body, err -} - -// Update the already existing object -// -// Copy the reader into the object updating modTime and size. 
-// -// The new object may have been created if an error is returned -func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error { - if src.Size() < 0 { - return errors.New("refusing to update with unknown size") - } - - // upload with new size but old name - err := o.fs.putUnchecked(ctx, in, o.Remote(), src.Size(), options...) - if err != nil { - return err - } - - // delete duplicate object after successful upload - err = o.Remove(ctx) - if err != nil { - return fmt.Errorf("failed to remove old version: %w", err) - } - - // Fetch new object after deleting the duplicate - info, err := o.fs.NewObject(ctx, o.Remote()) - if err != nil { - return err - } - - // Replace guts of old object with new one - *o = *info.(*Object) - - return nil -} - -// Remove an object -func (o *Object) Remove(ctx context.Context) error { - opts := rest.Opts{ - Method: "DELETE", - Path: "/user/files", - } - delete := api.RemoveFileRequest{ - Token: o.fs.opt.AccessToken, - FileCodes: o.code, - } - var info api.UpdateResponse - err := o.fs.pacer.Call(func() (bool, error) { - resp, err := o.fs.srv.CallJSON(ctx, &opts, &delete, &info) - return shouldRetry(ctx, resp, err) - }) - if err != nil { - return err - } - if info.StatusCode != 0 { - return fmt.Errorf("remove: api error: %d - %s", info.StatusCode, info.Message) - } - return nil -} - -// Check the interfaces are satisfied -var ( - _ fs.Fs = (*Fs)(nil) - _ fs.Copier = (*Fs)(nil) - _ fs.Mover = (*Fs)(nil) - _ fs.DirMover = (*Fs)(nil) - _ fs.Object = (*Object)(nil) -) diff --git a/backend/uptobox/uptobox_test.go b/backend/uptobox/uptobox_test.go deleted file mode 100644 index 4210b884d5b9d..0000000000000 --- a/backend/uptobox/uptobox_test.go +++ /dev/null @@ -1,21 +0,0 @@ -// Test Uptobox filesystem interface -package uptobox_test - -import ( - "testing" - - "github.com/rclone/rclone/backend/uptobox" - "github.com/rclone/rclone/fstest" - "github.com/rclone/rclone/fstest/fstests" -) - -// TestIntegration runs integration tests against the remote -func TestIntegration(t *testing.T) { - if *fstest.RemoteName == "" { - *fstest.RemoteName = "TestUptobox:" - } - fstests.Run(t, &fstests.Opt{ - RemoteName: *fstest.RemoteName, - NilObject: (*uptobox.Object)(nil), - }) -} diff --git a/backend/zoho/zoho.go b/backend/zoho/zoho.go index 30adb413eca44..3b40e08286df0 100644 --- a/backend/zoho/zoho.go +++ b/backend/zoho/zoho.go @@ -817,7 +817,7 @@ func (f *Fs) upload(ctx context.Context, name string, parent string, size int64, params.Set("filename", url.QueryEscape(name)) params.Set("parent_id", parent) params.Set("override-name-exist", strconv.FormatBool(true)) - formReader, contentType, overhead, err := rest.MultipartUpload(ctx, in, nil, "content", name) + formReader, contentType, overhead, err := rest.MultipartUpload(ctx, in, nil, "content", name, "application/octet-stream") if err != nil { return nil, fmt.Errorf("failed to make multipart upload: %w", err) } diff --git a/bin/make_manual.py b/bin/make_manual.py index 86a3e04813d15..68d0164fc029a 100755 --- a/bin/make_manual.py +++ b/bin/make_manual.py @@ -43,9 +43,11 @@ "compress.md", "combine.md", "doi.md", + "drime.md" "dropbox.md", "filefabric.md", "filelu.md", + "filen.md", "filescom.md", "ftp.md", "gofile.md", @@ -85,11 +87,11 @@ "protondrive.md", "seafile.md", "sftp.md", + "shade.md", "smb.md", "storj.md", "sugarsync.md", "ulozto.md", - "uptobox.md", "union.md", "webdav.md", "yandex.md", diff --git a/cmd/gitannex/gitannex_test.go b/cmd/gitannex/gitannex_test.go index 
7a45ee522ff20..7ee7452db0461 100644 --- a/cmd/gitannex/gitannex_test.go +++ b/cmd/gitannex/gitannex_test.go @@ -341,7 +341,7 @@ func (h *testState) preconfigureServer() { // The `\\?\` prefix tells Windows APIs to pass strings unmodified to the // filesystem without additional parsing [1]. Our workaround is roughly to add // the prefix to whichever parameter doesn't have it (when the OS is Windows). -// I'm not sure this generalizes, but it works for the the kinds of inputs we're +// I'm not sure this generalizes, but it works for the kinds of inputs we're // throwing at it. // // [1]: https://learn.microsoft.com/en-us/windows/win32/fileio/naming-a-file?redirectedfrom=MSDN#win32-file-namespaces diff --git a/cmd/ls/lshelp/lshelp.go b/cmd/ls/lshelp/lshelp.go index f4cae3d19d87f..445f190dbf462 100644 --- a/cmd/ls/lshelp/lshelp.go +++ b/cmd/ls/lshelp/lshelp.go @@ -26,6 +26,10 @@ Note that |ls| and |lsl| recurse by default - use |--max-depth 1| to stop the re The other list commands |lsd|,|lsf|,|lsjson| do not recurse by default - use |-R| to make them recurse. +List commands prefer a recursive method that uses more memory but fewer +transactions by default. Use |--disable ListR| to suppress the behavior. +See [|--fast-list|](/docs/#fast-list) for more details. + Listing a nonexistent directory will produce an error except for remotes which can't have empty directories (e.g. s3, swift, or gcs - the bucket-based remotes).`, "|", "`") diff --git a/cmd/lsjson/lsjson.go b/cmd/lsjson/lsjson.go index 9f2390d3eeca0..d786bd740f8a6 100644 --- a/cmd/lsjson/lsjson.go +++ b/cmd/lsjson/lsjson.go @@ -97,7 +97,7 @@ with the following options: - If ` + "`--files-only`" + ` is specified then files will be returned only, no directories. -If ` + "`--stat`" + ` is set then the the output is not an array of items, +If ` + "`--stat`" + ` is set then the output is not an array of items, but instead a single JSON blob will be returned about the item pointed to. This will return an error if the item isn't found, however on bucket based backends (like s3, gcs, b2, azureblob etc) if the item isn't found it will diff --git a/cmd/mountlib/rc.go b/cmd/mountlib/rc.go index 65af12d71284a..b569153b52734 100644 --- a/cmd/mountlib/rc.go +++ b/cmd/mountlib/rc.go @@ -71,7 +71,7 @@ rclone rc mount/mount fs=mydrive: mountPoint=/home//mountPoint mountType=m rclone rc mount/mount fs=TestDrive: mountPoint=/mnt/tmp vfsOpt='{"CacheMode": 2}' mountOpt='{"AllowOther": true}' ` + "```" + ` -The vfsOpt are as described in options/get and can be seen in the the +The vfsOpt are as described in options/get and can be seen in the "vfs" section when running and the mountOpt can be seen in the "mount" section: ` + "```console" + ` diff --git a/cmd/obscure/obscure.go b/cmd/obscure/obscure.go index 531192b0e6aa4..7159670c6d5ac 100644 --- a/cmd/obscure/obscure.go +++ b/cmd/obscure/obscure.go @@ -34,7 +34,7 @@ argument by passing a hyphen as an argument. This will use the first line of STDIN as the password not including the trailing newline. 
` + "```console" + ` -echo "secretpassword" | rclone obscure - +echo 'secretpassword' | rclone obscure - ` + "```" + ` If there is no data on STDIN to read, rclone obscure will default to diff --git a/cmd/serve/proxy/proxy_test.go b/cmd/serve/proxy/proxy_test.go index 574a7aa15671d..8f802ab12c2bb 100644 --- a/cmd/serve/proxy/proxy_test.go +++ b/cmd/serve/proxy/proxy_test.go @@ -153,7 +153,7 @@ func TestRun(t *testing.T) { fs.Fatal(nil, "error generating test private key "+privateKeyErr.Error()) } publicKey, publicKeyError := ssh.NewPublicKey(&privateKey.PublicKey) - if privateKeyErr != nil { + if publicKeyError != nil { fs.Fatal(nil, "error generating test public key "+publicKeyError.Error()) } diff --git a/cmd/serve/s3/serve_s3.md b/cmd/serve/s3/serve_s3.md index 7143da9acfb4b..a1281b515b07b 100644 --- a/cmd/serve/s3/serve_s3.md +++ b/cmd/serve/s3/serve_s3.md @@ -13,6 +13,26 @@ docs](https://docs.aws.amazon.com/general/latest/gr/signature-version-4.html)). `--auth-key` is not provided then `serve s3` will allow anonymous access. +Like all rclone flags `--auth-key` can be set via environment +variables, in this case `RCLONE_AUTH_KEY`. Since this flag can be +repeated, the input to `RCLONE_AUTH_KEY` is CSV encoded. Because the +`accessKey,secretKey` has a comma in, this means it needs to be in +quotes. + +```console +export RCLONE_AUTH_KEY='"user,pass"' +rclone serve s3 ... +``` + +Or to supply multiple identities: + +```console +export RCLONE_AUTH_KEY='"user1,pass1","user2,pass2"' +rclone serve s3 ... +``` + +Setting this variable without quotes will produce an error. + Please note that some clients may require HTTPS endpoints. See [the SSL docs](#tls-ssl) for more information. diff --git a/cmd/serve/s3/server.go b/cmd/serve/s3/server.go index 4ef79270186cd..0d690b6421327 100644 --- a/cmd/serve/s3/server.go +++ b/cmd/serve/s3/server.go @@ -70,6 +70,11 @@ func newServer(ctx context.Context, f fs.Fs, opt *Options, vfsOpt *vfscommon.Opt w.s3Secret = getAuthSecret(opt.AuthKey) } + authList, err := authlistResolver(opt.AuthKey) + if err != nil { + return nil, fmt.Errorf("parsing auth list failed: %q", err) + } + var newLogger logger w.faker = gofakes3.New( newBackend(w), @@ -77,7 +82,7 @@ func newServer(ctx context.Context, f fs.Fs, opt *Options, vfsOpt *vfscommon.Opt gofakes3.WithLogger(newLogger), gofakes3.WithRequestID(rand.Uint64()), gofakes3.WithoutVersioning(), - gofakes3.WithV4Auth(authlistResolver(opt.AuthKey)), + gofakes3.WithV4Auth(authList), gofakes3.WithIntegrityCheck(true), // Check Content-MD5 if supplied ) @@ -92,7 +97,7 @@ func newServer(ctx context.Context, f fs.Fs, opt *Options, vfsOpt *vfscommon.Opt w._vfs = vfs.New(f, vfsOpt) if len(opt.AuthKey) > 0 { - w.faker.AddAuthKeys(authlistResolver(opt.AuthKey)) + w.faker.AddAuthKeys(authList) } } diff --git a/cmd/serve/s3/utils.go b/cmd/serve/s3/utils.go index 5020df63af9e9..cb6a926a37595 100644 --- a/cmd/serve/s3/utils.go +++ b/cmd/serve/s3/utils.go @@ -3,6 +3,7 @@ package s3 import ( "context" "encoding/hex" + "errors" "io" "os" "path" @@ -125,15 +126,14 @@ func rmdirRecursive(p string, VFS *vfs.VFS) { } } -func authlistResolver(list []string) map[string]string { +func authlistResolver(list []string) (map[string]string, error) { authList := make(map[string]string) for _, v := range list { parts := strings.Split(v, ",") if len(parts) != 2 { - fs.Infof(nil, "Ignored: invalid auth pair %s", v) - continue + return nil, errors.New("invalid auth pair: expecting a single comma") } authList[parts[0]] = parts[1] } - return authList + 
return authList, nil } diff --git a/cmd/serve/sftp/connection.go b/cmd/serve/sftp/connection.go index 5d174c2ab362d..a9282fc5bb3be 100644 --- a/cmd/serve/sftp/connection.go +++ b/cmd/serve/sftp/connection.go @@ -291,7 +291,7 @@ func (c *conn) handleChannel(newChannel ssh.NewChannel) { } } fs.Debugf(c.what, " - accepted: %v\n", ok) - err = req.Reply(ok, reply) + err := req.Reply(ok, reply) if err != nil { fs.Errorf(c.what, "Failed to Reply to request: %v", err) return diff --git a/cmd/serve/webdav/webdav.go b/cmd/serve/webdav/webdav.go index fbc64b7a483bb..7ea259737efa9 100644 --- a/cmd/serve/webdav/webdav.go +++ b/cmd/serve/webdav/webdav.go @@ -45,6 +45,10 @@ var OptionsInfo = fs.Options{{ Name: "disable_dir_list", Default: false, Help: "Disable HTML directory list on GET request for a directory", +}, { + Name: "disable_zip", + Default: false, + Help: "Disable zip download of directories", }}. Add(libhttp.ConfigInfo). Add(libhttp.AuthConfigInfo). @@ -57,6 +61,7 @@ type Options struct { Template libhttp.TemplateConfig EtagHash string `config:"etag_hash"` DisableDirList bool `config:"disable_dir_list"` + DisableZip bool `config:"disable_zip"` } // Opt is options set by command line flags @@ -408,6 +413,24 @@ func (w *WebDAV) serveDir(rw http.ResponseWriter, r *http.Request, dirRemote str return } dir := node.(*vfs.Dir) + + if r.URL.Query().Get("download") == "zip" && !w.opt.DisableZip { + fs.Infof(dirRemote, "%s: Zipping directory", r.RemoteAddr) + zipName := path.Base(dirRemote) + if dirRemote == "" { + zipName = "root" + } + rw.Header().Set("Content-Disposition", "attachment; filename=\""+zipName+".zip\"") + rw.Header().Set("Content-Type", "application/zip") + rw.Header().Set("Last-Modified", time.Now().UTC().Format(http.TimeFormat)) + err := vfs.CreateZip(ctx, dir, rw) + if err != nil { + serve.Error(ctx, dirRemote, rw, "Failed to create zip", err) + return + } + return + } + dirEntries, err := dir.ReadDirAll() if err != nil { @@ -417,6 +440,7 @@ func (w *WebDAV) serveDir(rw http.ResponseWriter, r *http.Request, dirRemote str // Make the entries for display directory := serve.NewDirectory(dirRemote, w.server.HTMLTemplate()) + directory.DisableZip = w.opt.DisableZip for _, node := range dirEntries { if vfscommon.Opt.NoModTime { directory.AddHTMLEntry(node.Path(), node.IsDir(), node.Size(), time.Time{}) diff --git a/docs/content/_index.md b/docs/content/_index.md index d5a19d6a82ed4..2ff6c82d0fe51 100644 --- a/docs/content/_index.md +++ b/docs/content/_index.md @@ -31,8 +31,8 @@ mv, mount, ls, ncdu, tree, rm, and cat. Rclone's familiar syntax includes shell pipeline support, and `--dry-run` protection. It is used at the command line, in scripts or via its [API](/rc). -Users call rclone _"The Swiss army knife of cloud storage"_, and -_"Technology indistinguishable from magic"_. +Users call rclone *"The Swiss army knife of cloud storage"*, and +*Technology indistinguishable from magic"*. Rclone really looks after your data. It preserves timestamps and verifies checksums at all times. Transfers over limited bandwidth; @@ -116,6 +116,7 @@ WebDAV or S3, that work out of the box.) 
{{< provider name="Akamai Netstorage" home="https://www.akamai.com/us/en/products/media-delivery/netstorage.jsp" config="/netstorage/" >}} {{< provider name="Alibaba Cloud (Aliyun) Object Storage System (OSS)" home="https://www.alibabacloud.com/product/oss/" config="/s3/#alibaba-oss" >}} {{< provider name="Amazon S3" home="https://aws.amazon.com/s3/" config="/s3/" >}} +{{< provider name="Bizfly Cloud Simple Storage" home="https://bizflycloud.vn/" config="/s3/#bizflycloud" >}} {{< provider name="Backblaze B2" home="https://www.backblaze.com/cloud-storage" config="/b2/" >}} {{< provider name="Box" home="https://www.box.com/" config="/box/" >}} {{< provider name="Ceph" home="http://ceph.com/" config="/s3/#ceph" >}} @@ -128,12 +129,14 @@ WebDAV or S3, that work out of the box.) {{< provider name="DigitalOcean Spaces" home="https://www.digitalocean.com/products/object-storage/" config="/s3/#digitalocean-spaces" >}} {{< provider name="Digi Storage" home="https://storage.rcs-rds.ro/" config="/koofr/#digi-storage" >}} {{< provider name="Dreamhost" home="https://www.dreamhost.com/cloud/storage/" config="/s3/#dreamhost" >}} +{{< provider name="Drime" home="https://www.drime.cloud/" config="/drime/" >}} {{< provider name="Dropbox" home="https://www.dropbox.com/" config="/dropbox/" >}} {{< provider name="Enterprise File Fabric" home="https://storagemadeeasy.com/about/" config="/filefabric/" >}} {{< provider name="Exaba" home="https://exaba.com/" config="/s3/#exaba" >}} {{< provider name="Fastmail Files" home="https://www.fastmail.com/" config="/webdav/#fastmail-files" >}} {{< provider name="FileLu Cloud Storage" home="https://filelu.com/" config="/filelu/" >}} {{< provider name="FileLu S5 (S3-Compatible Object Storage)" home="https://s5lu.com/" config="/s3/#filelu-s5" >}} +{{< provider name="Filen" home="https://www.filen.io/" config="/filen/" >}} {{< provider name="Files.com" home="https://www.files.com/" config="/filescom/" >}} {{< provider name="FlashBlade" home="https://www.purestorage.com/products/unstructured-data-storage.html" config="/s3/#pure-storage-flashblade" >}} {{< provider name="FTP" home="https://en.wikipedia.org/wiki/File_Transfer_Protocol" config="/ftp/" >}} @@ -203,6 +206,7 @@ WebDAV or S3, that work out of the box.) {{< provider name="Selectel" home="https://selectel.ru/services/cloud/storage/" config="/s3/#selectel" >}} {{< provider name="Servercore Object Storage" home="https://servercore.com/services/object-storage/" config="/s3/#servercore" >}} {{< provider name="SFTP" home="https://en.wikipedia.org/wiki/SSH_File_Transfer_Protocol" config="/sftp/" >}} +{{< provider name="Shade" home="https://shade.inc" config="/shade/" >}} {{< provider name="Sia" home="https://sia.tech/" config="/sia/" >}} {{< provider name="SMB / CIFS" home="https://en.wikipedia.org/wiki/Server_Message_Block" config="/smb/" >}} {{< provider name="Spectra Logic" home="https://spectralogic.com/blackpearl-nearline-object-gateway/" config="/s3/#spectralogic" >}} @@ -212,7 +216,6 @@ WebDAV or S3, that work out of the box.) 
{{< provider name="SugarSync" home="https://sugarsync.com/" config="/sugarsync/" >}} {{< provider name="Tencent Cloud Object Storage (COS)" home="https://intl.cloud.tencent.com/product/cos" config="/s3/#tencent-cos" >}} {{< provider name="Uloz.to" home="https://uloz.to" config="/ulozto/" >}} -{{< provider name="Uptobox" home="https://uptobox.com" config="/uptobox/" >}} {{< provider name="Wasabi" home="https://wasabi.com/" config="/s3/#wasabi" >}} {{< provider name="WebDAV" home="https://en.wikipedia.org/wiki/WebDAV" config="/webdav/" >}} {{< provider name="Yandex Disk" home="https://disk.yandex.com/" config="/yandex/" >}} diff --git a/docs/content/authors.md b/docs/content/authors.md index 3d5dec351b4b6..bea75210d23db 100644 --- a/docs/content/authors.md +++ b/docs/content/authors.md @@ -1050,3 +1050,18 @@ put them back in again. --> - Nikolay Kiryanov - Diana <5275194+DianaNites@users.noreply.github.com> - Duncan Smart +- vicerace +- Cliff Frey +- Vladislav Tropnikov +- Leo +- Johannes Rothe +- Tingsong Xu +- Jonas Tingeborn <134889+jojje@users.noreply.github.com> +- jhasse-shade +- vyv03354 +- masrlinu <5259918+masrlinu@users.noreply.github.com> +- vupn0712 <126212736+vupn0712@users.noreply.github.com> +- darkdragon-001 +- sys6101 +- Nicolas Dessart +- Qingwei Li <332664203@qq.com> diff --git a/docs/content/azureblob.md b/docs/content/azureblob.md index 3e4a9fb2720f6..11f9989e7eb66 100644 --- a/docs/content/azureblob.md +++ b/docs/content/azureblob.md @@ -103,6 +103,26 @@ MD5 hashes are stored with blobs. However blobs that were uploaded in chunks only have an MD5 if the source remote was capable of MD5 hashes, e.g. the local disk. +### Metadata and tags + +Rclone can map arbitrary metadata to Azure Blob headers, user metadata, and tags +when `--metadata` is enabled (or when using `--metadata-set` / `--metadata-mapper`). + +- Headers: Set these keys in metadata to map to the corresponding blob headers: + - `cache-control`, `content-disposition`, `content-encoding`, `content-language`, `content-type`. +- User metadata: Any other non-reserved keys are written as user metadata + (keys are normalized to lowercase). Keys starting with `x-ms-` are reserved and + are not stored as user metadata. +- Tags: Provide `x-ms-tags` as a comma-separated list of `key=value` pairs, e.g. + `x-ms-tags=env=dev,team=sync`. These are applied as blob tags on upload and on + server-side copies. Whitespace around keys/values is ignored. +- Modtime override: Provide `mtime` in RFC3339/RFC3339Nano format to override the + stored modtime persisted in user metadata. If `mtime` cannot be parsed, rclone + logs a debug message and ignores the override. + +Notes: +- Rclone ignores reserved `x-ms-*` keys (except `x-ms-tags`) for user metadata. 
+ ### Performance When uploading large files, increasing the value of diff --git a/docs/content/changelog.md b/docs/content/changelog.md index fbba79f4110b1..e90b70c550b82 100644 --- a/docs/content/changelog.md +++ b/docs/content/changelog.md @@ -6,6 +6,22 @@ description: "Rclone Changelog" # Changelog +## v1.72.1 - 2025-12-10 + +[See commits](https://github.com/rclone/rclone/compare/v1.72.0...v1.72.1) + +- Bug Fixes + - build: update to go1.25.5 to fix [CVE-2025-61729](https://pkg.go.dev/vuln/GO-2025-4155) + - doc fixes (Duncan Smart, Nick Craig-Wood) + - configfile: Fix piped config support (Jonas Tingeborn) + - log + - Fix PID not included in JSON log output (Tingsong Xu) + - Fix backtrace not going to the --log-file (Nick Craig-Wood) +- Google Cloud Storage + - Improve endpoint parameter docs (Johannes Rothe) +- S3 + - Add missing regions for Selectel provider (Nick Craig-Wood) + ## v1.72.0 - 2025-11-21 [See commits](https://github.com/rclone/rclone/compare/v1.71.0...v1.72.0) diff --git a/docs/content/commands/rclone.md b/docs/content/commands/rclone.md index 8fb1e6dd30f6f..8d670c739b892 100644 --- a/docs/content/commands/rclone.md +++ b/docs/content/commands/rclone.md @@ -1015,10 +1015,6 @@ rclone [flags] --union-search-policy string Policy to choose upstream on SEARCH category (default "ff") --union-upstreams string List of space separated upstreams -u, --update Skip files that are newer on the destination - --uptobox-access-token string Your access token - --uptobox-description string Description of the remote - --uptobox-encoding Encoding The encoding for the backend (default Slash,LtGt,DoubleQuote,BackQuote,Del,Ctl,LeftSpace,InvalidUtf8,Dot) - --uptobox-private Set to make uploaded files private --use-cookies Enable session cookiejar --use-json-log Use json log format --use-mmap Use mmap allocator (see docs) diff --git a/docs/content/commands/rclone_mount.md b/docs/content/commands/rclone_mount.md index ce5af7d95f136..9cba7200ab83d 100644 --- a/docs/content/commands/rclone_mount.md +++ b/docs/content/commands/rclone_mount.md @@ -336,7 +336,7 @@ full new copy of the file. When mounting with `--read-only`, attempts to write to files will fail *silently* as opposed to with a clear warning as in macFUSE. 
-# Mounting on Linux +## Mounting on Linux On newer versions of Ubuntu, you may encounter the following error when running `rclone mount`: diff --git a/docs/content/docs.md b/docs/content/docs.md index 8c597b3527396..abe735760a931 100644 --- a/docs/content/docs.md +++ b/docs/content/docs.md @@ -43,9 +43,11 @@ See the following for detailed instructions for - [Crypt](/crypt/) - to encrypt other remotes - [DigitalOcean Spaces](/s3/#digitalocean-spaces) - [Digi Storage](/koofr/#digi-storage) +- [Drime](/drime/) - [Dropbox](/dropbox/) - [Enterprise File Fabric](/filefabric/) - [FileLu Cloud Storage](/filelu/) +- [Filen](/filen/) - [Files.com](/filescom/) - [FTP](/ftp/) - [Gofile](/gofile/) @@ -83,13 +85,13 @@ See the following for detailed instructions for - [rsync.net](/sftp/#rsync-net) - [Seafile](/seafile/) - [SFTP](/sftp/) +- [Shade](/shade/) - [Sia](/sia/) - [SMB](/smb/) - [Storj](/storj/) - [SugarSync](/sugarsync/) - [Union](/union/) - [Uloz.to](/ulozto/) -- [Uptobox](/uptobox/) - [WebDAV](/webdav/) - [Yandex Disk](/yandex/) - [Zoho WorkDrive](/zoho/) diff --git a/docs/content/drime.md b/docs/content/drime.md new file mode 100644 index 0000000000000..7b8055e2a3fad --- /dev/null +++ b/docs/content/drime.md @@ -0,0 +1,244 @@ +--- +title: "Drime" +description: "Rclone docs for Drime" +versionIntroduced: "v1.73" +--- + +# {{< icon "fa fa-cloud" >}} Drime + +[Drime](https://drime.cloud/) is a cloud storage and transfer service focused +on fast, resilient file delivery. It offers both free and paid tiers with +emphasis on high-speed uploads and link sharing. + +To setup Drime you need to log in, navigate to Settings, Developer, and create a +token to use as an API access key. Give it a sensible name and copy the token +for use in the config. + +## Configuration + +Here is a run through of `rclone config` to make a remote called `remote`. + +Firstly run: + + +```console +rclone config +``` + +Then follow through the interactive setup: + + +```text +No remotes found, make a new one? +n) New remote +s) Set configuration password +q) Quit config +n/s/q> n + +Enter name for new remote. +name> remote + +Option Storage. +Type of storage to configure. +Choose a number from below, or type in your own value. +XX / Drime + \ (drime) +Storage> drime + +Option access_token. +API Access token +You can get this from the web control panel. +Enter a value. Press Enter to leave empty. +access_token> YOUR_API_ACCESS_TOKEN + +Edit advanced config? +y) Yes +n) No (default) +y/n> n + +Configuration complete. +Options: +- type: drime +- access_token: YOUR_API_ACCESS_TOKEN +Keep this "remote" remote? +y) Yes this is OK (default) +e) Edit this remote +d) Delete this remote +y/e/d> y +``` + +Once configured you can then use `rclone` like this (replace `remote` with the +name you gave your remote): + +List directories and files in the top level of your Drime + +```console +rclone lsf remote: +``` + +To copy a local directory to a Drime directory called backup + +```console +rclone copy /home/source remote:backup +``` + + +### Modification times and hashes + +Drime does not support modification times or hashes. + +This means that by default syncs will only use the size of the file to determine +if it needs updating. + +You can use the `--update` flag which will use the time the object was uploaded. +For many operations this is sufficient to determine if it has changed. However +files created with timestamps in the past will be missed by the sync if using +`--update`. 
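+
+For example, reusing the paths from the copy example above, a sync that relies
+on upload times rather than sizes alone could be run as:
+
+```console
+rclone sync --update /home/source remote:backup
+```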
+ + +### Restricted filename characters + +In addition to the [default restricted characters set](/overview/#restricted-characters) +the following characters are also replaced: + +| Character | Value | Replacement | +| --------- |:-----:|:-----------:| +| \ | 0x5C | \ | + +File names can also not start or end with the following characters. These only +get replaced if they are the first or last character in the name: + +| Character | Value | Replacement | +| --------- |:-----:|:-----------:| +| SP | 0x20 | ␠ | + +Invalid UTF-8 bytes will also be [replaced](/overview/#invalid-utf8), +as they can't be used in JSON strings. + +### Root folder ID + +You can set the `root_folder_id` for rclone. This is the directory +(identified by its `Folder ID`) that rclone considers to be the root +of your Drime drive. + +Normally you will leave this blank and rclone will determine the +correct root to use itself and fill in the value in the config file. + +However you can set this to restrict rclone to a specific folder +hierarchy. + +In order to do this you will have to find the `Folder ID` of the +directory you wish rclone to display. + +You can do this with rclone + +```console +$ rclone lsf -Fip --dirs-only remote: +d6341f53-ee65-4f29-9f59-d11e8070b2a0;Files/ +f4f5c9b8-6ece-478b-b03e-4538edfe5a1c;Photos/ +d50e356c-29ca-4b27-a3a7-494d91026e04;Videos/ +``` + +The ID to use is the part before the `;` so you could set + +```text +root_folder_id = d6341f53-ee65-4f29-9f59-d11e8070b2a0 +``` + +To restrict rclone to the `Files` directory. + + +### Standard options + +Here are the Standard options specific to drime (Drime). + +#### --drime-access-token + +API Access token + +You can get this from the web control panel. + +Properties: + +- Config: access_token +- Env Var: RCLONE_DRIME_ACCESS_TOKEN +- Type: string +- Required: false + +### Advanced options + +Here are the Advanced options specific to drime (Drime). + +#### --drime-root-folder-id + +ID of the root folder + +Leave this blank normally, rclone will fill it in automatically. + +If you want rclone to be restricted to a particular folder you can +fill it in - see the docs for more info. + + +Properties: + +- Config: root_folder_id +- Env Var: RCLONE_DRIME_ROOT_FOLDER_ID +- Type: string +- Required: false + +#### --drime-workspace-id + +Account ID + +Leave this blank normally, rclone will fill it in automatically. + + +Properties: + +- Config: workspace_id +- Env Var: RCLONE_DRIME_WORKSPACE_ID +- Type: string +- Required: false + +#### --drime-list-chunk + +Number of items to list in each call + +Properties: + +- Config: list_chunk +- Env Var: RCLONE_DRIME_LIST_CHUNK +- Type: int +- Default: 1000 + +#### --drime-encoding + +The encoding for the backend. + +See the [encoding section in the overview](/overview/#encoding) for more info. + +Properties: + +- Config: encoding +- Env Var: RCLONE_DRIME_ENCODING +- Type: Encoding +- Default: Slash,BackSlash,Del,Ctl,LeftSpace,RightSpace,InvalidUtf8,Dot + +#### --drime-description + +Description of the remote. + +Properties: + +- Config: description +- Env Var: RCLONE_DRIME_DESCRIPTION +- Type: string +- Required: false + + + +## Limitations + +Drime only supports filenames up to 255 bytes in length, where filenames are +encoded in UTF8. 
+ diff --git a/docs/content/filen.md b/docs/content/filen.md new file mode 100644 index 0000000000000..5697ed120648d --- /dev/null +++ b/docs/content/filen.md @@ -0,0 +1,244 @@ +--- +title: "Filen" +description: "Rclone docs for Filen" +versionIntroduced: "1.73" +--- + +# {{< icon "fa fa-solid fa-f" >}} Filen +## Configuration +The initial setup for Filen requires that you get an API key for your account, +currently this is only possible using the [Filen CLI](https://github.com/FilenCloudDienste/filen-cli). +This means you must first download the CLI, login, and then run the `export-api-key` command. + +Here is an example of how to make a remote called `FilenRemote`. First run: + + rclone config + +This will guide you through an interactive setup process: +``` +No remotes found, make a new one? +n) New remote +s) Set configuration password +q) Quit config +n/s/q> n + +name> FilenRemote +Option Storage. + +Type of storage to configure. +Choose a number from below, or type in your own value. +[snip] +XX / Filen + \ "filen" +[snip] +Storage> filen + +Option Email. +The email of your Filen account +Enter a value. +Email> youremail@provider.com + +Option Password. +The password of your Filen account +Choose an alternative below. +y) Yes, type in my own password +g) Generate random password +y/g> y +Enter the password: +password: +Confirm the password: +password: + +Option API Key. +An API Key for your Filen account +Get this using the Filen CLI export-api-key command +You can download the Filen CLI from https://github.com/FilenCloudDienste/filen-cli +Choose an alternative below. +y) Yes, type in my own password +g) Generate random password +y/g> y +Enter the password: +password: +Confirm the password: +password: + +Edit advanced config? +y) Yes +n) No (default) +y/n> n + +Configuration complete. +Options: +- type: filen +- Email: youremail@provider.com +- Password: *** ENCRYPTED *** +- API Key: *** ENCRYPTED *** +Keep this "FilenRemote" remote? +y) Yes this is OK (default) +e) Edit this remote +d) Delete this remote +y/e/d> y +``` + +### Modification times and hashes +Modification times are fully supported for files, for directories, only the creation time matters. + +Filen supports SHA512 hashes. + +### Restricted filename characters +Invalid UTF-8 bytes will be [replaced](/overview/#invalid-utf8) + + +### API Key + + +### Standard options + +Here are the Standard options specific to filen (Filen). + +#### --filen-email + +Email of your Filen account + +Properties: + +- Config: email +- Env Var: RCLONE_FILEN_EMAIL +- Type: string +- Required: true + +#### --filen-password + +Password of your Filen account + +**NB** Input to this must be obscured - see [rclone obscure](/commands/rclone_obscure/). + +Properties: + +- Config: password +- Env Var: RCLONE_FILEN_PASSWORD +- Type: string +- Required: true + +#### --filen-api-key + +API Key for your Filen account + +Get this using the Filen CLI export-api-key command +You can download the Filen CLI from https://github.com/FilenCloudDienste/filen-cli + +**NB** Input to this must be obscured - see [rclone obscure](/commands/rclone_obscure/). + +Properties: + +- Config: api_key +- Env Var: RCLONE_FILEN_API_KEY +- Type: string +- Required: true + +### Advanced options + +Here are the Advanced options specific to filen (Filen). + +#### --filen-encoding + +The encoding for the backend. + +See the [encoding section in the overview](/overview/#encoding) for more info. 
+ +Properties: + +- Config: encoding +- Env Var: RCLONE_FILEN_ENCODING +- Type: Encoding +- Default: Slash,Del,Ctl,InvalidUtf8,Dot + +#### --filen-upload-concurrency + +Concurrency for multipart uploads. + +This is the number of chunks of the same file that are uploaded +concurrently for multipart uploads. + +Note that chunks are stored in memory and there may be up to +"--transfers" * "--filen-upload-concurrency" chunks stored at once +in memory. + +If you are uploading small numbers of large files over high-speed links +and these uploads do not fully utilize your bandwidth, then increasing +this may help to speed up the transfers. + +Properties: + +- Config: upload_concurrency +- Env Var: RCLONE_FILEN_UPLOAD_CONCURRENCY +- Type: int +- Default: 16 + +#### --filen-master-keys + +Master Keys (internal use only) + +Properties: + +- Config: master_keys +- Env Var: RCLONE_FILEN_MASTER_KEYS +- Type: string +- Required: false + +#### --filen-private-key + +Private RSA Key (internal use only) + +Properties: + +- Config: private_key +- Env Var: RCLONE_FILEN_PRIVATE_KEY +- Type: string +- Required: false + +#### --filen-public-key + +Public RSA Key (internal use only) + +Properties: + +- Config: public_key +- Env Var: RCLONE_FILEN_PUBLIC_KEY +- Type: string +- Required: false + +#### --filen-auth-version + +Authentication Version (internal use only) + +Properties: + +- Config: auth_version +- Env Var: RCLONE_FILEN_AUTH_VERSION +- Type: string +- Required: false + +#### --filen-base-folder-uuid + +UUID of Account Root Directory (internal use only) + +Properties: + +- Config: base_folder_uuid +- Env Var: RCLONE_FILEN_BASE_FOLDER_UUID +- Type: string +- Required: false + +#### --filen-description + +Description of the remote. + +Properties: + +- Config: description +- Env Var: RCLONE_FILEN_DESCRIPTION +- Type: string +- Required: false + + diff --git a/docs/content/flags.md b/docs/content/flags.md index cc3b11e0d53ba..24706dbf1aea2 100644 --- a/docs/content/flags.md +++ b/docs/content/flags.md @@ -1138,10 +1138,6 @@ Backend-only flags (these can be set in the config file also). --union-min-free-space SizeSuffix Minimum viable free space for lfs/eplfs policies (default 1Gi) --union-search-policy string Policy to choose upstream on SEARCH category (default "ff") --union-upstreams string List of space separated upstreams - --uptobox-access-token string Your access token - --uptobox-description string Description of the remote - --uptobox-encoding Encoding The encoding for the backend (default Slash,LtGt,DoubleQuote,BackQuote,Del,Ctl,LeftSpace,InvalidUtf8,Dot) - --uptobox-private Set to make uploaded files private --webdav-auth-redirect Preserve authentication on redirect --webdav-bearer-token string Bearer token instead of user/pass (e.g. a Macaroon) --webdav-bearer-token-command string Command to run to get a bearer token diff --git a/docs/content/ftp.md b/docs/content/ftp.md index 38b9bad0cc87d..6e8495388a1c2 100644 --- a/docs/content/ftp.md +++ b/docs/content/ftp.md @@ -498,6 +498,12 @@ URL for HTTP CONNECT proxy Set this to a URL for an HTTP proxy which supports the HTTP CONNECT verb. +Supports the format http://user:pass@host:port, http://host:port, http://host. 
+ +Example: + + http://myUser:myPass@proxyhostname.example.com:8000 + Properties: diff --git a/docs/content/internxt.md b/docs/content/internxt.md index 2ad5c98595b88..dc61eba70309c 100644 --- a/docs/content/internxt.md +++ b/docs/content/internxt.md @@ -1,22 +1,20 @@ --- -title: "Internxt.com" -description: "Rclone docs for Internxt.com" -versionIntroduced: "TODO" +title: "Internxt Drive" +description: "Rclone docs for Internxt Drive" +versionIntroduced: "v1.69" --- -# {{< icon "fas fa-angle-double-down" >}} Internxt.com +# {{< icon "fas fa-cloud" >}} Internxt Drive + +[Internxt Drive](https://internxt.com) is a zero-knowledge encrypted cloud storage service. Paths are specified as `remote:path` Paths may be as deep as required, e.g. `remote:directory/subdirectory`. -The initial setup for Internxt.com involves filling in the user credentials. -`rclone config` walks you through it. - ## Configuration -You can configure it as a remote like this with `rclone config` too if -you want to: +Internxt uses browser-based authentication. Run `rclone config` and follow the prompts: ``` No remotes found, make a new one? @@ -24,47 +22,100 @@ n) New remote s) Set configuration password q) Quit config n/s/q> n -name> remote +name> internxt Type of storage to configure. -Enter a string value. Press Enter for the default (""). Choose a number from below, or type in your own value [snip] -XX / Internxt +XX / Internxt Drive \ "internxt" [snip] Storage> internxt -** See help for internxt backend at: https://rclone.org/internxt/ ** -Remote config +If your browser doesn't open automatically, visit this URL: +https://drive.internxt.com/login?universalLink=true&redirectUri=... + +Log in and authorize rclone for access +Waiting for authentication... + +Authentication successful! Configuration complete. Options: - type: internxt -Keep this "remote" remote? +Keep this "internxt" remote? y) Yes this is OK (default) e) Edit this remote d) Delete this remote y/e/d> y ``` -Because the internxt backend isn't persistent it is most useful for -testing or with an rclone server or rclone mount, e.g. +Your default web browser will open automatically. Log in to your Internxt account to authorize rclone. + +### Headless/Remote Machine Setup + +If you're configuring rclone on a remote or headless machine (such as a server or NAS), the Internxt backend requires browser-based authentication. + +See the [rclone remote setup documentation](/remote_setup/) for detailed instructions on how to configure remotes on machines without a browser. The documentation covers: + +- Using `rclone authorize` to run authorization on a local machine and transfer the token +- SSH tunneling to forward the authentication port +- Copying the config file from a configured machine + +### Security Considerations + +The authentication process stores your mnemonic and JWT token in the rclone configuration file. It is **strongly recommended** to encrypt your rclone config to protect these sensitive credentials: + +``` +rclone config password +``` + +This will prompt you to set a password that encrypts your entire configuration file. 
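+
+If you run rclone unattended (for example from cron), the configuration
+password can also be supplied non-interactively, e.g. via the
+`RCLONE_CONFIG_PASS` environment variable or the `--password-command` flag.
+A minimal sketch (generic rclone behaviour, not specific to Internxt):
+
+```
+export RCLONE_CONFIG_PASS='your-config-password'
+rclone ls internxt:
+```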
- rclone mount :internxt: /mnt/tmp - rclone serve webdav :internxt: - rclone serve sftp :internxt: +## Usage Examples + +``` +# List files +rclone ls internxt: + +# Copy files to Internxt +rclone copy /local/path internxt:remote/path + +# Sync local directory to Internxt +rclone sync /local/path internxt:remote/path + +# Mount Internxt Drive as a local filesystem +rclone mount internxt: /path/to/mountpoint + +# Check storage usage +rclone about internxt: +``` ### Modification times and hashes -The internxt backend supports no hashes and modification times accurate to 1 second. +The Internxt backend does not support hashes. + +Modification times are read from the server but cannot be set. The backend reports `ModTimeNotSupported` precision, so modification times will not be used for sync comparisons. ### Restricted filename characters -The internxt backend replaces the [default restricted characters +The Internxt backend replaces the [default restricted characters set](/overview/#restricted-characters). {{< rem autogenerated options start" - DO NOT EDIT - instead edit fs.RegInfo in backend/internxt/internxt.go then run make backenddocs" >}} ### Advanced options -There are no Advanced options specific to internxt (In internxt object storage system). +Here are the Advanced options specific to internxt (Internxt Drive). + +#### --internxt-skip-hash-validation + +Skip hash validation when downloading files. + +By default, hash validation is disabled. Set this to false to enable validation. + +Properties: + +- Config: skip_hash_validation +- Env Var: RCLONE_INTERNXT_SKIP_HASH_VALIDATION +- Type: bool +- Default: true diff --git a/docs/content/overview.md b/docs/content/overview.md index 06dbf5a3c383b..f52fd1343c9ca 100644 --- a/docs/content/overview.md +++ b/docs/content/overview.md @@ -23,9 +23,11 @@ Here is an overview of the major features of each cloud storage system. | Box | SHA1 | R/W | Yes | No | - | - | | Citrix ShareFile | MD5 | R/W | Yes | No | - | - | | Cloudinary | MD5 | R | No | Yes | - | - | +| Drime | - | - | No | No | R/W | - | | Dropbox | DBHASH ¹ | R | Yes | No | - | - | | Enterprise File Fabric | - | R/W | Yes | No | R/W | - | | FileLu Cloud Storage | MD5 | R/W | No | Yes | R | - | +| Filen | SHA512 | R/W | Yes | No | R/W | - | | Files.com | MD5, CRC32 | DR/W | Yes | No | R | - | | FTP | - | R/W ¹⁰ | No | No | - | - | | Gofile | MD5 | DR/W | No | Yes | R | - | @@ -60,12 +62,12 @@ Here is an overview of the major features of each cloud storage system. | Quatrix by Maytech | - | R/W | No | No | - | - | | Seafile | - | - | No | No | - | - | | SFTP | MD5, SHA1 ² | DR/W | Depends | No | - | - | +| Shade | - | - | Yes | No | - | - | | Sia | - | - | No | No | - | - | | SMB | - | R/W | Yes | No | - | - | | SugarSync | - | - | No | No | - | - | | Storj | - | R | No | No | - | - | | Uloz.to | MD5, SHA256 ¹³ | - | No | Yes | - | - | -| Uptobox | - | - | No | Yes | - | - | | WebDAV | MD5, SHA1 ³ | R ⁴ | Depends | No | - | - | | Yandex Disk | MD5 | R/W | No | No | R | - | | Zoho WorkDrive | - | - | No | No | - | - | @@ -515,9 +517,11 @@ upon backend-specific capabilities. 
| Backblaze B2 | No | Yes | No | No | Yes | Yes | Yes | Yes | Yes | No | No | | Box | Yes | Yes | Yes | Yes | Yes | No | Yes | No | Yes | Yes | Yes | | Citrix ShareFile | Yes | Yes | Yes | Yes | No | No | No | No | No | No | Yes | +| Drime | Yes | Yes | Yes | Yes | No | No | Yes | Yes | No | No | Yes | | Dropbox | Yes | Yes | Yes | Yes | No | No | Yes | No | Yes | Yes | Yes | | Cloudinary | No | No | No | No | No | No | Yes | No | No | No | No | | Enterprise File Fabric | Yes | Yes | Yes | Yes | Yes | No | No | No | No | No | Yes | +| Filen | Yes | Yes | Yes | Yes | Yes | Yes | Yes | Yes | No | Yes | Yes | | Files.com | Yes | Yes | Yes | Yes | No | No | Yes | No | Yes | No | Yes | | FTP | No | No | Yes | Yes | No | No | Yes | No | No | No | Yes | | Gofile | Yes | Yes | Yes | Yes | No | No | Yes | No | Yes | Yes | Yes | @@ -541,7 +545,7 @@ upon backend-specific capabilities. | OpenDrive | Yes | Yes | Yes | Yes | No | No | No | No | No | Yes | Yes | | OpenStack Swift | Yes ¹ | Yes | No | No | No | Yes | Yes | No | No | Yes | No | | Oracle Object Storage | No | Yes | No | No | Yes | Yes | Yes | Yes | No | No | No | -| pCloud | Yes | Yes | Yes | Yes | Yes | No | No | No | Yes | Yes | Yes | +| pCloud | Yes | Yes | Yes | Yes | Yes | Yes | No | No | Yes | Yes | Yes | | PikPak | Yes | Yes | Yes | Yes | Yes | No | No | No | Yes | Yes | Yes | | Pixeldrain | Yes | No | Yes | Yes | No | No | Yes | No | Yes | Yes | Yes | | premiumize.me | Yes | No | Yes | Yes | No | No | No | No | Yes | Yes | Yes | @@ -556,7 +560,6 @@ upon backend-specific capabilities. | SugarSync | Yes | Yes | Yes | Yes | No | No | Yes | No | Yes | No | Yes | | Storj | Yes ² | Yes | Yes | No | No | Yes | Yes | No | Yes | No | No | | Uloz.to | No | No | Yes | Yes | No | No | No | No | No | No | Yes | -| Uptobox | No | Yes | Yes | Yes | No | No | No | No | No | No | No | | WebDAV | Yes | Yes | Yes | Yes | No | No | Yes ³ | No | No | Yes | Yes | | Yandex Disk | Yes | Yes | Yes | Yes | Yes | No | Yes | No | Yes | Yes | Yes | | Zoho WorkDrive | Yes | Yes | Yes | Yes | No | No | No | No | No | Yes | Yes | diff --git a/docs/content/pcloud.md b/docs/content/pcloud.md index 8f8167e34c75c..90e16b3801f64 100644 --- a/docs/content/pcloud.md +++ b/docs/content/pcloud.md @@ -173,6 +173,31 @@ So if the folder you want rclone to use your is "My Music/", then use the return id from ```rclone lsf``` command (ex. `dxxxxxxxx2`) as the `root_folder_id` variable value in the config file. +### Change notifications and mounts + +The pCloud backend supports real‑time updates for rclone mounts via change +notifications. rclone uses pCloud’s diff long‑polling API to detect changes and +will automatically refresh directory listings in the mounted filesystem when +changes occur. + +Notes and behavior: + +- Works automatically when using `rclone mount` and requires no additional + configuration. +- Notifications are directory‑scoped: when rclone detects a change, it refreshes + the affected directory so new/removed/renamed files become visible promptly. +- Updates are near real‑time. The backend uses a long‑poll with short fallback + polling intervals, so you should see changes appear quickly without manual + refreshes. + +If you want to debug or verify notifications, you can use the helper command: + +```bash +rclone test changenotify remote: +``` + +This will log incoming change notifications for the given remote. 
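+
+For example, with a plain mount, files created or renamed in the pCloud web
+interface should appear in the mounted directory shortly afterwards; running
+with `-vv` should also show the change polling in the debug log (the mount
+point below is only an example):
+
+```bash
+rclone mount remote: /mnt/pcloud -vv
+```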
+ ### Standard options diff --git a/docs/content/s3.md b/docs/content/s3.md index f16779bcc7056..7c98828845ced 100644 --- a/docs/content/s3.md +++ b/docs/content/s3.md @@ -18,6 +18,7 @@ The S3 backend can be used with a number of different providers: {{< provider name="China Mobile Ecloud Elastic Object Storage (EOS)" home="https://ecloud.10086.cn/home/product-introduction/eos/" config="/s3/#china-mobile-ecloud-eos" >}} {{< provider name="Cloudflare R2" home="https://blog.cloudflare.com/r2-open-beta/" config="/s3/#cloudflare-r2" >}} {{< provider name="Arvan Cloud Object Storage (AOS)" home="https://www.arvancloud.com/en/products/cloud-storage" config="/s3/#arvan-cloud" >}} +{{< provider name="Bizfly Cloud Simple Storage" home="https://bizflycloud.vn/" config="/s3/#bizflycloud" >}} {{< provider name="Cubbit DS3" home="https://cubbit.io/ds3-cloud" config="/s3/#Cubbit" >}} {{< provider name="DigitalOcean Spaces" home="https://www.digitalocean.com/products/object-storage/" config="/s3/#digitalocean-spaces" >}} {{< provider name="Dreamhost" home="https://www.dreamhost.com/cloud/storage/" config="/s3/#dreamhost" >}} @@ -745,6 +746,68 @@ If none of these option actually end up providing `rclone` with AWS credentials then S3 interaction will be non-authenticated (see the [anonymous access](#anonymous-access) section for more info). +#### Assume Role (Cross-Account Access) + +If you need to access S3 resources in a different AWS account, you can use IAM role assumption. +This is useful for cross-account access scenarios where you have credentials in one account +but need to access resources in another account. + +To use assume role, configure the following parameters: + +- `role_arn` - The ARN (Amazon Resource Name) of the IAM role to assume in the target account. + Format: `arn:aws:iam::ACCOUNT-ID:role/ROLE-NAME` +- `role_session_name` (optional) - A name for the assumed role session. If not specified, + rclone will generate one automatically. +- `role_session_duration` (optional) - Duration for which the assumed role credentials are valid. + If not specified, AWS default duration will be used (typically 1 hour). +- `role_external_id` (optional) - An external ID required by the role's trust policy for additional security. + This is typically used when the role is accessed by a third party. + +The assume role feature works with both direct credentials (`env_auth = false`) and environment-based +authentication (`env_auth = true`). Rclone will first authenticate using the base credentials, then +use those credentials to assume the specified role. + +Example configuration for cross-account access: + +``` +[s3-cross-account] +type = s3 +provider = AWS +env_auth = true +region = us-east-1 +role_arn = arn:aws:iam::123456789012:role/CrossAccountS3Role +role_session_name = rclone-session +role_external_id = unique-role-external-id-12345 +``` + +In this example: +- Base credentials are obtained from the environment (IAM role, credentials file, or environment variables) +- These credentials are then used to assume the role `CrossAccountS3Role` in account `123456789012` +- An external ID is provided for additional security as required by the role's trust policy + +The target role's trust policy in the destination account must allow the source account or user to assume it. 
+Example trust policy: + +```json +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Principal": { + "AWS": "arn:aws:iam::SOURCE-ACCOUNT-ID:root" + }, + "Action": "sts:AssumeRole", + "Condition": { + "StringEquals": { + "sts:ExternalID": "unique-role-external-id-12345" + } + } + } + ] +} +``` + ### S3 Permissions When using the `sync` subcommand of `rclone` the following minimum @@ -4474,6 +4537,36 @@ server_side_encryption = storage_class = ``` +### BizflyCloud {#bizflycloud} + +[Bizfly Cloud Simple Storage](https://bizflycloud.vn/simple-storage) is an +S3-compatible service with regions in Hanoi (HN) and Ho Chi Minh City (HCM). + +Use the endpoint for your region: + +- HN: `hn.ss.bfcplatform.vn` +- HCM: `hcm.ss.bfcplatform.vn` + +A minimal configuration looks like this. + +```ini +[bizfly] +type = s3 +provider = BizflyCloud +env_auth = false +access_key_id = YOUR_ACCESS_KEY +secret_access_key = YOUR_SECRET_KEY +region = HN +endpoint = hn.ss.bfcplatform.vn +location_constraint = +acl = +server_side_encryption = +storage_class = +``` + +Switch `region` and `endpoint` to `HCM` and `hcm.ss.bfcplatform.vn` for Ho Chi +Minh City. + ### Ceph [Ceph](https://ceph.com/) is an open-source, unified, distributed diff --git a/docs/content/sftp.md b/docs/content/sftp.md index be7aa96ae1a85..b747235de19ce 100644 --- a/docs/content/sftp.md +++ b/docs/content/sftp.md @@ -1186,6 +1186,12 @@ URL for HTTP CONNECT proxy Set this to a URL for an HTTP proxy which supports the HTTP CONNECT verb. +Supports the format http://user:pass@host:port, http://host:port, http://host. + +Example: + + http://myUser:myPass@proxyhostname.example.com:8000 + Properties: diff --git a/docs/content/shade.md b/docs/content/shade.md new file mode 100644 index 0000000000000..7735d7eeafd2b --- /dev/null +++ b/docs/content/shade.md @@ -0,0 +1,218 @@ +# {{< icon "fa fa-moon" >}} Shade + +This is a backend for the [Shade](https://shade.inc/) platform + +## About Shade + +[Shade](https://shade.inc/) is an AI-powered cloud NAS that makes your cloud files behave like a local drive, optimized for media and creative workflows. It provides fast, secure access with natural-language search, easy sharing, and scalable cloud storage. + + +## Accounts & Pricing + +To use this backend, you need to [create a free account](https://app.shade.inc/) on Shade. You can start with a free account and get 20GB of storage for free. + + +## Usage + +Paths are specified as `remote:path` + +Paths may be as deep as required, e.g. `remote:directory/subdirectory`. + + +## Configuration + +Here is an example of making a Shade configuration. + +First, create a [create a free account](https://app.shade.inc/) account and choose a plan. + +You will need to log in and get the `API Key` and `Drive ID` for your account from the settings section of your account and created drive respectively. + +Now run + +`rclone config` + +Follow this interactive process: + +```sh +$ rclone config +e) Edit existing remote +n) New remote +d) Delete remote +r) Rename remote +c) Copy remote +s) Set configuration password +q) Quit config +e/n/d/r/c/s/q> n + +Enter name for new remote. +name> Shade + +Option Storage. +Type of storage to configure. +Choose a number from below, or type in your own value. +[OTHER OPTIONS] +xx / Shade FS + \ (shade) +[OTHER OPTIONS] +Storage> xx + +Option drive_id. +The ID of your drive, see this in the drive settings. Individual rclone configs must be made per drive. +Enter a value. +drive_id> [YOUR_ID] + +Option api_key. 
+An API key for your account. +Enter a value. +api_key> [YOUR_API_KEY] + +Edit advanced config? +y) Yes +n) No (default) +y/n> n + +Configuration complete. +Options: +- type: shade +- drive_id: [YOUR_ID] +- api_key: [YOUR_API_KEY] +Keep this "Shade" remote? +y) Yes this is OK (default) +e) Edit this remote +d) Delete this remote +y/e/d> y +``` + +### Modification times and hashes + +Shade does not support hashes and writing mod times. + + +### Transfers + +Shade uses multipart uploads by default. This means that files will be chunked and sent up to Shade concurrently. In order to configure how many simultaneous uploads you want to use, upload the 'concurrency' option in the advanced config section. Note that this uses more memory and initiates more http requests. + +### Deleting files + +Please note that when deleting files in Shade via rclone it will delete the file instantly, instead of sending it to the trash. This means that it will not be recoverable. + + + +### Standard options + +Here are the Standard options specific to shade (Shade FS). + +#### --shade-drive-id + +The ID of your drive, see this in the drive settings. Individual rclone configs must be made per drive. + +Properties: + +- Config: drive_id +- Env Var: RCLONE_SHADE_DRIVE_ID +- Type: string +- Required: true + +#### --shade-api-key + +An API key for your account. You can find this under Settings > API Keys + +Properties: + +- Config: api_key +- Env Var: RCLONE_SHADE_API_KEY +- Type: string +- Required: true + +### Advanced options + +Here are the Advanced options specific to shade (Shade FS). + +#### --shade-endpoint + +Endpoint for the service. + +Leave blank normally. + +Properties: + +- Config: endpoint +- Env Var: RCLONE_SHADE_ENDPOINT +- Type: string +- Required: false + +#### --shade-chunk-size + +Chunk size to use for uploading. + +Any files larger than this will be uploaded in chunks of this size. + +Note that this is stored in memory per transfer, so increasing it will +increase memory usage. + +Minimum is 5MB, maximum is 5GB. + +Properties: + +- Config: chunk_size +- Env Var: RCLONE_SHADE_CHUNK_SIZE +- Type: SizeSuffix +- Default: 64Mi + +#### --shade-encoding + +The encoding for the backend. + +See the [encoding section in the overview](/overview/#encoding) for more info. + +Properties: + +- Config: encoding +- Env Var: RCLONE_SHADE_ENCODING +- Type: Encoding +- Default: Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot + +#### --shade-description + +Description of the remote. + +Properties: + +- Config: description +- Env Var: RCLONE_SHADE_DESCRIPTION +- Type: string +- Required: false + + + +## Limitations + +Note that Shade is case insensitive so you can't have a file called +"Hello.doc" and one called "hello.doc". + +Shade only supports filenames up to 255 characters in length. + +`rclone about` is not supported by the Shade backend. Backends without +this capability cannot determine free space for an rclone mount or +use policy `mfs` (most free space) as a member of an rclone union +remote. + +See [List of backends that do not support rclone about](https://rclone.org/overview/#optional-features) and [rclone about](https://rclone.org/commands/rclone_about/) + +## Backend commands + +Here are the commands specific to the shade backend. + +Run them with + + rclone backend COMMAND remote: + +The help below will explain what arguments each command takes. + +See the [backend](/commands/rclone_backend/) command for more +info on how to pass options and arguments. 
+ +These can be run on a running backend using the rc command +[backend/command](/rc/#backend-command). + + diff --git a/docs/content/sponsor.md b/docs/content/sponsor.md index f42ccd916a50a..e57a54246d65e 100644 --- a/docs/content/sponsor.md +++ b/docs/content/sponsor.md @@ -13,7 +13,7 @@ Thank you to our sponsors: -{{< sponsor src="/img/logos/rabata/txt_1_300x114.png" width="300" height="200" title="Visit our sponsor Rabata.io" link="https://rabata.io/?utm_source=banner&utm_medium=rclone&utm_content=general">}} +{{< sponsor src="/img/logos/rabata.svg" width="300" height="200" title="Visit our sponsor Rabata.io" link="https://rabata.io/?utm_source=banner&utm_medium=rclone&utm_content=general">}} {{< sponsor src="/img/logos/idrive_e2.svg" width="300" height="200" title="Visit our sponsor IDrive e2" link="https://www.idrive.com/e2/?refer=rclone">}} {{< sponsor src="/img/logos/filescom-enterprise-grade-workflows.png" width="300" height="200" title="Start Your Free Trial Today" link="https://files.com/?utm_source=rclone&utm_medium=referral&utm_campaign=banner&utm_term=rclone">}} {{< sponsor src="/img/logos/mega-s4.svg" width="300" height="200" title="MEGA S4: New S3 compatible object storage. High scale. Low cost. Free egress." link="https://mega.io/objectstorage?utm_source=rclone&utm_medium=referral&utm_campaign=rclone-mega-s4&mct=rclonepromo">}} diff --git a/docs/content/uptobox.md b/docs/content/uptobox.md deleted file mode 100644 index deb08d13d74bd..0000000000000 --- a/docs/content/uptobox.md +++ /dev/null @@ -1,179 +0,0 @@ ---- -title: "Uptobox" -description: "Rclone docs for Uptobox" -versionIntroduced: "v1.56" ---- - -# {{< icon "fa fa-archive" >}} Uptobox - -This is a Backend for Uptobox file storage service. Uptobox is closer to a -one-click hoster than a traditional cloud storage provider and therefore not -suitable for long term storage. - -Paths are specified as `remote:path` - -Paths may be as deep as required, e.g. `remote:directory/subdirectory`. - -## Configuration - -To configure an Uptobox backend you'll need your personal api token. You'll find -it in your [account settings](https://uptobox.com/my_account). - -Here is an example of how to make a remote called `remote` with the default setup. -First run: - -```console -rclone config -``` - -This will guide you through an interactive setup process: - -```text -Current remotes: - -Name Type -==== ==== -TestUptobox uptobox - -e) Edit existing remote -n) New remote -d) Delete remote -r) Rename remote -c) Copy remote -s) Set configuration password -q) Quit config -e/n/d/r/c/s/q> n -name> uptobox -Type of storage to configure. -Enter a string value. Press Enter for the default (""). -Choose a number from below, or type in your own value -[...] -37 / Uptobox - \ "uptobox" -[...] -Storage> uptobox -** See help for uptobox backend at: https://rclone.org/uptobox/ ** - -Your API Key, get it from https://uptobox.com/my_account -Enter a string value. Press Enter for the default (""). -api_key> xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx -Edit advanced config? 
(y/n) -y) Yes -n) No (default) -y/n> n -Remote config --------------------- -[uptobox] -type = uptobox -api_key = xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx --------------------- -y) Yes this is OK (default) -e) Edit this remote -d) Delete this remote -y/e/d> -``` - -Once configured you can then use `rclone` like this (replace `remote` with the -name you gave your remote): - -List directories in top level of your Uptobox - -```console -rclone lsd remote: -``` - -List all the files in your Uptobox - -```console -rclone ls remote: -``` - -To copy a local directory to an Uptobox directory called backup - -```console -rclone copy /home/source remote:backup -``` - -### Modification times and hashes - -Uptobox supports neither modified times nor checksums. All timestamps -will read as that set by `--default-time`. - -### Restricted filename characters - -In addition to the [default restricted characters set](/overview/#restricted-characters) -the following characters are also replaced: - -| Character | Value | Replacement | -| --------- |:-----:|:-----------:| -| " | 0x22 | " | -| ` | 0x41 | ` | - -Invalid UTF-8 bytes will also be [replaced](/overview/#invalid-utf8), -as they can't be used in XML strings. - - -### Standard options - -Here are the Standard options specific to uptobox (Uptobox). - -#### --uptobox-access-token - -Your access token. - -Get it from https://uptobox.com/my_account. - -Properties: - -- Config: access_token -- Env Var: RCLONE_UPTOBOX_ACCESS_TOKEN -- Type: string -- Required: false - -### Advanced options - -Here are the Advanced options specific to uptobox (Uptobox). - -#### --uptobox-private - -Set to make uploaded files private - -Properties: - -- Config: private -- Env Var: RCLONE_UPTOBOX_PRIVATE -- Type: bool -- Default: false - -#### --uptobox-encoding - -The encoding for the backend. - -See the [encoding section in the overview](/overview/#encoding) for more info. - -Properties: - -- Config: encoding -- Env Var: RCLONE_UPTOBOX_ENCODING -- Type: Encoding -- Default: Slash,LtGt,DoubleQuote,BackQuote,Del,Ctl,LeftSpace,InvalidUtf8,Dot - -#### --uptobox-description - -Description of the remote. - -Properties: - -- Config: description -- Env Var: RCLONE_UPTOBOX_DESCRIPTION -- Type: string -- Required: false - - - -## Limitations - -Uptobox will delete inactive files that have not been accessed in 60 days. - -`rclone about` is not supported by this backend an overview of used space can however -been seen in the uptobox web interface. diff --git a/docs/layouts/chrome/menu.html b/docs/layouts/chrome/menu.html index 966fc5b623dce..bb5e165c213e5 100644 --- a/docs/layouts/chrome/menu.html +++ b/docs/layouts/chrome/menu.html @@ -10,40 +10,21 @@ {{end}}
-
- Platinum Sponsor -
+
Platinum Sponso⁠r
-
- +
-
- Gold Sponsor -
+
Gold Sponso⁠r

-
- Gold Sponsor -
+
Gold Sponso⁠r

@@ -51,25 +32,19 @@ {{if .IsHome}}
-
- Silver Sponsor -
+
Silver Sponso⁠r
-
+
-
- Silver Sponsor -
+
Silver Sponso⁠r
-
+
-
- Silver Sponsor -
+
Silver Sponso⁠r

diff --git a/docs/layouts/chrome/navbar.html b/docs/layouts/chrome/navbar.html index 522e3cd7b6965..5392dfa57e4d0 100644 --- a/docs/layouts/chrome/navbar.html +++ b/docs/layouts/chrome/navbar.html @@ -66,10 +66,12 @@ Citrix ShareFile Crypt (encrypts the others) Digi Storage + Drime Dropbox Enterprise File Fabric FileLu Cloud Storage FileLu S5 (S3-Compatible) + Filen Files.com FTP Gofile @@ -107,11 +109,11 @@ Seafile SFTP Sia + Shade SMB / CIFS Storj SugarSync Uloz.to - Uptobox Union (merge backends) WebDAV Yandex Disk diff --git a/fs/config/config.go b/fs/config/config.go index 617780c9c09e2..d01306534fb52 100644 --- a/fs/config/config.go +++ b/fs/config/config.go @@ -764,7 +764,7 @@ func SetCacheDir(path string) (err error) { // // To override the default we therefore set environment variable TMPDIR // on Unix systems, and both TMP and TEMP on Windows (they are almost exclusively -// aliases for the same path, and programs may refer to to either of them). +// aliases for the same path, and programs may refer to either of them). // This should make all libraries and forked processes use the same. func SetTempDir(path string) (err error) { var tempDir string diff --git a/fs/config/configfile/configfile_test.go b/fs/config/configfile/configfile_test.go index 8ec7cd5189b32..1abaa58c74db7 100644 --- a/fs/config/configfile/configfile_test.go +++ b/fs/config/configfile/configfile_test.go @@ -2,6 +2,7 @@ package configfile import ( "fmt" + "io" "os" "path/filepath" "runtime" @@ -362,3 +363,39 @@ func TestConfigFileSaveSymlinkAbsolute(t *testing.T) { testSymlink(t, link, target, resolvedTarget) }) } + +type pipedInput struct { + io.Reader +} + +func (p *pipedInput) Read(b []byte) (int, error) { + return p.Reader.Read(b) +} + +func (*pipedInput) Seek(int64, int) (int64, error) { + return 0, fmt.Errorf("Seek not supported") +} + +func TestPipedConfig(t *testing.T) { + t.Run("DoesNotSupportSeeking", func(t *testing.T) { + r := &pipedInput{strings.NewReader("")} + _, err := r.Seek(0, io.SeekStart) + require.Error(t, err) + }) + + t.Run("IsSupported", func(t *testing.T) { + r := &pipedInput{strings.NewReader(configData)} + _, err := config.Decrypt(r) + require.NoError(t, err) + }) + + t.Run("PlainTextConfigIsNotConsumedByCryptCheck", func(t *testing.T) { + in := &pipedInput{strings.NewReader(configData)} + + r, _ := config.Decrypt(in) + got, err := io.ReadAll(r) + require.NoError(t, err) + + assert.Equal(t, configData, string(got)) + }) +} diff --git a/fs/config/configstruct/configstruct.go b/fs/config/configstruct/configstruct.go index ba421f7f24141..bb72ae35989b8 100644 --- a/fs/config/configstruct/configstruct.go +++ b/fs/config/configstruct/configstruct.go @@ -31,7 +31,7 @@ func camelToSnake(in string) string { // // Builtin types are expected to be encoding as their natural // stringificatons as produced by fmt.Sprint except for []string which -// is expected to be encoded a a CSV with empty array encoded as "". +// is expected to be encoded as a CSV with empty array encoded as "". // // Any other types are expected to be encoded by their String() // methods and decoded by their `Set(s string) error` methods. @@ -93,7 +93,7 @@ func StringToInterface(def any, in string) (newValue any, err error) { // // Builtin types are expected to be encoding as their natural // stringificatons as produced by fmt.Sprint except for []string which -// is expected to be encoded a a CSV with empty array encoded as "". +// is expected to be encoded as a CSV with empty array encoded as "". 
// // Any other types are expected to be encoded by their String() // methods and decoded by their `Set(s string) error` methods. diff --git a/fs/config/crypt.go b/fs/config/crypt.go index 4b356580cb322..c25595900fc68 100644 --- a/fs/config/crypt.go +++ b/fs/config/crypt.go @@ -77,8 +77,9 @@ func Decrypt(b io.ReadSeeker) (io.Reader, error) { if strings.HasPrefix(l, "RCLONE_ENCRYPT_V") { return nil, errors.New("unsupported configuration encryption - update rclone for support") } + // Restore non-seekable plain-text stream to its original state if _, err := b.Seek(0, io.SeekStart); err != nil { - return nil, err + return io.MultiReader(strings.NewReader(l+"\n"), r), nil } return b, nil } @@ -179,7 +180,7 @@ func Decrypt(b io.ReadSeeker) (io.Reader, error) { // GetPasswordCommand gets the password using the --password-command setting // -// If the the --password-command flag was not in use it returns "", nil +// If the --password-command flag was not in use it returns "", nil func GetPasswordCommand(ctx context.Context) (pass string, err error) { ci := fs.GetConfig(ctx) if len(ci.PasswordCommand) == 0 { diff --git a/fs/hash/hash.go b/fs/hash/hash.go index 972fea186e598..634548a967621 100644 --- a/fs/hash/hash.go +++ b/fs/hash/hash.go @@ -225,7 +225,7 @@ func fromTypes(set Set) (map[Type]hash.Hash, error) { // single multiwriter, where one write will update all // the hashers. func toMultiWriter(h map[Type]hash.Hash) io.Writer { - // Convert to to slice + // Convert to slice var w = make([]io.Writer, 0, len(h)) for _, v := range h { w = append(w, v) diff --git a/fs/log/log.go b/fs/log/log.go index dd6f5d2749e55..76bf2b72d6b48 100644 --- a/fs/log/log.go +++ b/fs/log/log.go @@ -79,7 +79,7 @@ type Options struct { File string `config:"log_file"` // Log everything to this file MaxSize fs.SizeSuffix `config:"log_file_max_size"` // Max size of log file MaxBackups int `config:"log_file_max_backups"` // Max backups of log file - MaxAge fs.Duration `config:"log_file_max_age"` // Max age of of log file + MaxAge fs.Duration `config:"log_file_max_age"` // Max age of log file Compress bool `config:"log_file_compress"` // Set to compress log file Format logFormat `config:"log_format"` // Comma separated list of log format options UseSyslog bool `config:"syslog"` // Use Syslog for logging @@ -209,7 +209,7 @@ func InitLogging() { // Log file output if Opt.File != "" { var w io.Writer - if Opt.MaxSize == 0 { + if Opt.MaxSize < 0 { // No log rotation - just open the file as normal // We'll capture tracebacks like this too. f, err := os.OpenFile(Opt.File, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0640) diff --git a/fs/log/slog.go b/fs/log/slog.go index 0efbcbee66bb2..36592c87f7100 100644 --- a/fs/log/slog.go +++ b/fs/log/slog.go @@ -310,6 +310,10 @@ func (h *OutputHandler) jsonLog(ctx context.Context, buf *bytes.Buffer, r slog.R r.AddAttrs( slog.String("source", getCaller(2)), ) + // Add PID if requested + if h.format&logFormatPid != 0 { + r.AddAttrs(slog.Int("pid", os.Getpid())) + } h.mu.Lock() err = h.jsonHandler.Handle(ctx, r) if err == nil { diff --git a/fs/log/slog_test.go b/fs/log/slog_test.go index 5cfa521311bae..e0389e3e4f61e 100644 --- a/fs/log/slog_test.go +++ b/fs/log/slog_test.go @@ -198,6 +198,17 @@ func TestAddOutputUseJSONLog(t *testing.T) { assert.Equal(t, "2020/01/02 03:04:05 INFO : world\n", extraText) } +// Test JSON log includes PID when logFormatPid is set. 
+func TestJSONLogWithPid(t *testing.T) { + buf := &bytes.Buffer{} + h := NewOutputHandler(buf, nil, logFormatJSON|logFormatPid) + + r := slog.NewRecord(t0, slog.LevelInfo, "hello", 0) + require.NoError(t, h.Handle(context.Background(), r)) + output := buf.String() + assert.Contains(t, output, fmt.Sprintf(`"pid":%d`, os.Getpid())) +} + // Test WithAttrs and WithGroup return new handlers with same settings. func TestWithAttrsAndGroup(t *testing.T) { buf := &bytes.Buffer{} diff --git a/fs/log/systemd_unix.go b/fs/log/systemd_unix.go index fa29b203c1da2..8ea34f1e89f28 100644 --- a/fs/log/systemd_unix.go +++ b/fs/log/systemd_unix.go @@ -16,7 +16,7 @@ func startSystemdLog(handler *OutputHandler) bool { handler.clearFormatFlags(logFormatDate | logFormatTime | logFormatMicroseconds | logFormatUTC | logFormatLongFile | logFormatShortFile | logFormatPid) handler.setFormatFlags(logFormatNoLevel) handler.SetOutput(func(level slog.Level, text string) { - _ = journal.Print(slogLevelToSystemdPriority(level), "%-6s: %s\n", level, text) + _ = journal.Print(slogLevelToSystemdPriority(level), "%-6s: %s", level, text) }) return true } diff --git a/fs/operations/rc.go b/fs/operations/rc.go index 644a947e9f816..ce92a0024f2a7 100644 --- a/fs/operations/rc.go +++ b/fs/operations/rc.go @@ -921,6 +921,18 @@ See the [hashsum](/commands/rclone_hashsum/) command for more information on the }) } +// Parse download, base64 and hashType parameters +func parseHashParameters(in rc.Params) (download bool, base64 bool, ht hash.Type, err error) { + download, _ = in.GetBool("download") + base64, _ = in.GetBool("base64") + hashType, err := in.GetString("hashType") + if err != nil { + return + } + err = ht.Set(hashType) + return +} + // Hashsum a directory func rcHashsum(ctx context.Context, in rc.Params) (out rc.Params, err error) { ctx, f, err := rc.GetFsNamedFileOK(ctx, in, "fs") @@ -928,16 +940,9 @@ func rcHashsum(ctx context.Context, in rc.Params) (out rc.Params, err error) { return nil, err } - download, _ := in.GetBool("download") - base64, _ := in.GetBool("base64") - hashType, err := in.GetString("hashType") - if err != nil { - return nil, fmt.Errorf("%s\n%w", hash.HelpString(0), err) - } - var ht hash.Type - err = ht.Set(hashType) + download, base64, ht, err := parseHashParameters(in) if err != nil { - return nil, fmt.Errorf("%s\n%w", hash.HelpString(0), err) + return out, err } hashes := []string{} @@ -948,3 +953,64 @@ func rcHashsum(ctx context.Context, in rc.Params) (out rc.Params, err error) { } return out, err } + +func init() { + rc.Add(rc.Call{ + Path: "operations/hashsumfile", + AuthRequired: true, + Fn: rcHashsumFile, + Title: "Produces a hash for a single file.", + Help: `Produces a hash for a single file using the hash named. + +This takes the following parameters: + +- fs - a remote name string e.g. "drive:" +- remote - a path within that remote e.g. "file.txt" +- hashType - type of hash to be used +- download - check by downloading rather than with hash (boolean) +- base64 - output the hashes in base64 rather than hex (boolean) + +If you supply the download flag, it will download the data from the +remote and create the hash on the fly. This can be useful for remotes +that don't support the given hash or if you really want to read all +the data. 
+ +Returns: + +- hash - hash for the file +- hashType - type of hash used + +Example: + + $ rclone rc --loopback operations/hashsumfile fs=/ remote=/bin/bash hashType=MD5 download=true base64=true + { + "hashType": "md5", + "hash": "MDMw-fG2YXs7Uz5Nz-H68A==" + } + +See the [hashsum](/commands/rclone_hashsum/) command for more information on the above. +`, + }) +} + +// Hashsum a file +func rcHashsumFile(ctx context.Context, in rc.Params) (out rc.Params, err error) { + f, remote, err := rc.GetFsAndRemote(ctx, in) + if err != nil { + return nil, err + } + download, base64, ht, err := parseHashParameters(in) + if err != nil { + return out, err + } + o, err := f.NewObject(ctx, remote) + if err != nil { + return nil, err + } + sum, err := HashSum(ctx, ht, base64, download, o) + out = rc.Params{ + "hashType": ht.String(), + "hash": sum, + } + return out, err +} diff --git a/fs/operations/rc_test.go b/fs/operations/rc_test.go index 2d8d0f2afb8b5..04357fb72418b 100644 --- a/fs/operations/rc_test.go +++ b/fs/operations/rc_test.go @@ -561,7 +561,7 @@ func TestUploadFile(t *testing.T) { assert.NoError(t, currentFile.Close()) }() - formReader, contentType, _, err := rest.MultipartUpload(ctx, currentFile, url.Values{}, "file", testFileName) + formReader, contentType, _, err := rest.MultipartUpload(ctx, currentFile, url.Values{}, "file", testFileName, "application/octet-stream") require.NoError(t, err) httpReq := httptest.NewRequest("POST", "/", formReader) @@ -587,7 +587,7 @@ func TestUploadFile(t *testing.T) { assert.NoError(t, currentFile2.Close()) }() - formReader, contentType, _, err = rest.MultipartUpload(ctx, currentFile2, url.Values{}, "file", testFileName) + formReader, contentType, _, err = rest.MultipartUpload(ctx, currentFile2, url.Values{}, "file", testFileName, "application/octet-stream") require.NoError(t, err) httpReq = httptest.NewRequest("POST", "/", formReader) @@ -840,7 +840,7 @@ func TestRcHashsum(t *testing.T) { } // operations/hashsum: hashsum a single file -func TestRcHashsumFile(t *testing.T) { +func TestRcHashsumSingleFile(t *testing.T) { ctx := context.Background() r, call := rcNewRun(t, "operations/hashsum") r.Mkdir(ctx, r.Fremote) @@ -866,3 +866,27 @@ func TestRcHashsumFile(t *testing.T) { assert.Equal(t, "md5", out["hashType"]) assert.Equal(t, []string{"0ef726ce9b1a7692357ff70dd321d595 hashsum-file1"}, out["hashsum"]) } + +// operations/hashsumfile: hashsum a single file +func TestRcHashsumFile(t *testing.T) { + ctx := context.Background() + r, call := rcNewRun(t, "operations/hashsumfile") + r.Mkdir(ctx, r.Fremote) + + file1Contents := "file1 contents" + file1 := r.WriteBoth(ctx, "hashsumfile-file1", file1Contents, t1) + r.CheckLocalItems(t, file1) + r.CheckRemoteItems(t, file1) + + in := rc.Params{ + "fs": r.FremoteName, + "remote": file1.Path, + "hashType": "MD5", + "download": true, + } + + out, err := call.Fn(ctx, in) + require.NoError(t, err) + assert.Equal(t, "md5", out["hashType"]) + assert.Equal(t, "0ef726ce9b1a7692357ff70dd321d595", out["hash"]) +} diff --git a/fs/sync/sync_test.go b/fs/sync/sync_test.go index 6fe57d824312a..de00e15444eaf 100644 --- a/fs/sync/sync_test.go +++ b/fs/sync/sync_test.go @@ -806,7 +806,7 @@ func TestSyncBasedOnCheckSum(t *testing.T) { // Create a file and sync it. Change the last modified date and the // file contents but not the size. If we're only doing sync by size -// only, we expect nothing to to be transferred on the second sync. +// only, we expect nothing to be transferred on the second sync. 
func TestSyncSizeOnly(t *testing.T) { ctx := context.Background() ctx, ci := fs.AddConfig(ctx) @@ -843,7 +843,7 @@ func TestSyncSizeOnly(t *testing.T) { } // Create a file and sync it. Keep the last modified date but change -// the size. With --ignore-size we expect nothing to to be +// the size. With --ignore-size we expect nothing to be // transferred on the second sync. func TestSyncIgnoreSize(t *testing.T) { ctx := context.Background() @@ -1301,6 +1301,7 @@ func TestSyncAfterRemovingAFileAndAddingAFileSubDirWithErrors(t *testing.T) { err := Sync(ctx, r.Fremote, r.Flocal, false) assert.Equal(t, fs.ErrorNotDeleting, err) testLoggerVsLsf(ctx, r.Fremote, r.Flocal, operations.GetLoggerOpt(ctx).JSON, t) + accounting.GlobalStats().ResetCounters() r.CheckLocalListing( t, diff --git a/fs/sync/sync_transform_test.go b/fs/sync/sync_transform_test.go index d435945097e9d..930386a85939d 100644 --- a/fs/sync/sync_transform_test.go +++ b/fs/sync/sync_transform_test.go @@ -13,6 +13,7 @@ import ( _ "github.com/rclone/rclone/backend/all" "github.com/rclone/rclone/fs" + "github.com/rclone/rclone/fs/accounting" "github.com/rclone/rclone/fs/filter" "github.com/rclone/rclone/fs/operations" "github.com/rclone/rclone/fs/walk" @@ -507,6 +508,7 @@ func TestError(t *testing.T) { err = Sync(ctx, r.Fremote, r.Flocal, true) // testLoggerVsLsf(ctx, r.Fremote, r.Flocal, operations.GetLoggerOpt(ctx).JSON, t) assert.Error(t, err) + accounting.GlobalStats().ResetCounters() r.CheckLocalListing(t, []fstest.Item{file1}, []string{"toe", "toe/toe"}) r.CheckRemoteListing(t, []fstest.Item{file1}, []string{"toe", "toe/toe"}) diff --git a/fs/types.go b/fs/types.go index dc3cc0cef8392..aed6926b6cb8b 100644 --- a/fs/types.go +++ b/fs/types.go @@ -328,7 +328,7 @@ type Flagger interface { // satisfy as non-pointers // // These are from pflag.Value and need to be tested against -// non-pointer value due the the way the backend flags are inserted +// non-pointer value due to the way the backend flags are inserted // into the flags. type FlaggerNP interface { String() string diff --git a/fstest/fstests/fstests.go b/fstest/fstests/fstests.go index 5e1105cfc61ef..98f765edca850 100644 --- a/fstest/fstests/fstests.go +++ b/fstest/fstests/fstests.go @@ -316,6 +316,7 @@ type Opt struct { SkipDirectoryCheckWrap bool // if set skip DirectoryCheckWrap SkipInvalidUTF8 bool // if set skip invalid UTF-8 checks SkipLeadingDot bool // if set skip leading dot checks + SkipTrailingDot bool // if set skip trailing dot checks QuickTestOK bool // if set, run this test with make quicktest } @@ -368,7 +369,7 @@ func Run(t *testing.T, opt *Opt) { } file1Contents string file1MimeType = "text/csv" - file1Metadata = fs.Metadata{"rclone-test": "potato"} + file1Metadata = fs.Metadata{"rclonetest": "potato"} file2 = fstest.Item{ ModTime: fstest.Time("2001-02-03T04:05:10.123123123Z"), Path: `hello? sausage/êé/Hello, 世界/ " ' @ < > & ? 
+ ≠/z.txt`, @@ -701,6 +702,10 @@ func Run(t *testing.T, opt *Opt) { if opt.SkipLeadingDot && test.name == "leading dot" { t.Skip("Skipping " + test.name) } + if opt.SkipTrailingDot && test.name == "trailing dot" { + t.Skip("Skipping " + test.name) + } + // turn raw strings into Standard encoding fileName := encoder.Standard.Encode(test.path) dirName := fileName @@ -1273,10 +1278,14 @@ func Run(t *testing.T, opt *Opt) { assert.Equal(t, file2Copy.Path, dst.Remote()) // check that mutating dst does not mutate src - err = dst.SetModTime(ctx, fstest.Time("2004-03-03T04:05:06.499999999Z")) - if err != fs.ErrorCantSetModTimeWithoutDelete && err != fs.ErrorCantSetModTime { - assert.NoError(t, err) - assert.False(t, src.ModTime(ctx).Equal(dst.ModTime(ctx)), "mutating dst should not mutate src -- is it Copying by pointer?") + if !strings.Contains(fs.ConfigStringFull(f), "copy_is_hardlink") { + err = dst.SetModTime(ctx, fstest.Time("2004-03-03T04:05:06.499999999Z")) + if err != fs.ErrorCantSetModTimeWithoutDelete && err != fs.ErrorCantSetModTime { + assert.NoError(t, err) + // Re-read the source and check its modtime + src = fstest.NewObject(ctx, t, f, src.Remote()) + assert.False(t, src.ModTime(ctx).Equal(dst.ModTime(ctx)), "mutating dst should not mutate src -- is it Copying by pointer?") + } } // Delete copy diff --git a/fstest/test_all/config.yaml b/fstest/test_all/config.yaml index aea38b15b0157..728f73a37c0a3 100644 --- a/fstest/test_all/config.yaml +++ b/fstest/test_all/config.yaml @@ -164,6 +164,9 @@ backends: - backend: "gofile" remote: "TestGoFile:" fastlist: true + - backend: "filen" + remote: "TestFilen:" + fastlist: false - backend: "filescom" remote: "TestFilesCom:" fastlist: false @@ -624,11 +627,6 @@ backends: - TestSyncUTFNorm ignoretests: - cmd/gitannex - # - backend: "uptobox" - # remote: "TestUptobox:" - # fastlist: false - # ignore: - # - TestRWFileHandleWriteNoWrite - backend: "oracleobjectstorage" remote: "TestOracleObjectStorage:" fastlist: true @@ -662,6 +660,10 @@ backends: ignoretests: - cmd/bisync - cmd/gitannex + - backend: "shade" + remote: "TestShade:" + fastlist: false + - backend: "archive" remote: "TestArchive:" fastlist: false @@ -676,3 +678,9 @@ backends: - backend: "internxt" remote: "TestInternxt:" fastlist: false + - backend: "drime" + remote: "TestDrime:" + ignoretests: + # The TestBisyncRemoteLocal/check_access_filters tests fail due to duplicated objects + - cmd/bisync + fastlist: false diff --git a/go.mod b/go.mod index c0680a78f68f6..ea67b1fe38dd3 100644 --- a/go.mod +++ b/go.mod @@ -11,10 +11,10 @@ require ( github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.3 github.com/Azure/azure-sdk-for-go/sdk/storage/azfile v1.5.3 github.com/Azure/go-ntlmssp v0.0.2-0.20251110135918-10b7b7e7cd26 + github.com/FilenCloudDienste/filen-sdk-go v0.0.34 github.com/Files-com/files-sdk-go/v3 v3.2.264 github.com/Max-Sum/base32768 v0.0.0-20230304063302-18e6ce5945fd github.com/a1ex3/zstd-seekable-format-go/pkg v0.10.0 - github.com/StarHack/go-internxt-drive v0.0.0-20250506081634-548256aeb125 github.com/a8m/tree v0.0.0-20240104212747-2c8764a5f17e github.com/aalpar/deheap v0.0.0-20210914013432-0cc84d79dec3 github.com/abbot/go-http-auth v0.4.0 @@ -26,6 +26,7 @@ require ( github.com/aws/aws-sdk-go-v2/credentials v1.18.21 github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.20.4 github.com/aws/aws-sdk-go-v2/service/s3 v1.90.0 + github.com/aws/aws-sdk-go-v2/service/sts v1.39.1 github.com/aws/smithy-go v1.23.2 github.com/buengese/sgzip v0.1.1 github.com/cloudinary/cloudinary-go/v2 
v2.13.0 @@ -41,10 +42,12 @@ require ( github.com/go-chi/chi/v5 v5.2.3 github.com/go-darwin/apfs v0.0.0-20211011131704-f84b94dbf348 github.com/go-git/go-billy/v5 v5.6.2 + github.com/golang-jwt/jwt/v5 v5.3.0 github.com/google/uuid v1.6.0 github.com/hanwen/go-fuse/v2 v2.9.0 github.com/henrybear327/Proton-API-Bridge v1.0.0 github.com/henrybear327/go-proton-api v1.0.0 + github.com/internxt/rclone-adapter v0.0.0-20260120161514-a27b77ef4431 github.com/jcmturner/gokrb5/v8 v8.4.4 github.com/jlaffaye/ftp v0.2.1-0.20240918233326-1b970516f5d3 github.com/josephspurrier/goversioninfo v1.5.0 @@ -78,6 +81,7 @@ require ( github.com/spf13/pflag v1.0.10 github.com/stretchr/testify v1.11.1 github.com/t3rm1n4l/go-mega v0.0.0-20251031123324-a804aaa87491 + github.com/tyler-smith/go-bip39 v1.1.0 github.com/unknwon/goconfig v1.0.0 github.com/willscott/go-nfs v0.0.3 github.com/winfsp/cgofuse v1.6.1-0.20250813110601-7d90b0992471 @@ -134,7 +138,6 @@ require ( github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.13 // indirect github.com/aws/aws-sdk-go-v2/service/sso v1.30.1 // indirect github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.5 // indirect - github.com/aws/aws-sdk-go-v2/service/sts v1.39.1 // indirect github.com/bahlo/generic-list-go v0.2.0 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/bodgit/plumbing v1.3.0 // indirect @@ -155,6 +158,8 @@ require ( github.com/creasty/defaults v1.8.0 // indirect github.com/cronokirby/saferith v0.33.0 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect + github.com/disintegration/imaging v1.6.2 // indirect + github.com/dromara/dongle v1.0.1 // indirect github.com/dsnet/compress v0.0.2-0.20230904184137-39efe44ab707 // indirect github.com/dustin/go-humanize v1.0.1 // indirect github.com/ebitengine/purego v0.9.1 // indirect @@ -177,7 +182,6 @@ require ( github.com/go-viper/mapstructure/v2 v2.4.0 // indirect github.com/gofrs/flock v0.13.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang-jwt/jwt/v5 v5.3.0 // indirect github.com/google/btree v1.1.3 // indirect github.com/google/s2a-go v0.1.9 // indirect github.com/googleapis/enterprise-certificate-proxy v0.3.7 // indirect @@ -255,13 +259,14 @@ require ( go.yaml.in/yaml/v2 v2.4.3 // indirect go4.org v0.0.0-20230225012048-214862532bf5 // indirect golang.org/x/exp v0.0.0-20251023183803-a4bb9ffd2546 // indirect + golang.org/x/image v0.32.0 // indirect + golang.org/x/mod v0.29.0 // indirect golang.org/x/tools v0.38.0 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20251103181224-f26f9409b101 // indirect google.golang.org/grpc v1.76.0 // indirect google.golang.org/protobuf v1.36.10 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect moul.io/http2curl/v2 v2.3.0 // indirect - sigs.k8s.io/yaml v1.6.0 // indirect storj.io/common v0.0.0-20251107171817-6221ae45072c // indirect storj.io/drpc v0.0.35-0.20250513201419-f7819ea69b55 // indirect storj.io/eventkit v0.0.0-20250410172343-61f26d3de156 // indirect @@ -272,7 +277,7 @@ require ( require ( github.com/IBM/go-sdk-core/v5 v5.18.5 github.com/Microsoft/go-winio v0.6.1 // indirect - github.com/ProtonMail/go-crypto v1.2.0 + github.com/ProtonMail/go-crypto v1.3.0 github.com/golang-jwt/jwt/v4 v4.5.2 github.com/pkg/xattr v0.4.12 github.com/pquerna/otp v1.5.0 diff --git a/go.sum b/go.sum index 0ee8ace4217bf..94d7b2a2a8431 100644 --- a/go.sum +++ b/go.sum @@ -61,26 +61,27 @@ github.com/AzureAD/microsoft-authentication-library-for-go v1.6.0 h1:XRzhVemXdgv 
github.com/AzureAD/microsoft-authentication-library-for-go v1.6.0/go.mod h1:HKpQxkWaGLJ+D/5H8QRpyQXA1eKjxkFlOMwck5+33Jk= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/FilenCloudDienste/filen-sdk-go v0.0.34 h1:Fd/wagh/Qn35p3PkCUYubmaELATQlYGC9pxpJ9TkHUE= +github.com/FilenCloudDienste/filen-sdk-go v0.0.34/go.mod h1:XkI1Iq30/tU8vk4Zd1cKr2cCTiFqBEf0ZfG4+KKUBrY= github.com/Files-com/files-sdk-go/v3 v3.2.264 h1:lMHTplAYI9FtmCo/QOcpRxmPA5REVAct1r2riQmDQKw= github.com/Files-com/files-sdk-go/v3 v3.2.264/go.mod h1:wGqkOzRu/ClJibvDgcfuJNAqI2nLhe8g91tPlDKRCdE= -github.com/IBM/go-sdk-core/v5 v5.21.0 h1:DUnYhvC4SoC8T84rx5omnhY3+xcQg/Whyoa3mDPIMkk= -github.com/IBM/go-sdk-core/v5 v5.21.0/go.mod h1:Q3BYO6iDA2zweQPDGbNTtqft5tDcEpm6RTuqMlPcvbw= +github.com/IBM/go-sdk-core/v5 v5.18.5 h1:g0JRl3sYXJczB/yuDlrN6x22LJ6jIxhp0Sa4ARNW60c= +github.com/IBM/go-sdk-core/v5 v5.18.5/go.mod h1:KonTFRR+8ZSgw5cxBSYo6E4WZoY1+7n1kfHM82VcjFU= github.com/Masterminds/semver/v3 v3.2.0 h1:3MEsd0SM6jqZojhjLWWeBY+Kcjy9i6MQAeY7YgDP83g= github.com/Masterminds/semver/v3 v3.2.0/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ= github.com/Max-Sum/base32768 v0.0.0-20230304063302-18e6ce5945fd h1:nzE1YQBdx1bq9IlZinHa+HVffy+NmVRoKr+wHN8fpLE= github.com/Max-Sum/base32768 v0.0.0-20230304063302-18e6ce5945fd/go.mod h1:C8yoIfvESpM3GD07OCHU7fqI7lhwyZ2Td1rbNbTAhnc= github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY= -github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= -github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= +github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= +github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= github.com/ProtonMail/bcrypt v0.0.0-20210511135022-227b4adcab57/go.mod h1:HecWFHognK8GfRDGnFQbW/LiV7A3MX3gZVs45vk5h8I= github.com/ProtonMail/bcrypt v0.0.0-20211005172633-e235017c1baf h1:yc9daCCYUefEs69zUkSzubzjBbL+cmOXgnmt9Fyd9ug= github.com/ProtonMail/bcrypt v0.0.0-20211005172633-e235017c1baf/go.mod h1:o0ESU9p83twszAU8LBeJKFAAMX14tISa0yk4Oo5TOqo= github.com/ProtonMail/gluon v0.17.1-0.20230724134000-308be39be96e h1:lCsqUUACrcMC83lg5rTo9Y0PnPItE61JSfvMyIcANwk= github.com/ProtonMail/gluon v0.17.1-0.20230724134000-308be39be96e/go.mod h1:Og5/Dz1MiGpCJn51XujZwxiLG7WzvvjE5PRpZBQmAHo= github.com/ProtonMail/go-crypto v0.0.0-20230321155629-9a39f2531310/go.mod h1:8TI4H3IbrackdNgv+92dI+rhpCaLqM0IfpgCgenFvRE= -github.com/ProtonMail/go-crypto v0.0.0-20230717121422-5aa5874ade95/go.mod h1:EjAoLdwvbIOoOQr3ihjnSoLZRtE8azugULFRteWMNc0= -github.com/ProtonMail/go-crypto v1.2.0 h1:+PhXXn4SPGd+qk76TlEePBfOfivE0zkWFenhGhFLzWs= -github.com/ProtonMail/go-crypto v1.2.0/go.mod h1:9whxjD8Rbs29b4XWbB8irEcE8KHMqaR2e7GWU1R+/PE= +github.com/ProtonMail/go-crypto v1.3.0 h1:ILq8+Sf5If5DCpHQp4PbZdS1J7HDFRXz/+xKBiRGFrw= +github.com/ProtonMail/go-crypto v1.3.0/go.mod h1:9whxjD8Rbs29b4XWbB8irEcE8KHMqaR2e7GWU1R+/PE= github.com/ProtonMail/go-mime v0.0.0-20230322103455-7d82a3887f2f h1:tCbYj7/299ekTTXpdwKYF8eBlsYsDVoggDAuAjoK66k= github.com/ProtonMail/go-mime v0.0.0-20230322103455-7d82a3887f2f/go.mod h1:gcr0kNtGBqin9zDW9GOHcVntrwnjrK+qdJ06mWYBybw= github.com/ProtonMail/go-srp v0.0.7 h1:Sos3Qk+th4tQR64vsxGIxYpN3rdnG9Wf9K4ZloC1JrI= @@ -93,20 +94,6 @@ github.com/STARRY-S/zip v0.2.3 
h1:luE4dMvRPDOWQdeDdUxUoZkzUIpTccdKdhHHsQJ1fm4= github.com/STARRY-S/zip v0.2.3/go.mod h1:lqJ9JdeRipyOQJrYSOtpNAiaesFO6zVDsE8GIGFaoSk= github.com/a1ex3/zstd-seekable-format-go/pkg v0.10.0 h1:iLDOF0rdGTrol/q8OfPIIs5kLD8XvA2q75o6Uq/tgak= github.com/a1ex3/zstd-seekable-format-go/pkg v0.10.0/go.mod h1:DrEWcQJjz7t5iF2duaiyhg4jyoF0kxOD6LtECNGkZ/Q= -github.com/StarHack/go-internxt-drive v0.0.0-20250505204940-afd5267fe57f h1:719s3rc1mxCQfFn5B6l40lwGaewlA2qFvhJ8VzHXVK4= -github.com/StarHack/go-internxt-drive v0.0.0-20250505204940-afd5267fe57f/go.mod h1:YmT8WhRhk9JTWrGJyuXsZdRU+poYZZIND5qXL7FZ2lQ= -github.com/StarHack/go-internxt-drive v0.0.0-20250505214629-764d0ec1512a h1:GY7UVDWm9vzv50icXw9MoLBOVOhaJKZJGKW4cQZDUdQ= -github.com/StarHack/go-internxt-drive v0.0.0-20250505214629-764d0ec1512a/go.mod h1:YmT8WhRhk9JTWrGJyuXsZdRU+poYZZIND5qXL7FZ2lQ= -github.com/StarHack/go-internxt-drive v0.0.0-20250505215717-25bd7e9f0180 h1:d7Jl3LJANlw7rviAECdEt5NrfaY89ziPjuacL2TGKQ8= -github.com/StarHack/go-internxt-drive v0.0.0-20250505215717-25bd7e9f0180/go.mod h1:YmT8WhRhk9JTWrGJyuXsZdRU+poYZZIND5qXL7FZ2lQ= -github.com/StarHack/go-internxt-drive v0.0.0-20250505220824-7a00a541234d h1:9T+fVJ6NqsX9c7fPuuIC3qV9xXX4R3YmIJyofqeFSBE= -github.com/StarHack/go-internxt-drive v0.0.0-20250505220824-7a00a541234d/go.mod h1:YmT8WhRhk9JTWrGJyuXsZdRU+poYZZIND5qXL7FZ2lQ= -github.com/StarHack/go-internxt-drive v0.0.0-20250505223234-6fd1f77d7d00 h1:sfT8QZF4yMUUwmgIfbiEGzmuS/SHvMc+tvRWasujf40= -github.com/StarHack/go-internxt-drive v0.0.0-20250505223234-6fd1f77d7d00/go.mod h1:YmT8WhRhk9JTWrGJyuXsZdRU+poYZZIND5qXL7FZ2lQ= -github.com/StarHack/go-internxt-drive v0.0.0-20250506081342-16852f088f5f h1:PJBZVoUwqPQZoV12s/TdMoRcyRfaCJwTbViyKS9Tgxs= -github.com/StarHack/go-internxt-drive v0.0.0-20250506081342-16852f088f5f/go.mod h1:YmT8WhRhk9JTWrGJyuXsZdRU+poYZZIND5qXL7FZ2lQ= -github.com/StarHack/go-internxt-drive v0.0.0-20250506081634-548256aeb125 h1:g9IlQWO9WCoy8DbJaee6hzR9ajdKVpTpE9vFOOWkk08= -github.com/StarHack/go-internxt-drive v0.0.0-20250506081634-548256aeb125/go.mod h1:YmT8WhRhk9JTWrGJyuXsZdRU+poYZZIND5qXL7FZ2lQ= github.com/a8m/tree v0.0.0-20240104212747-2c8764a5f17e h1:KMVieI1/Ub++GYfnhyFPoGE3g5TUiG4srE3TMGr5nM4= github.com/a8m/tree v0.0.0-20240104212747-2c8764a5f17e/go.mod h1:j5astEcUkZQX8lK+KKlQ3NRQ50f4EE8ZjyZpCz3mrH4= github.com/aalpar/deheap v0.0.0-20210914013432-0cc84d79dec3 h1:hhdWprfSpFbN7lz3W1gM40vOgvSh1WCSMxYD6gGB4Hs= @@ -211,7 +198,6 @@ github.com/clipperhouse/stringish v0.1.1/go.mod h1:v/WhFtE1q0ovMta2+m+UbpZ+2/HEX github.com/clipperhouse/uax29/v2 v2.3.0 h1:SNdx9DVUqMoBuBoW3iLOj4FQv3dN5mDtuqwuhIGpJy4= github.com/clipperhouse/uax29/v2 v2.3.0/go.mod h1:Wn1g7MK6OoeDT0vL+Q0SQLDz/KpfsVRgg6W7ihQeh4g= github.com/cloudflare/circl v1.1.0/go.mod h1:prBCrKB9DV4poKZY1l9zBXg2QJY7mvgRvtMxxK7fi4I= -github.com/cloudflare/circl v1.3.3/go.mod h1:5XYMA4rFBvNIrhs50XuiBJ15vF2pZn4nnUKZrLbUZFA= github.com/cloudflare/circl v1.6.1 h1:zqIqSPIndyBh1bjLVVDHMPpVKqp8Su/V+6MeDzzQBQ0= github.com/cloudflare/circl v1.6.1/go.mod h1:uddAzsPgqdMAYatqJ0lsjX1oECcQLIlRpzZh3pJrofs= github.com/cloudinary/cloudinary-go/v2 v2.13.0 h1:ugiQwb7DwpWQnete2AZkTh94MonZKmxD7hDGy1qTzDs= @@ -240,6 +226,8 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod 
h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/disintegration/imaging v1.6.2 h1:w1LecBlG2Lnp8B3jk5zSuNqd7b4DXhcjwek1ei82L+c= +github.com/disintegration/imaging v1.6.2/go.mod h1:44/5580QXChDfwIclfc/PCwrr44amcmDAg8hxG0Ewe4= github.com/diskfs/go-diskfs v1.7.0 h1:vonWmt5CMowXwUc79jWyGrf2DIMeoOjkLlMnQYGVOs8= github.com/diskfs/go-diskfs v1.7.0/go.mod h1:LhQyXqOugWFRahYUSw47NyZJPezFzB9UELwhpszLP/k= github.com/djherbis/times v1.6.0 h1:w2ctJ92J8fBvWPxugmXIv7Nz7Q3iDMKNx9v5ocVH20c= @@ -248,6 +236,8 @@ github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ= github.com/dop251/scsu v0.0.0-20220106150536-84ac88021d00 h1:xJBhC00smQpSZw3Kr0ErMUBXhUSjYoLRm2szxdbRBL0= github.com/dop251/scsu v0.0.0-20220106150536-84ac88021d00/go.mod h1:nNICngOdmNImBb/vuL+dSc0aIg3ryNATpjxThNoPw4g= +github.com/dromara/dongle v1.0.1 h1:si/7UP/EXxnFVZok1cNos70GiMGxInAYMilHQFP5dJs= +github.com/dromara/dongle v1.0.1/go.mod h1:ebFhTaDgxaDIKppycENTWlBsxz8mWCPWOLnsEgDpMv4= github.com/dropbox/dropbox-sdk-go-unofficial/v6 v6.0.5 h1:FT+t0UEDykcor4y3dMVKXIiWJETBpRgERYTGlmMd7HU= github.com/dropbox/dropbox-sdk-go-unofficial/v6 v6.0.5/go.mod h1:rSS3kM9XMzSQ6pw91Qgd6yB5jdt70N4OdtrAf74As5M= github.com/dsnet/compress v0.0.2-0.20230904184137-39efe44ab707 h1:2tV76y6Q9BB+NEBasnqvs7e49aEBFI8ejC89PSnWH+4= @@ -265,6 +255,7 @@ github.com/emersion/go-message v0.18.2 h1:rl55SQdjd9oJcIoQNhubD2Acs1E6IzlZISRTK7 github.com/emersion/go-message v0.18.2/go.mod h1:XpJyL70LwRvq2a8rVbHXikPgKj8+aI0kGdHlg16ibYA= github.com/emersion/go-vcard v0.0.0-20241024213814-c9703dde27ff h1:4N8wnS3f1hNHSmFD5zgFkWCyA4L1kCDkImPAtK7D6tg= github.com/emersion/go-vcard v0.0.0-20241024213814-c9703dde27ff/go.mod h1:HMJKR5wlh/ziNp+sHEDV2ltblO4JD2+IdDOWtGcQBTM= +github.com/emmansun/gmsm v0.15.5/go.mod h1:2m4jygryohSWkaSduFErgCwQKab5BNjURoFrn2DNwyU= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= @@ -438,6 +429,8 @@ github.com/henrybear327/go-proton-api v1.0.0/go.mod h1:w63MZuzufKcIZ93pwRgiOtxMX github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/internxt/rclone-adapter v0.0.0-20260120161514-a27b77ef4431 h1:ZiJoHpiFsodynTDqc7tZYhe+qG2eMAy1kMoQf818RFc= +github.com/internxt/rclone-adapter v0.0.0-20260120161514-a27b77ef4431/go.mod h1:jpF/MwuBg+opa4Q9izanNl8KzdtYhfBoZWyv70vqmgc= github.com/jcmturner/aescts/v2 v2.0.0 h1:9YKLH6ey7H4eDBXW8khjYslgyqG2xZikXP0EQFKrle8= github.com/jcmturner/aescts/v2 v2.0.0/go.mod h1:AiaICIRyfYg35RUkr8yESTqvSy7csK90qZ5xfvvsoNs= github.com/jcmturner/dnsutils/v2 v2.0.0 h1:lltnkeZGL0wILNvrNiVCR6Ro5PGU/SeBvVO/8c/iPbo= @@ -550,8 +543,8 @@ github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= github.com/onsi/ginkgo/v2 v2.17.3 h1:oJcvKpIb7/8uLpDDtnQuf18xVnwKp8DTD7DQ6gTd/MU= github.com/onsi/ginkgo/v2 v2.17.3/go.mod h1:nP2DPOQoNsQmsVyv5rDA8JkXQoCs6goXIvr/PRJ1eCc= -github.com/onsi/gomega v1.37.0 
h1:CdEG8g0S133B4OswTDC/5XPSzE1OeP29QOioj2PID2Y= -github.com/onsi/gomega v1.37.0/go.mod h1:8D9+Txp43QWKhM24yyOBEdpkzN8FvJyAwecBgsU4KU0= +github.com/onsi/gomega v1.34.1 h1:EUMJIKUjM8sKjYbtxQI9A4z2o+rruxnzNvpknOXie6k= +github.com/onsi/gomega v1.34.1/go.mod h1:kU1QgUvBDLXBJq618Xvm2LUX6rSAfRaFRTcdOeDLwwY= github.com/oracle/oci-go-sdk/v65 v65.104.0 h1:l9awEvzWvxmYhy/97A0hZ87pa7BncYXmcO/S8+rvgK0= github.com/oracle/oci-go-sdk/v65 v65.104.0/go.mod h1:oB8jFGVc/7/zJ+DbleE8MzGHjhs2ioCz5stRTdZdIcY= github.com/panjf2000/ants/v2 v2.11.3 h1:AfI0ngBoXJmYOpDh9m516vjqoUu2sLrIVgppI9TZVpg= @@ -766,6 +759,7 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.4.0/go.mod h1:3quD/ATkf6oY+rnes5c3ExXTbLc8mueNue5/DoinL80= golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc= @@ -788,6 +782,9 @@ golang.org/x/exp v0.0.0-20251023183803-a4bb9ffd2546 h1:mgKeJMpvi0yx/sU5GsxQ7p6s2 golang.org/x/exp v0.0.0-20251023183803-a4bb9ffd2546/go.mod h1:j/pmGrbnkbPtQfxEe5D0VQhZC6qKbfKifgD0oM7sR70= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20191009234506-e7c1f5e7dbb8/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.32.0 h1:6lZQWq75h7L5IWNk0r+SCpUJ6tUVd3v4ZHnbRKLkUDQ= +golang.org/x/image v0.32.0/go.mod h1:/R37rrQmKXtO6tYXAjtDLwQgFLHmhW+V6ayXlxzP2Pc= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -846,6 +843,7 @@ golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwY golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.3.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= @@ -922,6 +920,7 @@ golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.3.0/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -934,6 +933,7 @@ golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= @@ -950,6 +950,7 @@ golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= @@ -1134,8 +1135,6 @@ moul.io/http2curl/v2 v2.3.0/go.mod h1:RW4hyBjTWSYDOxapodpNEtX0g5Eb16sxklBqmd2RHc rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs= -sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4= storj.io/common v0.0.0-20251107171817-6221ae45072c h1:UDXSrdeLJe3QFouavSW10fYdpclK0YNu3KvQHzqq2+k= storj.io/common v0.0.0-20251107171817-6221ae45072c/go.mod h1:XNX7uykja6aco92y2y8RuqaXIDRPpt1YA2OQDKlKEUk= storj.io/drpc v0.0.35-0.20250513201419-f7819ea69b55 h1:8OE12DvUnB9lfZcHe7IDGsuhjrY9GBAr964PVHmhsro= diff --git a/lib/dircache/dircache.go b/lib/dircache/dircache.go index 7fdf9e1afd35b..6a17dc2ca2473 100644 --- a/lib/dircache/dircache.go +++ b/lib/dircache/dircache.go @@ -361,9 +361,6 @@ func (dc *DirCache) RootParentID(ctx context.Context, create bool) (ID string, e } else if dc.rootID == dc.trueRootID { return "", errors.New("is root directory") } - if dc.rootParentID == "" { - return "", errors.New("internal error: didn't find rootParentID") - } return dc.rootParentID, nil } diff --git a/lib/pool/pool.go b/lib/pool/pool.go index 654ae444f75d1..3b7ddcc005323 100644 --- a/lib/pool/pool.go +++ b/lib/pool/pool.go @@ -46,7 +46,7 @@ type Pool struct { } // totalMemory is a semaphore used to control total buffer usage of -// all Pools. It it may be nil in which case the total buffer usage +// all Pools. It may be nil in which case the total buffer usage // will not be controlled. It counts memory in active use, it does not // count memory cached in the pool. 
var totalMemory *semaphore.Weighted diff --git a/lib/proxy/http.go b/lib/proxy/http.go index 616d2c4c1521b..647a3c5a12a11 100644 --- a/lib/proxy/http.go +++ b/lib/proxy/http.go @@ -3,6 +3,7 @@ package proxy import ( "bufio" "crypto/tls" + "encoding/base64" "fmt" "net" "net/http" @@ -55,7 +56,13 @@ func HTTPConnectDial(network, addr string, proxyURL *url.URL, proxyDialer proxy. } // send CONNECT - _, err = fmt.Fprintf(conn, "CONNECT %s HTTP/1.1\r\nHost: %s\r\n\r\n", addr, addr) + user := proxyURL.User + if user != nil { + credential := base64.StdEncoding.EncodeToString([]byte(user.String())) + _, err = fmt.Fprintf(conn, "CONNECT %s HTTP/1.1\r\nHost: %s\r\nProxy-Authorization: Basic %s\r\n\r\n", addr, addr, credential) + } else { + _, err = fmt.Fprintf(conn, "CONNECT %s HTTP/1.1\r\nHost: %s\r\n\r\n", addr, addr) + } if err != nil { _ = conn.Close() return nil, fmt.Errorf("HTTP CONNECT proxy failed to send CONNECT: %q", err) diff --git a/lib/rest/rest.go b/lib/rest/rest.go index 2557b68c3dc98..3e92163a60d57 100644 --- a/lib/rest/rest.go +++ b/lib/rest/rest.go @@ -14,7 +14,9 @@ import ( "maps" "mime/multipart" "net/http" + "net/textproto" "net/url" + "strings" "sync" "github.com/rclone/rclone/fs" @@ -145,6 +147,7 @@ type Opts struct { MultipartMetadataName string // ..this is used for the name of the metadata form part if set MultipartContentName string // ..name of the parameter which is the attached file MultipartFileName string // ..name of the file for the attached file + MultipartContentType string // ..content type of the attached file Parameters url.Values // any parameters for the final URL TransferEncoding []string // transfer encoding, set to "identity" to disable chunked encoding Trailer *http.Header // set the request trailer @@ -371,6 +374,32 @@ func (api *Client) Call(ctx context.Context, opts *Opts) (resp *http.Response, e return resp, nil } +var quoteEscaper = strings.NewReplacer("\\", "\\\\", `"`, "\\\"") + +func escapeQuotes(s string) string { + return quoteEscaper.Replace(s) +} + +// multipartFileContentDisposition returns the value of a Content-Disposition header +// with the provided field name and file name. +func multipartFileContentDisposition(fieldname, filename string) string { + return fmt.Sprintf(`form-data; name="%s"; filename="%s"`, + escapeQuotes(fieldname), escapeQuotes(filename)) +} + +// CreateFormFile is a convenience wrapper around [Writer.CreatePart]. It creates +// a new form-data header with the provided field name and file name. 
+func CreateFormFile(w *multipart.Writer, fieldname, filename, contentType string) (io.Writer, error) { + h := make(textproto.MIMEHeader) + // FIXME when go1.24 is no longer supported, change to + // multipart.FileContentDisposition and remove definition above + h.Set("Content-Disposition", multipartFileContentDisposition(fieldname, filename)) + if contentType != "" { + h.Set("Content-Type", contentType) + } + return w.CreatePart(h) +} + // MultipartUpload creates an io.Reader which produces an encoded a // multipart form upload from the params passed in and the passed in // @@ -382,10 +411,10 @@ func (api *Client) Call(ctx context.Context, opts *Opts) (resp *http.Response, e // the int64 returned is the overhead in addition to the file contents, in case Content-Length is required // -// NB This doesn't allow setting the content type of the attachment +// The contentType passed in sets the Content-Type of the attached file part; if it is empty no Content-Type is set -func MultipartUpload(ctx context.Context, in io.Reader, params url.Values, contentName, fileName string) (io.ReadCloser, string, int64, error) { +func MultipartUpload(ctx context.Context, in io.Reader, params url.Values, contentName, fileName string, contentType string) (io.ReadCloser, string, int64, error) { bodyReader, bodyWriter := io.Pipe() writer := multipart.NewWriter(bodyWriter) - contentType := writer.FormDataContentType() + formContentType := writer.FormDataContentType() // Create a Multipart Writer as base for calculating the Content-Length buf := &bytes.Buffer{} @@ -404,7 +433,7 @@ func MultipartUpload(ctx context.Context, in io.Reader, params url.Values, conte } } if in != nil { - _, err = dummyMultipartWriter.CreateFormFile(contentName, fileName) + _, err = CreateFormFile(dummyMultipartWriter, contentName, fileName, contentType) if err != nil { return nil, "", 0, err } @@ -445,7 +474,7 @@ func MultipartUpload(ctx context.Context, in io.Reader, params url.Values, conte } if in != nil { - part, err := writer.CreateFormFile(contentName, fileName) + part, err := CreateFormFile(writer, contentName, fileName, contentType) if err != nil { _ = bodyWriter.CloseWithError(fmt.Errorf("failed to create form file: %w", err)) return @@ -467,7 +496,7 @@ func MultipartUpload(ctx context.Context, in io.Reader, params url.Values, conte _ = bodyWriter.Close() }() - return bodyReader, contentType, multipartLength, nil + return bodyReader, formContentType, multipartLength, nil } // CallJSON runs Call and decodes the body as a JSON object into response (if not nil) @@ -539,7 +568,7 @@ func (api *Client) callCodec(ctx context.Context, opts *Opts, request any, respo opts = opts.Copy() var overhead int64 - opts.Body, opts.ContentType, overhead, err = MultipartUpload(ctx, opts.Body, params, opts.MultipartContentName, opts.MultipartFileName) + opts.Body, opts.ContentType, overhead, err = MultipartUpload(ctx, opts.Body, params, opts.MultipartContentName, opts.MultipartFileName, opts.MultipartContentType) if err != nil { return nil, err } diff --git a/ultest/Caplin, Julie - Die kleine Villa in Italien.epub b/ultest/Caplin, Julie - Die kleine Villa in Italien.epub new file mode 100644 index 0000000000000..3aa7711bb2dbb Binary files /dev/null and b/ultest/Caplin, Julie - Die kleine Villa in Italien.epub differ diff --git a/ultest/chile.jpg b/ultest/chile.jpg new file mode 100644 index 0000000000000..81118074fdd4f Binary files /dev/null and b/ultest/chile.jpg differ diff --git a/ultest/some-sub-dir/juice.txt b/ultest/some-sub-dir/juice.txt new file mode 100644 index 0000000000000..f9b550fc0f607 --- /dev/null +++ b/ultest/some-sub-dir/juice.txt @@ -0,0 +1,84 @@
+VA_-_Juice_Vol._52-2005-OMA +VA-Juice_Vol._53-2005-uC +VA-Juice_Vol._58-2005-uC +VA-Juice_Vol._59-2005-uC +VA--Juice_Vol._77-MAG-2007-OMA +VA--Juice_Vol._78-MAG-DE-2007-OMA +VA--Juice_Vol._80-MAG-2007-OMA +VA--Juice_Vol._81-MAG-2007-OMA +VA--Juice_Vol._82-MAG-2007-OMA +VA--Juice_Vol._83-MAG-2008-OMA +VA--Juice_Vol._84-MAG-2008-OMA +VA--Juice_Vol._85-MAG-2008-OMA +VA--Juice_Vol._86-MAG-2008-OMA +VA--Juice_Vol._87-MAG-2008-OMA +VA--Juice_Vol._88-MAG-2008-OMA +VA--Juice_Vol._89-MAG-2008-OMA +VA--Juice_Vol._90-MAG-2008-OMA +VA--Juice_Vol._91-MAG-2008-OMA +VA--Juice_Vol._92-MAG-2008-OMA +VA--Juice_Vol._93-MAG-2008-OMA +VA--Juice_Vol._94-MAG-2009-OMA +VA--Juice_Vol._95-MAG-2009-OMA +VA--Juice_Vol._96-MAG-2009-OMA +VA--Juice_Vol._97-MAG-2009-OMA +VA--Juice_Vol._98-MAG-2009-OMA +VA--Juice_Vol._99-MAG-2009-OMA +VA--Juice_Vol._100-MAG-DE-2009-OMA +VA--Juice_Vol._101-MAG-2009-OMA +VA--Juice_Vol._102-MAG-2009-OMA +VA--Juice_Vol._103-MAG-2009-OMA +VA--Juice_Vol._104-MAG-2009-OMA +VA--Juice_Vol._105-MAG-2010-OMA +VA--Juice_Vol._106-MAG-2010-OMA +VA--Juice_Vol._107-MAG-2010-OMA +VA--Juice_Vol._108-MAG-2010-OMA +VA--Juice_Vol._109-MAG-2011-OMA +VA--Juice_Vol._110-MAG-2011-OMA +VA--Juice_Vol._111_(Instrumental_Edition_Mixed_by_Sepalot)-MAG-2012-OMA +VA--Juice_Vol._112-MAG-2012-OMA +VA--Juice_Vol._113-MAG-2012-OMA +VA--Juice_Vol._114-MAG-2012-OMA +VA--Juice_Vol._115-MAG-DE-2013-OMA +VA--Juice_Vol._116-MAG-2013-OMA +VA-Juice_Vol._117-MAG-2013-NOiR +VA-Juice_Vol._118-DE-MAG-2013-NOiR +VA-Juice_Vol._119-DE-MAG-2013-NOiR +VA-Juice_Vol._120-MAG-2013-NOiR +VA-Juice_Vol._121-DE-MAG-2013-NOiR +VA-Juice_Vol._122-DE-MAG-2014-NOiR +VA-Juice_Vol._123-DE-MAG-2014-NOiR +VA-Juice_Vol._124-DE-2014-NOiR +VA--Juice_Vol._125-MAG-2014-OMA +VA--Juice_Vol._126-MAG-2014-OMA +VA-Juice_Vol._127-DE-MAG-2015-NOiR +VA-Juice_Vol._128-DE-MAG-2015-NOiR +VA--Juice_Vol._129-DE-MAG-2015-OMA +VA-Juice_Vol._130-DE-MAG-2015-NOiR +VA-Juice_Vol._131-DE-MAG-2015-NOiR +VA-Juice_Vol._132-DE-MAG-2015-NOiR +VA-Juice_Vol._133-DE-MAG-2016-NOiR +VA-Juice_Vol._134-DE-MAG-2016-NOiR +VA-Juice_Vol._135-DE-MAG-2016-NOiR +VA-Juice_Vol._136-DE-MAG-2017-NOiR +VA-Juice_Vol._137-DE-RERiP-MAG-2017-NOiR +VA-Juice_Vol._137-DE-MAG-DIRFIX-2017-NOiR +VA-Juice_Vol._138-MAG-DE-2017-NOiR +VA-Juice_Vol._139-MAG-DE-2017-NOiR +VA-Juice_Vol._140-MAG-DE-2017-NOiR +VA-Juice_Vol._141-MAG-DE-2018-NOiR +VA-Juice_Vol._142-MAG-DE-2018-NOiR +VA-Juice_Vol._143-MAG-DE-2018-NOiR +VA-Juice_Vol._144-MAG-DE-2018-NOiR +Sylabil_Spill-Anecker_EP_(Juice_Vol._145)-MAG-DE-2018-NOiR +VA-Juice_Vol._146-MAG-DE-2019-NOiR +VA-Juice_Vol._146-REPACK-MAG-DE-2019-NOiR +VA-Juice_Vol._147-MAG-DE-2019-NOiR +VA-Juice_Vol._148-MAG-DE-2019-NOiR + + + + +/MP3/Hip Hop/2005/VA-Juice_Vol._53-2005-uC +/MP3/Hip Hop/2005/VA-Juice_Vol._58-2005-uC +/MP3/Hip Hop/2005/VA-Juice_Vol._59-2005-uC \ No newline at end of file diff --git a/vfs/vfscache/writeback/writeback.go b/vfs/vfscache/writeback/writeback.go index fe7b9ed626364..ee9566c777f53 100644 --- a/vfs/vfscache/writeback/writeback.go +++ b/vfs/vfscache/writeback/writeback.go @@ -254,7 +254,7 @@ func (wb *WriteBack) SetID(pid *Handle) { // // Use SetID to create Handles in advance of calling Add. // -// If modified is false then it it doesn't cancel a pending upload if +// If modified is false then it doesn't cancel a pending upload if // there is one as there is no need. func (wb *WriteBack) Add(id Handle, name string, size int64, modified bool, putFn PutFn) Handle { wb.mu.Lock()
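
Usage sketch for the extended multipart helpers: the new trailing contentType argument on rest.MultipartUpload (and the MultipartContentType field on rest.Opts, threaded through callCodec above) lets a caller set an explicit Content-Type on the attached file part. The example below is a minimal, self-contained sketch assuming only the signature shown in the diff; the form field name, file name, parameters and reader contents are illustrative, not taken from any backend in this change.

// A runnable sketch of calling rest.MultipartUpload with the new contentType argument.
package main

import (
	"context"
	"fmt"
	"io"
	"net/url"
	"strings"

	"github.com/rclone/rclone/lib/rest"
)

func main() {
	// Illustrative inputs, not taken from any backend in this change.
	in := strings.NewReader("hello world")
	params := url.Values{"description": {"example upload"}}

	body, formContentType, overhead, err := rest.MultipartUpload(
		context.Background(), in, params,
		"content",     // contentName: form field carrying the file
		"example.txt", // fileName: name used in the Content-Disposition header
		"text/plain",  // contentType: new argument, Content-Type of the file part
	)
	if err != nil {
		fmt.Println("multipart upload failed:", err)
		return
	}
	defer body.Close()

	// formContentType is the "multipart/form-data; boundary=..." value to use as
	// the request Content-Type; overhead is the encoding overhead to add to the
	// file length if a Content-Length is required.
	fmt.Println(formContentType, overhead)

	// Drain the reader so the background writer goroutine completes.
	_, _ = io.Copy(io.Discard, body)
}

Passing an empty contentType means no Content-Type header is added to the file part, since CreateFormFile only sets that header when the argument is non-empty.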