diff --git a/cmd/syncProcessor.go b/cmd/syncProcessor.go
index fb149c84a..2603157ec 100644
--- a/cmd/syncProcessor.go
+++ b/cmd/syncProcessor.go
@@ -247,7 +247,7 @@ func newSyncDeleteProcessor(cca *cookedSyncCmdArgs, fpo common.FolderPropertyOpt
 	if err != nil {
 		return nil, err
 	}
-
+
 	return newInteractiveDeleteProcessor(deleter.delete, cca.deleteDestination, cca.fromTo.To().String(), cca.destination, cca.incrementDeletionCount, cca.dryrunMode), nil
 }
 
@@ -284,7 +284,7 @@ func (b *remoteResourceDeleter) getObjectURL(objectURL string) (*url.URL, error)
 	if err != nil {
 		return nil, err
 	}
-	return u,nil
+	return u, nil
 }
 
 func (b *remoteResourceDeleter) delete(object StoredObject) error {
@@ -305,12 +305,12 @@ func (b *remoteResourceDeleter) delete(object StoredObject) error {
 
 		var err error
 		var objURL *url.URL
-
+
 		switch b.targetLocation {
 		case common.ELocation.Blob():
 			bsc, _ := sc.BlobServiceClient()
-			var blobClient *blob.Client = bsc.NewContainerClient(b.containerName).NewBlobClient(path.Join(b.rootPath + object.relativePath))
-
+			var blobClient *blob.Client = bsc.NewContainerClient(b.containerName).NewBlobClient(path.Join(b.rootPath, object.relativePath))
+
 			objURL, err = b.getObjectURL(blobClient.URL())
 			if err != nil {
 				break
@@ -321,7 +321,7 @@ func (b *remoteResourceDeleter) delete(object StoredObject) error {
 			_, err = blobClient.Delete(b.ctx, nil)
 		case common.ELocation.File():
 			fsc, _ := sc.FileServiceClient()
-			fileClient := fsc.NewShareClient(b.containerName).NewRootDirectoryClient().NewFileClient(path.Join(b.rootPath + object.relativePath))
+			fileClient := fsc.NewShareClient(b.containerName).NewRootDirectoryClient().NewFileClient(path.Join(b.rootPath, object.relativePath))
 
 			objURL, err = b.getObjectURL(fileClient.URL())
 			if err != nil {
@@ -330,13 +330,13 @@ func (b *remoteResourceDeleter) delete(object StoredObject) error {
 
 			b.folderManager.RecordChildExists(objURL)
 			defer b.folderManager.RecordChildDeleted(objURL)
-			err = common.DoWithOverrideReadOnlyOnAzureFiles(b.ctx, func()(interface{}, error) {
+			err = common.DoWithOverrideReadOnlyOnAzureFiles(b.ctx, func() (interface{}, error) {
 				return fileClient.Delete(b.ctx, nil)
 			}, fileClient, b.forceIfReadOnly)
 		case common.ELocation.BlobFS():
 			dsc, _ := sc.DatalakeServiceClient()
-			fileClient := dsc.NewFileSystemClient(b.containerName).NewFileClient(path.Join(b.rootPath + object.relativePath))
-
+			fileClient := dsc.NewFileSystemClient(b.containerName).NewFileClient(path.Join(b.rootPath, object.relativePath))
+
 			objURL, err = b.getObjectURL(fileClient.DFSURL())
 			if err != nil {
 				break
@@ -369,48 +369,48 @@ func (b *remoteResourceDeleter) delete(object StoredObject) error {
 		var objURL *url.URL
 		var err error
 		switch b.targetLocation {
-	case common.ELocation.Blob():
-		bsc, _ := sc.BlobServiceClient()
-		blobClient := bsc.NewContainerClient(b.containerName).NewBlobClient(path.Join(b.rootPath + object.relativePath))
-		// HNS endpoint doesn't like delete snapshots on a directory
-		objURL, err = b.getObjectURL(blobClient.URL())
-		if err != nil {
-			return err
-		}
+		case common.ELocation.Blob():
+			bsc, _ := sc.BlobServiceClient()
+			blobClient := bsc.NewContainerClient(b.containerName).NewBlobClient(path.Join(b.rootPath, object.relativePath))
+			// HNS endpoint doesn't like delete snapshots on a directory
+			objURL, err = b.getObjectURL(blobClient.URL())
+			if err != nil {
+				return err
+			}
 
-	deleteFunc = func(ctx context.Context, logger common.ILogger) bool {
-		_, err = blobClient.Delete(b.ctx, nil)
-		return (err == nil)
-	}
-	case common.ELocation.File():
-		fsc, _ := sc.FileServiceClient()
-		dirClient := fsc.NewShareClient(b.containerName).NewDirectoryClient(path.Join(b.rootPath + object.relativePath))
-		objURL, err = b.getObjectURL(dirClient.URL())
-		if err != nil {
-			return err
-		}
+			deleteFunc = func(ctx context.Context, logger common.ILogger) bool {
+				_, err = blobClient.Delete(b.ctx, nil)
+				return (err == nil)
+			}
+		case common.ELocation.File():
+			fsc, _ := sc.FileServiceClient()
+			dirClient := fsc.NewShareClient(b.containerName).NewDirectoryClient(path.Join(b.rootPath, object.relativePath))
+			objURL, err = b.getObjectURL(dirClient.URL())
+			if err != nil {
+				return err
+			}
 
-	deleteFunc = func(ctx context.Context, logger common.ILogger) bool {
-		err = common.DoWithOverrideReadOnlyOnAzureFiles(b.ctx, func()(interface{}, error) {
-			return dirClient.Delete(b.ctx, nil)
-		}, dirClient, b.forceIfReadOnly)
-		return (err == nil)
-	}
-	case common.ELocation.BlobFS():
-		dsc, _ := sc.DatalakeServiceClient()
-		directoryClient := dsc.NewFileSystemClient(b.containerName).NewDirectoryClient(path.Join(b.rootPath + object.relativePath))
-		objURL, err = b.getObjectURL(directoryClient.DFSURL())
-		if err != nil {
-			return err
-		}
+			deleteFunc = func(ctx context.Context, logger common.ILogger) bool {
+				err = common.DoWithOverrideReadOnlyOnAzureFiles(b.ctx, func() (interface{}, error) {
+					return dirClient.Delete(b.ctx, nil)
+				}, dirClient, b.forceIfReadOnly)
+				return (err == nil)
+			}
+		case common.ELocation.BlobFS():
+			dsc, _ := sc.DatalakeServiceClient()
+			directoryClient := dsc.NewFileSystemClient(b.containerName).NewDirectoryClient(path.Join(b.rootPath, object.relativePath))
+			objURL, err = b.getObjectURL(directoryClient.DFSURL())
+			if err != nil {
+				return err
+			}
 
-	deleteFunc = func(ctx context.Context, logger common.ILogger) bool {
-		recursiveContext := common.WithRecursive(b.ctx, false)
-		_, err = directoryClient.Delete(recursiveContext, nil)
-		return (err == nil)
-	}
-	default:
-		panic("not implemented, check your code")
+			deleteFunc = func(ctx context.Context, logger common.ILogger) bool {
+				recursiveContext := common.WithRecursive(b.ctx, false)
+				_, err = directoryClient.Delete(recursiveContext, nil)
+				return (err == nil)
+			}
+		default:
+			panic("not implemented, check your code")
 		}
 
 		b.folderManager.RecordChildExists(objURL)
diff --git a/cmd/zt_make_test.go b/cmd/zt_make_test.go
index da0dcbbad..ee51e797b 100644
--- a/cmd/zt_make_test.go
+++ b/cmd/zt_make_test.go
@@ -158,7 +158,7 @@ func TestMakeFileShareQuota(t *testing.T) {
 
 	args := rawMakeCmdArgs{
 		resourceToCreate: scSAS.String(),
-		quota: 5,
+		quota:            5,
 	}
 
 	runMakeAndVerify(args, func(err error) {
@@ -191,4 +191,4 @@ func TestMakeFileShareExists(t *testing.T) {
 		_, err = sc.GetProperties(ctx, nil)
 		a.Nil(err)
 	})
-}
\ No newline at end of file
+}
diff --git a/common/hash_data.go b/common/hash_data.go
index 8d6cc581f..79503314c 100644
--- a/common/hash_data.go
+++ b/common/hash_data.go
@@ -22,6 +22,7 @@ var LocalHashStorageMode = EHashStorageMode.Default()
 var LocalHashDir = ""
 
 var hashDataFailureLogOnce = &sync.Once{}
+
 func LogHashStorageFailure() {
 	hashDataFailureLogOnce.Do(func() {
 		lcm.Info("One or more hash storage operations (read/write) have failed. Check the scanning log for details.")
@@ -29,12 +30,13 @@ func LogHashStorageFailure() {
 }
 
 type HashStorageMode uint8
+
 var EHashStorageMode = HashStorageMode(0)
 
 func (HashStorageMode) HiddenFiles() HashStorageMode { return 0 }
 
 func (e *HashStorageMode) Default() HashStorageMode {
-	if defaulter, ok := any(e).(interface{osDefault() HashStorageMode}); ok { // allow specific OSes to override the default functionality
+	if defaulter, ok := any(e).(interface{ osDefault() HashStorageMode }); ok { // allow specific OSes to override the default functionality
 		return defaulter.osDefault()
 	}
diff --git a/common/hash_data_adapter_hidden_files.go b/common/hash_data_adapter_hidden_files.go
index 3c29ffb18..f61d51c27 100644
--- a/common/hash_data_adapter_hidden_files.go
+++ b/common/hash_data_adapter_hidden_files.go
@@ -14,7 +14,7 @@ type HiddenFileDataAdapter struct {
 }
 
 func (a *HiddenFileDataAdapter) GetMode() HashStorageMode {
-	return EHashStorageMode.Default()
+	return EHashStorageMode.HiddenFiles()
 }
 
 func (a *HiddenFileDataAdapter) getHashPath(relativePath string) string {
@@ -26,6 +26,12 @@ func (a *HiddenFileDataAdapter) getHashPath(relativePath string) string {
 	dir, fName := filepath.Split(relativePath)
 	fName = fmt.Sprintf(".%s%s", fName, AzCopyHashDataStream)
 
+	// Try to create the directory tree; MkdirAll covers nested subdirectories, which a plain os.Mkdir would not.
+	err := os.MkdirAll(filepath.Join(basePath, dir), 0775)
+	if err != nil && !os.IsExist(err) {
+		lcm.Warn("Failed to create hash data directory")
+	}
+
 	return filepath.Join(basePath, dir, fName)
 }
 
@@ -76,7 +82,7 @@ func (a *HiddenFileDataAdapter) SetHashData(relativePath string, data *SyncHashD
 	}
 
 	// Push types around to check for OS-specific hide file method
-	if adapter, canHide := any(a).(interface{HideFile(string) error}); canHide {
+	if adapter, canHide := any(a).(interface{ HideFile(string) error }); canHide {
 		dataFile := a.getDataPath(relativePath)
 
 		err := adapter.HideFile(dataFile)
diff --git a/e2etest/newe2e_asserter.go b/e2etest/newe2e_asserter.go
index 4448537b8..04df9a592 100644
--- a/e2etest/newe2e_asserter.go
+++ b/e2etest/newe2e_asserter.go
@@ -6,10 +6,13 @@ import (
 	"testing"
 )
 
+var _ Asserter = &FrameworkAsserter{}
+var _ ScenarioAsserter = &ScenarioVariationManager{} // covers all 3 interfaces
+
 // ====== Asserter ======
 
 type Asserter interface {
-	NoError(comment string, err error)
+	NoError(comment string, err error, failNow ...bool)
 	// Assert fails the test, but does not exit.
 	Assert(comment string, assertion Assertion, items ...any)
 	// AssertNow wraps Assert, and exits if failed.
@@ -23,6 +26,8 @@ type Asserter interface {
 
 	// Failed returns if the test has already failed.
 	Failed() bool
+	// HelperMarker returns the associated *testing.T, and if there is none, a NilHelperMarker.
+	HelperMarker() HelperMarker
 }
 
 type DryrunAsserter interface {
@@ -39,6 +44,15 @@ type ScenarioAsserter interface {
 	Cleanup(func(a ScenarioAsserter))
 }
 
+// HelperMarker handles the fact that testing.T can sometimes be nil, and that we can't indicate a depth to ignore with Helper()
+type HelperMarker interface {
+	Helper()
+}
+
+type NilHelperMarker struct{}
+
+func (NilHelperMarker) Helper() {}
+
 // ====== Assertion ======
 
 type Assertion interface {
@@ -118,15 +132,35 @@ func (ta *FrameworkAsserter) Log(format string, a ...any) {
 	ta.t.Log(fmt.Sprintf(format, a...))
 }
 
-func (ta *FrameworkAsserter) NoError(comment string, err error) {
+func (ta *FrameworkAsserter) NoError(comment string, err error, failNow ...bool) {
 	ta.t.Helper()
-	ta.AssertNow(comment, IsNil{}, err)
+
+	if err != nil {
+		ta.t.Logf("Error was not nil (%s): %v", comment, err)
+
+		if FirstOrZero(failNow) {
+			ta.t.FailNow()
+		} else {
+			ta.t.Fail()
+		}
+	}
 }
 
 func (ta *FrameworkAsserter) AssertNow(comment string, assertion Assertion, items ...any) {
 	ta.t.Helper()
-	ta.Assert(comment, assertion, items...)
-	if ta.Failed() {
+
+	if (assertion.MinArgs() > 0 && len(items) < assertion.MinArgs()) || (assertion.MaxArgs() > 0 && len(items) > assertion.MaxArgs()) {
+		ta.PrintFinalizingMessage("Failed to assert: Assertion %s supports argument counts between %d and %d, but received %d args.", assertion.Name(), assertion.MinArgs(), assertion.MaxArgs(), len(items))
+		ta.t.FailNow()
+	}
+
+	if !assertion.Assert(items...) {
+		if fa, ok := assertion.(FormattedAssertion); ok {
+			ta.PrintFinalizingMessage("Failed assertion %s: %s; %s", fa.Name(), fa.Format(items...), comment)
+		} else {
+			ta.PrintFinalizingMessage("Failed assertion %s with item(s): %v; %s", assertion.Name(), items, comment)
+		}
+
 		ta.t.FailNow()
 	}
 }
@@ -165,3 +199,11 @@ func (ta *FrameworkAsserter) Failed() bool {
 	ta.t.Helper()
 	return ta.t.Failed()
 }
+
+func (ta *FrameworkAsserter) HelperMarker() HelperMarker {
+	if ta.t != nil {
+		return ta.t
+	}
+
+	return NilHelperMarker{}
+}
diff --git a/e2etest/newe2e_object_content.go b/e2etest/newe2e_object_content.go
index fb4f84722..8d394e91f 100644
--- a/e2etest/newe2e_object_content.go
+++ b/e2etest/newe2e_object_content.go
@@ -2,6 +2,7 @@ package e2etest
 
 import (
 	"bytes"
+	"crypto/md5"
 	"github.com/Azure/azure-storage-azcopy/v10/cmd"
 	"github.com/Azure/azure-storage-azcopy/v10/common"
 	"io"
@@ -11,7 +12,7 @@ import (
 type ObjectContentContainer interface {
 	Size() int64
 	Reader() io.ReadSeeker
-	//MD5() [md5.Size]byte
+	MD5() [md5.Size]byte
 	//CRC64() uint64
 }
 
@@ -48,3 +49,7 @@ func (o *ObjectContentContainerBuffer) Size() int64 {
 func (o *ObjectContentContainerBuffer) Reader() io.ReadSeeker {
 	return bytes.NewReader(o.Data)
 }
+
+func (o *ObjectContentContainerBuffer) MD5() [md5.Size]byte {
+	return md5.Sum(o.Data)
+}
diff --git a/e2etest/newe2e_resource_manager_interface.go b/e2etest/newe2e_resource_manager_interface.go
index ddfee1833..aee3b77ab 100644
--- a/e2etest/newe2e_resource_manager_interface.go
+++ b/e2etest/newe2e_resource_manager_interface.go
@@ -239,7 +239,9 @@ type ObjectResourceManager interface {
 	EntityType() common.EntityType
 	ContainerName() string
 	ObjectName() string
-	// Create attempts to create an object. Should overwrite objects if they already exist. It is expected to attempt to track object creation.
+	// Create attempts to create an object. Should overwrite objects if they already exist.
+	// It is expected to attempt to track object creation.
+	// It is also expected to create parents, if required.
 	Create(a Asserter, body ObjectContentContainer, properties ObjectProperties)
 	// Delete attempts to delete an object. NotFound type errors are ignored.
 	Delete(a Asserter)
diff --git a/e2etest/newe2e_resource_managers_blob.go b/e2etest/newe2e_resource_managers_blob.go
index 499f61b33..f0d9adea3 100644
--- a/e2etest/newe2e_resource_managers_blob.go
+++ b/e2etest/newe2e_resource_managers_blob.go
@@ -684,8 +684,10 @@ func (b *BlobObjectResourceManager) Download(a Asserter) io.ReadSeeker {
 	a.NoError("Download stream", err)
 
 	buf := &bytes.Buffer{}
-	_, err = io.Copy(buf, resp.Body)
-	a.NoError("Read body", err)
+	if err == nil && resp.Body != nil {
+		_, err = io.Copy(buf, resp.Body)
+		a.NoError("Read body", err)
+	}
 
 	return bytes.NewReader(buf.Bytes())
 }
diff --git a/e2etest/newe2e_resource_managers_blobfs.go b/e2etest/newe2e_resource_managers_blobfs.go
index 37402972b..666457b1c 100644
--- a/e2etest/newe2e_resource_managers_blobfs.go
+++ b/e2etest/newe2e_resource_managers_blobfs.go
@@ -12,7 +12,9 @@ import (
 	"github.com/Azure/azure-storage-azcopy/v10/cmd"
 	"github.com/Azure/azure-storage-azcopy/v10/common"
 	"io"
+	"path"
 	"runtime"
+	"strings"
 )
 
 // check that everything aligns with interfaces
@@ -326,7 +328,24 @@ func (b *BlobFSPathResourceProvider) ObjectName() string {
 	return b.objectPath
 }
 
+func (b *BlobFSPathResourceProvider) CreateParents(a Asserter) {
+	if !b.Container.Exists() {
+		b.Container.Create(a, ContainerProperties{})
+	}
+
+	dir, _ := path.Split(b.objectPath)
+	if dir != "" {
+		obj := b.Container.GetObject(a, strings.TrimSuffix(dir, "/"), common.EEntityType.Folder()).(*BlobFSPathResourceProvider)
+		// Create recursively calls this function.
+		if !obj.Exists() {
+			obj.Create(a, nil, ObjectProperties{})
+		}
+	}
+}
+
 func (b *BlobFSPathResourceProvider) Create(a Asserter, body ObjectContentContainer, properties ObjectProperties) {
+	b.CreateParents(a)
+
 	switch b.entityType {
 	case common.EEntityType.Folder():
 		_, err := b.getDirClient().Create(ctx, &directory.CreateOptions{
@@ -369,6 +388,14 @@ func (b *BlobFSPathResourceProvider) Create(a Asserter, body ObjectContentContai
 		}
 
 		meta[common.POSIXSymlinkMeta] = pointerTo("true")
+	} else if b.entityType == common.EEntityType.Folder() {
+		meta = make(common.Metadata)
+
+		for k, v := range properties.Metadata {
+			meta[k] = v
+		}
+
+		meta[common.POSIXFolderMeta] = pointerTo("true")
 	}
 
 	b.SetMetadata(a, meta)
@@ -458,6 +485,13 @@ func (b *BlobFSPathResourceProvider) SetHTTPHeaders(a Asserter, h contentHeaders
 
 func (b *BlobFSPathResourceProvider) SetMetadata(a Asserter, metadata common.Metadata) {
 	_, err := b.getFileClient().SetMetadata(ctx, metadata, nil)
+
+	if datalakeerror.HasCode(err, datalakeerror.UnsupportedHeader) {
+		// retry, removing hdi_isfolder
+		delete(metadata, common.POSIXFolderMeta)
+		_, err = b.getFileClient().SetMetadata(ctx, metadata, nil)
+	}
+
 	a.NoError("Set metadata", err)
 }
 
@@ -506,8 +540,10 @@ func (b *BlobFSPathResourceProvider) Download(a Asserter) io.ReadSeeker {
 	a.NoError("Download stream", err)
 
 	buf := &bytes.Buffer{}
-	_, err = io.Copy(buf, resp.Body)
-	a.NoError("Read body", err)
+	if err == nil && resp.Body != nil {
+		_, err = io.Copy(buf, resp.Body)
+		a.NoError("Read body", err)
+	}
 
 	return bytes.NewReader(buf.Bytes())
 }
diff --git a/e2etest/newe2e_resource_managers_file.go b/e2etest/newe2e_resource_managers_file.go
index 7d235f203..0960bf566 100644
--- a/e2etest/newe2e_resource_managers_file.go
+++ b/e2etest/newe2e_resource_managers_file.go
@@ -16,6 +16,7 @@ import (
 	"io"
 	"path"
 	"runtime"
+	"strings"
 )
 
 // check that everything complies with interfaces
@@ -432,6 +433,21 @@ func (f *FileObjectResourceManager) PreparePermissions(a Asserter, p *string) *f
 	return &file.Permissions{Permission: &perm}
 }
 
+func (f *FileObjectResourceManager) CreateParents(a Asserter) {
+	if !f.Share.Exists() {
+		f.Share.Create(a, ContainerProperties{})
+	}
+
+	dir, _ := path.Split(strings.TrimSuffix(f.path, "/"))
+	if dir != "" {
+		obj := f.Share.GetObject(a, dir, common.EEntityType.Folder()).(*FileObjectResourceManager)
+		// Create recursively calls this function.
+		if !obj.Exists() {
+			obj.Create(a, nil, ObjectProperties{})
+		}
+	}
+}
+
 func (f *FileObjectResourceManager) Create(a Asserter, body ObjectContentContainer, props ObjectProperties) {
 	var attr *file.NTFSFileAttributes
 	if DerefOrZero(props.FileProperties.FileAttributes) != "" {
@@ -442,6 +458,8 @@ func (f *FileObjectResourceManager) Create(a Asserter, body ObjectContentContain
 
 	perms := f.PreparePermissions(a, props.FileProperties.FilePermissions)
 
+	f.CreateParents(a)
+
 	switch f.entityType {
 	case common.EEntityType.File():
 		client := f.getFileClient()
@@ -652,8 +670,10 @@ func (f *FileObjectResourceManager) Download(a Asserter) io.ReadSeeker {
 	a.NoError("Download stream", err)
 
 	buf := &bytes.Buffer{}
-	_, err = io.Copy(buf, resp.Body)
-	a.NoError("Read body", err)
+	if err == nil && resp.Body != nil {
+		_, err = io.Copy(buf, resp.Body)
+		a.NoError("Read body", err)
+	}
 
 	return bytes.NewReader(buf.Bytes())
 }
diff --git a/e2etest/newe2e_resource_managers_local.go b/e2etest/newe2e_resource_managers_local.go
index dc741f8d7..d5df46cd4 100644
--- a/e2etest/newe2e_resource_managers_local.go
+++ b/e2etest/newe2e_resource_managers_local.go
@@ -240,9 +240,19 @@ func (l *LocalObjectResourceManager) EntityType() common.EntityType {
 	return l.entityType
 }
 
+func (l *LocalObjectResourceManager) CreateParents(a Asserter) {
+	if l.container != nil {
+		l.container.Create(a, ContainerProperties{})
+	}
+
+	err := os.MkdirAll(filepath.Dir(l.getWorkingPath()), 0775)
+	a.NoError("mkdirall", err)
+}
+
 func (l *LocalObjectResourceManager) Create(a Asserter, body ObjectContentContainer, properties ObjectProperties) {
 	a.AssertNow("Object must be file to have content", Equal{})
 
+	l.CreateParents(a)
 	f, err := os.OpenFile(l.getWorkingPath(), os.O_CREATE|os.O_TRUNC|os.O_RDWR, 0774)
 	a.NoError("Open file", err)
 	defer func(f *os.File) {
diff --git a/e2etest/newe2e_sas_generation.go b/e2etest/newe2e_sas_generation.go
index ee3ed6109..340c7d106 100644
--- a/e2etest/newe2e_sas_generation.go
+++ b/e2etest/newe2e_sas_generation.go
@@ -5,6 +5,7 @@ import (
 	datalakesas "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/sas"
 	filesas "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/sas"
 	"github.com/Azure/azure-storage-azcopy/v10/common"
+	"strings"
 	"time"
 )
 
@@ -205,6 +206,8 @@ func (vals GenericAccountSignatureValues) AsBlob() BlobSignatureValues {
 func (vals GenericAccountSignatureValues) AsFile() FileSignatureValues {
 	s := vals.withDefaults()
 
+	s.Permissions = strings.ReplaceAll(s.Permissions, "a", "") // remove 'a', because it's invalid and causes panics.
+
 	return filesas.AccountSignatureValues{
 		Version:  s.Version,
 		Protocol: filesas.Protocol(s.Protocol),
diff --git a/e2etest/newe2e_scenario_variation_manager.go b/e2etest/newe2e_scenario_variation_manager.go
index 6ef1e0772..a7d8d6f1d 100644
--- a/e2etest/newe2e_scenario_variation_manager.go
+++ b/e2etest/newe2e_scenario_variation_manager.go
@@ -89,13 +89,22 @@ func (svm *ScenarioVariationManager) DeleteCreatedResources() {
 
 // Assertions
 
-func (svm *ScenarioVariationManager) NoError(comment string, err error) {
+func (svm *ScenarioVariationManager) NoError(comment string, err error, failNow ...bool) {
 	if svm.Dryrun() {
 		return
 	}
 	svm.t.Helper()
+	failFast := FirstOrZero(failNow)
 
-	svm.AssertNow(comment, IsNil{}, err)
+	//svm.AssertNow(comment, IsNil{}, err)
+	if err != nil {
+		svm.t.Logf("Error was not nil (%s): %v", comment, err)
+		svm.isInvalid = true // Flip the failed flag
+
+		if failFast {
+			svm.t.FailNow()
+		}
+	}
 }
 
 func (svm *ScenarioVariationManager) Assert(comment string, assertion Assertion, items ...any) {
@@ -121,8 +130,14 @@ func (svm *ScenarioVariationManager) AssertNow(comment string, assertion Asserti
 	}
 	svm.t.Helper()
 
-	svm.Assert(comment, assertion, items...)
-	if svm.Failed() {
+	if !assertion.Assert(items...) {
+		if fa, ok := assertion.(FormattedAssertion); ok {
+			svm.t.Logf("Assertion %s failed: %s (%s)", fa.Name(), fa.Format(items...), comment)
+		} else {
+			svm.t.Logf("Assertion %s failed with items %v (%s)", assertion.Name(), items, comment)
+		}
+
+		svm.isInvalid = true // We've now failed, so we flip the shared bad flag
 		svm.t.FailNow()
 	}
 }
@@ -164,6 +179,14 @@ func (svm *ScenarioVariationManager) Failed() bool {
 	return svm.isInvalid // This is actually technically safe during dryruns.
 }
 
+func (svm *ScenarioVariationManager) HelperMarker() HelperMarker {
+	if svm.t != nil {
+		return svm.t
+	}
+
+	return NilHelperMarker{}
+}
+
 // =========== Variation Handling ==========
 
 var variationExcludedCallers = map[string]bool{
@@ -300,6 +323,7 @@ func (svm *ScenarioVariationManager) Cleanup(cleanupFunc func(a ScenarioAsserter
 		return
 	}
 
+	//svm.CleanupFuncs = append(svm.CleanupFuncs, cleanupFunc)
 	svm.t.Cleanup(func() {
 		cleanupFunc(svm)
 	})
diff --git a/e2etest/newe2e_task_resourcemanagement.go b/e2etest/newe2e_task_resourcemanagement.go
index ffc79320e..02429704d 100644
--- a/e2etest/newe2e_task_resourcemanagement.go
+++ b/e2etest/newe2e_task_resourcemanagement.go
@@ -135,7 +135,7 @@ func ValidateResource[T ResourceManager](a Asserter, target T, definition Matche
 		objDef := definition.(ResourceDefinitionObject)
 
 		if !objDef.ShouldExist() {
-			a.AssertNow("object must not exist", Equal{}, objMan.Exists(), false)
+			a.Assert(fmt.Sprintf("object %s must not exist", objMan.ObjectName()), Equal{}, objMan.Exists(), false)
 			return
 		}
 
diff --git a/e2etest/zt_basic_copy_sync_remove_test.go b/e2etest/zt_basic_copy_sync_remove_test.go
index d8a57da0e..545296be9 100644
--- a/e2etest/zt_basic_copy_sync_remove_test.go
+++ b/e2etest/zt_basic_copy_sync_remove_test.go
@@ -846,7 +846,7 @@ func TestBasic_HashBasedSync_HashDir(t *testing.T) {
 				a.Error(fmt.Sprintf("Could not create hash adapter: %s", err))
 				return
 			}
-			a.Assert(hashAdapter.GetMode(), equals(), common.HashStorageMode(11)) // 1 is currently either XAttr or ADS; both are the intent of this test.
+			a.Assert(hashAdapter.GetMode(), equals(), common.EHashStorageMode.HiddenFiles())
 
 			hashData, err := hashAdapter.GetHashData("asdf.txt")
 			if err != nil || hashData == nil {
diff --git a/e2etest/zt_newe2e_sync_test.go b/e2etest/zt_newe2e_sync_test.go
new file mode 100644
index 000000000..66529de10
--- /dev/null
+++ b/e2etest/zt_newe2e_sync_test.go
@@ -0,0 +1,172 @@
+package e2etest
+
+import (
+	"encoding/base64"
+	"github.com/Azure/azure-storage-azcopy/v10/common"
+	"io/fs"
+	"os"
+	"path/filepath"
+	"runtime"
+	"time"
+)
+
+type SyncTestSuite struct{}
+
+func init() {
+	suiteManager.RegisterSuite(&SyncTestSuite{})
+}
+
+func (s *SyncTestSuite) Scenario_TestSyncHashStorageModes(a *ScenarioVariationManager) {
+	// First, pick out our hash storage mode.
+	// Mode "11" is always XAttr or AlternateDataStreams.
+	hashStorageMode := ResolveVariation(a, []common.HashStorageMode{
+		common.EHashStorageMode.HiddenFiles(), // OS-agnostic behavior
+		common.HashStorageMode(11),            // XAttr (linux; if available), ADS (windows; if available)
+	})
+
+	customDirVariation := "UseCustomDir"
+	useCustomLocalHashDir := "NoCustomDir"
+	if hashStorageMode == common.EHashStorageMode.HiddenFiles() { // Custom hash dir is only available on HiddenFiles
+		a.InsertVariationSeparator("_")
+		useCustomLocalHashDir = ResolveVariation(a, []string{customDirVariation, "NoCustomDir"})
+	}
+
+	a.InsertVariationSeparator("|")
+
+	// TODO: If you want to test XAttr support on Linux or Mac, remove me! ADO does not support XAttr!
+	if hashStorageMode == 11 && (runtime.GOOS != "windows") {
+		a.InvalidateScenario()
+		return
+	}
+
+	// A local source is required to use any hash storage mode.
+	source := NewLocalContainer(a)
+	dupeBodyPath := "underfolder/donottransfer" // A directory is used to validate that the hidden files cache creates *all* subdirectories.
+	dupeBody := NewRandomObjectContentContainer(a, 512)
+	resourceSpec := ResourceDefinitionContainer{
+		Objects: ObjectResourceMappingFlat{
+			"newobject":      ResourceDefinitionObject{Body: NewRandomObjectContentContainer(a, 512)},
+			"shouldtransfer": ResourceDefinitionObject{Body: NewRandomObjectContentContainer(a, 512)},
+			dupeBodyPath:     ResourceDefinitionObject{Body: dupeBody}, // note: at this moment, this is *not* a great test, because we lack plan file validation. todo WI#26418256
+		},
+	}
+	CreateResource[ContainerResourceManager](a, source, resourceSpec)
+
+	// We'll use Blob and Files as targets for the destination.
+	md5 := dupeBody.MD5()
+	dest := CreateResource[ContainerResourceManager](a,
+		GetRootResource(a, ResolveVariation(a, []common.Location{common.ELocation.Blob(), common.ELocation.File()})),
+		ResourceDefinitionContainer{
+			Objects: ObjectResourceMappingFlat{
+				// Object to overwrite
+				"shouldtransfer": ResourceDefinitionObject{Body: NewRandomObjectContentContainer(a, 512)},
+				// Object to avoid overwriting
+				dupeBodyPath: ResourceDefinitionObject{Body: dupeBody, ObjectProperties: ObjectProperties{HTTPHeaders: contentHeaders{contentMD5: md5[:]}}},
+			},
+		},
+	)
+
+	// Set local files' LMTs to a much later date than storage, to validate that transfers are decided by hash rather than LMT.
+	if !a.Dryrun() {
+		err := filepath.WalkDir(source.URI(), func(path string, d fs.DirEntry, err error) error {
+			err = os.Chtimes(path, time.Time{}, time.Now().Add(time.Hour*24))
+			return err
+		})
+
+		a.NoError("Tried to set times", err)
+	}
+
+	var customDir *string
+	if useCustomLocalHashDir == customDirVariation {
+		f := NewLocalContainer(a)
+		customDir = pointerTo(f.URI())
+
+		if !a.Dryrun() {
+			a.Cleanup(func(a ScenarioAsserter) {
+				// This directory should be created by AzCopy; it won't get tracked by the framework, because the framework never actually creates it.
+				f.Delete(a)
+			})
+		}
+	}
+
+	RunAzCopy(a, AzCopyCommand{
+		Verb:    AzCopyVerbSync,
+		Targets: []ResourceManager{source, dest},
+		Flags: SyncFlags{
+			CopySyncCommonFlags: CopySyncCommonFlags{
+				Recursive: pointerTo(true),
+			},
+			CompareHash:          PtrOf(common.ESyncHashType.MD5()),
+			LocalHashStorageMode: &hashStorageMode,
+			LocalHashDir:         customDir,
+		},
+	})
+
+	ValidateResource[ContainerResourceManager](a, dest, resourceSpec, true)
+
+	// Finally, validate that we're actually storing the hash correctly.
+	// For this, we'll only validate the single hash we expected to conflict, because we already have the hash data for that.
+	if a.Dryrun() {
+		return // Don't do this if we're dryrunning, since we can't validate this at this time.
+	}
+
+	if customDir != nil {
+		_, err := os.Stat(*customDir)
+		a.NoError("AzCopy must create the hash directory", err)
+	}
+
+	adapter, err := common.NewHashDataAdapter(DerefOrZero(customDir), source.URI(), hashStorageMode)
+	a.NoError("create hash storage adapter", err)
+	a.Assert("create hash storage adapter with correct mode", Equal{}, adapter.GetMode(), hashStorageMode)
+
+	data, err := adapter.GetHashData(dupeBodyPath)
+	a.NoError("Poll hash data", err)
+	a.Assert("Data must not be nil", Not{IsNil{}}, data)
+	a.Assert("Data must match target hash mode", Equal{}, data.Mode, common.ESyncHashType.MD5()) // for now, we only have MD5. In the future, CRC64 may be available.
+
+	fi, err := os.Stat(filepath.Join(source.URI(), dupeBodyPath))
+	a.NoError("Stat file at source", err)
+	a.Assert("LMTs must match between hash data and file", Equal{}, data.LMT.Equal(fi.ModTime()), true)
+
+	a.Assert("hashes must match", Equal{}, data.Data, base64.StdEncoding.EncodeToString(md5[:]))
+}
+
+func (s *SyncTestSuite) Scenario_TestSyncRemoveDestination(svm *ScenarioVariationManager) {
+	srcLoc := ResolveVariation(svm, []common.Location{common.ELocation.Local(), common.ELocation.Blob(), common.ELocation.File(), common.ELocation.BlobFS()})
+	dstLoc := ResolveVariation(svm, []common.Location{common.ELocation.Local(), common.ELocation.Blob(), common.ELocation.File(), common.ELocation.BlobFS()})
+
+	if srcLoc == common.ELocation.Local() && srcLoc == dstLoc {
+		svm.InvalidateScenario()
+		return
+	}
+
+	srcRes := CreateResource[ContainerResourceManager](svm, GetRootResource(svm, srcLoc, GetResourceOptions{
+		PreferredAccount: common.Iff(srcLoc == common.ELocation.BlobFS(), pointerTo(PrimaryHNSAcct), nil),
+	}), ResourceDefinitionContainer{})
+	dstRes := CreateResource[ContainerResourceManager](svm, GetRootResource(svm, dstLoc, GetResourceOptions{
+		PreferredAccount: common.Iff(dstLoc == common.ELocation.BlobFS(), pointerTo(PrimaryHNSAcct), nil),
+	}), ResourceDefinitionContainer{
+		Objects: ObjectResourceMappingFlat{
+			"deleteme.txt":      ResourceDefinitionObject{Body: NewRandomObjectContentContainer(svm, 512)},
+			"also/deleteme.txt": ResourceDefinitionObject{Body: NewRandomObjectContentContainer(svm, 512)},
+		},
+	})
+
+	RunAzCopy(svm, AzCopyCommand{
+		Verb:    AzCopyVerbSync,
+		Targets: []ResourceManager{srcRes, dstRes},
+		Flags: SyncFlags{
+			CopySyncCommonFlags: CopySyncCommonFlags{
+				Recursive: pointerTo(true),
+			},
+			DeleteDestination: pointerTo(true),
+		},
+	})
+
+	ValidateResource[ContainerResourceManager](svm, dstRes, ResourceDefinitionContainer{
+		Objects: ObjectResourceMappingFlat{
+			"deleteme.txt":      ResourceDefinitionObject{ObjectShouldExist: pointerTo(false)},
+			"also/deleteme.txt": ResourceDefinitionObject{ObjectShouldExist: pointerTo(false)},
+		},
+	}, false)
+}
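
A minimal standalone sketch (not part of the patch) of why the repeated change from path.Join(b.rootPath + object.relativePath) to path.Join(b.rootPath, object.relativePath) in cmd/syncProcessor.go matters; root and rel are hypothetical stand-ins for b.rootPath and object.relativePath:

	package main

	import (
		"fmt"
		"path"
	)

	func main() {
		root, rel := "rootdir", "sub/obj.txt" // hypothetical stand-ins

		// Concatenating first hands path.Join a single segment, so no
		// separator is inserted between the two parts:
		fmt.Println(path.Join(root + rel)) // "rootdirsub/obj.txt" -- wrong object

		// Separate arguments are joined with "/" and the result is cleaned:
		fmt.Println(path.Join(root, rel)) // "rootdir/sub/obj.txt"
	}

The concatenated form only yields the right name when the root path is empty or already ends in a separator, which is presumably why the bug went unnoticed.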