Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
93 commits
Select commit Hold shift + click to select a range
af35d88
hardlink skip support for NFS
dphulkar-msft Jan 6, 2026
f4034aa
hardlink skip support for NFS
dphulkar-msft Jan 6, 2026
b2b40f9
hardlink skip support for NFS
dphulkar-msft Jan 7, 2026
b0b48f3
hardlink skip support for NFS
dphulkar-msft Jan 7, 2026
7aa9dcf
hardlink skip support for NFS
dphulkar-msft Jan 8, 2026
c10059a
hardlink skip support for NFS
dphulkar-msft Jan 8, 2026
3b2aec7
hardlink skip support for NFS
dphulkar-msft Jan 13, 2026
5ec8ad1
hardlink skip support for NFS
dphulkar-msft Jan 13, 2026
7e301cf
hardlink skip support for NFS
dphulkar-msft Jan 13, 2026
375d159
fixing test case
dphulkar-msft Jan 13, 2026
04499eb
fixing test case
dphulkar-msft Jan 14, 2026
0eabde3
fixing test case
dphulkar-msft Jan 14, 2026
d56d23d
fixing test case
dphulkar-msft Jan 14, 2026
4304cc0
fixing test case
dphulkar-msft Jan 14, 2026
a330784
Merge branch 'dphulkar/NFSHardlinkSkipSupport' of https://github.com/…
dphulkar-msft Jan 20, 2026
610bfa5
Hardlink preserve support for local to file NFS
dphulkar-msft Jan 21, 2026
1169ddf
hardlink preserve support for local to nfs
dphulkar-msft Jan 22, 2026
a303aee
hardlink preserve support for local to nfs
dphulkar-msft Jan 24, 2026
5e4a2d9
hardlink support for File NFS to local
dphulkar-msft Jan 26, 2026
047923e
Merge branch 'dphulkar/NFSOverRESTSupport' of https://github.com/Azur…
dphulkar-msft Jan 26, 2026
7436ae3
hardlink support for local to filenfs
dphulkar-msft Jan 28, 2026
9e7da8c
Merge branch 'dphulkar/NFSOverRESTSupport' of https://github.com/Azur…
dphulkar-msft Jan 28, 2026
1997557
hardlink support for local to nfs
dphulkar-msft Jan 28, 2026
bb4284b
Merge branch 'dphulkar/hardlinkDesignPOC' of https://github.com/Azure…
dphulkar-msft Jan 29, 2026
493f1c7
hardlink support for nfs to local
dphulkar-msft Feb 2, 2026
964747f
hardlink support for nfs to local
dphulkar-msft Feb 3, 2026
69bd8e8
hardlink support for nfs to local
dphulkar-msft Feb 3, 2026
314ecd5
hardlink support for nfs to nfs in progress
dphulkar-msft Feb 3, 2026
bbc83fc
Merge branch 'dphulkar/NFSOverRESTSupport' of https://github.com/Azur…
dphulkar-msft Feb 4, 2026
6b17d71
added E2E tests
dphulkar-msft Feb 5, 2026
fcc1826
added E2E tests
dphulkar-msft Feb 5, 2026
3cc85bc
incorporated review comments
dphulkar-msft Feb 9, 2026
c72dad8
Merge branch 'dphulkar/NFSOverRESTSupport' of https://github.com/Azur…
dphulkar-msft Feb 9, 2026
e04a654
incorporated review comments
dphulkar-msft Feb 9, 2026
94185c1
bug fix
dphulkar-msft Feb 10, 2026
8384620
nfs tests update
dphulkar-msft Feb 10, 2026
d0c9431
Merge branch 'dphulkar/NFSOverRESTSupport' of https://github.com/Azur…
dphulkar-msft Feb 10, 2026
7a967e8
hardlink sync support
dphulkar-msft Feb 17, 2026
751d1bf
hardlink sync support
dphulkar-msft Feb 19, 2026
d6d7159
panic fix
dphulkar-msft Feb 20, 2026
b44f27c
hardlink sync support for local to NFS
dphulkar-msft Feb 24, 2026
40810ce
hardlink sync support for local to NFS
dphulkar-msft Feb 24, 2026
3ed65af
hardlink sync support for download and S2S
dphulkar-msft Feb 24, 2026
36f98a4
added E2E tests for sync hardlink preserve scenarios
dphulkar-msft Feb 24, 2026
9a915ca
added E2E tests for sync hardlink preserve scenarios
dphulkar-msft Feb 24, 2026
704985a
inode architecture
dphulkar-msft Feb 24, 2026
76608b7
fixing test cases
dphulkar-msft Feb 25, 2026
319be96
fixing test cases
dphulkar-msft Feb 27, 2026
1066697
fixing code
dphulkar-msft Mar 2, 2026
d818301
fixed issue with target hardlink path
dphulkar-msft Mar 2, 2026
f44579f
fixed issue with target hardlink path
dphulkar-msft Mar 7, 2026
46a4afe
Merge branch 'dphulkar/NFSOverRESTSupport' of https://github.com/Azur…
dphulkar-msft Mar 7, 2026
46f9579
handled edge case scenarios for upload hardlink sync
dphulkar-msft Mar 10, 2026
8580d4d
handled edge case scenarios for upload hardlink sync
dphulkar-msft Mar 10, 2026
942cfa4
handled edge case scenarios for upload hardlink sync
dphulkar-msft Mar 10, 2026
7b3f3d2
fixed test cases
dphulkar-msft Mar 12, 2026
8ae5395
Merge branch 'dphulkar/hardlinkSync' of https://github.com/Azure/azur…
dphulkar-msft Mar 12, 2026
0693c5a
add hardlink preserve support for download and S2S
dphulkar-msft Mar 13, 2026
4988731
fixed test cases
dphulkar-msft Mar 13, 2026
5de8971
fixed test cases
dphulkar-msft Mar 13, 2026
8609fa9
fixed test cases
dphulkar-msft Mar 16, 2026
7904934
Potential fix for pull request finding
dphulkar-msft Mar 16, 2026
fda4f5f
Potential fix for pull request finding
dphulkar-msft Mar 16, 2026
0ffb9bb
Potential fix for pull request finding
dphulkar-msft Mar 16, 2026
2954484
fixed comments and added UTs
dphulkar-msft Mar 16, 2026
2818fd3
Merge branch 'dphulkar/hardlinkSync' of https://github.com/Azure/azur…
dphulkar-msft Mar 16, 2026
6865929
Potential fix for pull request finding
dphulkar-msft Mar 16, 2026
9e7d400
Potential fix for pull request finding
dphulkar-msft Mar 16, 2026
c51ddcd
Potential fix for pull request finding
dphulkar-msft Mar 16, 2026
c02e39b
Potential fix for pull request finding
dphulkar-msft Mar 16, 2026
7a232b5
Potential fix for pull request finding
dphulkar-msft Mar 16, 2026
cc7aeda
Potential fix for pull request finding
dphulkar-msft Mar 16, 2026
012d386
Potential fix for pull request finding
dphulkar-msft Mar 16, 2026
110f26e
fixed comments
dphulkar-msft Mar 16, 2026
56733f9
Merge branch 'dphulkar/hardlinkSync' of https://github.com/Azure/azur…
dphulkar-msft Mar 16, 2026
29ff83e
fixed TCs
dphulkar-msft Mar 16, 2026
f4f8ce5
Merge branch 'dphulkar/hardlinkSync' of https://github.com/Azure/azur…
dphulkar-msft Mar 16, 2026
e1b7566
resolved conflicts
dphulkar-msft Mar 16, 2026
359442d
Potential fix for pull request finding
dphulkar-msft Mar 16, 2026
a935c28
Potential fix for pull request finding
dphulkar-msft Mar 16, 2026
48198c3
fixed Tcs
dphulkar-msft Mar 17, 2026
48f8ed9
Merge branch 'dphulkar/hardlinkSyncDwldAndS2S' of https://github.com/…
dphulkar-msft Mar 17, 2026
b42e8a4
build fix
dphulkar-msft Mar 17, 2026
9408430
resolved review comments
dphulkar-msft Mar 24, 2026
bbe88b7
Merge branch 'dphulkar/hardlinkSync' of https://github.com/Azure/azur…
dphulkar-msft Mar 25, 2026
e92b9d0
Merge branch 'dphulkar/NFSOverRESTSupport' of https://github.com/Azur…
dphulkar-msft Mar 25, 2026
cbad3b1
Merge branch 'dphulkar/NFSOverRESTSupport' of https://github.com/Azur…
dphulkar-msft Mar 25, 2026
ab119d8
Merge branch 'dphulkar/hardlinkSync' of https://github.com/Azure/azur…
dphulkar-msft Mar 26, 2026
643aa0a
fixed map concurrent access issue
dphulkar-msft Mar 26, 2026
fcb361e
Merge branch 'dphulkar/hardlinkSync' of https://github.com/Azure/azur…
dphulkar-msft Mar 27, 2026
f5b97ab
Merge branch 'dphulkar/NFSOverRESTSupport' of https://github.com/Azur…
dphulkar-msft Mar 30, 2026
9907bcd
fixed build issue
dphulkar-msft Mar 30, 2026
30fae77
fixed merge conflicts
dphulkar-msft Mar 30, 2026
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
277 changes: 265 additions & 12 deletions azcopy/syncComparator.go
Original file line number Diff line number Diff line change
Expand Up @@ -432,17 +432,38 @@ type syncSourceComparator struct {
// the processor responsible for scheduling copy transfers
copyTransferScheduler traverser.ObjectProcessor

// the processor responsible for deleting extra destination objects
destinationCleaner traverser.ObjectProcessor

// storing the destination objects
destinationIndex *traverser.ObjectIndexer

comparisonHashType common.SyncHashType

preferSMBTime bool
disableComparison bool
preferSMBTime bool
disableComparison bool
srcPendingHardlinkObjects traverser.ObjectIndexer

// dstPathToInode is a snapshot of the destination index built on the first call
// to ProcessIfNecessary (before any deletions). It maps each destination path →
// its inode ID and is used in ProcessPendingHardlinks to reason about whether
// the source anchor is still present in the destination group.
dstPathToInode map[string]string

inodeStore *common.InodeStore
}

func NewSyncSourceComparator(i *traverser.ObjectIndexer, copyScheduler traverser.ObjectProcessor, comparisonHashType common.SyncHashType, preferSMBTime, disableComparison bool) *syncSourceComparator {
return &syncSourceComparator{destinationIndex: i, copyTransferScheduler: copyScheduler, preferSMBTime: preferSMBTime, disableComparison: disableComparison, comparisonHashType: comparisonHashType}
// NewSyncSourceComparator builds a syncSourceComparator wired to the given
// destination index, copy scheduler, destination cleaner and inode store.
// The pending-hardlink index starts out empty; dstPathToInode is populated
// lazily on the first call to ProcessIfNecessary.
func NewSyncSourceComparator(i *traverser.ObjectIndexer, copyScheduler, cleaner traverser.ObjectProcessor, comparisonHashType common.SyncHashType, preferSMBTime, disableComparison bool, inodeStore *common.InodeStore) *syncSourceComparator {
	pendingHardlinks := traverser.ObjectIndexer{IndexMap: make(map[string]traverser.StoredObject)}
	comparator := &syncSourceComparator{
		destinationIndex:          i,
		copyTransferScheduler:     copyScheduler,
		destinationCleaner:        cleaner,
		comparisonHashType:        comparisonHashType,
		preferSMBTime:             preferSMBTime,
		disableComparison:         disableComparison,
		srcPendingHardlinkObjects: pendingHardlinks,
		inodeStore:                inodeStore,
	}
	return comparator
}

// it will only transfer source items that are:
Expand All @@ -452,13 +473,25 @@ func NewSyncSourceComparator(i *traverser.ObjectIndexer, copyScheduler traverser
// note: we remove the StoredObject if it is present so that when we have finished
// the index will contain all objects which exist at the destination but were NOT seen at the source
func (f *syncSourceComparator) ProcessIfNecessary(sourceObject traverser.StoredObject) error {
relPath := sourceObject.RelativePath
// Lazy-init: snapshot the destination inode groups the first time we are
// called, before any deletions from ProcessIfNecessary remove entries.
if f.dstPathToInode == nil {
f.dstPathToInode = buildSrcPathToInode(f.destinationIndex.IndexMap)
}

relPath := sourceObject.RelativePath
if f.destinationIndex.IsDestinationCaseInsensitive {
relPath = strings.ToLower(relPath)
}
destinationObjectInMap, present := f.destinationIndex.IndexMap[relPath]

if sourceObject.EntityType == common.EEntityType.Hardlink() {
// Defer hardlinks — we need the complete picture of source inode groups
// before deciding whether any dest links need to be recreated.
f.srcPendingHardlinkObjects.IndexMap[relPath] = sourceObject
return nil
}

if present {
defer delete(f.destinationIndex.IndexMap, relPath)

Expand All @@ -468,20 +501,26 @@ func (f *syncSourceComparator) ProcessIfNecessary(sourceObject traverser.StoredO
return f.copyTransferScheduler(sourceObject)
}

// Entity-type mismatch: destination is a hardlink but source is a regular
// file/folder/symlink. Delete the stale link and re-upload as the new
// entity type.
if destinationObjectInMap.EntityType == common.EEntityType.Hardlink() {
syncComparatorLog(sourceObject.RelativePath, syncStatusOverwritten, syncEntityTypeMismatch, false)
_ = f.destinationCleaner(destinationObjectInMap)
return f.copyTransferScheduler(sourceObject)
}

if f.comparisonHashType != common.ESyncHashType.None() && sourceObject.EntityType == common.EEntityType.File() {
switch f.comparisonHashType {
case common.ESyncHashType.MD5():
if sourceObject.Md5 == nil {
if sourceObject.IsMoreRecentThan(destinationObjectInMap, f.preferSMBTime) {
syncComparatorLog(sourceObject.RelativePath, syncStatusOverwritten, syncOverwriteReasonNewerLMTAndMissingHash, false)
return f.copyTransferScheduler(sourceObject)
} else {
// skip if dest is more recent
syncComparatorLog(sourceObject.RelativePath, syncStatusSkipped, syncSkipReasonTimeAndMissingHash, false)
return nil
}
syncComparatorLog(sourceObject.RelativePath, syncStatusSkipped, syncSkipReasonTimeAndMissingHash, false)
return nil
}

if !reflect.DeepEqual(sourceObject.Md5, destinationObjectInMap.Md5) {
// hash inequality = source "newer" in this model.
syncComparatorLog(sourceObject.RelativePath, syncStatusOverwritten, syncOverwriteReasonNewerHash, false)
Expand All @@ -490,7 +529,6 @@ func (f *syncSourceComparator) ProcessIfNecessary(sourceObject traverser.StoredO
default:
panic("sanity check: unsupported hash type " + f.comparisonHashType.String())
}

syncComparatorLog(sourceObject.RelativePath, syncStatusSkipped, syncSkipReasonSameHash, false)
return nil
} else if sourceObject.IsMoreRecentThan(destinationObjectInMap, f.preferSMBTime) {
Expand All @@ -504,6 +542,221 @@ func (f *syncSourceComparator) ProcessIfNecessary(sourceObject traverser.StoredO
return nil
}

// if source does not exist at the destination, then schedule it for transfer
// Source path does not exist at destination schedule transfer.
return f.copyTransferScheduler(sourceObject)
}

// ProcessPendingHardlinks resolves every hardlink source object that
// ProcessIfNecessary deferred. It first builds flat lookup tables describing
// how source inode groups map onto destination inode groups, then for each
// pending link decides whether to transfer it as new, recreate it (delete +
// re-transfer), or skip it because the hardlink relationship is intact.
// Returns the first scheduler/cleaner/inode-store error encountered.
func (f *syncSourceComparator) ProcessPendingHardlinks() error {

	// Build two flat lookup tables to detect structural mismatches between
	// source and destination inode groups (merge / split detection).
	//
	// srcInodeIsMultiGroup: src inode → true when its members span >1 dest inode
	//   (group merge: multiple dest groups must be unified)
	// destGroupIsMultiSource: dest inode → true when its members map to >1 src inode
	//   (group split: one dest group must be broken apart)
	srcInodeFirstDest := make(map[string]string)
	srcInodeIsMultiGroup := make(map[string]bool)
	destInodeFirstSrc := make(map[string]string)
	destGroupIsMultiSource := make(map[string]bool)

	for _, obj := range f.srcPendingHardlinkObjects.IndexMap {
		if obj.Inode == "" {
			continue
		}
		lookupPath := obj.RelativePath
		if f.destinationIndex.IsDestinationCaseInsensitive {
			lookupPath = strings.ToLower(lookupPath)
		}
		destInode := f.dstPathToInode[lookupPath]
		if destInode == "" {
			continue // not present in destination; will be transferred below
		}
		if first, seen := srcInodeFirstDest[obj.Inode]; !seen {
			srcInodeFirstDest[obj.Inode] = destInode
		} else if first != destInode {
			srcInodeIsMultiGroup[obj.Inode] = true
		}
		if first, seen := destInodeFirstSrc[destInode]; !seen {
			destInodeFirstSrc[destInode] = obj.Inode
		} else if first != obj.Inode {
			destGroupIsMultiSource[destInode] = true
		}
	}

	for _, sourceObject := range f.srcPendingHardlinkObjects.IndexMap {

		dstKey := sourceObject.RelativePath
		destinationObjectInMap, present := f.destinationIndex.IndexMap[dstKey]
		if !present && f.destinationIndex.IsDestinationCaseInsensitive {
			dstKey = strings.ToLower(sourceObject.RelativePath)
			destinationObjectInMap, present = f.destinationIndex.IndexMap[dstKey]
		}

		if !present {
			// Path does not exist at destination — transfer as new.
			if err := f.copyTransferScheduler(sourceObject); err != nil {
				return err
			}
			continue
		}

		// Remove from destination index so indexer.Traverse won't re-process it.
		delete(f.destinationIndex.IndexMap, dstKey)

		// Entity-type mismatch: dest is a plain file/folder/symlink but source is a
		// hardlink. Mirror the same logic syncDestinationComparator.ProcessIfNecessary
		// applies when src is a Hardlink and dest is a File: delete the stale object
		// at the destination and re-download as a hardlink.
		if destinationObjectInMap.EntityType != common.EEntityType.Hardlink() {
			syncComparatorLog(sourceObject.RelativePath, syncStatusOverwritten, syncEntityTypeMismatch, false)
			_ = f.destinationCleaner(destinationObjectInMap)
			if err := f.copyTransferScheduler(sourceObject); err != nil {
				return err
			}
			continue
		}

		if f.inodeStore == nil {
			return fmt.Errorf("inodeStore is nil while processing pending hardlinks")
		}

		var srcAnchorFile string
		if sourceObject.Inode != "" {
			var err error
			srcAnchorFile, err = f.inodeStore.GetAnchor(sourceObject.Inode)
			if err != nil {
				return err
			}
		}
		// GetAnchor returns "" when the dest object is not in the InodeStore
		// (e.g. it is a regular file), which naturally triggers the entity-type
		// mismatch path below.
		var dstAnchorFile string
		if destinationObjectInMap.Inode != "" {
			var err error
			dstAnchorFile, err = f.inodeStore.GetAnchor(destinationObjectInMap.Inode)
			if err != nil {
				return err
			}
		}

		// groupIntact: the src inode group maps 1:1 onto a single dest inode group.
		groupIntact := !srcInodeIsMultiGroup[sourceObject.Inode] &&
			!destGroupIsMultiSource[destinationObjectInMap.Inode]

		// srcAnchorInDst: the dest inode of the source anchor, or "" if the source
		// anchor does not exist at the destination.
		//
		// dstPathToInode is keyed by destinationIndex.IndexMap keys, which are
		// lowercased when the destination is case-insensitive (see the other
		// lookups in this file), so normalize the anchor path the same way —
		// otherwise a case-insensitive destination would spuriously treat the
		// anchor as absent and mis-evaluate needsRecreate.
		// NOTE(review): assumes GetAnchor returns the anchor's relative path in
		// source casing — confirm against InodeStore.
		anchorLookupPath := srcAnchorFile
		if f.destinationIndex.IsDestinationCaseInsensitive {
			anchorLookupPath = strings.ToLower(anchorLookupPath)
		}
		srcAnchorInDst := f.dstPathToInode[anchorLookupPath]
		anchorChanged := srcAnchorFile != dstAnchorFile

		// Entity-type change: source became a regular file. Delete the stale link
		// and re-upload.
		if srcAnchorFile == "" {
			_ = f.destinationCleaner(destinationObjectInMap)
			if err := f.copyTransferScheduler(sourceObject); err != nil {
				return err
			}
			continue
		}

		// needsRecreate: the hardlink target must change at the destination.
		// True only when the anchor change is substantive:
		//   (a) the source anchor exists in dest but in a different dest inode group
		//       (real retarget), OR
		//   (b)/(c) the group is merging or splitting (!groupIntact).
		needsRecreate := anchorChanged &&
			((srcAnchorInDst != "" && srcAnchorInDst != destinationObjectInMap.Inode) || !groupIntact)

		if needsRecreate {
			syncComparatorLog(sourceObject.RelativePath, syncStatusOverwritten, syncHardlinkTargetMismatch, false)
			_ = f.destinationCleaner(destinationObjectInMap)
			if err := f.copyTransferScheduler(sourceObject); err != nil {
				return err
			}
			continue
		}

		// Structure is intact. Non-anchor files carry no independent content;
		// only the anchor needs a content check.
		//
		// Use the InodeStore lex-smallest anchor (srcAnchorFile) rather than the
		// firstSeen-anchor flag (TargetHardlinkFile=="") to identify the anchor.
		// NFS directory listings are NOT guaranteed alphabetical, so the firstSeen
		// anchor may differ from the lex anchor. When firstSeen≠lex, the firstSeen
		// anchor can hit the needsRecreate path above, while the true lex anchor
		// has TargetHardlinkFile!="" and would be incorrectly skipped.
		if srcAnchorFile != sourceObject.RelativePath {
			syncComparatorLog(sourceObject.RelativePath, syncStatusSkipped, syncSkipReasonHardlinkRelationshipIntact, false)
			continue
		}

		// Anchor content check.
		//
		// When the group is restructuring (!groupIntact), force-transfer the anchor
		// so all relinked files at the destination carry the correct data.
		if !groupIntact || f.disableComparison {
			reason := syncOverwriteReasonGroupStructureChanged
			if f.disableComparison {
				reason = syncOverwriteReasonNewerHash
			}
			syncComparatorLog(sourceObject.RelativePath, syncStatusOverwritten, reason, false)
			if err := f.copyTransferScheduler(sourceObject); err != nil {
				return err
			}
			continue
		}

		// Hash comparison (when available).
		if f.comparisonHashType != common.ESyncHashType.None() {
			switch f.comparisonHashType {
			case common.ESyncHashType.MD5():
				if sourceObject.Md5 == nil {
					if sourceObject.IsMoreRecentThan(destinationObjectInMap, f.preferSMBTime) {
						syncComparatorLog(sourceObject.RelativePath, syncStatusOverwritten, syncOverwriteReasonNewerLMTAndMissingHash, false)
						if err := f.copyTransferScheduler(sourceObject); err != nil {
							return err
						}
					} else {
						syncComparatorLog(sourceObject.RelativePath, syncStatusSkipped, syncSkipReasonTimeAndMissingHash, false)
					}
				} else if !reflect.DeepEqual(sourceObject.Md5, destinationObjectInMap.Md5) {
					syncComparatorLog(sourceObject.RelativePath, syncStatusOverwritten, syncOverwriteReasonNewerHash, false)
					if err := f.copyTransferScheduler(sourceObject); err != nil {
						return err
					}
				} else {
					syncComparatorLog(sourceObject.RelativePath, syncStatusSkipped, syncSkipReasonSameHash, false)
				}
			default:
				panic("sanity check: unsupported hash type " + f.comparisonHashType.String())
			}
			continue
		}

		// Size mismatch: reliable hash-free content signal.
		if sourceObject.Size != destinationObjectInMap.Size {
			syncComparatorLog(sourceObject.RelativePath, syncStatusOverwritten, syncOverwriteReasonSizeMismatch, false)
			if err := f.copyTransferScheduler(sourceObject); err != nil {
				return err
			}
			continue
		}

		// LMT check — skipped for nominal anchor renames to avoid spurious transfers
		// caused by FILETIME precision loss (dest anchor NFS write-time was set from
		// the OLD anchor's mtime, not the new anchor's).
		anchorNominallyChanged := anchorChanged // needsRecreate=false is implied here
		if !anchorNominallyChanged && sourceObject.IsMoreRecentThan(destinationObjectInMap, f.preferSMBTime) {
			syncComparatorLog(sourceObject.RelativePath, syncStatusOverwritten, syncOverwriteReasonNewerLMT, false)
			if err := f.copyTransferScheduler(sourceObject); err != nil {
				return err
			}
			continue
		}

		// Content matches (or nominal anchor rename with matching size/hash): skip.
		syncComparatorLog(sourceObject.RelativePath, syncStatusSkipped, syncSkipReasonHardlinkRelationshipIntact, false)
	}
	return nil
}
9 changes: 8 additions & 1 deletion azcopy/syncEnumerator.go
Original file line number Diff line number Diff line change
Expand Up @@ -293,9 +293,16 @@ func (s *syncer) initEnumerator(ctx context.Context, logLevel common.LogLevel, m
indexer.IsDestinationCaseInsensitive = isDestinationCaseInsensitive(s.opts.fromTo)
// in all other cases (download and S2S), the destination is scanned/indexed first
// then the source is scanned and filtered based on what the destination contains
comparator = NewSyncSourceComparator(indexer, transferScheduler.ScheduleSyncRemoveSetPropertiesTransfer, s.opts.compareHash, s.opts.preserveInfo, s.opts.mirrorMode).ProcessIfNecessary
comparatorInstance := NewSyncSourceComparator(indexer, transferScheduler.ScheduleSyncRemoveSetPropertiesTransfer, deleteScheduler, s.opts.compareHash, s.opts.preserveInfo, s.opts.mirrorMode, s.inodeStore)
comparator = comparatorInstance.ProcessIfNecessary

finalize = func() error {

err = comparatorInstance.ProcessPendingHardlinks()
if err != nil {
return err
}

err = indexer.Traverse(deleteScheduler, nil)
if err != nil {
return err
Expand Down
2 changes: 1 addition & 1 deletion azcopy/syncProcessor.go
Original file line number Diff line number Diff line change
Expand Up @@ -168,7 +168,7 @@ func (l localFileDeleter) Delete(rootPath string, _ common.Location, object trav
objectURI := l.getObjectURL(object)
l.folderManager.RecordChildExists(objectURI)

if object.EntityType == common.EEntityType.File() {
if object.EntityType == common.EEntityType.File() || object.EntityType == common.EEntityType.Hardlink() {
msg := "Deleting extra file: " + object.RelativePath
common.GetLifecycleMgr().Info(msg)
if common.AzcopyScanningLogger != nil {
Expand Down
6 changes: 4 additions & 2 deletions cmd/zt_sync_file_file_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -33,12 +33,13 @@ import (
func TestSyncSourceComparator(t *testing.T) {
a := assert.New(t)
dummyCopyScheduler := dummyProcessor{}
dummyCleaner := dummyProcessor{}
srcMD5 := []byte{'s'}
destMD5 := []byte{'d'}

// set up the indexer as well as the source comparator
indexer := traverser.NewObjectIndexer()
sourceComparator := azcopy.NewSyncSourceComparator(indexer, dummyCopyScheduler.process, common.ESyncHashType.None(), false, false)
sourceComparator := azcopy.NewSyncSourceComparator(indexer, dummyCopyScheduler.process, dummyCleaner.process, common.ESyncHashType.None(), false, false, nil)

// create a sample destination object
sampleDestinationObject := traverser.StoredObject{Name: "test", RelativePath: "/usr/test", LastModifiedTime: time.Now(), Md5: destMD5}
Expand Down Expand Up @@ -87,12 +88,13 @@ func TestSyncSourceComparator(t *testing.T) {
func TestSyncSrcCompDisableComparator(t *testing.T) {
a := assert.New(t)
dummyCopyScheduler := dummyProcessor{}
dummyCleaner := dummyProcessor{}
srcMD5 := []byte{'s'}
destMD5 := []byte{'d'}

// set up the indexer as well as the source comparator
indexer := traverser.NewObjectIndexer()
sourceComparator := azcopy.NewSyncSourceComparator(indexer, dummyCopyScheduler.process, common.ESyncHashType.None(), false, true)
sourceComparator := azcopy.NewSyncSourceComparator(indexer, dummyCopyScheduler.process, dummyCleaner.process, common.ESyncHashType.None(), false, true, nil)

// test the comparator in case a given source object is not present at the destination
// meaning no entry in the index, so the comparator should pass the given object to schedule a transfer
Expand Down
Loading
Loading