-
Notifications
You must be signed in to change notification settings - Fork 4
/
chunker_gpt.go
129 lines (107 loc) · 3.38 KB
/
chunker_gpt.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
package fsdup
import (
"io"
)
// GPT on-disk layout constants. Offsets are in bytes unless noted; the
// "relative" entry-field offsets are relative to the start of a single
// partition-table entry.
const (
// "EFI PART" signature expected in the GPT header.
// NOTE(review): gptSignatureMagic/gptSignatureOffset are not referenced by
// the code visible in this file — presumably used by a probe elsewhere; verify.
gptSignatureMagic = "EFI PART"
gptSignatureOffset = 512
// The GPT header occupies the sector at LBA 1 (byte offset 512 for
// 512-byte sectors) and is read as one 512-byte block below.
gptHeaderOffset = 512
gptHeaderLength = 512
// Sector size assumed when converting LBA values to byte offsets.
gptLogicalSectorSize = 512
// Header field: starting LBA of the partition entry array (8-byte LE).
gptFirstEntrySectorOffset = 72
gptFirstEntrySectorLength = 8
// Header field: number of partition entries (4-byte LE).
gptEntryCountOffset = 80
gptEntryCountLength = 4
// Header field: size in bytes of a single partition entry (4-byte LE).
gptEntrySizeOffset = 84
gptEntrySizeLength = 4
// Entry field: first LBA of the partition (8-byte LE), relative to the
// start of the entry.
gptEntryFirstSectorRelativeOffset = 32
gptEntryFirstSectorRelativeLength = 8
)
// gptDiskChunker dedups a GPT-partitioned disk image: NTFS partitions are
// indexed with the NTFS-aware chunker, and everything else falls back to
// fixed-size chunking (see Dedup).
type gptDiskChunker struct {
reader io.ReaderAt // source disk image
store ChunkStore // destination chunk store
start int64 // byte offset of the GPT disk within the reader
size int64 // total size in bytes of the region to dedup
exact bool // passed through to the NTFS chunker
noFile bool // passed through to the NTFS chunker
minSize int64 // minimum chunk size, passed to the NTFS chunker
chunkMaxSize int64 // maximum chunk size; also sizes the manifest
writeConcurrency int64 // number of concurrent chunk writes
manifest *manifest // accumulates chunk mappings from all passes
}
// NewGptDiskChunker creates a chunker for a GPT-partitioned disk that starts
// at the given byte offset within reader and spans size bytes. The exact,
// noFile and minSize options are forwarded to the per-partition NTFS chunker;
// chunkMaxSize also determines the manifest's chunk granularity.
func NewGptDiskChunker(reader io.ReaderAt, store ChunkStore, offset int64, size int64, exact bool, noFile bool,
	minSize int64, chunkMaxSize int64, writeConcurrency int64) *gptDiskChunker {

	chunker := &gptDiskChunker{
		reader:           reader,
		store:            store,
		start:            offset,
		size:             size,
		exact:            exact,
		noFile:           noFile,
		minSize:          minSize,
		chunkMaxSize:     chunkMaxSize,
		writeConcurrency: writeConcurrency,
		manifest:         NewManifest(chunkMaxSize),
	}

	return chunker
}
// Dedup indexes the whole disk and returns the resulting manifest: first the
// NTFS partitions listed in the GPT entry table, then every remaining gap via
// fixed-size chunking.
func (d *gptDiskChunker) Dedup() (*manifest, error) {
	statusf("Detected GPT disk ...\n")

	var err error
	if err = d.dedupNtfsPartitions(); err == nil {
		err = d.dedupRest()
	}
	if err != nil {
		return nil, err
	}

	statusf("GPT disk fully indexed\n")
	return d.manifest, nil
}
// dedupNtfsPartitions reads the GPT header and partition entry table, probes
// each listed partition, and runs the NTFS chunker on every NTFS partition it
// finds, merging the per-partition manifests into d.manifest at the
// partition's absolute offset.
func (d *gptDiskChunker) dedupNtfsPartitions() error {
	// Read the 512-byte GPT header at LBA 1.
	header := make([]byte, gptHeaderLength)
	if _, err := d.reader.ReadAt(header, d.start+gptHeaderOffset); err != nil {
		return err
	}

	// Locate and size the partition entry table from the header fields.
	firstEntrySector := parseUintLE(header, gptFirstEntrySectorOffset, gptFirstEntrySectorLength)
	entryCount := parseUintLE(header, gptEntryCountOffset, gptEntryCountLength)
	entrySize := parseUintLE(header, gptEntrySizeOffset, gptEntrySizeLength)

	// Read the entire entry table in a single call.
	table := make([]byte, entryCount*entrySize)
	if _, err := d.reader.ReadAt(table, d.start+firstEntrySector*gptLogicalSectorSize); err != nil {
		return err
	}

	// Walk the entries; index each partition whose filesystem we support.
	for i := int64(0); i < entryCount; i++ {
		firstSector := parseUintLE(table, i*entrySize+gptEntryFirstSectorRelativeOffset, gptEntryFirstSectorRelativeLength)
		offset := d.start + firstSector*gptLogicalSectorSize

		debugf("Reading GPT entry %d, partition begins at sector %d, offset %d\n",
			i+1, firstSector, offset)

		// A zero offset means the entry slot is unused.
		if offset == 0 {
			continue
		}

		// Entries we cannot probe are skipped, not fatal.
		kind, err := probeType(d.reader, offset) // TODO fix global func call
		if err != nil {
			continue
		}
		if kind != typeNtfs {
			continue
		}

		debugf("NTFS partition found at offset %d\n", offset)
		ntfs := NewNtfsChunker(d.reader, d.store, offset, d.exact, d.noFile, d.minSize, d.chunkMaxSize, d.writeConcurrency)
		partitionManifest, err := ntfs.Dedup()
		if err != nil {
			return err
		}

		d.manifest.MergeAtOffset(offset, partitionManifest)
	}

	return nil
}
// dedupRest chunks every region not already covered by the manifest (i.e. the
// space between and around the indexed partitions) using the fixed-size
// chunker, then merges the resulting gap manifest into d.manifest.
func (d *gptDiskChunker) dedupRest() error {
	fixed := NewFixedChunkerWithSkip(d.reader, d.store, d.start, d.size, d.chunkMaxSize, d.writeConcurrency, d.manifest)

	gaps, err := fixed.Dedup()
	if err != nil {
		return err
	}

	d.manifest.Merge(gaps)
	return nil
}