mirror of https://github.com/minio/minio.git
Browse Source
controller: Implement controlled healing and trigger (#2381)
controller: Implement controlled healing and trigger (#2381)
This patch introduces a new command line 'control' - minio control - to manage a minio server, connecting through the GoRPC API frontend. - minio control heal - is implemented for healing objects.pull/2480/head

committed by
Harshavardhana

16 changed files with 846 additions and 27 deletions
-
134control-main.go
-
84erasure-healfile.go
-
123erasure-healfile_test.go
-
5fs-v1-multipart.go
-
15fs-v1.go
-
1main.go
-
7object-errors.go
-
2object-interface.go
-
6routers.go
-
85rpc-control.go
-
1tree-walk-pool.go
-
66xl-v1-healing.go
-
206xl-v1-list-objects-heal.go
-
5xl-v1-list-objects.go
-
5xl-v1-multipart.go
-
128xl-v1-object.go
@ -0,0 +1,134 @@ |
|||
/* |
|||
* Minio Cloud Storage, (C) 2016 Minio, Inc. |
|||
* |
|||
* Licensed under the Apache License, Version 2.0 (the "License"); |
|||
* you may not use this file except in compliance with the License. |
|||
* You may obtain a copy of the License at |
|||
* |
|||
* http://www.apache.org/licenses/LICENSE-2.0
|
|||
* |
|||
* Unless required by applicable law or agreed to in writing, software |
|||
* distributed under the License is distributed on an "AS IS" BASIS, |
|||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
|||
* See the License for the specific language governing permissions and |
|||
* limitations under the License. |
|||
*/ |
|||
|
|||
package main |
|||
|
|||
import ( |
|||
"fmt" |
|||
"strings" |
|||
|
|||
"net/rpc" |
|||
"net/url" |
|||
|
|||
"github.com/minio/cli" |
|||
) |
|||
|
|||
// controlCmd - "minio control" command. Parent command for all control
// subcommands (currently only "heal"); dispatches to mainControl when
// invoked without a subcommand.
var controlCmd = cli.Command{
	Name:   "control",
	Usage:  "Control and manage minio server.",
	Action: mainControl,
	Subcommands: []cli.Command{
		healCmd,
	},
}
|||
|
|||
// mainControl - entry point for "minio control" when invoked without a
// subcommand; it only prints the command help.
func mainControl(c *cli.Context) {
	cli.ShowCommandHelp(c, "")
}
|||
|
|||
var healCmd = cli.Command{ |
|||
Name: "heal", |
|||
Usage: "To heal objects.", |
|||
Action: healControl, |
|||
CustomHelpTemplate: `NAME: |
|||
minio {{.Name}} - {{.Usage}} |
|||
|
|||
USAGE: |
|||
minio {{.Name}} heal |
|||
|
|||
EAMPLES: |
|||
1. Heal an object. |
|||
$ minio control heal http://localhost:9000/songs/classical/western/piano.mp3
|
|||
|
|||
2. Heal all objects in a bucket recursively. |
|||
$ minio control heal http://localhost:9000/songs
|
|||
|
|||
3. Heall all objects with a given prefix recursively. |
|||
$ minio control heal http://localhost:9000/songs/classical/
|
|||
`, |
|||
} |
|||
|
|||
// "minio control heal" entry point.
|
|||
func healControl(c *cli.Context) { |
|||
// Parse bucket and object from url.URL.Path
|
|||
parseBucketObject := func(path string) (bucketName string, objectName string) { |
|||
splits := strings.SplitN(path, string(slashSeparator), 3) |
|||
switch len(splits) { |
|||
case 0, 1: |
|||
bucketName = "" |
|||
objectName = "" |
|||
case 2: |
|||
bucketName = splits[1] |
|||
objectName = "" |
|||
case 3: |
|||
bucketName = splits[1] |
|||
objectName = splits[2] |
|||
|
|||
} |
|||
return bucketName, objectName |
|||
} |
|||
|
|||
if len(c.Args()) != 1 { |
|||
cli.ShowCommandHelpAndExit(c, "heal", 1) |
|||
} |
|||
|
|||
parsedURL, err := url.ParseRequestURI(c.Args()[0]) |
|||
fatalIf(err, "Unable to parse URL") |
|||
|
|||
bucketName, objectName := parseBucketObject(parsedURL.Path) |
|||
if bucketName == "" { |
|||
cli.ShowCommandHelpAndExit(c, "heal", 1) |
|||
} |
|||
|
|||
client, err := rpc.DialHTTPPath("tcp", parsedURL.Host, healPath) |
|||
fatalIf(err, "Unable to connect to %s", parsedURL.Host) |
|||
|
|||
// If object does not have trailing "/" then it's an object, hence heal it.
|
|||
if objectName != "" && !strings.HasSuffix(objectName, slashSeparator) { |
|||
fmt.Printf("Healing : /%s/%s", bucketName, objectName) |
|||
args := &HealObjectArgs{bucketName, objectName} |
|||
reply := &HealObjectReply{} |
|||
err = client.Call("Heal.HealObject", args, reply) |
|||
fatalIf(err, "RPC Heal.HealObject call failed") |
|||
fmt.Println() |
|||
return |
|||
} |
|||
|
|||
// Recursively list and heal the objects.
|
|||
prefix := objectName |
|||
marker := "" |
|||
for { |
|||
args := HealListArgs{bucketName, prefix, marker, "", 1000} |
|||
reply := &HealListReply{} |
|||
err = client.Call("Heal.ListObjects", args, reply) |
|||
fatalIf(err, "RPC Heal.ListObjects call failed") |
|||
|
|||
// Heal the objects returned in the ListObjects reply.
|
|||
for _, obj := range reply.Objects { |
|||
fmt.Printf("Healing : /%s/%s", bucketName, obj) |
|||
reply := &HealObjectReply{} |
|||
err = client.Call("Heal.HealObject", HealObjectArgs{bucketName, obj}, reply) |
|||
fatalIf(err, "RPC Heal.HealObject call failed") |
|||
fmt.Println() |
|||
} |
|||
if !reply.IsTruncated { |
|||
// End of listing.
|
|||
break |
|||
} |
|||
marker = reply.NextMarker |
|||
} |
|||
} |
@ -0,0 +1,84 @@ |
|||
/* |
|||
* Minio Cloud Storage, (C) 2016 Minio, Inc. |
|||
* |
|||
* Licensed under the Apache License, Version 2.0 (the "License"); |
|||
* you may not use this file except in compliance with the License. |
|||
* You may obtain a copy of the License at |
|||
* |
|||
* http://www.apache.org/licenses/LICENSE-2.0
|
|||
* |
|||
* Unless required by applicable law or agreed to in writing, software |
|||
* distributed under the License is distributed on an "AS IS" BASIS, |
|||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
|||
* See the License for the specific language governing permissions and |
|||
* limitations under the License. |
|||
*/ |
|||
|
|||
package main |
|||
|
|||
import "encoding/hex" |
|||
|
|||
// Heals the erasure coded file at volume/path onto the outdated disks by
// reconstructing the missing parts (reedsolomon.Reconstruct() via
// decodeData) and appending them to healBucket/healPath.
//
// latestDisks and outDatedDisks are parallel slices over the same erasure
// set: an index that is nil in one is expected to be non-nil in the other.
// Returns hex-encoded bitrot checksums of the newly written parts,
// indexed like outDatedDisks ("" for nil entries).
//
// NOTE(review): the "algo" parameter is unused — the hashers are created
// with the package-level bitRotAlgo instead; confirm this is intended.
func erasureHealFile(latestDisks []StorageAPI, outDatedDisks []StorageAPI, volume, path, healBucket, healPath string, size int64, blockSize int64, dataBlocks int, parityBlocks int, algo string) (checkSums []string, err error) {
	var offset int64
	remainingSize := size

	// Hash for bitrot protection.
	hashWriters := newHashWriters(len(outDatedDisks), bitRotAlgo)

	for remainingSize > 0 {
		// Last block may be shorter than the configured block size.
		curBlockSize := blockSize
		if remainingSize < curBlockSize {
			curBlockSize = remainingSize
		}

		// Calculate the block size that needs to be read from each disk.
		curEncBlockSize := getChunkSize(curBlockSize, dataBlocks)

		// Memory for reading data from disks and reconstructing missing data using erasure coding.
		enBlocks := make([][]byte, len(latestDisks))

		// Read data from the latest disks.
		// FIXME: no need to read from all the disks. dataBlocks+1 is enough.
		for index, disk := range latestDisks {
			if disk == nil {
				continue
			}
			enBlocks[index] = make([]byte, curEncBlockSize)
			_, err := disk.ReadFile(volume, path, offset, enBlocks[index])
			if err != nil {
				// A failed read is treated like a missing block;
				// reconstruction below regenerates it.
				enBlocks[index] = nil
			}
		}

		// Reconstruct missing data.
		err := decodeData(enBlocks, dataBlocks, parityBlocks)
		if err != nil {
			return nil, err
		}

		// Write the reconstructed blocks to the healPath file on each
		// outdated disk, feeding the bitrot hashers on the way.
		for index, disk := range outDatedDisks {
			if disk == nil {
				continue
			}
			err := disk.AppendFile(healBucket, healPath, enBlocks[index])
			if err != nil {
				return nil, err
			}
			hashWriters[index].Write(enBlocks[index])
		}
		remainingSize -= curBlockSize
		offset += curEncBlockSize
	}

	// Checksums for the bit rot.
	checkSums = make([]string, len(outDatedDisks))
	for index, disk := range outDatedDisks {
		if disk == nil {
			continue
		}
		checkSums[index] = hex.EncodeToString(hashWriters[index].Sum(nil))
	}
	return checkSums, nil
}
@ -0,0 +1,123 @@ |
|||
/* |
|||
* Minio Cloud Storage, (C) 2016 Minio, Inc. |
|||
* |
|||
* Licensed under the Apache License, Version 2.0 (the "License"); |
|||
* you may not use this file except in compliance with the License. |
|||
* You may obtain a copy of the License at |
|||
* |
|||
* http://www.apache.org/licenses/LICENSE-2.0
|
|||
* |
|||
* Unless required by applicable law or agreed to in writing, software |
|||
* distributed under the License is distributed on an "AS IS" BASIS, |
|||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
|||
* See the License for the specific language governing permissions and |
|||
* limitations under the License. |
|||
*/ |
|||
|
|||
package main |
|||
|
|||
import ( |
|||
"bytes" |
|||
"crypto/rand" |
|||
"os" |
|||
"path" |
|||
"testing" |
|||
) |
|||
|
|||
// Test erasureHealFile()
|
|||
func TestErasureHealFile(t *testing.T) { |
|||
// Initialize environment needed for the test.
|
|||
dataBlocks := 7 |
|||
parityBlocks := 7 |
|||
blockSize := int64(blockSizeV1) |
|||
setup, err := newErasureTestSetup(dataBlocks, parityBlocks, blockSize) |
|||
if err != nil { |
|||
t.Error(err) |
|||
return |
|||
} |
|||
defer setup.Remove() |
|||
|
|||
disks := setup.disks |
|||
|
|||
// Prepare a slice of 1MB with random data.
|
|||
data := make([]byte, 1*1024*1024) |
|||
_, err = rand.Read(data) |
|||
if err != nil { |
|||
t.Fatal(err) |
|||
} |
|||
// Create a test file.
|
|||
size, checkSums, err := erasureCreateFile(disks, "testbucket", "testobject1", bytes.NewReader(data), blockSize, dataBlocks, parityBlocks, bitRotAlgo, dataBlocks+1) |
|||
if err != nil { |
|||
t.Fatal(err) |
|||
} |
|||
if size != int64(len(data)) { |
|||
t.Errorf("erasureCreateFile returned %d, expected %d", size, len(data)) |
|||
} |
|||
|
|||
latest := make([]StorageAPI, len(disks)) // Slice of latest disks
|
|||
outDated := make([]StorageAPI, len(disks)) // Slice of outdated disks
|
|||
|
|||
// Test case when one part needs to be healed.
|
|||
dataPath := path.Join(setup.diskPaths[0], "testbucket", "testobject1") |
|||
err = os.Remove(dataPath) |
|||
if err != nil { |
|||
t.Fatal(err) |
|||
} |
|||
copy(latest, disks) |
|||
latest[0] = nil |
|||
outDated[0] = disks[0] |
|||
healCheckSums, err := erasureHealFile(latest, outDated, "testbucket", "testobject1", "testbucket", "testobject1", 1*1024*1024, blockSize, dataBlocks, parityBlocks, bitRotAlgo) |
|||
// Checksum of the healed file should match.
|
|||
if checkSums[0] != healCheckSums[0] { |
|||
t.Error("Healing failed, data does not match.") |
|||
} |
|||
|
|||
// Test case when parityBlocks number of disks need to be healed.
|
|||
// Should succeed.
|
|||
copy(latest, disks) |
|||
for index := 0; index < parityBlocks; index++ { |
|||
dataPath := path.Join(setup.diskPaths[index], "testbucket", "testobject1") |
|||
err = os.Remove(dataPath) |
|||
if err != nil { |
|||
t.Fatal(err) |
|||
} |
|||
|
|||
latest[index] = nil |
|||
outDated[index] = disks[index] |
|||
} |
|||
|
|||
healCheckSums, err = erasureHealFile(latest, outDated, "testbucket", "testobject1", "testbucket", "testobject1", 1*1024*1024, blockSize, dataBlocks, parityBlocks, bitRotAlgo) |
|||
if err != nil { |
|||
t.Fatal(err) |
|||
} |
|||
|
|||
// Checksums of the healed files should match.
|
|||
for index := 0; index < parityBlocks; index++ { |
|||
if checkSums[index] != healCheckSums[index] { |
|||
t.Error("Healing failed, data does not match.") |
|||
} |
|||
} |
|||
for index := dataBlocks; index < len(disks); index++ { |
|||
if healCheckSums[index] != "" { |
|||
t.Errorf("expected healCheckSums[%d] to be empty", index) |
|||
} |
|||
} |
|||
|
|||
// Test case when parityBlocks+1 number of disks need to be healed.
|
|||
// Should fail.
|
|||
copy(latest, disks) |
|||
for index := 0; index < parityBlocks+1; index++ { |
|||
dataPath := path.Join(setup.diskPaths[index], "testbucket", "testobject1") |
|||
err = os.Remove(dataPath) |
|||
if err != nil { |
|||
t.Fatal(err) |
|||
} |
|||
|
|||
latest[index] = nil |
|||
outDated[index] = disks[index] |
|||
} |
|||
healCheckSums, err = erasureHealFile(latest, outDated, "testbucket", "testobject1", "testbucket", "testobject1", 1*1024*1024, blockSize, dataBlocks, parityBlocks, bitRotAlgo) |
|||
if err == nil { |
|||
t.Error("Expected erasureHealFile() to fail when the number of available disks <= parityBlocks") |
|||
} |
|||
} |
@ -0,0 +1,85 @@ |
|||
/* |
|||
* Minio Cloud Storage, (C) 2016 Minio, Inc. |
|||
* |
|||
* Licensed under the Apache License, Version 2.0 (the "License"); |
|||
* you may not use this file except in compliance with the License. |
|||
* You may obtain a copy of the License at |
|||
* |
|||
* http://www.apache.org/licenses/LICENSE-2.0
|
|||
* |
|||
* Unless required by applicable law or agreed to in writing, software |
|||
* distributed under the License is distributed on an "AS IS" BASIS, |
|||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
|||
* See the License for the specific language governing permissions and |
|||
* limitations under the License. |
|||
*/ |
|||
|
|||
package main |
|||
|
|||
import ( |
|||
"net/rpc" |
|||
|
|||
router "github.com/gorilla/mux" |
|||
) |
|||
|
|||
// Routes paths for "minio control" commands.
const (
	// Root path for all control RPC endpoints, under the reserved bucket.
	controlRPCPath = reservedBucket + "/control"
	// Endpoint served by the "Heal" RPC service.
	healPath = controlRPCPath + "/heal"
)
|||
|
|||
// Register control RPC handlers: mounts a dedicated rpc.Server serving
// the "Heal" service (backed by objAPI) at healPath on the given router.
func registerControlRPCRouter(mux *router.Router, objAPI ObjectLayer) {
	healRPCServer := rpc.NewServer()
	// NOTE(review): RegisterName returns an error that is ignored here;
	// it only fails for unsuitable receiver types — confirm acceptable.
	healRPCServer.RegisterName("Heal", &healHandler{objAPI})
	mux.Path(healPath).Handler(healRPCServer)
}
|||
|
|||
// Handler for object healing. RPC receiver for the "Heal" service,
// delegating all work to the server's object layer.
type healHandler struct {
	ObjectAPI ObjectLayer
}
|||
|
|||
// HealListArgs - argument for ListObjects RPC. Mirrors the standard
// bucket-listing parameters (S3-style pagination).
type HealListArgs struct {
	Bucket    string
	Prefix    string
	Marker    string // List resumes after this key.
	Delimiter string
	MaxKeys   int // Upper bound on returned entries.
}

// HealListReply - reply by ListObjects RPC.
type HealListReply struct {
	IsTruncated bool     // True if more entries remain.
	NextMarker  string   // Marker to pass in the next request.
	Objects     []string // Names of objects needing heal.
}
|||
|
|||
// ListObjects - list objects.
|
|||
func (h healHandler) ListObjects(arg *HealListArgs, reply *HealListReply) error { |
|||
info, err := h.ObjectAPI.ListObjectsHeal(arg.Bucket, arg.Prefix, arg.Marker, arg.Delimiter, arg.MaxKeys) |
|||
if err != nil { |
|||
return err |
|||
} |
|||
reply.IsTruncated = info.IsTruncated |
|||
reply.NextMarker = info.NextMarker |
|||
for _, obj := range info.Objects { |
|||
reply.Objects = append(reply.Objects, obj.Name) |
|||
} |
|||
return nil |
|||
} |
|||
|
|||
// HealObjectArgs - argument for HealObject RPC.
type HealObjectArgs struct {
	Bucket string
	Object string
}

// HealObjectReply - reply by HealObject RPC. Empty; success is conveyed
// by a nil error from the call.
type HealObjectReply struct{}
|||
|
|||
// HealObject - heal the object by delegating to the object layer's
// HealObject implementation.
func (h healHandler) HealObject(arg *HealObjectArgs, reply *HealObjectReply) error {
	return h.ObjectAPI.HealObject(arg.Bucket, arg.Object)
}
@ -0,0 +1,206 @@ |
|||
/* |
|||
* Minio Cloud Storage, (C) 2016 Minio, Inc. |
|||
* |
|||
* Licensed under the Apache License, Version 2.0 (the "License"); |
|||
* you may not use this file except in compliance with the License. |
|||
* You may obtain a copy of the License at |
|||
* |
|||
* http://www.apache.org/licenses/LICENSE-2.0
|
|||
* |
|||
* Unless required by applicable law or agreed to in writing, software |
|||
* distributed under the License is distributed on an "AS IS" BASIS, |
|||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
|||
* See the License for the specific language governing permissions and |
|||
* limitations under the License. |
|||
*/ |
|||
|
|||
package main |
|||
|
|||
import ( |
|||
"path" |
|||
"sort" |
|||
"strings" |
|||
) |
|||
|
|||
func listDirHealFactory(disks ...StorageAPI) listDirFunc { |
|||
// Returns sorted merged entries from all the disks.
|
|||
listDir := func(bucket, prefixDir, prefixEntry string) (mergedentries []string, delayIsLeaf bool, err error) { |
|||
for _, disk := range disks { |
|||
var entries []string |
|||
var newEntries []string |
|||
entries, err = disk.ListDir(bucket, prefixDir) |
|||
if err != nil { |
|||
// Skip the disk of listDir returns error.
|
|||
continue |
|||
} |
|||
|
|||
for i, entry := range entries { |
|||
if strings.HasSuffix(entry, slashSeparator) { |
|||
if _, err = disk.StatFile(bucket, path.Join(prefixDir, entry, xlMetaJSONFile)); err == nil { |
|||
// If it is an object trim the trailing "/"
|
|||
entries[i] = strings.TrimSuffix(entry, slashSeparator) |
|||
} |
|||
} |
|||
} |
|||
|
|||
if len(mergedentries) == 0 { |
|||
// For the first successful disk.ListDir()
|
|||
mergedentries = entries |
|||
sort.Strings(mergedentries) |
|||
continue |
|||
} |
|||
|
|||
// find elements in entries which are not in mergedentries
|
|||
for _, entry := range entries { |
|||
idx := sort.SearchStrings(mergedentries, entry) |
|||
if mergedentries[idx] == entry { |
|||
continue |
|||
} |
|||
newEntries = append(newEntries, entry) |
|||
} |
|||
|
|||
if len(newEntries) > 0 { |
|||
// Merge the entries and sort it.
|
|||
mergedentries = append(mergedentries, newEntries...) |
|||
sort.Strings(mergedentries) |
|||
} |
|||
} |
|||
return mergedentries, false, nil |
|||
} |
|||
return listDir |
|||
} |
|||
|
|||
// listObjectsHeal - wrapper function implemented over file tree walk.
|
|||
func (xl xlObjects) listObjectsHeal(bucket, prefix, marker, delimiter string, maxKeys int) (ListObjectsInfo, error) { |
|||
// Default is recursive, if delimiter is set then list non recursive.
|
|||
recursive := true |
|||
if delimiter == slashSeparator { |
|||
recursive = false |
|||
} |
|||
|
|||
// "heal" true for listObjectsHeal() and false for listObjects()
|
|||
heal := true |
|||
walkResultCh, endWalkCh := xl.listPool.Release(listParams{bucket, recursive, marker, prefix, heal}) |
|||
if walkResultCh == nil { |
|||
endWalkCh = make(chan struct{}) |
|||
listDir := listDirHealFactory(xl.storageDisks...) |
|||
walkResultCh = startTreeWalk(bucket, prefix, marker, recursive, listDir, nil, endWalkCh) |
|||
} |
|||
|
|||
var objInfos []ObjectInfo |
|||
var eof bool |
|||
var nextMarker string |
|||
for i := 0; i < maxKeys; { |
|||
walkResult, ok := <-walkResultCh |
|||
if !ok { |
|||
// Closed channel.
|
|||
eof = true |
|||
break |
|||
} |
|||
// For any walk error return right away.
|
|||
if walkResult.err != nil { |
|||
// File not found is a valid case.
|
|||
if walkResult.err == errFileNotFound { |
|||
return ListObjectsInfo{}, nil |
|||
} |
|||
return ListObjectsInfo{}, toObjectErr(walkResult.err, bucket, prefix) |
|||
} |
|||
entry := walkResult.entry |
|||
var objInfo ObjectInfo |
|||
if strings.HasSuffix(entry, slashSeparator) { |
|||
// Object name needs to be full path.
|
|||
objInfo.Bucket = bucket |
|||
objInfo.Name = entry |
|||
objInfo.IsDir = true |
|||
} else { |
|||
objInfo.Bucket = bucket |
|||
objInfo.Name = entry |
|||
} |
|||
nextMarker = objInfo.Name |
|||
objInfos = append(objInfos, objInfo) |
|||
i++ |
|||
if walkResult.end == true { |
|||
eof = true |
|||
break |
|||
} |
|||
} |
|||
|
|||
params := listParams{bucket, recursive, nextMarker, prefix, heal} |
|||
if !eof { |
|||
xl.listPool.Set(params, walkResultCh, endWalkCh) |
|||
} |
|||
|
|||
result := ListObjectsInfo{IsTruncated: !eof} |
|||
for _, objInfo := range objInfos { |
|||
result.NextMarker = objInfo.Name |
|||
if objInfo.IsDir { |
|||
result.Prefixes = append(result.Prefixes, objInfo.Name) |
|||
continue |
|||
} |
|||
result.Objects = append(result.Objects, ObjectInfo{ |
|||
Name: objInfo.Name, |
|||
ModTime: objInfo.ModTime, |
|||
Size: objInfo.Size, |
|||
IsDir: false, |
|||
}) |
|||
} |
|||
return result, nil |
|||
} |
|||
|
|||
// ListObjectsHeal - list all objects needing heal at prefix, delimited
// by '/'. Validates the request arguments (S3-style) before delegating
// to listObjectsHeal. The validation order determines which error the
// caller sees first and must not be reordered.
func (xl xlObjects) ListObjectsHeal(bucket, prefix, marker, delimiter string, maxKeys int) (ListObjectsInfo, error) {
	// Verify if bucket is valid.
	if !IsValidBucketName(bucket) {
		return ListObjectsInfo{}, BucketNameInvalid{Bucket: bucket}
	}
	// Verify if bucket exists.
	if !xl.isBucketExist(bucket) {
		return ListObjectsInfo{}, BucketNotFound{Bucket: bucket}
	}
	if !IsValidObjectPrefix(prefix) {
		return ListObjectsInfo{}, ObjectNameInvalid{Bucket: bucket, Object: prefix}
	}
	// Verify if delimiter is anything other than '/', which we do not support.
	if delimiter != "" && delimiter != slashSeparator {
		return ListObjectsInfo{}, UnsupportedDelimiter{
			Delimiter: delimiter,
		}
	}
	// Verify if marker has prefix.
	if marker != "" {
		if !strings.HasPrefix(marker, prefix) {
			return ListObjectsInfo{}, InvalidMarkerPrefixCombination{
				Marker: marker,
				Prefix: prefix,
			}
		}
	}

	// With max keys of zero we have reached eof, return right here.
	if maxKeys == 0 {
		return ListObjectsInfo{}, nil
	}

	// For delimiter and prefix as '/' we do not list anything at all
	// since according to s3 spec we stop at the 'delimiter' along
	// with the prefix. On a flat namespace with 'prefix' as '/'
	// we don't have any entries, since all the keys are of form 'keyName/...'
	if delimiter == slashSeparator && prefix == slashSeparator {
		return ListObjectsInfo{}, nil
	}

	// Over flowing count - reset to maxObjectList.
	if maxKeys < 0 || maxKeys > maxObjectList {
		maxKeys = maxObjectList
	}

	// Initiate a list operation, if successful filter and return quickly.
	listObjInfo, err := xl.listObjectsHeal(bucket, prefix, marker, delimiter, maxKeys)
	if err == nil {
		// We got the entries successfully return.
		return listObjInfo, nil
	}

	// Return error at the end.
	return ListObjectsInfo{}, toObjectErr(err, bucket, prefix)
}
Write
Preview
Loading…
Cancel
Save
Reference in new issue