// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package cmd

import (
	"bytes"
	"context"
	"crypto/rand"
	"io"
	"testing"

	"github.com/dustin/go-humanize"
)

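// badDisk wraps a StorageAPI and fails every append, create and read-stream
// call with errFaultyDisk, simulating a disk that misbehaves during encoding.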
type badDisk struct{ StorageAPI }

func (a badDisk) String() string {
	return "bad-disk"
}

func (a badDisk) AppendFile(ctx context.Context, volume string, path string, buf []byte) error {
	return errFaultyDisk
}

func (a badDisk) ReadFileStream(ctx context.Context, volume, path string, offset, length int64) (io.ReadCloser, error) {
	return nil, errFaultyDisk
}

func (a badDisk) CreateFile(ctx context.Context, origvolume, volume, path string, size int64, reader io.Reader) error {
	return errFaultyDisk
}

func (badDisk) Hostname() string {
	return ""
}

const oneMiByte = 1 * humanize.MiByte

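// erasureEncodeTests describes the layouts exercised by TestErasureEncode:
// dataBlocks data shards spread over onDisks disks, offDisks of which are
// later made to fail, plus the block size, payload size, read offset and
// bitrot algorithm, and whether the plain encode (shouldFail) or the
// reduced-quorum encode (shouldFailQuorum) is expected to error.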
var erasureEncodeTests = []struct {
	dataBlocks                   int
	onDisks, offDisks            int
	blocksize, data              int64
	offset                       int
	algorithm                    BitrotAlgorithm
	shouldFail, shouldFailQuorum bool
}{
	{dataBlocks: 2, onDisks: 4, offDisks: 0, blocksize: int64(blockSizeV2), data: oneMiByte, offset: 0, algorithm: BLAKE2b512, shouldFail: false, shouldFailQuorum: false},                         // 0
	{dataBlocks: 3, onDisks: 6, offDisks: 0, blocksize: int64(blockSizeV2), data: oneMiByte, offset: 1, algorithm: SHA256, shouldFail: false, shouldFailQuorum: false},                             // 1
	{dataBlocks: 4, onDisks: 8, offDisks: 2, blocksize: int64(blockSizeV2), data: oneMiByte, offset: 2, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: false},             // 2
	{dataBlocks: 5, onDisks: 10, offDisks: 3, blocksize: int64(blockSizeV2), data: oneMiByte, offset: oneMiByte, algorithm: BLAKE2b512, shouldFail: false, shouldFailQuorum: false},                // 3
	{dataBlocks: 6, onDisks: 12, offDisks: 4, blocksize: int64(blockSizeV2), data: oneMiByte, offset: oneMiByte, algorithm: BLAKE2b512, shouldFail: false, shouldFailQuorum: false},                // 4
	{dataBlocks: 7, onDisks: 14, offDisks: 5, blocksize: int64(blockSizeV2), data: 0, offset: 0, shouldFail: false, algorithm: SHA256, shouldFailQuorum: false},                                    // 5
	{dataBlocks: 8, onDisks: 16, offDisks: 7, blocksize: int64(blockSizeV2), data: 0, offset: 0, shouldFail: false, algorithm: DefaultBitrotAlgorithm, shouldFailQuorum: false},                    // 6
	{dataBlocks: 2, onDisks: 4, offDisks: 2, blocksize: int64(blockSizeV2), data: oneMiByte, offset: 0, algorithm: BLAKE2b512, shouldFail: false, shouldFailQuorum: true},                          // 7
	{dataBlocks: 4, onDisks: 8, offDisks: 4, blocksize: int64(blockSizeV2), data: oneMiByte, offset: 0, algorithm: SHA256, shouldFail: false, shouldFailQuorum: true},                              // 8
	{dataBlocks: 7, onDisks: 14, offDisks: 7, blocksize: int64(blockSizeV2), data: oneMiByte, offset: 0, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: true},             // 9
	{dataBlocks: 8, onDisks: 16, offDisks: 8, blocksize: int64(blockSizeV2), data: oneMiByte, offset: 0, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: true},             // 10
	{dataBlocks: 5, onDisks: 10, offDisks: 3, blocksize: int64(oneMiByte), data: oneMiByte, offset: 0, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: false},              // 11
	{dataBlocks: 3, onDisks: 6, offDisks: 1, blocksize: int64(blockSizeV2), data: oneMiByte, offset: oneMiByte / 2, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: false}, // 12
	{dataBlocks: 2, onDisks: 4, offDisks: 0, blocksize: int64(oneMiByte / 2), data: oneMiByte, offset: oneMiByte/2 + 1, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: false}, // 13
	{dataBlocks: 4, onDisks: 8, offDisks: 0, blocksize: int64(oneMiByte - 1), data: oneMiByte, offset: oneMiByte - 1, algorithm: BLAKE2b512, shouldFail: false, shouldFailQuorum: false},           // 14
	{dataBlocks: 8, onDisks: 12, offDisks: 2, blocksize: int64(blockSizeV2), data: oneMiByte, offset: 2, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: false},            // 15
	{dataBlocks: 8, onDisks: 10, offDisks: 1, blocksize: int64(blockSizeV2), data: oneMiByte, offset: 0, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: false},            // 16
	{dataBlocks: 10, onDisks: 14, offDisks: 0, blocksize: int64(blockSizeV2), data: oneMiByte, offset: 17, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: false},          // 17
	{dataBlocks: 2, onDisks: 6, offDisks: 2, blocksize: int64(oneMiByte), data: oneMiByte, offset: oneMiByte / 2, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: false},   // 18
	{dataBlocks: 10, onDisks: 16, offDisks: 8, blocksize: int64(blockSizeV2), data: oneMiByte, offset: 0, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: true},            // 19
}

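// TestErasureEncode runs every case in erasureEncodeTests. It first encodes
// random data onto all online disks and checks the outcome against shouldFail,
// then repeats the encode with the first offDisks writers replaced by failing
// ones and checks the write-quorum behaviour against shouldFailQuorum.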
func TestErasureEncode(t *testing.T) {
	for i, test := range erasureEncodeTests {
		setup, err := newErasureTestSetup(t, test.dataBlocks, test.onDisks-test.dataBlocks, test.blocksize)
		if err != nil {
			t.Fatalf("Test %d: failed to create test setup: %v", i, err)
		}
		disks := setup.disks
		erasure, err := NewErasure(t.Context(), test.dataBlocks, test.onDisks-test.dataBlocks, test.blocksize)
		if err != nil {
			t.Fatalf("Test %d: failed to create ErasureStorage: %v", i, err)
		}
		buffer := make([]byte, test.blocksize, 2*test.blocksize)

		data := make([]byte, test.data)
		if _, err = io.ReadFull(rand.Reader, data); err != nil {
			t.Fatalf("Test %d: failed to generate random test data: %v", i, err)
		}
		writers := make([]io.Writer, len(disks))
		for i, disk := range disks {
			if disk == OfflineDisk {
				continue
			}
			writers[i] = newBitrotWriter(disk, "", "testbucket", "object", erasure.ShardFileSize(int64(len(data[test.offset:]))), test.algorithm, erasure.ShardSize())
		}
		n, err := erasure.Encode(t.Context(), bytes.NewReader(data[test.offset:]), writers, buffer, erasure.dataBlocks+1)
		closeBitrotWriters(writers)
		if err != nil && !test.shouldFail {
			t.Errorf("Test %d: should pass but failed with: %v", i, err)
		}
		if err == nil && test.shouldFail {
			t.Errorf("Test %d: should fail but it passed", i)
		}
		for i, w := range writers {
			if w == nil {
				disks[i] = OfflineDisk
			}
		}
		if err == nil {
			if length := int64(len(data[test.offset:])); n != length {
				t.Errorf("Test %d: invalid number of bytes written: got: #%d want #%d", i, n, length)
			}
			writers := make([]io.Writer, len(disks))
			for i, disk := range disks {
				if disk == nil {
					continue
				}
				writers[i] = newBitrotWriter(disk, "", "testbucket", "object2", erasure.ShardFileSize(int64(len(data[test.offset:]))), test.algorithm, erasure.ShardSize())
			}
			for j := range disks[:test.offDisks] {
				switch w := writers[j].(type) {
				case *wholeBitrotWriter:
					w.disk = badDisk{nil}
				case *streamingBitrotWriter:
					w.closeWithErr(errFaultyDisk)
				}
			}
			if test.offDisks > 0 {
				writers[0] = nil
			}
			n, err = erasure.Encode(t.Context(), bytes.NewReader(data[test.offset:]), writers, buffer, erasure.dataBlocks+1)
			closeBitrotWriters(writers)
			if err != nil && !test.shouldFailQuorum {
				t.Errorf("Test %d: should pass but failed with: %v", i, err)
			}
			if err == nil && test.shouldFailQuorum {
				t.Errorf("Test %d: should fail but it passed", i)
			}
			if err == nil {
				if length := int64(len(data[test.offset:])); n != length {
					t.Errorf("Test %d: invalid number of bytes written: got: #%d want #%d", i, n, length)
				}
			}
		}
	}
}

// Benchmarks

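// benchmarkErasureEncode measures Encode throughput for a data+parity layout
// at the given payload size, with dataDown data disks and parityDown parity
// disks taken offline before encoding begins.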
func benchmarkErasureEncode(data, parity, dataDown, parityDown int, size int64, b *testing.B) {
	setup, err := newErasureTestSetup(b, data, parity, blockSizeV2)
	if err != nil {
		b.Fatalf("failed to create test setup: %v", err)
	}
	erasure, err := NewErasure(context.Background(), data, parity, blockSizeV2)
	if err != nil {
		b.Fatalf("failed to create ErasureStorage: %v", err)
	}
	disks := setup.disks
	buffer := make([]byte, blockSizeV2, 2*blockSizeV2)
	content := make([]byte, size)

	for i := 0; i < dataDown; i++ {
		disks[i] = OfflineDisk
	}
	for i := data; i < data+parityDown; i++ {
		disks[i] = OfflineDisk
	}

	b.ResetTimer()
	b.SetBytes(size)
	b.ReportAllocs()
	for i := 0; i < b.N; i++ {
		writers := make([]io.Writer, len(disks))
		for i, disk := range disks {
			if disk == OfflineDisk {
				continue
			}
			disk.Delete(context.Background(), "testbucket", "object", DeleteOptions{
				Recursive: false,
				Immediate: false,
			})
			writers[i] = newBitrotWriter(disk, "", "testbucket", "object", erasure.ShardFileSize(size), DefaultBitrotAlgorithm, erasure.ShardSize())
		}
		_, err := erasure.Encode(context.Background(), bytes.NewReader(content), writers, buffer, erasure.dataBlocks+1)
		closeBitrotWriters(writers)
		if err != nil {
			panic(err)
		}
	}
}

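// Sub-benchmark names sketch the disk layout: one character per disk, data
// disks to the left of '|' and parity disks to the right, with 'X' marking a
// disk that is offline for that run.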
func BenchmarkErasureEncodeQuick(b *testing.B) {
	const size = 12 * 1024 * 1024
	b.Run(" 00|00 ", func(b *testing.B) { benchmarkErasureEncode(2, 2, 0, 0, size, b) })
	b.Run(" 00|X0 ", func(b *testing.B) { benchmarkErasureEncode(2, 2, 0, 1, size, b) })
	b.Run(" X0|00 ", func(b *testing.B) { benchmarkErasureEncode(2, 2, 1, 0, size, b) })
}

func BenchmarkErasureEncode_4_64KB(b *testing.B) {
	const size = 64 * 1024
	b.Run(" 00|00 ", func(b *testing.B) { benchmarkErasureEncode(2, 2, 0, 0, size, b) })
	b.Run(" 00|X0 ", func(b *testing.B) { benchmarkErasureEncode(2, 2, 0, 1, size, b) })
	b.Run(" X0|00 ", func(b *testing.B) { benchmarkErasureEncode(2, 2, 1, 0, size, b) })
}

func BenchmarkErasureEncode_8_20MB(b *testing.B) {
	const size = 20 * 1024 * 1024
	b.Run(" 0000|0000 ", func(b *testing.B) { benchmarkErasureEncode(4, 4, 0, 0, size, b) })
	b.Run(" 0000|X000 ", func(b *testing.B) { benchmarkErasureEncode(4, 4, 0, 1, size, b) })
	b.Run(" X000|0000 ", func(b *testing.B) { benchmarkErasureEncode(4, 4, 1, 0, size, b) })
	b.Run(" 0000|XXX0 ", func(b *testing.B) { benchmarkErasureEncode(4, 4, 0, 3, size, b) })
	b.Run(" XXX0|0000 ", func(b *testing.B) { benchmarkErasureEncode(4, 4, 3, 0, size, b) })
}

func BenchmarkErasureEncode_12_30MB(b *testing.B) {
	const size = 30 * 1024 * 1024
	b.Run(" 000000|000000 ", func(b *testing.B) { benchmarkErasureEncode(6, 6, 0, 0, size, b) })
	b.Run(" 000000|X00000 ", func(b *testing.B) { benchmarkErasureEncode(6, 6, 0, 1, size, b) })
	b.Run(" X00000|000000 ", func(b *testing.B) { benchmarkErasureEncode(6, 6, 1, 0, size, b) })
	b.Run(" 000000|XXXXX0 ", func(b *testing.B) { benchmarkErasureEncode(6, 6, 0, 5, size, b) })
	b.Run(" XXXXX0|000000 ", func(b *testing.B) { benchmarkErasureEncode(6, 6, 5, 0, size, b) })
}

func BenchmarkErasureEncode_16_40MB(b *testing.B) {
	const size = 40 * 1024 * 1024
	b.Run(" 00000000|00000000 ", func(b *testing.B) { benchmarkErasureEncode(8, 8, 0, 0, size, b) })
	b.Run(" 00000000|X0000000 ", func(b *testing.B) { benchmarkErasureEncode(8, 8, 0, 1, size, b) })
	b.Run(" X0000000|00000000 ", func(b *testing.B) { benchmarkErasureEncode(8, 8, 1, 0, size, b) })
	b.Run(" 00000000|XXXXXXX0 ", func(b *testing.B) { benchmarkErasureEncode(8, 8, 0, 7, size, b) })
	b.Run(" XXXXXXX0|00000000 ", func(b *testing.B) { benchmarkErasureEncode(8, 8, 7, 0, size, b) })
}