From bfa5e3bfc34acb23611fbce94de1003c2e72226c Mon Sep 17 00:00:00 2001 From: Benedict Elliott Smith Date: Thu, 29 Jul 2021 17:20:18 +0100 Subject: [PATCH] [CASSANDRA-16926] Phase 1: Mockable Filesystem Co-authored-by: Benedict Elliott Smith Co-authored-by: Aleksey Yeschenko --- checkstyle.xml | 4 +- .../audit/AuditLogEntryCategory.java | 2 +- .../cassandra/cache/AutoSavingCache.java | 33 +- .../config/CassandraRelevantProperties.java | 2 + .../cassandra/config/DatabaseDescriptor.java | 101 +-- .../cassandra/config/EncryptionOptions.java | 2 +- .../config/YamlConfigurationLoader.java | 8 +- .../cql3/conditions/ColumnCondition.java | 2 +- .../cql3/selection/ResultSetBuilder.java | 2 +- .../cql3/statements/StatementType.java | 2 +- .../cassandra/db/ColumnFamilyStore.java | 43 +- .../org/apache/cassandra/db/Directories.java | 95 ++- .../cassandra/db/DisallowedDirectories.java | 8 +- .../db/DisallowedDirectoriesMBean.java | 2 +- .../org/apache/cassandra/db/Keyspace.java | 7 +- .../org/apache/cassandra/db/ReadResponse.java | 1 + .../apache/cassandra/db/SSTableImporter.java | 2 +- .../apache/cassandra/db/SystemKeyspace.java | 5 +- .../VirtualTableSinglePartitionReadQuery.java | 2 +- .../db/WindowsFailedSnapshotTracker.java | 16 +- .../cassandra/db/aggregation/GroupMaker.java | 2 +- .../AbstractCommitLogSegmentManager.java | 2 +- .../cassandra/db/commitlog/CommitLog.java | 17 +- .../db/commitlog/CommitLogArchiver.java | 20 +- .../db/commitlog/CommitLogDescriptor.java | 7 +- .../db/commitlog/CommitLogReader.java | 7 +- .../db/commitlog/CommitLogReplayer.java | 4 +- .../db/commitlog/CommitLogSegment.java | 20 +- .../commitlog/CommitLogSegmentManagerCDC.java | 6 +- .../CommitLogSegmentManagerStandard.java | 1 + .../cassandra/db/commitlog/IntervalSet.java | 2 +- .../db/commitlog/MemoryMappedSegment.java | 2 +- .../commitlog/PeriodicCommitLogService.java | 2 +- .../db/compaction/CompactionLogger.java | 5 +- .../db/compaction/CompactionManager.java | 6 +- .../compaction/CompactionStrategyManager.java | 6 +- .../cassandra/db/compaction/Scrubber.java | 5 +- .../SizeTieredCompactionStrategyOptions.java | 2 +- .../cassandra/db/compaction/Upgrader.java | 4 +- .../cassandra/db/compaction/Verifier.java | 8 +- .../writers/CompactionAwareWriter.java | 1 + .../db/lifecycle/LifecycleTransaction.java | 2 +- .../db/lifecycle/LogAwareFileLister.java | 4 +- .../cassandra/db/lifecycle/LogFile.java | 14 +- .../cassandra/db/lifecycle/LogRecord.java | 26 +- .../cassandra/db/lifecycle/LogReplica.java | 16 +- .../cassandra/db/lifecycle/LogReplicaSet.java | 6 +- .../db/lifecycle/LogTransaction.java | 8 +- .../cassandra/db/lifecycle/Tracker.java | 2 +- .../CassandraEntireSSTableStreamReader.java | 2 +- .../db/streaming/CassandraStreamReader.java | 1 + .../db/streaming/CassandraStreamWriter.java | 2 +- .../db/streaming/ComponentContext.java | 8 +- .../db/streaming/ComponentManifest.java | 3 +- .../tokenallocator/OfflineTokenAllocator.java | 2 +- .../tokenallocator/ReplicationStrategy.java | 2 +- .../dht/tokenallocator/TokenAllocator.java | 2 +- .../TokenAllocatorDiagnostics.java | 2 +- .../tokenallocator/TokenAllocatorEvent.java | 2 +- .../diag/DiagnosticEventService.java | 2 +- .../apache/cassandra/fql/FullQueryLogger.java | 4 +- .../apache/cassandra/gms/EndpointState.java | 1 + .../apache/cassandra/gms/FailureDetector.java | 3 +- .../apache/cassandra/gms/GossipDigest.java | 1 + .../cassandra/gms/GossipDigestAck2.java | 1 + .../apache/cassandra/gms/GossipDigestSyn.java | 2 + .../gms/GossipShutdownVerbHandler.java | 2 +- 
.../cassandra/gms/GossiperDiagnostics.java | 2 +- .../apache/cassandra/gms/GossiperEvent.java | 2 +- .../apache/cassandra/gms/GossiperMBean.java | 2 +- .../apache/cassandra/gms/HeartBeatState.java | 2 + .../apache/cassandra/gms/TokenSerializer.java | 4 +- .../apache/cassandra/gms/VersionedValue.java | 1 + .../hadoop/cql3/CqlBulkRecordWriter.java | 6 +- .../cassandra/hints/ChecksummedDataInput.java | 2 +- .../hints/CompressedHintsWriter.java | 3 +- .../cassandra/hints/EncryptedHintsWriter.java | 2 +- .../cassandra/hints/HintDiagnostics.java | 2 +- .../org/apache/cassandra/hints/HintEvent.java | 2 +- .../apache/cassandra/hints/HintsCatalog.java | 10 +- .../cassandra/hints/HintsDescriptor.java | 14 +- .../hints/HintsDispatchExecutor.java | 3 +- .../cassandra/hints/HintsDispatcher.java | 4 +- .../apache/cassandra/hints/HintsReader.java | 2 +- .../apache/cassandra/hints/HintsService.java | 2 +- .../apache/cassandra/hints/HintsStore.java | 6 +- .../apache/cassandra/hints/HintsWriter.java | 4 +- .../apache/cassandra/hints/package-info.java | 2 +- .../index/internal/keys/KeysIndex.java | 2 +- .../index/internal/keys/KeysSearcher.java | 2 +- .../index/sasi/SASIIndexBuilder.java | 2 +- .../cassandra/index/sasi/SSTableIndex.java | 2 +- .../sasi/analyzer/DelimiterAnalyzer.java | 2 +- .../sasi/analyzer/filter/StopWordFactory.java | 6 +- .../index/sasi/conf/DataTracker.java | 4 +- .../index/sasi/disk/OnDiskIndex.java | 46 +- .../index/sasi/disk/OnDiskIndexBuilder.java | 12 +- .../sasi/disk/PerSSTableIndexWriter.java | 2 +- .../cassandra/index/sasi/disk/TokenTree.java | 2 +- .../index/sasi/disk/TokenTreeBuilder.java | 2 +- .../index/sasi/memory/KeyRangeIterator.java | 2 +- .../index/sasi/utils/CombinedTerm.java | 2 +- .../index/sasi/utils/trie/Cursor.java | 2 +- .../index/sasi/utils/trie/PatriciaTrie.java | 2 +- src/java/org/apache/cassandra/io/FSError.java | 14 +- .../org/apache/cassandra/io/FSReadError.java | 10 +- .../org/apache/cassandra/io/FSWriteError.java | 10 +- .../compress/CompressedSequentialWriter.java | 2 +- .../io/compress/CompressionMetadata.java | 23 +- .../sstable/AbstractSSTableSimpleWriter.java | 22 +- .../io/sstable/CQLSSTableWriter.java | 4 +- .../io/sstable/CorruptSSTableException.java | 3 +- .../cassandra/io/sstable/Descriptor.java | 37 +- .../io/sstable/IndexSummaryManagerMBean.java | 2 +- .../cassandra/io/sstable/KeyIterator.java | 2 +- .../apache/cassandra/io/sstable/SSTable.java | 18 +- .../io/sstable/SSTableHeaderFix.java | 14 +- .../io/sstable/SSTableIdentityIterator.java | 1 + .../cassandra/io/sstable/SSTableLoader.java | 18 +- .../sstable/SSTableSimpleUnsortedWriter.java | 2 +- .../io/sstable/SSTableSimpleWriter.java | 2 +- .../io/sstable/SnapshotDeletingTask.java | 2 +- .../sstable/format/SSTableFlushObserver.java | 2 +- .../io/sstable/format/SSTableReader.java | 13 +- .../sstable/format/SSTableReaderBuilder.java | 9 +- .../io/sstable/format/big/BigTableWriter.java | 11 +- .../format/big/BigTableZeroCopyWriter.java | 4 +- .../sstable/metadata/MetadataCollector.java | 0 .../sstable/metadata/MetadataSerializer.java | 16 +- .../io/sstable/metadata/StatsMetadata.java | 0 .../io/util/AbstractReaderFileProxy.java | 2 +- .../io/util/BufferedDataOutputStreamPlus.java | 22 - .../cassandra/io/util/ChannelProxy.java | 3 +- .../cassandra/io/util/ChecksumWriter.java | 2 + .../util/ChecksummedRandomAccessReader.java | 3 +- .../io/util/ChecksummedSequentialWriter.java | 3 +- .../apache/cassandra/io/util/ChunkReader.java | 2 +- .../cassandra/io/util/DataInputPlus.java | 2 + 
.../io/util/DataIntegrityMetadata.java | 2 +- .../org/apache/cassandra/io/util/File.java | 608 +++++++++++++++ .../io/util/FileInputStreamPlus.java | 95 +++ .../io/util/FileOutputStreamPlus.java | 69 ++ .../apache/cassandra/io/util/FileReader.java | 38 + .../apache/cassandra/io/util/FileUtils.java | 592 ++++---------- .../apache/cassandra/io/util/FileWriter.java | 39 + .../apache/cassandra/io/util/PathUtils.java | 727 ++++++++++++++++++ .../cassandra/io/util/RandomAccessReader.java | 2 +- .../cassandra/io/util/ReaderFileProxy.java | 2 +- .../apache/cassandra/io/util/Rebufferer.java | 2 +- .../io/util/RewindableDataInput.java | 2 +- .../util/RewindableDataInputStreamPlus.java | 571 -------------- .../cassandra/io/util/SequentialWriter.java | 5 +- .../cassandra/io/util/SimpleChunkReader.java | 2 +- .../cassandra/locator/CloudstackSnitch.java | 4 +- .../cassandra/locator/RangesByEndpoint.java | 2 +- .../cassandra/net/AsyncChannelOutputPlus.java | 2 +- .../cassandra/net/AsyncMessageOutputPlus.java | 2 +- .../net/AsyncStreamingOutputPlus.java | 2 +- src/java/org/apache/cassandra/net/Verb.java | 2 +- .../repair/asymmetric/HostDifferences.java | 2 +- .../repair/asymmetric/PreferedNodeFilter.java | 2 +- .../schema/SchemaPullVerbHandler.java | 2 +- .../cassandra/schema/TableMetadataRef.java | 2 +- .../org/apache/cassandra/schema/Views.java | 2 +- .../apache/cassandra/security/SSLFactory.java | 2 +- .../cassandra/service/CassandraDaemon.java | 2 +- .../service/DefaultFSErrorHandler.java | 6 +- .../cassandra/service/StartupChecks.java | 14 +- .../cassandra/service/StorageService.java | 9 +- .../service/pager/MultiPartitionPager.java | 2 +- .../service/snapshot/SnapshotManager.java | 2 +- .../service/snapshot/SnapshotManifest.java | 8 +- .../service/snapshot/TableSnapshot.java | 2 +- .../streaming/messages/PrepareSynMessage.java | 1 + .../streaming/messages/ReceivedMessage.java | 1 + .../cassandra/tools/AuditLogViewer.java | 4 +- .../apache/cassandra/tools/BulkLoader.java | 2 +- .../org/apache/cassandra/tools/JMXTool.java | 8 +- .../apache/cassandra/tools/LoaderOptions.java | 4 +- .../org/apache/cassandra/tools/NodeTool.java | 13 +- .../apache/cassandra/tools/SSTableExport.java | 4 +- .../tools/SSTableMetadataViewer.java | 4 +- .../tools/SSTableOfflineRelevel.java | 2 +- .../tools/SSTableRepairedAtSetter.java | 6 +- .../tools/StandaloneSSTableUtil.java | 4 +- .../cassandra/tools/StandaloneScrubber.java | 4 +- .../cassandra/tools/StandaloneSplitter.java | 4 +- .../cassandra/tools/nodetool/Assassinate.java | 2 +- .../tools/nodetool/CfHistograms.java | 2 +- .../cassandra/tools/nodetool/CfStats.java | 2 +- .../cassandra/tools/nodetool/Compact.java | 2 +- .../tools/nodetool/Decommission.java | 2 +- .../tools/nodetool/DisableAuditLog.java | 2 +- .../tools/nodetool/DisableAutoCompaction.java | 2 +- .../tools/nodetool/DisableBackup.java | 2 +- .../tools/nodetool/DisableBinary.java | 2 +- .../tools/nodetool/DisableFullQueryLog.java | 2 +- .../tools/nodetool/DisableGossip.java | 2 +- .../tools/nodetool/DisableHandoff.java | 2 +- .../tools/nodetool/DisableHintsForDC.java | 2 +- .../nodetool/DisableOldProtocolVersions.java | 2 +- .../cassandra/tools/nodetool/Drain.java | 2 +- .../tools/nodetool/EnableAuditLog.java | 2 +- .../tools/nodetool/EnableAutoCompaction.java | 2 +- .../tools/nodetool/EnableBackup.java | 2 +- .../tools/nodetool/EnableBinary.java | 2 +- .../tools/nodetool/EnableFullQueryLog.java | 2 +- .../tools/nodetool/EnableGossip.java | 2 +- .../tools/nodetool/EnableHandoff.java | 2 +- 
.../nodetool/EnableOldProtocolVersions.java | 2 +- .../cassandra/tools/nodetool/Flush.java | 2 +- .../tools/nodetool/GetSnapshotThrottle.java | 2 +- .../cassandra/tools/nodetool/HostStat.java | 2 +- .../nodetool/InvalidateCounterCache.java | 2 +- .../tools/nodetool/InvalidateKeyCache.java | 2 +- .../tools/nodetool/InvalidateRowCache.java | 2 +- .../apache/cassandra/tools/nodetool/Move.java | 2 +- .../tools/nodetool/PauseHandoff.java | 2 +- .../cassandra/tools/nodetool/Rebuild.java | 2 +- .../tools/nodetool/RebuildIndex.java | 2 +- .../tools/nodetool/ReloadTriggers.java | 2 +- .../tools/nodetool/ResetFullQueryLog.java | 2 +- .../tools/nodetool/ResetLocalSchema.java | 2 +- .../tools/nodetool/ResumeHandoff.java | 2 +- .../nodetool/SetBatchlogReplayThrottle.java | 2 +- .../tools/nodetool/SetCacheCapacity.java | 2 +- .../tools/nodetool/SetCacheKeysToSave.java | 2 +- .../nodetool/SetCompactionThreshold.java | 2 +- .../nodetool/SetCompactionThroughput.java | 2 +- .../SetHintedHandoffThrottleInKB.java | 2 +- .../cassandra/tools/nodetool/SetHostStat.java | 2 +- .../tools/nodetool/SetLoggingLevel.java | 2 +- .../tools/nodetool/SetSnapshotThrottle.java | 2 +- .../tools/nodetool/SetStreamThroughput.java | 2 +- .../tools/nodetool/SetTraceProbability.java | 2 +- .../apache/cassandra/tools/nodetool/Sjk.java | 14 +- .../tools/nodetool/TruncateHints.java | 2 +- .../tools/nodetool/stats/StatsHolder.java | 2 +- .../tools/nodetool/stats/StatsKeyspace.java | 2 +- .../tools/nodetool/stats/StatsPrinter.java | 2 +- .../cassandra/triggers/CustomClassLoader.java | 26 +- .../cassandra/triggers/TriggerExecutor.java | 2 +- .../utils/BloomFilterSerializer.java | 5 +- .../apache/cassandra/utils/ByteArrayUtil.java | 2 +- .../utils/DirectorySizeCalculator.java | 3 +- .../apache/cassandra/utils/ExecutorUtils.java | 2 +- .../apache/cassandra/utils/FBUtilities.java | 12 +- .../org/apache/cassandra/utils/HeapUtils.java | 16 +- .../utils/IndexedSearchIterator.java | 2 +- .../utils/JVMStabilityInspector.java | 3 +- .../apache/cassandra/utils/NativeLibrary.java | 6 +- .../cassandra/utils/ResourceWatcher.java | 2 +- .../org/apache/cassandra/utils/SyncUtil.java | 9 +- .../apache/cassandra/utils/Throwables.java | 4 +- .../apache/cassandra/utils/binlog/BinLog.java | 14 +- .../utils/btree/LeafBTreeSearchIterator.java | 2 +- .../cassandra/utils/btree/NodeCursor.java | 2 +- .../cassandra/utils/obs/OffHeapBitSet.java | 4 +- .../streamhist/HistogramDataConsumer.java | 0 .../StreamingTombstoneHistogramBuilder.java | 0 .../utils/streamhist/TombstoneHistogram.java | 0 .../cassandra/net/GlobalInboundSettings.java | 2 +- .../cassandra/transport/DriverBurnTest.java | 2 +- .../transport/SimpleClientBurnTest.java | 2 +- .../distributed/impl/AbstractCluster.java | 8 +- .../distributed/impl/FileLogAction.java | 4 +- .../cassandra/distributed/impl/Instance.java | 12 +- .../distributed/impl/InstanceConfig.java | 4 +- .../cassandra/distributed/shared/Byteman.java | 14 +- .../distributed/shared/ClusterUtils.java | 2 +- .../distributed/shared/ShutdownException.java | 2 +- .../distributed/test/FrozenUDTTest.java | 2 +- .../distributed/test/LargeColumnTest.java | 2 +- .../distributed/test/MessageFiltersTest.java | 2 +- .../test/MessageForwardingTest.java | 2 +- .../distributed/test/NativeProtocolTest.java | 2 +- .../test/NetstatsRepairStreamingTest.java | 2 +- .../ReadRepairEmptyRangeTombstonesTest.java | 2 +- .../distributed/test/ReadRepairTester.java | 2 +- .../test/ReplicaFilteringProtectionTest.java | 2 +- .../distributed/test/ResourceLeakTest.java | 
4 +- .../test/ShortReadProtectionTest.java | 2 +- .../distributed/test/ring/BootstrapTest.java | 2 +- .../CommunicationDuringDecommissionTest.java | 2 +- .../test/ring/PendingWritesTest.java | 2 +- .../upgrade/MixedModeReadRepairWriteTest.java | 2 +- .../apache/cassandra/cql3/CorruptionTest.java | 6 +- .../db/commitlog/CommitLogStressTest.java | 16 +- ...actReplicationAwareTokenAllocatorTest.java | 2 +- .../hints/HintsWriteThenReadTest.java | 4 +- .../io/compress/CompressorPerformance.java | 4 +- .../io/sstable/CQLSSTableWriterLongTest.java | 8 +- .../streaming/LongStreamingTest.java | 12 +- .../BloomFilterSerializerBench.java | 16 +- .../test/microbench/CompactionBench.java | 6 +- .../test/microbench/DirectorySizerBench.java | 6 +- .../test/microbench/OutputStreamBench.java | 2 +- ...reamingTombstoneHistogramBuilderBench.java | 0 .../AbstractSerializationsTester.java | 23 +- .../CassandraBriefJUnitResultFormatter.java | 2 +- .../CassandraIsolatedJunit4ClassRunner.java | 2 +- .../CassandraXMLJUnitResultFormatter.java | 2 +- .../org/apache/cassandra/SchemaLoader.java | 7 +- .../org/apache/cassandra/ServerTestUtils.java | 5 +- test/unit/org/apache/cassandra/Util.java | 8 +- .../cassandra/audit/AuditLogFilterTest.java | 2 +- .../cassandra/audit/AuditLoggerAuthTest.java | 2 +- .../cassandra/audit/BinAuditLoggerTest.java | 1 + .../apache/cassandra/auth/AuthTestUtils.java | 4 +- .../auth/PasswordAuthenticatorTest.java | 2 +- .../CassandraRelevantPropertiesTest.java | 2 +- .../config/DatabaseDescriptorRefTest.java | 4 + .../config/EncryptionOptionsTest.java | 2 +- .../org/apache/cassandra/cql3/CQLTester.java | 4 +- .../SecondaryIndexOnStaticColumnTest.java | 2 +- .../miscellaneous/OverflowTest.java | 2 +- .../operations/CompactStorageTest.java | 2 +- .../DropRecreateAndRestoreTest.java | 2 +- .../cql3/validation/operations/TTLTest.java | 16 +- .../cassandra/db/ColumnFamilyStoreTest.java | 6 +- .../apache/cassandra/db/DirectoriesTest.java | 120 +-- .../cassandra/db/DiskBoundaryManagerTest.java | 2 +- .../org/apache/cassandra/db/ImportTest.java | 31 +- .../org/apache/cassandra/db/MmapFileTest.java | 20 +- .../MutationExceededMaxSizeExceptionTest.java | 2 +- .../apache/cassandra/db/ReadMessageTest.java | 1 + .../db/RecoveryManagerMissingHeaderTest.java | 6 +- .../cassandra/db/RowIndexEntryTest.java | 2 +- .../cassandra/db/SchemaCQLHelperTest.java | 4 +- .../org/apache/cassandra/db/ScrubTest.java | 10 +- .../cassandra/db/SerializationHeaderTest.java | 5 +- .../org/apache/cassandra/db/VerifyTest.java | 4 +- .../db/commitlog/CDCTestReplayer.java | 4 +- .../CommitLogChainedMarkersTest.java | 2 +- .../db/commitlog/CommitLogReaderTest.java | 4 +- .../CommitLogSegmentManagerCDCTest.java | 32 +- .../cassandra/db/commitlog/CommitLogTest.java | 32 +- .../db/commitlog/CommitLogTestReplayer.java | 4 +- .../db/commitlog/CommitLogUpgradeTest.java | 6 +- .../commitlog/CommitLogUpgradeTestMaker.java | 13 +- .../db/commitlog/CommitlogShutdownTest.java | 4 +- .../db/commitlog/SegmentReaderTest.java | 11 +- .../compaction/AntiCompactionBytemanTest.java | 2 +- .../db/compaction/AntiCompactionTest.java | 2 +- .../CompactionStrategyManagerTest.java | 4 +- .../db/compaction/CompactionsTest.java | 4 +- .../db/lifecycle/LogTransactionTest.java | 112 ++- .../db/lifecycle/RealTransactionsTest.java | 14 +- .../db/marshal/TimestampTypeTest.java | 2 +- .../db/rows/UnfilteredRowsGenerator.java | 2 +- .../cassandra/dht/LengthPartitioner.java | 2 +- .../cassandra/fql/FullQueryLoggerTest.java | 30 +- 
.../cassandra/gms/SerializationsTest.java | 5 +- .../apache/cassandra/hints/AlteredHints.java | 4 +- .../hints/ChecksummedDataInputTest.java | 2 +- .../cassandra/hints/HintWriteTTLTest.java | 4 +- .../cassandra/hints/HintsCatalogTest.java | 8 +- .../cassandra/hints/HintsDescriptorTest.java | 12 +- .../cassandra/hints/HintsReaderTest.java | 14 +- .../cassandra/hints/HintsStoreTest.java | 4 +- .../cassandra/index/CustomIndexTest.java | 2 +- .../cassandra/index/sasi/SASIIndexTest.java | 3 +- .../index/sasi/disk/OnDiskIndexTest.java | 2 +- .../sasi/disk/PerSSTableIndexWriterTest.java | 2 +- .../index/sasi/disk/TokenTreeTest.java | 2 +- .../index/sasi/utils/LongIteratorTest.java | 2 +- .../index/sasi/utils/MappedBufferTest.java | 9 +- .../sasi/utils/RangeUnionIteratorTest.java | 2 +- .../CompressedRandomAccessReaderTest.java | 40 +- .../CompressedSequentialWriterTest.java | 32 +- .../cassandra/io/compress/CompressorTest.java | 3 +- .../io/sstable/BigTableWriterTest.java | 2 +- .../sstable/CQLSSTableWriterClientTest.java | 18 +- .../io/sstable/CQLSSTableWriterTest.java | 19 +- .../cassandra/io/sstable/DescriptorTest.java | 18 +- .../io/sstable/LegacySSTableTest.java | 23 +- .../SSTableCorruptionDetectionTest.java | 2 +- .../io/sstable/SSTableHeaderFixTest.java | 8 +- .../io/sstable/SSTableLoaderTest.java | 16 +- .../io/sstable/SSTableReaderTest.java | 10 +- .../io/sstable/SSTableRewriterTest.java | 33 +- .../cassandra/io/sstable/SSTableUtils.java | 12 +- .../io/sstable/SSTableWriterTest.java | 30 +- .../io/sstable/SSTableWriterTestBase.java | 8 +- .../format/SSTableFlushObserverTest.java | 8 +- .../big/BigTableZeroCopyWriterTest.java | 2 +- .../metadata/MetadataSerializerTest.java | 14 +- .../io/util/BufferedRandomAccessFileTest.java | 17 +- .../ChecksummedRandomAccessReaderTest.java | 3 +- .../util/ChecksummedSequentialWriterTest.java | 3 +- .../cassandra/io/util/DataOutputTest.java | 25 +- .../apache/cassandra/io/util/FileTest.java | 326 ++++++++ .../cassandra/io/util/FileUtilsTest.java | 11 +- .../cassandra/io/util/MmappedRegionsTest.java | 7 +- .../io/util/NIODataInputStreamTest.java | 3 +- .../io/util/RandomAccessReaderTest.java | 19 +- .../RewindableDataInputStreamPlusTest.java | 539 ------------- .../io/util/SequentialWriterTest.java | 13 +- .../cassandra/locator/PendingRangesTest.java | 2 +- .../metrics/HintedHandOffMetricsTest.java | 2 +- .../cassandra/metrics/LatencyMetricsTest.java | 2 +- .../net/AsyncStreamingOutputPlusTest.java | 6 +- .../cassandra/net/ChunkedInputPlusTest.java | 2 +- .../ManyToOneConcurrentLinkedQueueTest.java | 2 +- .../cassandra/net/MockMessagingService.java | 2 +- .../cassandra/net/MockMessagingSpy.java | 2 +- .../cassandra/net/PrunableArrayQueueTest.java | 2 +- .../org/apache/cassandra/net/SocketUtils.java | 2 +- .../schema/MigrationManagerTest.java | 4 +- .../apache/cassandra/schema/MockSchema.java | 14 +- .../security/EncryptionUtilsTest.java | 6 +- .../cassandra/security/SSLFactoryTest.java | 16 +- .../service/SSTablesGlobalTrackerTest.java | 3 +- .../cassandra/service/SerializationsTest.java | 10 +- .../cassandra/service/StartupChecksTest.java | 20 +- .../service/StorageServiceServerTest.java | 20 +- .../reads/SpeculativeRetryParseTest.java | 2 +- .../reads/repair/TestableReadRepair.java | 2 +- .../service/snapshot/SnapshotManagerTest.java | 19 +- .../snapshot/SnapshotManifestTest.java | 26 +- .../service/snapshot/TableSnapshotTest.java | 30 +- .../CompressedInputStreamTest.java | 7 +- .../cassandra/tools/AuditLogViewerTest.java | 5 +- 
.../cassandra/tools/CompactionStressTest.java | 4 +- .../apache/cassandra/tools/JMXToolTest.java | 2 +- .../cassandra/tools/LoaderOptionsTest.java | 10 +- .../cassandra/tools/OfflineToolUtils.java | 12 +- .../tools/SSTableRepairedAtSetterTest.java | 4 +- .../StandaloneSplitterWithCQLTesterTest.java | 38 +- .../StandaloneUpgraderOnSStablesTest.java | 6 +- .../tools/nodetool/ClearSnapshotTest.java | 2 +- .../InvalidateCredentialsCacheTest.java | 4 +- .../InvalidateJmxPermissionsCacheTest.java | 4 +- ...InvalidateNetworkPermissionsCacheTest.java | 4 +- .../InvalidatePermissionsCacheTest.java | 4 +- .../nodetool/InvalidateRolesCacheTest.java | 4 +- .../nodetool/formatter/TableBuilderTest.java | 2 +- .../cassandra/transport/CQLUserAuditTest.java | 2 +- .../cassandra/utils/AbstractIteratorTest.java | 2 +- .../cassandra/utils/BloomFilterTest.java | 15 +- .../cassandra/utils/GeneratorsTest.java | 2 +- .../apache/cassandra/utils/KeyGenerator.java | 12 +- .../cassandra/utils/NativeLibraryTest.java | 4 +- .../cassandra/utils/SerializationsTest.java | 12 +- .../cassandra/utils/binlog/BinLogTest.java | 6 +- .../utils/binlog/DeletingArchiverTest.java | 14 +- .../utils/binlog/ExternalArchiverTest.java | 54 +- .../utils/concurrent/RefCountedTest.java | 2 +- ...treamingTombstoneHistogramBuilderTest.java | 0 .../cassandra/fqltool/ResultComparator.java | 2 +- .../io/sstable/StressCQLSSTableWriter.java | 4 +- .../stress/generate/values/Bytes.java | 2 +- .../operations/userdefined/CASQuery.java | 2 +- .../apache/cassandra/stress/report/Timer.java | 2 +- .../cassandra/stress/settings/Command.java | 2 +- .../stress/settings/SettingsGraph.java | 2 +- .../stress/util/MultiResultLoggerTest.java | 2 +- 457 files changed, 3573 insertions(+), 3139 deletions(-) mode change 100755 => 100644 src/java/org/apache/cassandra/db/commitlog/AbstractCommitLogSegmentManager.java mode change 100755 => 100644 src/java/org/apache/cassandra/io/sstable/metadata/MetadataCollector.java mode change 100755 => 100644 src/java/org/apache/cassandra/io/sstable/metadata/StatsMetadata.java create mode 100644 src/java/org/apache/cassandra/io/util/File.java create mode 100644 src/java/org/apache/cassandra/io/util/FileInputStreamPlus.java create mode 100644 src/java/org/apache/cassandra/io/util/FileOutputStreamPlus.java create mode 100644 src/java/org/apache/cassandra/io/util/FileReader.java create mode 100644 src/java/org/apache/cassandra/io/util/FileWriter.java create mode 100644 src/java/org/apache/cassandra/io/util/PathUtils.java delete mode 100644 src/java/org/apache/cassandra/io/util/RewindableDataInputStreamPlus.java mode change 100755 => 100644 src/java/org/apache/cassandra/tools/SSTableMetadataViewer.java mode change 100755 => 100644 src/java/org/apache/cassandra/utils/streamhist/HistogramDataConsumer.java mode change 100755 => 100644 src/java/org/apache/cassandra/utils/streamhist/StreamingTombstoneHistogramBuilder.java mode change 100755 => 100644 src/java/org/apache/cassandra/utils/streamhist/TombstoneHistogram.java mode change 100755 => 100644 test/microbench/org/apache/cassandra/test/microbench/StreamingTombstoneHistogramBuilderBench.java create mode 100644 test/unit/org/apache/cassandra/io/util/FileTest.java delete mode 100644 test/unit/org/apache/cassandra/io/util/RewindableDataInputStreamPlusTest.java mode change 100755 => 100644 test/unit/org/apache/cassandra/utils/streamhist/StreamingTombstoneHistogramBuilderTest.java diff --git a/checkstyle.xml b/checkstyle.xml index f81354eb3ecc..60b2d70d6688 100644 --- a/checkstyle.xml +++ 
b/checkstyle.xml @@ -44,10 +44,10 @@ - + - + diff --git a/src/java/org/apache/cassandra/audit/AuditLogEntryCategory.java b/src/java/org/apache/cassandra/audit/AuditLogEntryCategory.java index 616658c1b846..9db4ce05e9c7 100644 --- a/src/java/org/apache/cassandra/audit/AuditLogEntryCategory.java +++ b/src/java/org/apache/cassandra/audit/AuditLogEntryCategory.java @@ -24,4 +24,4 @@ public enum AuditLogEntryCategory { QUERY, DML, DDL, DCL, OTHER, AUTH, ERROR, PREPARE -} \ No newline at end of file +} diff --git a/src/java/org/apache/cassandra/cache/AutoSavingCache.java b/src/java/org/apache/cassandra/cache/AutoSavingCache.java index 61cbd089554e..c11e463ec72e 100644 --- a/src/java/org/apache/cassandra/cache/AutoSavingCache.java +++ b/src/java/org/apache/cassandra/cache/AutoSavingCache.java @@ -18,15 +18,16 @@ package org.apache.cassandra.cache; import java.io.BufferedInputStream; -import java.io.File; import java.io.FileNotFoundException; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; +import java.nio.file.NoSuchFileException; import java.util.*; import java.util.concurrent.ScheduledFuture; import java.util.concurrent.TimeUnit; +import org.apache.cassandra.io.util.File; import org.cliffc.high_scale_lib.NonBlockingHashSet; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -59,7 +60,7 @@ public class AutoSavingCache extends InstrumentingCache tempCacheFiles() { File dataPath = getCacheDataPath(CURRENT_VERSION); File crcPath = getCacheCrcPath(CURRENT_VERSION); - return Pair.create(FileUtils.createTempFile(dataPath.getName(), null, dataPath.getParentFile()), - FileUtils.createTempFile(crcPath.getName(), null, crcPath.getParentFile())); + return Pair.create(FileUtils.createTempFile(dataPath.name(), null, dataPath.parent()), + FileUtils.createTempFile(crcPath.name(), null, crcPath.parent())); } private void deleteOldCacheFiles() { File savedCachesDir = new File(DatabaseDescriptor.getSavedCachesLocation()); assert savedCachesDir.exists() && savedCachesDir.isDirectory(); - File[] files = savedCachesDir.listFiles(); + File[] files = savedCachesDir.tryList(); if (files != null) { String cacheNameFormat = String.format("%s-%s.db", cacheType.toString(), CURRENT_VERSION); @@ -414,11 +415,11 @@ private void deleteOldCacheFiles() if (!file.isFile()) continue; // someone's been messing with our directory. naughty! - if (file.getName().endsWith(cacheNameFormat) - || file.getName().endsWith(cacheType.toString())) + if (file.name().endsWith(cacheNameFormat) + || file.name().endsWith(cacheType.toString())) { - if (!file.delete()) - logger.warn("Failed to delete {}", file.getAbsolutePath()); + if (!file.tryDelete()) + logger.warn("Failed to delete {}", file.absolutePath()); } } } diff --git a/src/java/org/apache/cassandra/config/CassandraRelevantProperties.java b/src/java/org/apache/cassandra/config/CassandraRelevantProperties.java index dc1671dd2020..3eb5df1c078f 100644 --- a/src/java/org/apache/cassandra/config/CassandraRelevantProperties.java +++ b/src/java/org/apache/cassandra/config/CassandraRelevantProperties.java @@ -170,6 +170,8 @@ public enum CassandraRelevantProperties */ LOG_DIR_AUDIT("cassandra.logdir.audit"), + CONSISTENT_DIRECTORY_LISTINGS("cassandra.consistent_directory_listings", "false"), + //cassandra properties (without the "cassandra." 
prefix) /** diff --git a/src/java/org/apache/cassandra/config/DatabaseDescriptor.java b/src/java/org/apache/cassandra/config/DatabaseDescriptor.java index 7a0eeb004512..fba13c7abd87 100644 --- a/src/java/org/apache/cassandra/config/DatabaseDescriptor.java +++ b/src/java/org/apache/cassandra/config/DatabaseDescriptor.java @@ -17,13 +17,9 @@ */ package org.apache.cassandra.config; -import java.io.File; import java.io.IOException; import java.net.*; import java.nio.file.FileStore; -import java.nio.file.NoSuchFileException; -import java.nio.file.Path; -import java.nio.file.Paths; import java.util.*; import java.util.concurrent.TimeUnit; import java.util.function.Function; @@ -36,6 +32,7 @@ import com.google.common.primitives.Longs; import com.google.common.util.concurrent.RateLimiter; +import org.apache.cassandra.io.util.File; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -59,6 +56,7 @@ import org.apache.cassandra.io.FSWriteError; import org.apache.cassandra.io.util.DiskOptimizationStrategy; import org.apache.cassandra.io.util.FileUtils; +import org.apache.cassandra.io.util.PathUtils; import org.apache.cassandra.io.util.SpinningDiskOptimizationStrategy; import org.apache.cassandra.io.util.SsdDiskOptimizationStrategy; import org.apache.cassandra.locator.DynamicEndpointSnitch; @@ -549,23 +547,13 @@ else if (conf.repair_session_space_in_mb > (int) (Runtime.getRuntime().maxMemory if (conf.commitlog_total_space_in_mb == null) { final int preferredSizeInMB = 8192; - try - { - // use 1/4 of available space. See discussion on #10013 and #10199 - final long totalSpaceInBytes = guessFileStore(conf.commitlog_directory).getTotalSpace(); - conf.commitlog_total_space_in_mb = calculateDefaultSpaceInMB("commitlog", - conf.commitlog_directory, - "commitlog_total_space_in_mb", - preferredSizeInMB, - totalSpaceInBytes, 1, 4); - - } - catch (IOException e) - { - logger.debug("Error checking disk space", e); - throw new ConfigurationException(String.format("Unable to check disk space available to '%s'. Perhaps the Cassandra user does not have the necessary permissions", - conf.commitlog_directory), e); - } + // use 1/4 of available space. See discussion on #10013 and #10199 + final long totalSpaceInBytes = tryGetSpace(conf.commitlog_directory, FileStore::getTotalSpace); + conf.commitlog_total_space_in_mb = calculateDefaultSpaceInMB("commitlog", + conf.commitlog_directory, + "commitlog_total_space_in_mb", + preferredSizeInMB, + totalSpaceInBytes, 1, 4); } if (conf.cdc_enabled) @@ -582,22 +570,13 @@ else if (conf.repair_session_space_in_mb > (int) (Runtime.getRuntime().maxMemory if (conf.cdc_total_space_in_mb == 0) { final int preferredSizeInMB = 4096; - try - { - // use 1/8th of available space. See discussion on #10013 and #10199 on the CL, taking half that for CDC - final long totalSpaceInBytes = guessFileStore(conf.cdc_raw_directory).getTotalSpace(); - conf.cdc_total_space_in_mb = calculateDefaultSpaceInMB("cdc", - conf.cdc_raw_directory, - "cdc_total_space_in_mb", - preferredSizeInMB, - totalSpaceInBytes, 1, 8); - } - catch (IOException e) - { - logger.debug("Error checking disk space", e); - throw new ConfigurationException(String.format("Unable to check disk space available to '%s'. Perhaps the Cassandra user does not have the necessary permissions", - conf.cdc_raw_directory), e); - } + // use 1/8th of available space. 
See discussion on #10013 and #10199 on the CL, taking half that for CDC + final long totalSpaceInBytes = tryGetSpace(conf.cdc_raw_directory, FileStore::getTotalSpace); + conf.cdc_total_space_in_mb = calculateDefaultSpaceInMB("cdc", + conf.cdc_raw_directory, + "cdc_total_space_in_mb", + preferredSizeInMB, + totalSpaceInBytes, 1, 8); } logger.info("cdc_enabled is true. Starting casssandra node with Change-Data-Capture enabled."); @@ -609,7 +588,7 @@ else if (conf.repair_session_space_in_mb > (int) (Runtime.getRuntime().maxMemory } if (conf.data_file_directories == null || conf.data_file_directories.length == 0) { - conf.data_file_directories = new String[]{ storagedir("data_file_directories") + File.separator + "data" }; + conf.data_file_directories = new String[]{ storagedir("data_file_directories") + File.pathSeparator() + "data" }; } long dataFreeBytes = 0; @@ -627,7 +606,7 @@ else if (conf.repair_session_space_in_mb > (int) (Runtime.getRuntime().maxMemory if (datadir.equals(conf.saved_caches_directory)) throw new ConfigurationException("saved_caches_directory must not be the same as any data_file_directories", false); - dataFreeBytes = saturatedSum(dataFreeBytes, getUnallocatedSpace(datadir)); + dataFreeBytes = saturatedSum(dataFreeBytes, tryGetSpace(datadir, FileStore::getUnallocatedSpace)); } if (dataFreeBytes < 64 * ONE_GB) // 64 GB logger.warn("Only {} free across all data volumes. Consider adding more capacity to your cluster or removing obsolete snapshots", @@ -642,7 +621,7 @@ else if (conf.repair_session_space_in_mb > (int) (Runtime.getRuntime().maxMemory if (conf.local_system_data_file_directory.equals(conf.hints_directory)) throw new ConfigurationException("local_system_data_file_directory must not be the same as the hints_directory", false); - long freeBytes = getUnallocatedSpace(conf.local_system_data_file_directory); + long freeBytes = tryGetSpace(conf.local_system_data_file_directory, FileStore::getUnallocatedSpace); if (freeBytes < ONE_GB) logger.warn("Only {} free in the system data volume. Consider adding more capacity or removing obsolete snapshots", @@ -871,7 +850,7 @@ static void applyRepairCommandPoolSize(Config config) private static String storagedirFor(String type) { - return storagedir(type + "_directory") + File.separator + type; + return storagedir(type + "_directory") + File.pathSeparator() + type; } private static String storagedir(String errMsgType) @@ -1191,45 +1170,9 @@ private static long saturatedSum(long left, long right) return sum < 0 ? Long.MAX_VALUE : sum; } - private static FileStore guessFileStore(String dir) throws IOException + private static long tryGetSpace(String dir, PathUtils.IOToLongFunction getSpace) { - Path path = Paths.get(dir); - while (true) - { - try - { - return FileUtils.getFileStore(path); - } - catch (IOException e) - { - if (e instanceof NoSuchFileException) - { - path = path.getParent(); - if (path == null) - { - throw new ConfigurationException("Unable to find filesystem for '" + dir + "'."); - } - } - else - { - throw e; - } - } - } - } - - private static long getUnallocatedSpace(String directory) - { - try - { - return guessFileStore(directory).getUnallocatedSpace(); - } - catch (IOException e) - { - logger.debug("Error checking disk space", e); - throw new ConfigurationException(String.format("Unable to check disk space available to %s. 
Perhaps the Cassandra user does not have the necessary permissions", - directory), e); - } + return PathUtils.tryGetSpace(new File(dir).toPath(), getSpace, e -> { throw new ConfigurationException("Unable check disk space in '" + dir + "'. Perhaps the Cassandra user does not have the necessary permissions"); }); } public static IEndpointSnitch createEndpointSnitch(boolean dynamic, String snitchClassName) throws ConfigurationException diff --git a/src/java/org/apache/cassandra/config/EncryptionOptions.java b/src/java/org/apache/cassandra/config/EncryptionOptions.java index 93668d9d04d6..99c9b52e73e2 100644 --- a/src/java/org/apache/cassandra/config/EncryptionOptions.java +++ b/src/java/org/apache/cassandra/config/EncryptionOptions.java @@ -17,13 +17,13 @@ */ package org.apache.cassandra.config; -import java.io.File; import java.util.List; import java.util.Objects; import com.google.common.annotations.VisibleForTesting; import com.google.common.collect.ImmutableList; +import org.apache.cassandra.io.util.File; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/src/java/org/apache/cassandra/config/YamlConfigurationLoader.java b/src/java/org/apache/cassandra/config/YamlConfigurationLoader.java index a774a8154e9d..b6969f048db5 100644 --- a/src/java/org/apache/cassandra/config/YamlConfigurationLoader.java +++ b/src/java/org/apache/cassandra/config/YamlConfigurationLoader.java @@ -18,7 +18,6 @@ package org.apache.cassandra.config; import java.io.ByteArrayInputStream; -import java.io.File; import java.io.IOException; import java.io.InputStream; import java.lang.annotation.Annotation; @@ -35,6 +34,7 @@ import com.google.common.collect.Sets; import com.google.common.io.ByteStreams; +import org.apache.cassandra.io.util.File; import org.apache.commons.lang3.SystemUtils; import org.slf4j.Logger; @@ -79,14 +79,14 @@ private static URL getStorageConfigURL() throws ConfigurationException url = loader.getResource(configUrl); if (url == null) { - String required = "file:" + File.separator + File.separator; + String required = "file:" + File.pathSeparator() + File.pathSeparator(); if (!configUrl.startsWith(required)) throw new ConfigurationException(String.format( "Expecting URI in variable: [cassandra.config]. Found[%s]. Please prefix the file with [%s%s] for local " + "files and [%s%s] for remote files. If you are executing this from an external tool, it needs " + "to set Config.setClientMode(true) to avoid loading configuration.", - configUrl, required, File.separator, required, File.separator)); - throw new ConfigurationException("Cannot locate " + configUrl + ". If this is a local file, please confirm you've provided " + required + File.separator + " as a URI prefix."); + configUrl, required, File.pathSeparator(), required, File.pathSeparator())); + throw new ConfigurationException("Cannot locate " + configUrl + ". 
If this is a local file, please confirm you've provided " + required + File.pathSeparator() + " as a URI prefix."); } } diff --git a/src/java/org/apache/cassandra/cql3/conditions/ColumnCondition.java b/src/java/org/apache/cassandra/cql3/conditions/ColumnCondition.java index 93ed6ae941bd..d27fd3fd92b5 100644 --- a/src/java/org/apache/cassandra/cql3/conditions/ColumnCondition.java +++ b/src/java/org/apache/cassandra/cql3/conditions/ColumnCondition.java @@ -861,4 +861,4 @@ public String toString() return ToStringBuilder.reflectionToString(this, ToStringStyle.SHORT_PREFIX_STYLE); } } -} \ No newline at end of file +} diff --git a/src/java/org/apache/cassandra/cql3/selection/ResultSetBuilder.java b/src/java/org/apache/cassandra/cql3/selection/ResultSetBuilder.java index 852872a77835..b88411ca7e8c 100644 --- a/src/java/org/apache/cassandra/cql3/selection/ResultSetBuilder.java +++ b/src/java/org/apache/cassandra/cql3/selection/ResultSetBuilder.java @@ -197,4 +197,4 @@ private List getOutputRow() addSize(row); return row; } -} \ No newline at end of file +} diff --git a/src/java/org/apache/cassandra/cql3/statements/StatementType.java b/src/java/org/apache/cassandra/cql3/statements/StatementType.java index d3999319508a..f9c0d3d27935 100644 --- a/src/java/org/apache/cassandra/cql3/statements/StatementType.java +++ b/src/java/org/apache/cassandra/cql3/statements/StatementType.java @@ -135,4 +135,4 @@ public boolean allowUseOfSecondaryIndices() { return false; } -} \ No newline at end of file +} diff --git a/src/java/org/apache/cassandra/db/ColumnFamilyStore.java b/src/java/org/apache/cassandra/db/ColumnFamilyStore.java index abb9c8b72a32..fc4264dee760 100644 --- a/src/java/org/apache/cassandra/db/ColumnFamilyStore.java +++ b/src/java/org/apache/cassandra/db/ColumnFamilyStore.java @@ -17,7 +17,6 @@ */ package org.apache.cassandra.db; -import java.io.File; import java.io.IOException; import java.io.PrintStream; import java.lang.reflect.Constructor; @@ -47,6 +46,8 @@ import org.apache.cassandra.utils.concurrent.AsyncPromise; import org.apache.cassandra.utils.concurrent.CountDownLatch; +import org.apache.cassandra.io.util.File; +import org.apache.cassandra.io.util.FileOutputStreamPlus; import org.apache.cassandra.utils.concurrent.Future; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -516,7 +517,7 @@ public List getDataPaths() throws IOException List dataPaths = new ArrayList<>(); for (File dataPath : directories.getCFDirectories()) { - dataPaths.add(dataPath.getCanonicalPath()); + dataPaths.add(dataPath.canonicalPath()); } return dataPaths; @@ -671,7 +672,7 @@ public static void scrubDataDirectories(TableMetadata metadata) throws StartupE for (File tmpFile : desc.getTemporaryFiles()) { logger.info("Removing unfinished temporary file {}", tmpFile); - tmpFile.delete(); + tmpFile.tryDelete(); } } @@ -697,10 +698,10 @@ public static void scrubDataDirectories(TableMetadata metadata) throws StartupE if (dir.exists()) { assert dir.isDirectory(); - for (File file : Objects.requireNonNull(dir.listFiles())) - if (tmpCacheFilePattern.matcher(file.getName()).matches()) - if (!file.delete()) - logger.warn("could not delete {}", file.getAbsolutePath()); + for (File file : dir.tryList()) + if (tmpCacheFilePattern.matcher(file.name()).matches()) + if (!file.tryDelete()) + logger.warn("could not delete {}", file.absolutePath()); } // also clean out any index leftovers. 
@@ -1882,7 +1883,7 @@ public TableSnapshot snapshotWithoutFlush(String snapshotName, Predicate sstables, Instant creationTime) { Set snapshotDirs = sstables.stream() - .map(s -> Directories.getSnapshotDirectory(s.descriptor, tag).getAbsoluteFile()) + .map(s -> Directories.getSnapshotDirectory(s.descriptor, tag).toAbsolute()) .filter(dir -> !Directories.isSecondaryIndexFolder(dir)) // Remove secondary index subdirectory .collect(Collectors.toCollection(HashSet::new)); @@ -1904,14 +1905,14 @@ protected TableSnapshot createSnapshot(String tag, boolean ephemeral, Duration t SnapshotManifest manifest = new SnapshotManifest(mapToDataFilenames(sstables), ttl, creationTime); File manifestFile = getDirectories().getSnapshotManifestFile(tag); writeSnapshotManifest(manifest, manifestFile); - snapshotDirs.add(manifestFile.getParentFile().getAbsoluteFile()); // manifest may create empty snapshot dir + snapshotDirs.add(manifestFile.parent().toAbsolute()); // manifest may create empty snapshot dir // Write snapshot schema if (!SchemaConstants.isLocalSystemKeyspace(metadata.keyspace) && !SchemaConstants.isReplicatedSystemKeyspace(metadata.keyspace)) { File schemaFile = getDirectories().getSnapshotSchemaFile(tag); writeSnapshotSchema(schemaFile); - snapshotDirs.add(schemaFile.getParentFile().getAbsoluteFile()); // schema may create empty snapshot dir + snapshotDirs.add(schemaFile.parent().toAbsolute()); // schema may create empty snapshot dir } // Maybe create ephemeral marker @@ -1919,7 +1920,7 @@ protected TableSnapshot createSnapshot(String tag, boolean ephemeral, Duration t { File ephemeralSnapshotMarker = getDirectories().getNewEphemeralSnapshotMarkerFile(tag); createEphemeralSnapshotMarkerFile(tag, ephemeralSnapshotMarker); - snapshotDirs.add(ephemeralSnapshotMarker.getParentFile().getAbsoluteFile()); // marker may create empty snapshot dir + snapshotDirs.add(ephemeralSnapshotMarker.parent().toAbsolute()); // marker may create empty snapshot dir } TableSnapshot snapshot = new TableSnapshot(metadata.keyspace, metadata.name, tag, manifest.createdAt, @@ -1933,9 +1934,7 @@ private SnapshotManifest writeSnapshotManifest(SnapshotManifest manifest, File m { try { - if (!manifestFile.getParentFile().exists()) - manifestFile.getParentFile().mkdirs(); - + manifestFile.parent().tryCreateDirectories(); manifest.serializeToJsonFile(manifestFile); return manifest; } @@ -1954,10 +1953,10 @@ private void writeSnapshotSchema(File schemaFile) { try { - if (!schemaFile.getParentFile().exists()) - schemaFile.getParentFile().mkdirs(); + if (!schemaFile.parent().exists()) + schemaFile.parent().tryCreateDirectories(); - try (PrintStream out = new PrintStream(schemaFile)) + try (PrintStream out = new PrintStream(new FileOutputStreamPlus(schemaFile))) { SchemaCQLHelper.reCreateStatementsForSchemaCql(metadata(), keyspace.getMetadata().types) @@ -1974,19 +1973,19 @@ private void createEphemeralSnapshotMarkerFile(final String snapshot, File ephem { try { - if (!ephemeralSnapshotMarker.getParentFile().exists()) - ephemeralSnapshotMarker.getParentFile().mkdirs(); + if (!ephemeralSnapshotMarker.parent().exists()) + ephemeralSnapshotMarker.parent().tryCreateDirectories(); Files.createFile(ephemeralSnapshotMarker.toPath()); if (logger.isTraceEnabled()) - logger.trace("Created ephemeral snapshot marker file on {}.", ephemeralSnapshotMarker.getAbsolutePath()); + logger.trace("Created ephemeral snapshot marker file on {}.", ephemeralSnapshotMarker.absolutePath()); } catch (IOException e) { logger.warn(String.format("Could not create 
marker file %s for ephemeral snapshot %s. " + "In case there is a failure in the operation that created " + "this snapshot, you may need to clean it manually afterwards.", - ephemeralSnapshotMarker.getAbsolutePath(), snapshot), e); + ephemeralSnapshotMarker.absolutePath(), snapshot), e); } } diff --git a/src/java/org/apache/cassandra/db/Directories.java b/src/java/org/apache/cassandra/db/Directories.java index 311cfcb035c2..8600f13077b1 100644 --- a/src/java/org/apache/cassandra/db/Directories.java +++ b/src/java/org/apache/cassandra/db/Directories.java @@ -17,8 +17,6 @@ */ package org.apache.cassandra.db; -import java.io.*; -import java.nio.file.*; import java.time.Instant; import java.util.*; import java.util.concurrent.ThreadLocalRandom; @@ -26,11 +24,23 @@ import java.util.stream.Collectors; import com.google.common.annotations.VisibleForTesting; + +import java.io.IOError; +import java.io.IOException; +import java.nio.file.FileStore; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.*; +import java.util.concurrent.ThreadLocalRandom; +import java.util.function.BiPredicate; + import com.google.common.collect.ImmutableMap; import com.google.common.collect.Iterables; import com.google.common.collect.Maps; import com.google.common.util.concurrent.RateLimiter; +import org.apache.cassandra.io.util.File; import org.apache.commons.lang3.StringUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -44,6 +54,7 @@ import org.apache.cassandra.io.util.FileUtils; import org.apache.cassandra.io.sstable.*; import org.apache.cassandra.schema.SchemaConstants; +import org.apache.cassandra.io.util.PathUtils; import org.apache.cassandra.schema.TableMetadata; import org.apache.cassandra.service.snapshot.SnapshotManifest; import org.apache.cassandra.service.snapshot.TableSnapshot; @@ -149,25 +160,25 @@ public static boolean hasPrivilege(File file, FileAction action) switch (action) { case X: - privilege = file.canExecute(); + privilege = file.isExecutable(); break; case W: - privilege = file.canWrite(); + privilege = file.isWritable(); break; case XW: - privilege = file.canExecute() && file.canWrite(); + privilege = file.isExecutable() && file.isWritable(); break; case R: - privilege = file.canRead(); + privilege = file.isReadable(); break; case XR: - privilege = file.canExecute() && file.canRead(); + privilege = file.isExecutable() && file.isReadable(); break; case RW: - privilege = file.canRead() && file.canWrite(); + privilege = file.isReadable() && file.isWritable(); break; case XRW: - privilege = file.canExecute() && file.canRead() && file.canWrite(); + privilege = file.isExecutable() && file.isReadable() && file.isWritable(); break; } return privilege; @@ -213,7 +224,7 @@ public Directories(final TableMetadata metadata, DataDirectory[] paths) // check if old SSTable directory exists File dataPath = new File(paths[i].location, oldSSTableRelativePath); dataPaths[i] = dataPath; - canonicalPathsBuilder.put(Paths.get(FileUtils.getCanonicalPath(dataPath)), paths[i]); + canonicalPathsBuilder.put(dataPath.toCanonical().toPath(), paths[i]); } boolean olderDirectoryExists = Iterables.any(Arrays.asList(dataPaths), File::exists); if (!olderDirectoryExists) @@ -225,7 +236,7 @@ public Directories(final TableMetadata metadata, DataDirectory[] paths) { File dataPath = new File(paths[i].location, newSSTableRelativePath); dataPaths[i] = dataPath; - canonicalPathsBuilder.put(Paths.get(FileUtils.getCanonicalPath(dataPath)), paths[i]); + 
canonicalPathsBuilder.put(dataPath.toCanonical().toPath(), paths[i]); } } // if index, then move to its own directory @@ -236,7 +247,7 @@ public Directories(final TableMetadata metadata, DataDirectory[] paths) { File dataPath = new File(dataPaths[i], indexNameWithDot); dataPaths[i] = dataPath; - canonicalPathsBuilder.put(Paths.get(FileUtils.getCanonicalPath(dataPath)), paths[i]); + canonicalPathsBuilder.put(dataPath.toCanonical().toPath(), paths[i]); } } @@ -259,22 +270,16 @@ public Directories(final TableMetadata metadata, DataDirectory[] paths) { for (File dataPath : dataPaths) { - File[] indexFiles = dataPath.getParentFile().listFiles(new FileFilter() - { - @Override - public boolean accept(File file) - { - if (file.isDirectory()) - return false; - - Descriptor desc = SSTable.tryDescriptorFromFilename(file); - return desc != null && desc.ksname.equals(metadata.keyspace) && desc.cfname.equals(metadata.name); + File[] indexFiles = dataPath.parent().tryList(file -> { + if (file.isDirectory()) + return false; - } + Descriptor desc = SSTable.tryDescriptorFromFilename(file); + return desc != null && desc.ksname.equals(metadata.keyspace) && desc.cfname.equals(metadata.name); }); for (File indexFile : indexFiles) { - File destFile = new File(dataPath, indexFile.getName()); + File destFile = new File(dataPath, indexFile.name()); logger.trace("Moving index file {} to {}", indexFile, destFile); FileUtils.renameWithConfirm(indexFile, destFile); } @@ -295,8 +300,8 @@ public File getLocationForDisk(DataDirectory dataDirectory) for (File dir : dataPaths) { // Note that we must compare absolute paths (not canonical) here since keyspace directories might be symlinks - Path dirPath = Paths.get(dir.getAbsolutePath()); - Path locationPath = Paths.get(dataDirectory.location.getAbsolutePath()); + Path dirPath = dir.toAbsolute().toPath(); + Path locationPath = dataDirectory.location.toAbsolute().toPath(); if (dirPath.startsWith(locationPath)) return dir; } @@ -539,7 +544,7 @@ public static File getSnapshotDirectory(File location, String snapshotName) { if (isSecondaryIndexFolder(location)) { - return getOrCreate(location.getParentFile(), SNAPSHOT_SUBDIR, snapshotName, location.getName()); + return getOrCreate(location.parent(), SNAPSHOT_SUBDIR, snapshotName, location.name()); } else { @@ -584,7 +589,7 @@ public static File getBackupsDirectory(File location) { if (isSecondaryIndexFolder(location)) { - return getOrCreate(location.getParentFile(), BACKUPS_SUBDIR, location.getName()); + return getOrCreate(location.parent(), BACKUPS_SUBDIR, location.name()); } else { @@ -626,9 +631,14 @@ public DataDirectory(File location) this.location = location; } + public DataDirectory(Path location) + { + this.location = new File(location); + } + public long getAvailableSpace() { - long availableSpace = FileUtils.getUsableSpace(location) - DatabaseDescriptor.getMinFreeSpacePerDriveInBytes(); + long availableSpace = PathUtils.tryGetSpace(location.toPath(), FileStore::getUsableSpace) - DatabaseDescriptor.getMinFreeSpacePerDriveInBytes(); return availableSpace > 0 ? 
availableSpace : 0; } @@ -950,6 +960,7 @@ private BiPredicate getFilter() public Map listSnapshots() { +//<<<<<<< HEAD Map> snapshotDirsByTag = listSnapshotDirsByTag(); Map snapshots = Maps.newHashMapWithExpectedSize(snapshotDirsByTag.size()); @@ -1006,7 +1017,7 @@ public List listEphemeralSnapshots() for (File snapshot : listAllSnapshots()) { if (getEphemeralSnapshotMarkerFile(snapshot).exists()) - ephemeralSnapshots.add(snapshot.getName()); + ephemeralSnapshots.add(snapshot.name()); } return ephemeralSnapshots; } @@ -1017,11 +1028,11 @@ private List listAllSnapshots() for (final File dir : dataPaths) { File snapshotDir = isSecondaryIndexFolder(dir) - ? new File(dir.getParent(), SNAPSHOT_SUBDIR) + ? new File(dir.parentPath(), SNAPSHOT_SUBDIR) : new File(dir, SNAPSHOT_SUBDIR); if (snapshotDir.exists() && snapshotDir.isDirectory()) { - final File[] snapshotDirs = snapshotDir.listFiles(); + final File[] snapshotDirs = snapshotDir.tryList(); if (snapshotDirs != null) { for (final File snapshot : snapshotDirs) @@ -1043,17 +1054,17 @@ protected Map> listSnapshotDirsByTag() for (final File dir : dataPaths) { File snapshotDir = isSecondaryIndexFolder(dir) - ? new File(dir.getParent(), SNAPSHOT_SUBDIR) + ? new File(dir.parentPath(), SNAPSHOT_SUBDIR) : new File(dir, SNAPSHOT_SUBDIR); if (snapshotDir.exists() && snapshotDir.isDirectory()) { - final File[] snapshotDirs = snapshotDir.listFiles(); + final File[] snapshotDirs = snapshotDir.tryList(); if (snapshotDirs != null) { for (final File snapshot : snapshotDirs) { if (snapshot.isDirectory()) { - snapshotDirsByTag.computeIfAbsent(snapshot.getName(), k -> new LinkedHashSet<>()).add(snapshot.getAbsoluteFile()); + snapshotDirsByTag.computeIfAbsent(snapshot.name(), k -> new LinkedHashSet<>()).add(snapshot.toAbsolute()); } } } @@ -1069,7 +1080,7 @@ public boolean snapshotExists(String snapshotName) File snapshotDir; if (isSecondaryIndexFolder(dir)) { - snapshotDir = new File(dir.getParentFile(), join(SNAPSHOT_SUBDIR, snapshotName, dir.getName())); + snapshotDir = new File(dir.parent(), join(SNAPSHOT_SUBDIR, snapshotName, dir.name())); } else { @@ -1120,7 +1131,7 @@ public long trueSnapshotsSize() for (File dir : dataPaths) { File snapshotDir = isSecondaryIndexFolder(dir) - ? new File(dir.getParent(), SNAPSHOT_SUBDIR) + ? new File(dir.parentPath(), SNAPSHOT_SUBDIR) : new File(dir, SNAPSHOT_SUBDIR); result += getTrueAllocatedSizeIn(snapshotDir); } @@ -1165,7 +1176,7 @@ public static List getKSChildDirectories(String ksName) for (DataDirectory dataDirectory : dataDirectories.getAllDirectories()) { File ksDir = new File(dataDirectory.location, ksName); - File[] cfDirs = ksDir.listFiles(); + File[] cfDirs = ksDir.tryList(); if (cfDirs == null) continue; for (File cfDir : cfDirs) @@ -1179,7 +1190,7 @@ public static List getKSChildDirectories(String ksName) public static boolean isSecondaryIndexFolder(File dir) { - return dir.getName().startsWith(SECONDARY_INDEX_NAME_SEPARATOR); + return dir.name().startsWith(SECONDARY_INDEX_NAME_SEPARATOR); } public List getCFDirectories() @@ -1201,7 +1212,7 @@ private static File getOrCreate(File base, String... 
subdirs) if (!dir.isDirectory()) throw new AssertionError(String.format("Invalid directory path %s: path exists but is not a directory", dir)); } - else if (!dir.mkdirs() && !(dir.exists() && dir.isDirectory())) + else if (!dir.tryCreateDirectories() && !(dir.exists() && dir.isDirectory())) { throw new FSWriteError(new IOException("Unable to create directory " + dir), dir); } @@ -1210,7 +1221,7 @@ else if (!dir.mkdirs() && !(dir.exists() && dir.isDirectory())) private static String join(String... s) { - return StringUtils.join(s, File.separator); + return StringUtils.join(s, File.pathSeparator()); } private class SSTableSizeSummer extends DirectorySizeCalculator @@ -1225,7 +1236,7 @@ private class SSTableSizeSummer extends DirectorySizeCalculator @Override public boolean isAcceptable(Path path) { - File file = path.toFile(); + File file = new File(path); Descriptor desc = SSTable.tryDescriptorFromFilename(file); return desc != null && desc.ksname.equals(metadata.keyspace) diff --git a/src/java/org/apache/cassandra/db/DisallowedDirectories.java b/src/java/org/apache/cassandra/db/DisallowedDirectories.java index f030253f70a8..32ada8b128aa 100644 --- a/src/java/org/apache/cassandra/db/DisallowedDirectories.java +++ b/src/java/org/apache/cassandra/db/DisallowedDirectories.java @@ -20,7 +20,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.io.File; import java.util.Collections; import java.util.Set; import java.util.concurrent.CopyOnWriteArraySet; @@ -28,6 +27,7 @@ import com.google.common.annotations.VisibleForTesting; +import org.apache.cassandra.io.util.File; import org.apache.cassandra.utils.MBeanWrapper; public class DisallowedDirectories implements DisallowedDirectoriesMBean @@ -145,11 +145,11 @@ private static File getDirectory(File file) return file; if (file.isFile()) - return file.getParentFile(); + return file.parent(); // the file with path cannot be read - try determining the directory manually. - if (file.getPath().endsWith(".db")) - return file.getParentFile(); + if (file.path().endsWith(".db")) + return file.parent(); // We may not be able to determine if it's a file or a directory if // we were called because we couldn't create the file/directory. 
diff --git a/src/java/org/apache/cassandra/db/DisallowedDirectoriesMBean.java b/src/java/org/apache/cassandra/db/DisallowedDirectoriesMBean.java index 64f15e53efbb..7c6d530d5306 100644 --- a/src/java/org/apache/cassandra/db/DisallowedDirectoriesMBean.java +++ b/src/java/org/apache/cassandra/db/DisallowedDirectoriesMBean.java @@ -17,9 +17,9 @@ */ package org.apache.cassandra.db; -import java.io.File; import java.util.Set; +import org.apache.cassandra.io.util.File; public interface DisallowedDirectoriesMBean { public Set getUnreadableDirectories(); diff --git a/src/java/org/apache/cassandra/db/Keyspace.java b/src/java/org/apache/cassandra/db/Keyspace.java index 2a996904e064..795230e71b7f 100644 --- a/src/java/org/apache/cassandra/db/Keyspace.java +++ b/src/java/org/apache/cassandra/db/Keyspace.java @@ -17,7 +17,6 @@ */ package org.apache.cassandra.db; -import java.io.File; import java.io.IOException; import java.time.Instant; import java.util.ArrayList; @@ -37,9 +36,6 @@ import com.google.common.annotations.VisibleForTesting; import com.google.common.collect.Iterables; import com.google.common.util.concurrent.RateLimiter; - -import org.apache.cassandra.utils.concurrent.AsyncPromise; -import org.apache.cassandra.utils.concurrent.Future; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -55,6 +51,7 @@ import org.apache.cassandra.index.Index; import org.apache.cassandra.index.SecondaryIndexManager; import org.apache.cassandra.index.transactions.UpdateTransaction; +import org.apache.cassandra.io.util.File; import org.apache.cassandra.io.sstable.format.SSTableReader; import org.apache.cassandra.locator.AbstractReplicationStrategy; import org.apache.cassandra.metrics.KeyspaceMetrics; @@ -72,6 +69,8 @@ import org.apache.cassandra.utils.ByteBufferUtil; import org.apache.cassandra.utils.FBUtilities; import org.apache.cassandra.utils.JVMStabilityInspector; +import org.apache.cassandra.utils.concurrent.AsyncPromise; +import org.apache.cassandra.utils.concurrent.Future; import org.apache.cassandra.utils.concurrent.OpOrder; import org.apache.cassandra.utils.concurrent.UncheckedInterruptedException; import org.apache.cassandra.utils.concurrent.Promise; diff --git a/src/java/org/apache/cassandra/db/ReadResponse.java b/src/java/org/apache/cassandra/db/ReadResponse.java index 52e6fd571163..568b1a1bc583 100644 --- a/src/java/org/apache/cassandra/db/ReadResponse.java +++ b/src/java/org/apache/cassandra/db/ReadResponse.java @@ -30,6 +30,7 @@ import org.apache.cassandra.io.util.DataInputPlus; import org.apache.cassandra.io.util.DataOutputBuffer; import org.apache.cassandra.io.util.DataOutputPlus; +import org.apache.cassandra.io.util.File; import org.apache.cassandra.net.MessagingService; import org.apache.cassandra.schema.TableMetadata; import org.apache.cassandra.utils.ByteBufferUtil; diff --git a/src/java/org/apache/cassandra/db/SSTableImporter.java b/src/java/org/apache/cassandra/db/SSTableImporter.java index 989ff12297a7..594955910e20 100644 --- a/src/java/org/apache/cassandra/db/SSTableImporter.java +++ b/src/java/org/apache/cassandra/db/SSTableImporter.java @@ -18,7 +18,6 @@ package org.apache.cassandra.db; -import java.io.File; import java.io.IOException; import java.util.ArrayList; import java.util.Collections; @@ -29,6 +28,7 @@ import com.google.common.annotations.VisibleForTesting; +import org.apache.cassandra.io.util.File; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/src/java/org/apache/cassandra/db/SystemKeyspace.java 
b/src/java/org/apache/cassandra/db/SystemKeyspace.java index 279c99ece236..a7cbe4043c96 100644 --- a/src/java/org/apache/cassandra/db/SystemKeyspace.java +++ b/src/java/org/apache/cassandra/db/SystemKeyspace.java @@ -17,7 +17,6 @@ */ package org.apache.cassandra.db; -import java.io.File; import java.io.IOError; import java.io.IOException; import java.net.InetAddress; @@ -37,6 +36,8 @@ import com.google.common.collect.SetMultimap; import com.google.common.collect.Sets; import com.google.common.io.ByteStreams; +import com.google.common.util.concurrent.ListenableFuture; +import org.apache.cassandra.io.util.File; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -1505,7 +1506,7 @@ private static String getPreviousVersionString() // from there, but it informs us that this isn't a completely new node. for (File dataDirectory : Directories.getKSChildDirectories(SchemaConstants.SYSTEM_KEYSPACE_NAME)) { - if (dataDirectory.getName().equals("Versions") && dataDirectory.listFiles().length > 0) + if (dataDirectory.name().equals("Versions") && dataDirectory.tryList().length > 0) { logger.trace("Found unreadable versions info in pre 1.2 system.Versions table"); return UNREADABLE_VERSION.toString(); diff --git a/src/java/org/apache/cassandra/db/VirtualTableSinglePartitionReadQuery.java b/src/java/org/apache/cassandra/db/VirtualTableSinglePartitionReadQuery.java index f96f6525d648..32346e85a2ea 100644 --- a/src/java/org/apache/cassandra/db/VirtualTableSinglePartitionReadQuery.java +++ b/src/java/org/apache/cassandra/db/VirtualTableSinglePartitionReadQuery.java @@ -187,4 +187,4 @@ public PartitionIterator execute(ConsistencyLevel consistency, ClientState clien .collect(Collectors.toList())); } } -} \ No newline at end of file +} diff --git a/src/java/org/apache/cassandra/db/WindowsFailedSnapshotTracker.java b/src/java/org/apache/cassandra/db/WindowsFailedSnapshotTracker.java index 134fb1161172..2cb5eb1162d1 100644 --- a/src/java/org/apache/cassandra/db/WindowsFailedSnapshotTracker.java +++ b/src/java/org/apache/cassandra/db/WindowsFailedSnapshotTracker.java @@ -19,21 +19,25 @@ package org.apache.cassandra.db; import java.io.BufferedReader; -import java.io.File; -import java.io.FileReader; -import java.io.FileWriter; + import java.io.IOException; +import java.io.OutputStreamWriter; import java.io.PrintWriter; import java.nio.file.Files; import java.nio.file.Paths; import com.google.common.annotations.VisibleForTesting; +import org.apache.cassandra.io.util.File; +import org.apache.cassandra.io.util.FileOutputStreamPlus; +import org.apache.cassandra.io.util.FileReader; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.cassandra.config.DatabaseDescriptor; import org.apache.cassandra.io.util.FileUtils; +import static org.apache.cassandra.io.util.File.WriteMode.APPEND; + public class WindowsFailedSnapshotTracker { @@ -44,7 +48,7 @@ public class WindowsFailedSnapshotTracker // Need to handle null for unit tests public static final String TODELETEFILE = System.getenv("CASSANDRA_HOME") == null ? ".toDelete" - : System.getenv("CASSANDRA_HOME") + File.separator + ".toDelete"; + : System.getenv("CASSANDRA_HOME") + File.pathSeparator() + ".toDelete"; public static void deleteOldSnapshots() { @@ -81,7 +85,7 @@ public static void deleteOldSnapshots() } // Only delete the old .toDelete file if we succeed in deleting all our known bad snapshots. 
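The WindowsFailedSnapshotTracker hunks that follow swap java.nio Files.delete(Paths.get(...)) for the wrapper's own delete(), and rebuild the appending PrintWriter on top of FileOutputStreamPlus with File.WriteMode.APPEND (the static import added above). Below is a hedged sketch of that construction using only types visible in this patch; the class name and the ".toDelete" path literal are illustrative.

    import java.io.OutputStreamWriter;
    import java.io.PrintWriter;

    import org.apache.cassandra.io.util.File;
    import org.apache.cassandra.io.util.FileOutputStreamPlus;

    import static org.apache.cassandra.io.util.File.WriteMode.APPEND;

    public final class AppendSketch
    {
        public static void main(String[] args) throws Exception
        {
            String toDelete = ".toDelete";   // illustrative path

            // was: new PrintWriter(new FileWriter(toDelete, true))
            try (PrintWriter out = new PrintWriter(new OutputStreamWriter(new FileOutputStreamPlus(toDelete, APPEND))))
            {
                out.println("snapshot-dir-to-delete");
            }

            // was: Files.delete(Paths.get(toDelete))
            new File(toDelete).delete();
        }
    }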
- Files.delete(Paths.get(TODELETEFILE)); + new File(TODELETEFILE).delete(); } catch (IOException e) { @@ -91,7 +95,7 @@ public static void deleteOldSnapshots() try { - _failedSnapshotFile = new PrintWriter(new FileWriter(TODELETEFILE, true)); + _failedSnapshotFile = new PrintWriter(new OutputStreamWriter(new FileOutputStreamPlus(TODELETEFILE, APPEND))); } catch (IOException e) { diff --git a/src/java/org/apache/cassandra/db/aggregation/GroupMaker.java b/src/java/org/apache/cassandra/db/aggregation/GroupMaker.java index 90cdab2d1960..fc62f211b329 100644 --- a/src/java/org/apache/cassandra/db/aggregation/GroupMaker.java +++ b/src/java/org/apache/cassandra/db/aggregation/GroupMaker.java @@ -137,4 +137,4 @@ else if (lastClustering != null && comparator.compare(lastClustering, clustering return isNew; } } -} \ No newline at end of file +} diff --git a/src/java/org/apache/cassandra/db/commitlog/AbstractCommitLogSegmentManager.java b/src/java/org/apache/cassandra/db/commitlog/AbstractCommitLogSegmentManager.java old mode 100755 new mode 100644 index a3753d2cd74a..c9b14f403a4a --- a/src/java/org/apache/cassandra/db/commitlog/AbstractCommitLogSegmentManager.java +++ b/src/java/org/apache/cassandra/db/commitlog/AbstractCommitLogSegmentManager.java @@ -17,7 +17,6 @@ */ package org.apache.cassandra.db.commitlog; -import java.io.File; import java.io.IOException; import java.util.*; import java.util.concurrent.ConcurrentLinkedQueue; @@ -39,6 +38,7 @@ import org.apache.cassandra.db.Keyspace; import org.apache.cassandra.db.Mutation; import org.apache.cassandra.io.compress.BufferType; +import org.apache.cassandra.io.util.File; import org.apache.cassandra.io.util.FileUtils; import org.apache.cassandra.io.util.SimpleCachedBufferPool; import org.apache.cassandra.schema.Schema; diff --git a/src/java/org/apache/cassandra/db/commitlog/CommitLog.java b/src/java/org/apache/cassandra/db/commitlog/CommitLog.java index 5245c0a3ff9a..5f386d1addcc 100644 --- a/src/java/org/apache/cassandra/db/commitlog/CommitLog.java +++ b/src/java/org/apache/cassandra/db/commitlog/CommitLog.java @@ -17,13 +17,16 @@ */ package org.apache.cassandra.db.commitlog; -import java.io.*; + +import java.io.IOException; import java.nio.ByteBuffer; import java.util.*; +import java.util.function.BiPredicate; import java.util.function.Function; import java.util.zip.CRC32; import com.google.common.annotations.VisibleForTesting; +import org.apache.cassandra.io.util.File; import org.apache.commons.lang3.StringUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -147,22 +150,22 @@ synchronized public CommitLog start() */ public int recoverSegmentsOnDisk() throws IOException { - FilenameFilter unmanagedFilesFilter = (dir, name) -> CommitLogDescriptor.isValid(name) && CommitLogSegment.shouldReplay(name); + BiPredicate unmanagedFilesFilter = (dir, name) -> CommitLogDescriptor.isValid(name) && CommitLogSegment.shouldReplay(name); // submit all files for this segment manager for archiving prior to recovery - CASSANDRA-6904 // The files may have already been archived by normal CommitLog operation. This may cause errors in this // archiving pass, which we should not treat as serious. 
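The CommitLog hunk here replaces the java.io FilenameFilter plus listFiles() pair with a BiPredicate filter passed to tryList(), declared just above and applied in the loop that follows. A hedged sketch of that listing pattern is below; the generic parameters of BiPredicate appear to have been stripped in this rendering of the patch, so BiPredicate<File, String> is inferred from the (dir, name) lambdas in the hunks, and the ".log" predicate and class name are illustrative.

    import java.util.function.BiPredicate;

    import org.apache.cassandra.io.util.File;

    public final class TryListSketch
    {
        public static void main(String[] args)
        {
            // Inferred shape: the hunks pass lambdas of the form (dir, name) -> boolean.
            BiPredicate<File, String> isLog = (dir, name) -> name.endsWith(".log");

            // was: new java.io.File(path).listFiles(filenameFilter)
            File directory = new File(args.length > 0 ? args[0] : ".");
            File[] files = directory.tryList(isLog);

            // Like listFiles(), tryList() can return null when the directory cannot be listed;
            // the CommitLogArchiver and LogRecord hunks later in this patch check for exactly that.
            if (files == null)
                throw new RuntimeException("Unable to list directory " + directory);

            for (File f : files)
                System.out.println(f.name() + " -> " + f.path());
        }
    }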
- for (File file : new File(segmentManager.storageDirectory).listFiles(unmanagedFilesFilter)) + for (File file : new File(segmentManager.storageDirectory).tryList(unmanagedFilesFilter)) { - archiver.maybeArchive(file.getPath(), file.getName()); - archiver.maybeWaitForArchiving(file.getName()); + archiver.maybeArchive(file.path(), file.name()); + archiver.maybeWaitForArchiving(file.name()); } assert archiver.archivePending.isEmpty() : "Not all commit log archive tasks were completed before restore"; archiver.maybeRestoreArchive(); // List the files again as archiver may have added segments. - File[] files = new File(segmentManager.storageDirectory).listFiles(unmanagedFilesFilter); + File[] files = new File(segmentManager.storageDirectory).tryList(unmanagedFilesFilter); int replayed = 0; if (files.length == 0) { @@ -475,7 +478,7 @@ synchronized public void stopUnsafe(boolean deleteSegments) segmentManager.stopUnsafe(deleteSegments); CommitLogSegment.resetReplayLimit(); if (DatabaseDescriptor.isCDCEnabled() && deleteSegments) - for (File f : new File(DatabaseDescriptor.getCDCLogLocation()).listFiles()) + for (File f : new File(DatabaseDescriptor.getCDCLogLocation()).tryList()) FileUtils.deleteWithConfirm(f); } diff --git a/src/java/org/apache/cassandra/db/commitlog/CommitLogArchiver.java b/src/java/org/apache/cassandra/db/commitlog/CommitLogArchiver.java index f94b2699cb3f..5ee79ff4d420 100644 --- a/src/java/org/apache/cassandra/db/commitlog/CommitLogArchiver.java +++ b/src/java/org/apache/cassandra/db/commitlog/CommitLogArchiver.java @@ -20,7 +20,6 @@ */ package org.apache.cassandra.db.commitlog; -import java.io.File; import java.io.IOException; import java.io.InputStream; import java.text.ParseException; @@ -34,6 +33,7 @@ import org.apache.cassandra.config.DatabaseDescriptor; import org.apache.cassandra.exceptions.ConfigurationException; +import org.apache.cassandra.io.util.File; import org.apache.cassandra.schema.CompressionParams; import org.apache.cassandra.utils.FBUtilities; import org.apache.cassandra.utils.WrappedRunnable; @@ -111,7 +111,7 @@ public static CommitLogArchiver construct() File directory = new File(dir); if (!directory.exists()) { - if (!directory.mkdir()) + if (!directory.tryCreateDirectory()) { throw new RuntimeException("Unable to create directory: " + dir); } @@ -225,7 +225,7 @@ public void maybeRestoreArchive() for (String dir : restoreDirectories.split(DELIMITER)) { - File[] files = new File(dir).listFiles(); + File[] files = new File(dir).tryList(); if (files == null) { throw new RuntimeException("Unable to list directory " + dir); @@ -233,14 +233,14 @@ public void maybeRestoreArchive() for (File fromFile : files) { CommitLogDescriptor fromHeader = CommitLogDescriptor.fromHeader(fromFile, DatabaseDescriptor.getEncryptionContext()); - CommitLogDescriptor fromName = CommitLogDescriptor.isValid(fromFile.getName()) ? CommitLogDescriptor.fromFileName(fromFile.getName()) : null; + CommitLogDescriptor fromName = CommitLogDescriptor.isValid(fromFile.name()) ? 
CommitLogDescriptor.fromFileName(fromFile.name()) : null; CommitLogDescriptor descriptor; if (fromHeader == null && fromName == null) - throw new IllegalStateException("Cannot safely construct descriptor for segment, either from its name or its header: " + fromFile.getPath()); + throw new IllegalStateException("Cannot safely construct descriptor for segment, either from its name or its header: " + fromFile.path()); else if (fromHeader != null && fromName != null && !fromHeader.equalsIgnoringCompression(fromName)) - throw new IllegalStateException(String.format("Cannot safely construct descriptor for segment, as name and header descriptors do not match (%s vs %s): %s", fromHeader, fromName, fromFile.getPath())); + throw new IllegalStateException(String.format("Cannot safely construct descriptor for segment, as name and header descriptors do not match (%s vs %s): %s", fromHeader, fromName, fromFile.path())); else if (fromName != null && fromHeader == null) - throw new IllegalStateException("Cannot safely construct descriptor for segment, as name descriptor implies a version that should contain a header descriptor, but that descriptor could not be read: " + fromFile.getPath()); + throw new IllegalStateException("Cannot safely construct descriptor for segment, as name descriptor implies a version that should contain a header descriptor, but that descriptor could not be read: " + fromFile.path()); else if (fromHeader != null) descriptor = fromHeader; else descriptor = fromName; @@ -264,12 +264,12 @@ else if (fromHeader != null) if (toFile.exists()) { logger.trace("Skipping restore of archive {} as the segment already exists in the restore location {}", - fromFile.getPath(), toFile.getPath()); + fromFile.path(), toFile.path()); continue; } - String command = FROM.matcher(restoreCommand).replaceAll(Matcher.quoteReplacement(fromFile.getPath())); - command = TO.matcher(command).replaceAll(Matcher.quoteReplacement(toFile.getPath())); + String command = FROM.matcher(restoreCommand).replaceAll(Matcher.quoteReplacement(fromFile.path())); + command = TO.matcher(command).replaceAll(Matcher.quoteReplacement(toFile.path())); try { exec(command); diff --git a/src/java/org/apache/cassandra/db/commitlog/CommitLogDescriptor.java b/src/java/org/apache/cassandra/db/commitlog/CommitLogDescriptor.java index 700f12a242ca..9e95658f3e0d 100644 --- a/src/java/org/apache/cassandra/db/commitlog/CommitLogDescriptor.java +++ b/src/java/org/apache/cassandra/db/commitlog/CommitLogDescriptor.java @@ -22,9 +22,7 @@ import java.io.DataInput; import java.io.EOFException; -import java.io.File; import java.io.IOException; -import java.io.RandomAccessFile; import java.nio.ByteBuffer; import java.nio.charset.StandardCharsets; import java.util.Collections; @@ -40,6 +38,8 @@ import org.apache.cassandra.config.ParameterizedClass; import org.apache.cassandra.exceptions.ConfigurationException; import org.apache.cassandra.io.FSReadError; +import org.apache.cassandra.io.util.File; +import org.apache.cassandra.io.util.FileInputStreamPlus; import org.apache.cassandra.net.MessagingService; import org.apache.cassandra.security.EncryptionContext; import org.json.simple.JSONValue; @@ -131,9 +131,8 @@ static String constructParametersString(ParameterizedClass compression, Encrypti public static CommitLogDescriptor fromHeader(File file, EncryptionContext encryptionContext) { - try (RandomAccessFile raf = new RandomAccessFile(file, "r")) + try (FileInputStreamPlus raf = new FileInputStreamPlus(file)) { - assert raf.getFilePointer() == 0; return 
readHeader(raf, encryptionContext); } catch (EOFException e) diff --git a/src/java/org/apache/cassandra/db/commitlog/CommitLogReader.java b/src/java/org/apache/cassandra/db/commitlog/CommitLogReader.java index 51235801942b..451ee37595d0 100644 --- a/src/java/org/apache/cassandra/db/commitlog/CommitLogReader.java +++ b/src/java/org/apache/cassandra/db/commitlog/CommitLogReader.java @@ -25,6 +25,7 @@ import java.util.zip.CRC32; import com.google.common.annotations.VisibleForTesting; +import org.apache.cassandra.io.util.File; import org.apache.commons.lang3.StringUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -99,7 +100,7 @@ static List filterCommitLogFiles(File[] toFilter) { if (shouldSkip(file)) { - logger.info("Skipping playback of empty log: {}", file.getName()); + logger.info("Skipping playback of empty log: {}", file.name()); } else { @@ -172,7 +173,7 @@ public void readCommitLogSegment(CommitLogReadHandler handler, boolean tolerateTruncation) throws IOException { // just transform from the file name (no reading of headers) to determine version - CommitLogDescriptor desc = CommitLogDescriptor.fromFileName(file.getName()); + CommitLogDescriptor desc = CommitLogDescriptor.fromFileName(file.name()); try(RandomAccessReader reader = RandomAccessReader.open(file)) { @@ -263,7 +264,7 @@ public void readCommitLogSegment(CommitLogReadHandler handler, private boolean shouldSkipSegmentId(File file, CommitLogDescriptor desc, CommitLogPosition minPosition) { logger.debug("Reading {} (CL version {}, messaging version {}, compression {})", - file.getPath(), + file.path(), desc.version, desc.getMessagingVersion(), desc.compression); diff --git a/src/java/org/apache/cassandra/db/commitlog/CommitLogReplayer.java b/src/java/org/apache/cassandra/db/commitlog/CommitLogReplayer.java index 501ee02e3430..b59480e05a7e 100644 --- a/src/java/org/apache/cassandra/db/commitlog/CommitLogReplayer.java +++ b/src/java/org/apache/cassandra/db/commitlog/CommitLogReplayer.java @@ -18,7 +18,6 @@ */ package org.apache.cassandra.db.commitlog; -import java.io.File; import java.io.IOException; import java.util.*; import java.util.concurrent.atomic.AtomicInteger; @@ -30,6 +29,7 @@ import com.google.common.collect.Multimap; import com.google.common.collect.Ordering; +import org.apache.cassandra.io.util.File; import org.apache.commons.lang3.StringUtils; import org.apache.cassandra.utils.concurrent.Future; @@ -171,7 +171,7 @@ private void handleCDCReplayCompletion(File f) throws IOException // Can only reach this point if CDC is enabled, thus we have a CDCSegmentManager ((CommitLogSegmentManagerCDC)CommitLog.instance.segmentManager).addCDCSize(f.length()); - File dest = new File(DatabaseDescriptor.getCDCLogLocation(), f.getName()); + File dest = new File(DatabaseDescriptor.getCDCLogLocation(), f.name()); // If hard link already exists, assume it's from a previous node run. If people are mucking around in the cdc_raw // directory that's on them. 
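The CommitLogDescriptor hunk above replaces a read-only RandomAccessFile with FileInputStreamPlus, and later hunks in this patch (Verifier, ComponentContext) obtain readers via File.newInputStream() and File.newReadChannel() for the same purpose. The sketch below shows the channel-based variant, combining File.newReadChannel() from this patch with the standard java.nio FileChannel API; the eight-byte header size, default path, and class name are illustrative only.

    import java.io.IOException;
    import java.nio.ByteBuffer;
    import java.nio.channels.FileChannel;

    import org.apache.cassandra.io.util.File;

    public final class ReadChannelSketch
    {
        // was: new RandomAccessFile(path, "r").getChannel(); now the wrapper hands out the channel.
        static ByteBuffer readHeader(String path, int headerSize) throws IOException
        {
            ByteBuffer header = ByteBuffer.allocate(headerSize);
            try (FileChannel channel = new File(path).newReadChannel())
            {
                while (header.hasRemaining() && channel.read(header) >= 0)
                {
                    // keep reading until the header buffer is full or EOF is reached
                }
            }
            header.flip();
            return header;
        }

        public static void main(String[] args) throws IOException
        {
            System.out.println(readHeader(args.length > 0 ? args[0] : "commitlog.header", 8).remaining());
        }
    }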
diff --git a/src/java/org/apache/cassandra/db/commitlog/CommitLogSegment.java b/src/java/org/apache/cassandra/db/commitlog/CommitLogSegment.java index 4a68e8be4c3a..cf976228249c 100644 --- a/src/java/org/apache/cassandra/db/commitlog/CommitLogSegment.java +++ b/src/java/org/apache/cassandra/db/commitlog/CommitLogSegment.java @@ -17,8 +17,6 @@ */ package org.apache.cassandra.db.commitlog; -import java.io.File; -import java.io.FileWriter; import java.io.IOException; import java.nio.ByteBuffer; import java.nio.channels.FileChannel; @@ -31,6 +29,8 @@ import java.util.zip.CRC32; import com.google.common.annotations.VisibleForTesting; +import org.apache.cassandra.io.util.File; +import org.apache.cassandra.io.util.FileWriter; import org.cliffc.high_scale_lib.NonBlockingHashMap; import com.codahale.metrics.Timer; @@ -76,10 +76,10 @@ public enum CDCState static { long maxId = Long.MIN_VALUE; - for (File file : new File(DatabaseDescriptor.getCommitLogLocation()).listFiles()) + for (File file : new File(DatabaseDescriptor.getCommitLogLocation()).tryList()) { - if (CommitLogDescriptor.isValid(file.getName())) - maxId = Math.max(CommitLogDescriptor.fromFileName(file.getName()).id, maxId); + if (CommitLogDescriptor.isValid(file.name())) + maxId = Math.max(CommitLogDescriptor.fromFileName(file.name()).id, maxId); } replayLimitId = idBase = Math.max(currentTimeMillis(), maxId + 1); } @@ -464,7 +464,7 @@ public CommitLogPosition getCurrentCommitLogPosition() */ public String getPath() { - return logFile.getPath(); + return logFile.path(); } /** @@ -472,7 +472,7 @@ public String getPath() */ public String getName() { - return logFile.getName(); + return logFile.name(); } /** @@ -480,7 +480,7 @@ public String getName() */ public File getCDCFile() { - return new File(DatabaseDescriptor.getCDCLogLocation(), logFile.getName()); + return new File(DatabaseDescriptor.getCDCLogLocation(), logFile.name()); } /** @@ -678,8 +678,8 @@ public static class CommitLogSegmentFileComparator implements Comparator { public int compare(File f, File f2) { - CommitLogDescriptor desc = CommitLogDescriptor.fromFileName(f.getName()); - CommitLogDescriptor desc2 = CommitLogDescriptor.fromFileName(f2.getName()); + CommitLogDescriptor desc = CommitLogDescriptor.fromFileName(f.name()); + CommitLogDescriptor desc2 = CommitLogDescriptor.fromFileName(f2.name()); return Long.compare(desc.id, desc2.id); } } diff --git a/src/java/org/apache/cassandra/db/commitlog/CommitLogSegmentManagerCDC.java b/src/java/org/apache/cassandra/db/commitlog/CommitLogSegmentManagerCDC.java index 65611376a4a6..6f6a1c24e77a 100644 --- a/src/java/org/apache/cassandra/db/commitlog/CommitLogSegmentManagerCDC.java +++ b/src/java/org/apache/cassandra/db/commitlog/CommitLogSegmentManagerCDC.java @@ -18,7 +18,6 @@ package org.apache.cassandra.db.commitlog; -import java.io.File; import java.io.IOException; import java.nio.file.FileVisitResult; import java.nio.file.Files; @@ -28,6 +27,7 @@ import com.google.common.annotations.VisibleForTesting; import com.google.common.util.concurrent.RateLimiter; +import org.apache.cassandra.io.util.File; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -165,8 +165,8 @@ void handleReplayedSegment(final File file) super.handleReplayedSegment(file); // delete untracked cdc segment hard link files if their index files do not exist - File cdcFile = new File(DatabaseDescriptor.getCDCLogLocation(), file.getName()); - File cdcIndexFile = new File(DatabaseDescriptor.getCDCLogLocation(), 
CommitLogDescriptor.fromFileName(file.getName()).cdcIndexFileName()); + File cdcFile = new File(DatabaseDescriptor.getCDCLogLocation(), file.name()); + File cdcIndexFile = new File(DatabaseDescriptor.getCDCLogLocation(), CommitLogDescriptor.fromFileName(file.name()).cdcIndexFileName()); if (cdcFile.exists() && !cdcIndexFile.exists()) { logger.trace("(Unopened) CDC segment {} is no longer needed and will be deleted now", cdcFile); diff --git a/src/java/org/apache/cassandra/db/commitlog/CommitLogSegmentManagerStandard.java b/src/java/org/apache/cassandra/db/commitlog/CommitLogSegmentManagerStandard.java index 0e051cfdfce9..c144d09ea041 100644 --- a/src/java/org/apache/cassandra/db/commitlog/CommitLogSegmentManagerStandard.java +++ b/src/java/org/apache/cassandra/db/commitlog/CommitLogSegmentManagerStandard.java @@ -19,6 +19,7 @@ package org.apache.cassandra.db.commitlog; import org.apache.cassandra.db.Mutation; +import org.apache.cassandra.io.util.File; import org.apache.cassandra.io.util.FileUtils; public class CommitLogSegmentManagerStandard extends AbstractCommitLogSegmentManager diff --git a/src/java/org/apache/cassandra/db/commitlog/IntervalSet.java b/src/java/org/apache/cassandra/db/commitlog/IntervalSet.java index 45db2f6c94f7..110873973ecd 100644 --- a/src/java/org/apache/cassandra/db/commitlog/IntervalSet.java +++ b/src/java/org/apache/cassandra/db/commitlog/IntervalSet.java @@ -207,4 +207,4 @@ public IntervalSet build() } } -} \ No newline at end of file +} diff --git a/src/java/org/apache/cassandra/db/commitlog/MemoryMappedSegment.java b/src/java/org/apache/cassandra/db/commitlog/MemoryMappedSegment.java index 6ecdbd3c7764..d564117d3ac5 100644 --- a/src/java/org/apache/cassandra/db/commitlog/MemoryMappedSegment.java +++ b/src/java/org/apache/cassandra/db/commitlog/MemoryMappedSegment.java @@ -90,7 +90,7 @@ protected void flush(int startMarker, int nextMarker) { throw new FSWriteError(e, getPath()); } - NativeLibrary.trySkipCache(fd, startMarker, nextMarker, logFile.getAbsolutePath()); + NativeLibrary.trySkipCache(fd, startMarker, nextMarker, logFile.absolutePath()); } @Override diff --git a/src/java/org/apache/cassandra/db/commitlog/PeriodicCommitLogService.java b/src/java/org/apache/cassandra/db/commitlog/PeriodicCommitLogService.java index 0644f32162fe..ae170a87d51e 100644 --- a/src/java/org/apache/cassandra/db/commitlog/PeriodicCommitLogService.java +++ b/src/java/org/apache/cassandra/db/commitlog/PeriodicCommitLogService.java @@ -43,4 +43,4 @@ protected void maybeWaitForSync(CommitLogSegment.Allocation alloc) pending.decrementAndGet(); } } -} \ No newline at end of file +} diff --git a/src/java/org/apache/cassandra/db/compaction/CompactionLogger.java b/src/java/org/apache/cassandra/db/compaction/CompactionLogger.java index e8250b4384df..2d585e6ca5c3 100644 --- a/src/java/org/apache/cassandra/db/compaction/CompactionLogger.java +++ b/src/java/org/apache/cassandra/db/compaction/CompactionLogger.java @@ -44,6 +44,7 @@ import org.apache.cassandra.concurrent.ExecutorPlus; import org.apache.cassandra.db.ColumnFamilyStore; import org.apache.cassandra.io.sstable.format.SSTableReader; +import org.apache.cassandra.io.util.File; import org.apache.cassandra.utils.ExecutorUtils; import org.apache.cassandra.utils.NoSpamLogger; @@ -308,13 +309,13 @@ private static class CompactionLogSerializer implements Writer private static OutputStreamWriter createStream() throws IOException { int count = 0; - Path compactionLog = Paths.get(logDirectory, "compaction.log"); + Path compactionLog = new 
File(logDirectory, "compaction.log").toPath(); if (Files.exists(compactionLog)) { Path tryPath = compactionLog; while (Files.exists(tryPath)) { - tryPath = Paths.get(logDirectory, String.format("compaction-%d.log", count++)); + tryPath = new File(logDirectory, String.format("compaction-%d.log", count++)).toPath(); } Files.move(compactionLog, tryPath); } diff --git a/src/java/org/apache/cassandra/db/compaction/CompactionManager.java b/src/java/org/apache/cassandra/db/compaction/CompactionManager.java index 47ed34d4026d..f9adcc276bfe 100644 --- a/src/java/org/apache/cassandra/db/compaction/CompactionManager.java +++ b/src/java/org/apache/cassandra/db/compaction/CompactionManager.java @@ -17,7 +17,6 @@ */ package org.apache.cassandra.db.compaction; -import java.io.File; import java.io.IOException; import java.util.*; import java.util.concurrent.Callable; @@ -41,6 +40,7 @@ import org.apache.cassandra.concurrent.ExecutorFactory; import org.apache.cassandra.concurrent.WrappedExecutorPlus; import org.apache.cassandra.dht.AbstractBounds; +import org.apache.cassandra.io.util.File; import org.apache.cassandra.locator.RangesAtEndpoint; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -1006,7 +1006,7 @@ public void forceUserDefinedCompaction(String dataFiles) } // group by keyspace/columnfamily ColumnFamilyStore cfs = Keyspace.open(desc.ksname).getColumnFamilyStore(desc.cfname); - descriptors.put(cfs, cfs.getDirectories().find(new File(filename.trim()).getName())); + descriptors.put(cfs, cfs.getDirectories().find(new File(filename.trim()).name())); } List> futures = new ArrayList<>(descriptors.size()); @@ -1032,7 +1032,7 @@ public void forceUserDefinedCleanup(String dataFiles) } // group by keyspace/columnfamily ColumnFamilyStore cfs = Keyspace.open(desc.ksname).getColumnFamilyStore(desc.cfname); - desc = cfs.getDirectories().find(new File(filename.trim()).getName()); + desc = cfs.getDirectories().find(new File(filename.trim()).name()); if (desc != null) descriptors.put(cfs, desc); } diff --git a/src/java/org/apache/cassandra/db/compaction/CompactionStrategyManager.java b/src/java/org/apache/cassandra/db/compaction/CompactionStrategyManager.java index d7d3ba529765..9ee1a9735d48 100644 --- a/src/java/org/apache/cassandra/db/compaction/CompactionStrategyManager.java +++ b/src/java/org/apache/cassandra/db/compaction/CompactionStrategyManager.java @@ -18,7 +18,6 @@ package org.apache.cassandra.db.compaction; -import java.io.File; import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; @@ -39,6 +38,7 @@ import com.google.common.collect.Iterables; import com.google.common.collect.Lists; import com.google.common.primitives.Longs; +import org.apache.cassandra.io.util.File; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -1144,13 +1144,13 @@ public List getStrategyFolders(AbstractCompactionStrategy strategy) { int idx = holder.getStrategyIndex(strategy); if (idx >= 0) - return Collections.singletonList(locations[idx].location.getAbsolutePath()); + return Collections.singletonList(locations[idx].location.absolutePath()); } } List folders = new ArrayList<>(locations.length); for (Directories.DataDirectory location : locations) { - folders.add(location.location.getAbsolutePath()); + folders.add(location.location.absolutePath()); } return folders; } diff --git a/src/java/org/apache/cassandra/db/compaction/Scrubber.java b/src/java/org/apache/cassandra/db/compaction/Scrubber.java index 57ce8f2fa820..cebf8ed0d1ee 100644 --- 
a/src/java/org/apache/cassandra/db/compaction/Scrubber.java +++ b/src/java/org/apache/cassandra/db/compaction/Scrubber.java @@ -17,8 +17,10 @@ */ package org.apache.cassandra.db.compaction; +import java.io.IOError; +import java.io.IOException; import java.nio.ByteBuffer; -import java.io.*; + import java.util.*; import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReadWriteLock; @@ -28,6 +30,7 @@ import com.google.common.base.Throwables; import com.google.common.collect.ImmutableSet; +import org.apache.cassandra.io.util.File; import org.apache.cassandra.schema.TableMetadata; import org.apache.cassandra.db.*; import org.apache.cassandra.db.lifecycle.LifecycleTransaction; diff --git a/src/java/org/apache/cassandra/db/compaction/SizeTieredCompactionStrategyOptions.java b/src/java/org/apache/cassandra/db/compaction/SizeTieredCompactionStrategyOptions.java index 288af2bebb22..eb1d8f97afe2 100644 --- a/src/java/org/apache/cassandra/db/compaction/SizeTieredCompactionStrategyOptions.java +++ b/src/java/org/apache/cassandra/db/compaction/SizeTieredCompactionStrategyOptions.java @@ -101,4 +101,4 @@ public String toString() return String.format("Min sstable size: %d, bucket low: %f, bucket high: %f", minSSTableSize, bucketLow, bucketHigh); } -} \ No newline at end of file +} diff --git a/src/java/org/apache/cassandra/db/compaction/Upgrader.java b/src/java/org/apache/cassandra/db/compaction/Upgrader.java index 0cb45006b145..7f3b0f14458b 100644 --- a/src/java/org/apache/cassandra/db/compaction/Upgrader.java +++ b/src/java/org/apache/cassandra/db/compaction/Upgrader.java @@ -17,7 +17,6 @@ */ package org.apache.cassandra.db.compaction; -import java.io.File; import java.util.*; import java.util.function.LongPredicate; @@ -33,6 +32,7 @@ import org.apache.cassandra.io.sstable.format.SSTableWriter; import org.apache.cassandra.io.sstable.metadata.MetadataCollector; import org.apache.cassandra.io.sstable.metadata.StatsMetadata; +import org.apache.cassandra.io.util.File; import org.apache.cassandra.utils.FBUtilities; import org.apache.cassandra.utils.OutputHandler; import org.apache.cassandra.utils.UUIDGen; @@ -57,7 +57,7 @@ public Upgrader(ColumnFamilyStore cfs, LifecycleTransaction txn, OutputHandler o this.sstable = txn.onlyOne(); this.outputHandler = outputHandler; - this.directory = new File(sstable.getFilename()).getParentFile(); + this.directory = new File(sstable.getFilename()).parent(); this.controller = new UpgradeController(cfs); diff --git a/src/java/org/apache/cassandra/db/compaction/Verifier.java b/src/java/org/apache/cassandra/db/compaction/Verifier.java index 30e74adb4b35..8b7f0d684aa2 100644 --- a/src/java/org/apache/cassandra/db/compaction/Verifier.java +++ b/src/java/org/apache/cassandra/db/compaction/Verifier.java @@ -36,8 +36,10 @@ import org.apache.cassandra.io.sstable.metadata.MetadataComponent; import org.apache.cassandra.io.sstable.metadata.MetadataType; import org.apache.cassandra.io.sstable.metadata.ValidationMetadata; +import org.apache.cassandra.io.util.DataInputPlus; import org.apache.cassandra.io.util.DataIntegrityMetadata; import org.apache.cassandra.io.util.DataIntegrityMetadata.FileDigestValidator; +import org.apache.cassandra.io.util.FileInputStreamPlus; import org.apache.cassandra.io.util.FileUtils; import org.apache.cassandra.io.util.RandomAccessReader; import org.apache.cassandra.schema.TableMetadata; @@ -52,8 +54,8 @@ import java.io.BufferedInputStream; import java.io.Closeable; +import java.io.DataInput; import java.io.DataInputStream; -import 
java.io.File; import java.io.IOError; import java.io.IOException; import java.nio.ByteBuffer; @@ -67,6 +69,8 @@ import java.util.function.Function; import java.util.function.LongPredicate; +import org.apache.cassandra.io.util.File; + public class Verifier implements Closeable { private final ColumnFamilyStore cfs; @@ -447,7 +451,7 @@ private void deserializeBloomFilter(SSTableReader sstable) throws IOException Path bfPath = Paths.get(sstable.descriptor.filenameFor(Component.FILTER)); if (Files.exists(bfPath)) { - try (DataInputStream stream = new DataInputStream(new BufferedInputStream(Files.newInputStream(bfPath))); + try (FileInputStreamPlus stream = new File(bfPath).newInputStream(); IFilter bf = BloomFilterSerializer.deserialize(stream, sstable.descriptor.version.hasOldBfFormat())) { } diff --git a/src/java/org/apache/cassandra/db/compaction/writers/CompactionAwareWriter.java b/src/java/org/apache/cassandra/db/compaction/writers/CompactionAwareWriter.java index 2251f6a3f75c..74ebac7d359c 100644 --- a/src/java/org/apache/cassandra/db/compaction/writers/CompactionAwareWriter.java +++ b/src/java/org/apache/cassandra/db/compaction/writers/CompactionAwareWriter.java @@ -23,6 +23,7 @@ import java.util.Set; import java.util.UUID; +import org.apache.cassandra.io.util.File; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/src/java/org/apache/cassandra/db/lifecycle/LifecycleTransaction.java b/src/java/org/apache/cassandra/db/lifecycle/LifecycleTransaction.java index 574c6a449936..ff935f8a6db3 100644 --- a/src/java/org/apache/cassandra/db/lifecycle/LifecycleTransaction.java +++ b/src/java/org/apache/cassandra/db/lifecycle/LifecycleTransaction.java @@ -17,7 +17,6 @@ */ package org.apache.cassandra.db.lifecycle; -import java.io.File; import java.nio.file.Path; import java.util.*; import java.util.function.BiPredicate; @@ -25,6 +24,7 @@ import com.google.common.base.Predicate; import com.google.common.collect.*; +import org.apache.cassandra.io.util.File; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/src/java/org/apache/cassandra/db/lifecycle/LogAwareFileLister.java b/src/java/org/apache/cassandra/db/lifecycle/LogAwareFileLister.java index 254966e9cb87..120f9bc42a7e 100644 --- a/src/java/org/apache/cassandra/db/lifecycle/LogAwareFileLister.java +++ b/src/java/org/apache/cassandra/db/lifecycle/LogAwareFileLister.java @@ -20,7 +20,6 @@ */ package org.apache.cassandra.db.lifecycle; -import java.io.File; import java.io.IOException; import java.nio.file.DirectoryStream; import java.nio.file.Files; @@ -32,6 +31,7 @@ import com.google.common.annotations.VisibleForTesting; +import org.apache.cassandra.io.util.File; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -106,7 +106,7 @@ static List list(DirectoryStream stream) throws IOException try { return StreamSupport.stream(stream.spliterator(), false) - .map(Path::toFile) + .map(File::new) .filter((f) -> !f.isDirectory()) .collect(Collectors.toList()); } diff --git a/src/java/org/apache/cassandra/db/lifecycle/LogFile.java b/src/java/org/apache/cassandra/db/lifecycle/LogFile.java index 1fc371f7d202..5a468f899547 100644 --- a/src/java/org/apache/cassandra/db/lifecycle/LogFile.java +++ b/src/java/org/apache/cassandra/db/lifecycle/LogFile.java @@ -20,7 +20,6 @@ */ package org.apache.cassandra.db.lifecycle; -import java.io.File; import java.nio.file.Path; import java.util.*; import java.util.regex.Matcher; @@ -30,6 +29,7 @@ import com.google.common.annotations.VisibleForTesting; import 
com.google.common.collect.Iterables; +import org.apache.cassandra.io.util.File; import org.apache.commons.lang3.StringUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -77,7 +77,7 @@ final class LogFile implements AutoCloseable static LogFile make(File logReplica) { - return make(logReplica.getName(), Collections.singletonList(logReplica)); + return make(logReplica.name(), Collections.singletonList(logReplica)); } static LogFile make(String fileName, List logReplicas) @@ -139,7 +139,7 @@ Throwable removeUnfinishedLeftovers(Throwable accumulate) static boolean isLogFile(File file) { - return LogFile.FILE_REGEX.matcher(file.getName()).matches(); + return LogFile.FILE_REGEX.matcher(file.name()).matches(); } LogFile(OperationType type, UUID id, List replicas) @@ -324,7 +324,7 @@ Map makeRecords(Type type, Iterable tables) for (SSTableReader sstable : tables) { File directory = sstable.descriptor.directory; - String fileName = StringUtils.join(directory, File.separator, getFileName()); + String fileName = StringUtils.join(directory, File.pathSeparator(), getFileName()); replicas.maybeCreateReplica(directory, fileName, records); } return LogRecord.make(type, tables); @@ -333,7 +333,7 @@ Map makeRecords(Type type, Iterable tables) private LogRecord makeAddRecord(SSTable table) { File directory = table.descriptor.directory; - String fileName = StringUtils.join(directory, File.separator, getFileName()); + String fileName = StringUtils.join(directory, File.pathSeparator(), getFileName()); replicas.maybeCreateReplica(directory, fileName, records); return LogRecord.make(Type.ADD, table); } @@ -348,7 +348,7 @@ private LogRecord makeRecord(Type type, SSTable table, LogRecord record) assert type == Type.ADD || type == Type.REMOVE; File directory = table.descriptor.directory; - String fileName = StringUtils.join(directory, File.separator, getFileName()); + String fileName = StringUtils.join(directory, File.pathSeparator(), getFileName()); replicas.maybeCreateReplica(directory, fileName, records); return record.asType(type); } @@ -444,7 +444,7 @@ LogRecord getLastRecord() private static Set getRecordFiles(NavigableSet files, LogRecord record) { String fileName = record.fileName(); - return files.stream().filter(f -> f.getName().startsWith(fileName)).collect(Collectors.toSet()); + return files.stream().filter(f -> f.name().startsWith(fileName)).collect(Collectors.toSet()); } boolean exists() diff --git a/src/java/org/apache/cassandra/db/lifecycle/LogRecord.java b/src/java/org/apache/cassandra/db/lifecycle/LogRecord.java index 513ad8746084..4fb3947605a7 100644 --- a/src/java/org/apache/cassandra/db/lifecycle/LogRecord.java +++ b/src/java/org/apache/cassandra/db/lifecycle/LogRecord.java @@ -20,11 +20,11 @@ */ package org.apache.cassandra.db.lifecycle; -import java.io.File; -import java.io.FilenameFilter; + import java.nio.file.Path; import java.nio.file.Paths; import java.util.*; +import java.util.function.BiPredicate; import java.util.regex.Matcher; import java.util.regex.Pattern; import java.util.stream.Collectors; @@ -33,7 +33,9 @@ import org.apache.cassandra.io.sstable.Component; import org.apache.cassandra.io.sstable.SSTable; import org.apache.cassandra.io.sstable.format.SSTableReader; +import org.apache.cassandra.io.util.File; import org.apache.cassandra.io.util.FileUtils; +import org.apache.cassandra.io.util.PathUtils; import org.apache.cassandra.utils.FBUtilities; /** @@ -283,8 +285,8 @@ private String format() public static List getExistingFiles(String absoluteFilePath) { - Path path 
= Paths.get(absoluteFilePath); - File[] files = path.getParent().toFile().listFiles((dir, name) -> name.startsWith(path.getFileName().toString())); + File file = new File(absoluteFilePath); + File[] files = file.parent().tryList((dir, name) -> name.startsWith(file.name())); // files may be null if the directory does not exist yet, e.g. when tracking new files return files == null ? Collections.emptyList() : Arrays.asList(files); } @@ -302,13 +304,13 @@ public static Map> getExistingFiles(Set absoluteFileP Map> dirToFileNamePrefix = new HashMap<>(); for (String absolutePath : absoluteFilePaths) { - Path fullPath = Paths.get(absolutePath); + Path fullPath = new File(absolutePath).toPath(); Path path = fullPath.getParent(); if (path != null) - dirToFileNamePrefix.computeIfAbsent(path.toFile(), (k) -> new TreeSet<>()).add(fullPath.getFileName().toString()); + dirToFileNamePrefix.computeIfAbsent(new File(path), (k) -> new TreeSet<>()).add(fullPath.getFileName().toString()); } - FilenameFilter ff = (dir, name) -> { + BiPredicate ff = (dir, name) -> { TreeSet dirSet = dirToFileNamePrefix.get(dir); // if the set contains a prefix of the current file name, the file name we have here should sort directly // after the prefix in the tree set, which means we can use 'floor' to get the prefix (returns the largest @@ -317,7 +319,7 @@ public static Map> getExistingFiles(Set absoluteFileP String baseName = dirSet.floor(name); if (baseName != null && name.startsWith(baseName)) { - String absolutePath = new File(dir, baseName).getPath(); + String absolutePath = new File(dir, baseName).path(); fileMap.computeIfAbsent(absolutePath, k -> new ArrayList<>()).add(new File(dir, name)); } return false; @@ -325,7 +327,7 @@ public static Map> getExistingFiles(Set absoluteFileP // populate the file map: for (File f : dirToFileNamePrefix.keySet()) - f.listFiles(ff); + f.tryList(ff); return fileMap; } @@ -338,14 +340,12 @@ public boolean isFinal() String fileName() { - return absolutePath.isPresent() ? Paths.get(absolutePath.get()).getFileName().toString() : ""; + return absolutePath.isPresent() ? new File(absolutePath.get()).name() : ""; } boolean isInFolder(Path folder) { - return absolutePath.isPresent() - ? 
FileUtils.isContained(folder.toFile(), Paths.get(absolutePath.get()).toFile()) - : false; + return absolutePath.isPresent() && PathUtils.isContained(folder, new File(absolutePath.get()).toPath()); } String absolutePath() diff --git a/src/java/org/apache/cassandra/db/lifecycle/LogReplica.java b/src/java/org/apache/cassandra/db/lifecycle/LogReplica.java index cdc4c35f7a44..efd56d8cc191 100644 --- a/src/java/org/apache/cassandra/db/lifecycle/LogReplica.java +++ b/src/java/org/apache/cassandra/db/lifecycle/LogReplica.java @@ -18,12 +18,12 @@ package org.apache.cassandra.db.lifecycle; -import java.io.File; import java.util.HashMap; import java.util.List; import java.util.Map; import java.io.IOException; +import org.apache.cassandra.io.util.File; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -54,18 +54,18 @@ final class LogReplica implements AutoCloseable static LogReplica create(File directory, String fileName) { - int folderFD = NativeLibrary.tryOpenDirectory(directory.getPath()); + int folderFD = NativeLibrary.tryOpenDirectory(directory.path()); if (folderFD == -1 && !FBUtilities.isWindows) - throw new FSReadError(new IOException(String.format("Invalid folder descriptor trying to create log replica %s", directory.getPath())), directory.getPath()); + throw new FSReadError(new IOException(String.format("Invalid folder descriptor trying to create log replica %s", directory.path())), directory.path()); return new LogReplica(new File(fileName), folderFD); } static LogReplica open(File file) { - int folderFD = NativeLibrary.tryOpenDirectory(file.getParentFile().getPath()); + int folderFD = NativeLibrary.tryOpenDirectory(file.parent().path()); if (folderFD == -1 && !FBUtilities.isWindows) - throw new FSReadError(new IOException(String.format("Invalid folder descriptor trying to create log replica %s", file.getParentFile().getPath())), file.getParentFile().getPath()); + throw new FSReadError(new IOException(String.format("Invalid folder descriptor trying to create log replica %s", file.parent().path())), file.parent().path()); return new LogReplica(file, folderFD); } @@ -88,12 +88,12 @@ List readLines() String getFileName() { - return file.getName(); + return file.name(); } String getDirectory() { - return file.getParent(); + return file.parentPath(); } void append(LogRecord record) @@ -162,7 +162,7 @@ void setError(String line, String error) void printContentsWithAnyErrors(StringBuilder str) { - str.append(file.getPath()); + str.append(file.path()); str.append(System.lineSeparator()); FileUtils.readLines(file).forEach(line -> printLineWithAnyError(str, line)); } diff --git a/src/java/org/apache/cassandra/db/lifecycle/LogReplicaSet.java b/src/java/org/apache/cassandra/db/lifecycle/LogReplicaSet.java index 0295357e8f0f..316e4b628d4b 100644 --- a/src/java/org/apache/cassandra/db/lifecycle/LogReplicaSet.java +++ b/src/java/org/apache/cassandra/db/lifecycle/LogReplicaSet.java @@ -17,7 +17,6 @@ */ package org.apache.cassandra.db.lifecycle; -import java.io.File; import java.util.Collection; import java.util.Collections; import java.util.LinkedHashMap; @@ -29,6 +28,7 @@ import java.util.stream.Collectors; import com.google.common.annotations.VisibleForTesting; +import org.apache.cassandra.io.util.File; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -61,7 +61,7 @@ void addReplicas(List replicas) void addReplica(File file) { - File directory = file.getParentFile(); + File directory = file.parent(); assert !replicasByFile.containsKey(directory); try { @@ -268,6 +268,6 @@ List 
getFiles() @VisibleForTesting List getFilePaths() { - return replicas().stream().map(LogReplica::file).map(File::getPath).collect(Collectors.toList()); + return replicas().stream().map(LogReplica::file).map(File::path).collect(Collectors.toList()); } } diff --git a/src/java/org/apache/cassandra/db/lifecycle/LogTransaction.java b/src/java/org/apache/cassandra/db/lifecycle/LogTransaction.java index 85df4d64e04f..09717fcf5d81 100644 --- a/src/java/org/apache/cassandra/db/lifecycle/LogTransaction.java +++ b/src/java/org/apache/cassandra/db/lifecycle/LogTransaction.java @@ -18,7 +18,6 @@ package org.apache.cassandra.db.lifecycle; import java.io.ByteArrayOutputStream; -import java.io.File; import java.io.IOException; import java.io.PrintStream; import java.nio.file.Files; @@ -31,6 +30,7 @@ import com.google.common.annotations.VisibleForTesting; import com.google.common.util.concurrent.Runnables; +import org.apache.cassandra.io.util.File; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -506,16 +506,16 @@ private static final class LogFilesByName void list(File directory) { - Arrays.stream(directory.listFiles(LogFile::isLogFile)).forEach(this::add); + Arrays.stream(directory.tryList(LogFile::isLogFile)).forEach(this::add); } void add(File file) { - List filesByName = files.get(file.getName()); + List filesByName = files.get(file.name()); if (filesByName == null) { filesByName = new ArrayList<>(); - files.put(file.getName(), filesByName); + files.put(file.name(), filesByName); } filesByName.add(file); diff --git a/src/java/org/apache/cassandra/db/lifecycle/Tracker.java b/src/java/org/apache/cassandra/db/lifecycle/Tracker.java index 3d72a113b804..e15347b83159 100644 --- a/src/java/org/apache/cassandra/db/lifecycle/Tracker.java +++ b/src/java/org/apache/cassandra/db/lifecycle/Tracker.java @@ -17,7 +17,6 @@ */ package org.apache.cassandra.db.lifecycle; -import java.io.File; import java.util.*; import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.atomic.AtomicReference; @@ -32,6 +31,7 @@ import org.apache.cassandra.db.Directories; import org.apache.cassandra.db.Memtable; import org.apache.cassandra.db.commitlog.CommitLogPosition; +import org.apache.cassandra.io.util.File; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/src/java/org/apache/cassandra/db/streaming/CassandraEntireSSTableStreamReader.java b/src/java/org/apache/cassandra/db/streaming/CassandraEntireSSTableStreamReader.java index 0bfe99311246..e547e0f3ce60 100644 --- a/src/java/org/apache/cassandra/db/streaming/CassandraEntireSSTableStreamReader.java +++ b/src/java/org/apache/cassandra/db/streaming/CassandraEntireSSTableStreamReader.java @@ -18,13 +18,13 @@ package org.apache.cassandra.db.streaming; -import java.io.File; import java.io.IOException; import java.util.Collection; import java.util.function.UnaryOperator; import com.google.common.base.Throwables; import org.apache.cassandra.db.lifecycle.LifecycleNewTracker; +import org.apache.cassandra.io.util.File; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/src/java/org/apache/cassandra/db/streaming/CassandraStreamReader.java b/src/java/org/apache/cassandra/db/streaming/CassandraStreamReader.java index 6835fadb5ec9..17c40b8a8962 100644 --- a/src/java/org/apache/cassandra/db/streaming/CassandraStreamReader.java +++ b/src/java/org/apache/cassandra/db/streaming/CassandraStreamReader.java @@ -25,6 +25,7 @@ import com.google.common.base.Throwables; import com.google.common.collect.UnmodifiableIterator; +import 
org.apache.cassandra.io.util.File; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/src/java/org/apache/cassandra/db/streaming/CassandraStreamWriter.java b/src/java/org/apache/cassandra/db/streaming/CassandraStreamWriter.java index 6481f4b9764d..cb513ab92c8f 100644 --- a/src/java/org/apache/cassandra/db/streaming/CassandraStreamWriter.java +++ b/src/java/org/apache/cassandra/db/streaming/CassandraStreamWriter.java @@ -17,11 +17,11 @@ */ package org.apache.cassandra.db.streaming; -import java.io.File; import java.io.IOException; import java.nio.ByteBuffer; import java.util.Collection; +import org.apache.cassandra.io.util.File; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/src/java/org/apache/cassandra/db/streaming/ComponentContext.java b/src/java/org/apache/cassandra/db/streaming/ComponentContext.java index b9c60b9f795e..c8c08aa850fe 100644 --- a/src/java/org/apache/cassandra/db/streaming/ComponentContext.java +++ b/src/java/org/apache/cassandra/db/streaming/ComponentContext.java @@ -26,9 +26,7 @@ import org.apache.cassandra.io.sstable.Descriptor; import org.apache.cassandra.io.util.FileUtils; -import java.io.File; import java.io.IOException; -import java.io.RandomAccessFile; import java.nio.channels.FileChannel; import java.util.HashMap; import java.util.Map; @@ -38,6 +36,8 @@ * Mutable SSTable components and their hardlinks to avoid concurrent sstable component modification * during entire-sstable-streaming. */ +import org.apache.cassandra.io.util.File; + public class ComponentContext implements AutoCloseable { private static final Logger logger = LoggerFactory.getLogger(ComponentContext.class); @@ -81,9 +81,9 @@ public ComponentManifest manifest() */ public FileChannel channel(Descriptor descriptor, Component component, long size) throws IOException { - String toTransfer = hardLinks.containsKey(component) ? hardLinks.get(component).getPath() : descriptor.filenameFor(component); + String toTransfer = hardLinks.containsKey(component) ? 
hardLinks.get(component).path() : descriptor.filenameFor(component); @SuppressWarnings("resource") // file channel will be closed by Caller - FileChannel channel = new RandomAccessFile(toTransfer, "r").getChannel(); + FileChannel channel = new File(toTransfer).newReadChannel(); assert size == channel.size() : String.format("Entire sstable streaming expects %s file size to be %s but got %s.", component, size, channel.size()); diff --git a/src/java/org/apache/cassandra/db/streaming/ComponentManifest.java b/src/java/org/apache/cassandra/db/streaming/ComponentManifest.java index bb896caffa06..71aa0f819094 100644 --- a/src/java/org/apache/cassandra/db/streaming/ComponentManifest.java +++ b/src/java/org/apache/cassandra/db/streaming/ComponentManifest.java @@ -28,13 +28,14 @@ import org.apache.cassandra.io.util.DataInputPlus; import org.apache.cassandra.io.util.DataOutputPlus; -import java.io.File; import java.io.IOException; import java.util.*; /** * SSTable components and their sizes to be tranfered via entire-sstable-streaming */ +import org.apache.cassandra.io.util.File; + public final class ComponentManifest implements Iterable { private static final List STREAM_COMPONENTS = ImmutableList.of(Component.DATA, Component.PRIMARY_INDEX, Component.STATS, diff --git a/src/java/org/apache/cassandra/dht/tokenallocator/OfflineTokenAllocator.java b/src/java/org/apache/cassandra/dht/tokenallocator/OfflineTokenAllocator.java index f8c13df51368..dc7f4070b1ad 100644 --- a/src/java/org/apache/cassandra/dht/tokenallocator/OfflineTokenAllocator.java +++ b/src/java/org/apache/cassandra/dht/tokenallocator/OfflineTokenAllocator.java @@ -187,4 +187,4 @@ private static InetAddressAndPort getLoopbackAddressWithPort(int port) throw new IllegalStateException("Unexpected UnknownHostException", e); } } -} \ No newline at end of file +} diff --git a/src/java/org/apache/cassandra/dht/tokenallocator/ReplicationStrategy.java b/src/java/org/apache/cassandra/dht/tokenallocator/ReplicationStrategy.java index 6dbd37c60542..8cb5fe1cebaf 100644 --- a/src/java/org/apache/cassandra/dht/tokenallocator/ReplicationStrategy.java +++ b/src/java/org/apache/cassandra/dht/tokenallocator/ReplicationStrategy.java @@ -26,4 +26,4 @@ interface ReplicationStrategy * @return Some hashable object. 
*/ Object getGroup(Unit unit); -} \ No newline at end of file +} diff --git a/src/java/org/apache/cassandra/dht/tokenallocator/TokenAllocator.java b/src/java/org/apache/cassandra/dht/tokenallocator/TokenAllocator.java index 2eb9a4c352b8..764531779467 100644 --- a/src/java/org/apache/cassandra/dht/tokenallocator/TokenAllocator.java +++ b/src/java/org/apache/cassandra/dht/tokenallocator/TokenAllocator.java @@ -24,4 +24,4 @@ public interface TokenAllocator { Collection addUnit(Unit newUnit, int numTokens); -} \ No newline at end of file +} diff --git a/src/java/org/apache/cassandra/dht/tokenallocator/TokenAllocatorDiagnostics.java b/src/java/org/apache/cassandra/dht/tokenallocator/TokenAllocatorDiagnostics.java index 04d7455439a3..c29980cfcea2 100644 --- a/src/java/org/apache/cassandra/dht/tokenallocator/TokenAllocatorDiagnostics.java +++ b/src/java/org/apache/cassandra/dht/tokenallocator/TokenAllocatorDiagnostics.java @@ -193,4 +193,4 @@ private static boolean isEnabled(TokenAllocatorEventType type) return service.isEnabled(TokenAllocatorEvent.class, type); } -} \ No newline at end of file +} diff --git a/src/java/org/apache/cassandra/dht/tokenallocator/TokenAllocatorEvent.java b/src/java/org/apache/cassandra/dht/tokenallocator/TokenAllocatorEvent.java index ca59938678d3..acb5ed2b857a 100644 --- a/src/java/org/apache/cassandra/dht/tokenallocator/TokenAllocatorEvent.java +++ b/src/java/org/apache/cassandra/dht/tokenallocator/TokenAllocatorEvent.java @@ -110,4 +110,4 @@ public HashMap toMap() ret.put("tokenInfo", String.valueOf(tokenInfo)); return ret; } -} \ No newline at end of file +} diff --git a/src/java/org/apache/cassandra/diag/DiagnosticEventService.java b/src/java/org/apache/cassandra/diag/DiagnosticEventService.java index cab95962436f..8a8391cc633a 100644 --- a/src/java/org/apache/cassandra/diag/DiagnosticEventService.java +++ b/src/java/org/apache/cassandra/diag/DiagnosticEventService.java @@ -335,4 +335,4 @@ public int hashCode() return Objects.hash(wrapped); } } -} \ No newline at end of file +} diff --git a/src/java/org/apache/cassandra/fql/FullQueryLogger.java b/src/java/org/apache/cassandra/fql/FullQueryLogger.java index 49e326ecb8b8..ba881274007f 100644 --- a/src/java/org/apache/cassandra/fql/FullQueryLogger.java +++ b/src/java/org/apache/cassandra/fql/FullQueryLogger.java @@ -17,7 +17,6 @@ */ package org.apache.cassandra.fql; -import java.io.File; import java.nio.ByteBuffer; import java.nio.file.Path; import java.util.ArrayList; @@ -30,6 +29,7 @@ import com.google.common.collect.Sets; import com.google.common.primitives.Ints; +import org.apache.cassandra.io.util.File; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -194,7 +194,7 @@ public synchronized void reset(String fullQueryLogPath) //Then decide whether to clean the last used path, possibly configured by JMX if (binLog != null && binLog.path != null) { - File pathFile = binLog.path.toFile(); + File pathFile = new File(binLog.path); if (pathFile.exists()) { pathsToClean.add(pathFile); diff --git a/src/java/org/apache/cassandra/gms/EndpointState.java b/src/java/org/apache/cassandra/gms/EndpointState.java index 9a69de341d5a..a48a8575185a 100644 --- a/src/java/org/apache/cassandra/gms/EndpointState.java +++ b/src/java/org/apache/cassandra/gms/EndpointState.java @@ -24,6 +24,7 @@ import javax.annotation.Nullable; +import org.apache.cassandra.io.util.File; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/src/java/org/apache/cassandra/gms/FailureDetector.java 
b/src/java/org/apache/cassandra/gms/FailureDetector.java index 3ba96af97f60..522e0829ce6b 100644 --- a/src/java/org/apache/cassandra/gms/FailureDetector.java +++ b/src/java/org/apache/cassandra/gms/FailureDetector.java @@ -28,6 +28,7 @@ import java.util.concurrent.TimeUnit; import java.util.function.Predicate; import javax.management.openmbean.*; +import org.apache.cassandra.io.util.File; import org.apache.cassandra.locator.Replica; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -253,7 +254,7 @@ public void dumpInterArrivalTimes() } catch (IOException e) { - throw new FSWriteError(e, (path == null) ? null : path.toFile()); + throw new FSWriteError(e, path); } } diff --git a/src/java/org/apache/cassandra/gms/GossipDigest.java b/src/java/org/apache/cassandra/gms/GossipDigest.java index 53f6c5c52c59..4115c38521e3 100644 --- a/src/java/org/apache/cassandra/gms/GossipDigest.java +++ b/src/java/org/apache/cassandra/gms/GossipDigest.java @@ -23,6 +23,7 @@ import org.apache.cassandra.io.IVersionedSerializer; import org.apache.cassandra.io.util.DataInputPlus; import org.apache.cassandra.io.util.DataOutputPlus; +import org.apache.cassandra.io.util.File; import org.apache.cassandra.locator.InetAddressAndPort; import static org.apache.cassandra.locator.InetAddressAndPort.Serializer.inetAddressAndPortSerializer; diff --git a/src/java/org/apache/cassandra/gms/GossipDigestAck2.java b/src/java/org/apache/cassandra/gms/GossipDigestAck2.java index 0e4062bb0f44..732476312adf 100644 --- a/src/java/org/apache/cassandra/gms/GossipDigestAck2.java +++ b/src/java/org/apache/cassandra/gms/GossipDigestAck2.java @@ -25,6 +25,7 @@ import org.apache.cassandra.io.IVersionedSerializer; import org.apache.cassandra.io.util.DataInputPlus; import org.apache.cassandra.io.util.DataOutputPlus; +import org.apache.cassandra.io.util.File; import org.apache.cassandra.locator.InetAddressAndPort; import static org.apache.cassandra.locator.InetAddressAndPort.Serializer.inetAddressAndPortSerializer; diff --git a/src/java/org/apache/cassandra/gms/GossipDigestSyn.java b/src/java/org/apache/cassandra/gms/GossipDigestSyn.java index 17c8da3593c0..c2c736f1d973 100644 --- a/src/java/org/apache/cassandra/gms/GossipDigestSyn.java +++ b/src/java/org/apache/cassandra/gms/GossipDigestSyn.java @@ -30,6 +30,8 @@ * This is the first message that gets sent out as a start of the Gossip protocol in a * round. 
*/ +import org.apache.cassandra.io.util.File; + public class GossipDigestSyn { public static final IVersionedSerializer serializer = new GossipDigestSynSerializer(); diff --git a/src/java/org/apache/cassandra/gms/GossipShutdownVerbHandler.java b/src/java/org/apache/cassandra/gms/GossipShutdownVerbHandler.java index 83c8568274f9..f2622ef9b133 100644 --- a/src/java/org/apache/cassandra/gms/GossipShutdownVerbHandler.java +++ b/src/java/org/apache/cassandra/gms/GossipShutdownVerbHandler.java @@ -39,4 +39,4 @@ public void doVerb(Message message) Gossiper.instance.markAsShutdown(message.from()); } -} \ No newline at end of file +} diff --git a/src/java/org/apache/cassandra/gms/GossiperDiagnostics.java b/src/java/org/apache/cassandra/gms/GossiperDiagnostics.java index 57552cca5a7b..13f8647d7d8a 100644 --- a/src/java/org/apache/cassandra/gms/GossiperDiagnostics.java +++ b/src/java/org/apache/cassandra/gms/GossiperDiagnostics.java @@ -110,4 +110,4 @@ private static boolean isEnabled(GossiperEventType type) { return service.isEnabled(GossiperEvent.class, type); } -} \ No newline at end of file +} diff --git a/src/java/org/apache/cassandra/gms/GossiperEvent.java b/src/java/org/apache/cassandra/gms/GossiperEvent.java index 4ec0cf4d10a2..71fee7c991ee 100644 --- a/src/java/org/apache/cassandra/gms/GossiperEvent.java +++ b/src/java/org/apache/cassandra/gms/GossiperEvent.java @@ -108,4 +108,4 @@ public HashMap toMap() ret.put("unreachableEndpoints", String.valueOf(unreachableEndpoints)); return ret; } -} \ No newline at end of file +} diff --git a/src/java/org/apache/cassandra/gms/GossiperMBean.java b/src/java/org/apache/cassandra/gms/GossiperMBean.java index 92df2cd8d094..47d7207ef86e 100644 --- a/src/java/org/apache/cassandra/gms/GossiperMBean.java +++ b/src/java/org/apache/cassandra/gms/GossiperMBean.java @@ -38,4 +38,4 @@ public interface GossiperMBean /** Returns each node's database release version */ public Map> getReleaseVersionsWithPort(); -} \ No newline at end of file +} diff --git a/src/java/org/apache/cassandra/gms/HeartBeatState.java b/src/java/org/apache/cassandra/gms/HeartBeatState.java index 75f4f56ea7c0..104d957e6cba 100644 --- a/src/java/org/apache/cassandra/gms/HeartBeatState.java +++ b/src/java/org/apache/cassandra/gms/HeartBeatState.java @@ -27,6 +27,8 @@ /** * HeartBeat State associated with any given endpoint. 
*/ +import org.apache.cassandra.io.util.File; + public class HeartBeatState { public static final int EMPTY_VERSION = -1; diff --git a/src/java/org/apache/cassandra/gms/TokenSerializer.java b/src/java/org/apache/cassandra/gms/TokenSerializer.java index 40d14f85c240..d73b077e1073 100644 --- a/src/java/org/apache/cassandra/gms/TokenSerializer.java +++ b/src/java/org/apache/cassandra/gms/TokenSerializer.java @@ -31,6 +31,8 @@ import java.util.Collection; +import org.apache.cassandra.io.util.File; + public class TokenSerializer { private static final Logger logger = LoggerFactory.getLogger(TokenSerializer.class); @@ -62,4 +64,4 @@ public static Collection deserialize(IPartitioner partitioner, DataInput } return tokens; } -} \ No newline at end of file +} diff --git a/src/java/org/apache/cassandra/gms/VersionedValue.java b/src/java/org/apache/cassandra/gms/VersionedValue.java index 880cb98e067f..659f61b38b7d 100644 --- a/src/java/org/apache/cassandra/gms/VersionedValue.java +++ b/src/java/org/apache/cassandra/gms/VersionedValue.java @@ -24,6 +24,7 @@ import java.util.UUID; import java.util.stream.Collectors; +import org.apache.cassandra.io.util.File; import static java.nio.charset.StandardCharsets.ISO_8859_1; import com.google.common.annotations.VisibleForTesting; diff --git a/src/java/org/apache/cassandra/hadoop/cql3/CqlBulkRecordWriter.java b/src/java/org/apache/cassandra/hadoop/cql3/CqlBulkRecordWriter.java index 45ffa4ed8156..b34048d002de 100644 --- a/src/java/org/apache/cassandra/hadoop/cql3/CqlBulkRecordWriter.java +++ b/src/java/org/apache/cassandra/hadoop/cql3/CqlBulkRecordWriter.java @@ -18,7 +18,6 @@ package org.apache.cassandra.hadoop.cql3; import java.io.Closeable; -import java.io.File; import java.io.IOException; import java.net.InetAddress; import java.net.InetSocketAddress; @@ -28,6 +27,7 @@ import java.util.concurrent.*; import com.google.common.net.HostAndPort; +import org.apache.cassandra.io.util.File; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -226,9 +226,9 @@ public void write(Object key, List values) throws IOException private File getTableDirectory() throws IOException { - File dir = new File(String.format("%s%s%s%s%s-%s", getOutputLocation(), File.separator, keyspace, File.separator, table, UUID.randomUUID().toString())); + File dir = new File(String.format("%s%s%s%s%s-%s", getOutputLocation(), File.pathSeparator(), keyspace, File.pathSeparator(), table, UUID.randomUUID().toString())); - if (!dir.exists() && !dir.mkdirs()) + if (!dir.exists() && !dir.tryCreateDirectories()) { throw new IOException("Failed to created output directory: " + dir); } diff --git a/src/java/org/apache/cassandra/hints/ChecksummedDataInput.java b/src/java/org/apache/cassandra/hints/ChecksummedDataInput.java index a71702744d5f..463f33e82c71 100644 --- a/src/java/org/apache/cassandra/hints/ChecksummedDataInput.java +++ b/src/java/org/apache/cassandra/hints/ChecksummedDataInput.java @@ -17,7 +17,6 @@ */ package org.apache.cassandra.hints; -import java.io.File; import java.io.IOException; import java.nio.ByteBuffer; import java.util.zip.CRC32; @@ -26,6 +25,7 @@ import org.apache.cassandra.io.compress.BufferType; import org.apache.cassandra.io.util.*; +import org.apache.cassandra.io.util.File; import org.apache.cassandra.utils.Throwables; import org.apache.cassandra.utils.NativeLibrary; diff --git a/src/java/org/apache/cassandra/hints/CompressedHintsWriter.java b/src/java/org/apache/cassandra/hints/CompressedHintsWriter.java index 8792e320ca09..63a59cd625b4 100644 --- 
a/src/java/org/apache/cassandra/hints/CompressedHintsWriter.java +++ b/src/java/org/apache/cassandra/hints/CompressedHintsWriter.java @@ -18,7 +18,6 @@ package org.apache.cassandra.hints; -import java.io.File; import java.io.IOException; import java.nio.ByteBuffer; import java.nio.channels.FileChannel; @@ -28,6 +27,8 @@ import org.apache.cassandra.io.compress.ICompressor; +import org.apache.cassandra.io.util.File; + public class CompressedHintsWriter extends HintsWriter { // compressed and uncompressed size is stored at the beginning of each compressed block diff --git a/src/java/org/apache/cassandra/hints/EncryptedHintsWriter.java b/src/java/org/apache/cassandra/hints/EncryptedHintsWriter.java index 4786d9c6aa46..f9822d9b2385 100644 --- a/src/java/org/apache/cassandra/hints/EncryptedHintsWriter.java +++ b/src/java/org/apache/cassandra/hints/EncryptedHintsWriter.java @@ -17,7 +17,6 @@ */ package org.apache.cassandra.hints; -import java.io.File; import java.io.IOException; import java.nio.ByteBuffer; import java.nio.channels.FileChannel; @@ -26,6 +25,7 @@ import com.google.common.annotations.VisibleForTesting; +import org.apache.cassandra.io.util.File; import org.apache.cassandra.security.EncryptionUtils; import org.apache.cassandra.io.compress.ICompressor; diff --git a/src/java/org/apache/cassandra/hints/HintDiagnostics.java b/src/java/org/apache/cassandra/hints/HintDiagnostics.java index 3ff0834c7fa6..285193b8a878 100644 --- a/src/java/org/apache/cassandra/hints/HintDiagnostics.java +++ b/src/java/org/apache/cassandra/hints/HintDiagnostics.java @@ -82,4 +82,4 @@ private static boolean isEnabled(HintEventType type) return service.isEnabled(HintEvent.class, type); } -} \ No newline at end of file +} diff --git a/src/java/org/apache/cassandra/hints/HintEvent.java b/src/java/org/apache/cassandra/hints/HintEvent.java index d8b6943c0b6d..695357e9b63c 100644 --- a/src/java/org/apache/cassandra/hints/HintEvent.java +++ b/src/java/org/apache/cassandra/hints/HintEvent.java @@ -99,4 +99,4 @@ public HashMap toMap() } return ret; } -} \ No newline at end of file +} diff --git a/src/java/org/apache/cassandra/hints/HintsCatalog.java b/src/java/org/apache/cassandra/hints/HintsCatalog.java index 81ec98ef8a39..af7940ab23a6 100644 --- a/src/java/org/apache/cassandra/hints/HintsCatalog.java +++ b/src/java/org/apache/cassandra/hints/HintsCatalog.java @@ -17,7 +17,6 @@ */ package org.apache.cassandra.hints; -import java.io.File; import java.io.IOException; import java.nio.file.Files; import java.nio.file.Path; @@ -27,6 +26,7 @@ import javax.annotation.Nullable; import com.google.common.collect.ImmutableMap; +import org.apache.cassandra.io.util.File; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -148,7 +148,7 @@ void exciseStore(UUID hostId) void fsyncDirectory() { - int fd = NativeLibrary.tryOpenDirectory(hintsDirectory.getAbsolutePath()); + int fd = NativeLibrary.tryOpenDirectory(hintsDirectory.absolutePath()); if (fd != -1) { try @@ -158,14 +158,14 @@ void fsyncDirectory() } catch (FSError e) // trySync failed { - logger.error("Unable to sync directory {}", hintsDirectory.getAbsolutePath(), e); + logger.error("Unable to sync directory {}", hintsDirectory.absolutePath(), e); FileUtils.handleFSErrorAndPropagate(e); } } else if (!FBUtilities.isWindows) { - logger.error("Unable to open directory {}", hintsDirectory.getAbsolutePath()); - FileUtils.handleFSErrorAndPropagate(new FSWriteError(new IOException(String.format("Unable to open hint directory %s", hintsDirectory.getAbsolutePath())), 
hintsDirectory.getAbsolutePath())); + logger.error("Unable to open directory {}", hintsDirectory.absolutePath()); + FileUtils.handleFSErrorAndPropagate(new FSWriteError(new IOException(String.format("Unable to open hint directory %s", hintsDirectory.absolutePath())), hintsDirectory.absolutePath())); } } diff --git a/src/java/org/apache/cassandra/hints/HintsDescriptor.java b/src/java/org/apache/cassandra/hints/HintsDescriptor.java index 1979637779e4..4c7acf1e56ab 100644 --- a/src/java/org/apache/cassandra/hints/HintsDescriptor.java +++ b/src/java/org/apache/cassandra/hints/HintsDescriptor.java @@ -19,7 +19,6 @@ import java.io.DataInput; import java.io.IOException; -import java.io.RandomAccessFile; import java.nio.charset.StandardCharsets; import java.nio.file.Files; import java.nio.file.Path; @@ -35,6 +34,9 @@ import com.google.common.base.MoreObjects; import com.google.common.base.Objects; import com.google.common.collect.ImmutableMap; + +import org.apache.cassandra.io.util.File; +import org.apache.cassandra.io.util.FileInputStreamPlus; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -232,13 +234,13 @@ static boolean isHintFileName(Path path) static Optional readFromFileQuietly(Path path) { - try (RandomAccessFile raf = new RandomAccessFile(path.toFile(), "r")) + try (FileInputStreamPlus raf = new FileInputStreamPlus(path)) { return Optional.of(deserialize(raf)); } catch (ChecksumMismatchException e) { - throw new FSReadError(e, path.toFile()); + throw new FSReadError(e, path); } catch (IOException e) { @@ -271,15 +273,15 @@ static void handleDescriptorIOE(IOException e, Path path) } } - static HintsDescriptor readFromFile(Path path) + static HintsDescriptor readFromFile(File path) { - try (RandomAccessFile raf = new RandomAccessFile(path.toFile(), "r")) + try (FileInputStreamPlus raf = new FileInputStreamPlus(path)) { return deserialize(raf); } catch (IOException e) { - throw new FSReadError(e, path.toFile()); + throw new FSReadError(e, path); } } diff --git a/src/java/org/apache/cassandra/hints/HintsDispatchExecutor.java b/src/java/org/apache/cassandra/hints/HintsDispatchExecutor.java index 705715ce4357..b6de749d9389 100644 --- a/src/java/org/apache/cassandra/hints/HintsDispatchExecutor.java +++ b/src/java/org/apache/cassandra/hints/HintsDispatchExecutor.java @@ -17,7 +17,6 @@ */ package org.apache.cassandra.hints; -import java.io.File; import java.util.Map; import java.util.UUID; import java.util.concurrent.ConcurrentHashMap; @@ -29,12 +28,14 @@ import java.util.function.Supplier; import com.google.common.util.concurrent.RateLimiter; + import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.cassandra.concurrent.ExecutorPlus; import org.apache.cassandra.config.DatabaseDescriptor; import org.apache.cassandra.io.FSReadError; +import org.apache.cassandra.io.util.File; import org.apache.cassandra.locator.InetAddressAndPort; import org.apache.cassandra.service.StorageService; import org.apache.cassandra.utils.concurrent.UncheckedInterruptedException; diff --git a/src/java/org/apache/cassandra/hints/HintsDispatcher.java b/src/java/org/apache/cassandra/hints/HintsDispatcher.java index 4fe1ae3da040..2b6d9a397696 100644 --- a/src/java/org/apache/cassandra/hints/HintsDispatcher.java +++ b/src/java/org/apache/cassandra/hints/HintsDispatcher.java @@ -17,23 +17,23 @@ */ package org.apache.cassandra.hints; -import java.io.File; import java.nio.ByteBuffer; import java.util.*; import java.util.function.BooleanSupplier; import java.util.function.Function; import 
com.google.common.util.concurrent.RateLimiter; -import org.apache.cassandra.utils.concurrent.Condition; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.cassandra.net.RequestCallback; import org.apache.cassandra.exceptions.RequestFailureReason; +import org.apache.cassandra.io.util.File; import org.apache.cassandra.locator.InetAddressAndPort; import org.apache.cassandra.metrics.HintsServiceMetrics; import org.apache.cassandra.net.Message; import org.apache.cassandra.net.MessagingService; +import org.apache.cassandra.utils.concurrent.Condition; import static org.apache.cassandra.hints.HintsDispatcher.Callback.Outcome.*; diff --git a/src/java/org/apache/cassandra/hints/HintsReader.java b/src/java/org/apache/cassandra/hints/HintsReader.java index 7514fd42698e..a3944ebede11 100644 --- a/src/java/org/apache/cassandra/hints/HintsReader.java +++ b/src/java/org/apache/cassandra/hints/HintsReader.java @@ -18,7 +18,6 @@ package org.apache.cassandra.hints; import java.io.EOFException; -import java.io.File; import java.io.IOException; import java.nio.ByteBuffer; import java.util.Iterator; @@ -28,6 +27,7 @@ import com.google.common.primitives.Ints; import com.google.common.util.concurrent.RateLimiter; +import org.apache.cassandra.io.util.File; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/src/java/org/apache/cassandra/hints/HintsService.java b/src/java/org/apache/cassandra/hints/HintsService.java index b727b351ac67..8108b500569e 100644 --- a/src/java/org/apache/cassandra/hints/HintsService.java +++ b/src/java/org/apache/cassandra/hints/HintsService.java @@ -17,7 +17,6 @@ */ package org.apache.cassandra.hints; -import java.io.File; import java.net.UnknownHostException; import java.util.Collection; import java.util.Collections; @@ -33,6 +32,7 @@ import com.google.common.annotations.VisibleForTesting; import com.google.common.collect.ImmutableMap; import org.apache.cassandra.db.Keyspace; +import org.apache.cassandra.io.util.File; import org.apache.cassandra.locator.ReplicaLayout; import org.apache.cassandra.utils.concurrent.Future; import org.slf4j.Logger; diff --git a/src/java/org/apache/cassandra/hints/HintsStore.java b/src/java/org/apache/cassandra/hints/HintsStore.java index 1e72f8be40f1..02a16993267f 100644 --- a/src/java/org/apache/cassandra/hints/HintsStore.java +++ b/src/java/org/apache/cassandra/hints/HintsStore.java @@ -17,7 +17,6 @@ */ package org.apache.cassandra.hints; -import java.io.File; import java.io.IOException; import java.util.*; import java.util.concurrent.ConcurrentHashMap; @@ -28,6 +27,7 @@ import com.google.common.annotations.VisibleForTesting; import com.google.common.collect.ImmutableMap; import com.google.common.collect.Iterables; +import org.apache.cassandra.io.util.File; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -187,7 +187,7 @@ private void deleteHints(Predicate predicate) void delete(HintsDescriptor descriptor) { File hintsFile = new File(hintsDirectory, descriptor.fileName()); - if (hintsFile.delete()) + if (hintsFile.tryDelete()) logger.info("Deleted hint file {}", descriptor.fileName()); else if (hintsFile.exists()) logger.error("Failed to delete hint file {}", descriptor.fileName()); @@ -195,7 +195,7 @@ else if (hintsFile.exists()) logger.info("Already deleted hint file {}", descriptor.fileName()); //noinspection ResultOfMethodCallIgnored - new File(hintsDirectory, descriptor.checksumFileName()).delete(); + new File(hintsDirectory, descriptor.checksumFileName()).tryDelete(); } boolean hasFiles() diff 
--git a/src/java/org/apache/cassandra/hints/HintsWriter.java b/src/java/org/apache/cassandra/hints/HintsWriter.java index 589802b87205..8c7089391f09 100644 --- a/src/java/org/apache/cassandra/hints/HintsWriter.java +++ b/src/java/org/apache/cassandra/hints/HintsWriter.java @@ -17,7 +17,6 @@ */ package org.apache.cassandra.hints; -import java.io.File; import java.io.IOException; import java.io.OutputStream; import java.nio.ByteBuffer; @@ -33,6 +32,7 @@ import org.apache.cassandra.io.FSWriteError; import org.apache.cassandra.io.util.DataOutputBuffer; import org.apache.cassandra.io.util.DataOutputBufferFixed; +import org.apache.cassandra.io.util.File; import org.apache.cassandra.utils.NativeLibrary; import org.apache.cassandra.utils.SyncUtil; import org.apache.cassandra.utils.Throwables; @@ -295,7 +295,7 @@ private void maybeSkipCache() // don't skip page cache for tiny files, on the assumption that if they are tiny, the target node is probably // alive, and if so, the file will be closed and dispatched shortly (within a minute), and the file will be dropped. if (position >= DatabaseDescriptor.getTrickleFsyncIntervalInKb() * 1024L) - NativeLibrary.trySkipCache(fd, 0, position - (position % PAGE_SIZE), file.getPath()); + NativeLibrary.trySkipCache(fd, 0, position - (position % PAGE_SIZE), file.path()); } } } diff --git a/src/java/org/apache/cassandra/hints/package-info.java b/src/java/org/apache/cassandra/hints/package-info.java index faa7b9fa7035..b853f31a9973 100644 --- a/src/java/org/apache/cassandra/hints/package-info.java +++ b/src/java/org/apache/cassandra/hints/package-info.java @@ -41,4 +41,4 @@ * {@link org.apache.cassandra.hints.HintsService} wraps the catalog, the pool, and the two executors, acting as a front-end * for hints. */ -package org.apache.cassandra.hints; \ No newline at end of file +package org.apache.cassandra.hints; diff --git a/src/java/org/apache/cassandra/index/internal/keys/KeysIndex.java b/src/java/org/apache/cassandra/index/internal/keys/KeysIndex.java index b044bbee4d85..4c8e75d634c1 100644 --- a/src/java/org/apache/cassandra/index/internal/keys/KeysIndex.java +++ b/src/java/org/apache/cassandra/index/internal/keys/KeysIndex.java @@ -86,4 +86,4 @@ public boolean isStale(Row row, ByteBuffer indexValue, int nowInSec) || !cell.isLive(nowInSec) || compare(indexValue, cell) != 0); } -} \ No newline at end of file +} diff --git a/src/java/org/apache/cassandra/index/internal/keys/KeysSearcher.java b/src/java/org/apache/cassandra/index/internal/keys/KeysSearcher.java index 2114d4249e09..9e865d9d18ec 100644 --- a/src/java/org/apache/cassandra/index/internal/keys/KeysSearcher.java +++ b/src/java/org/apache/cassandra/index/internal/keys/KeysSearcher.java @@ -160,4 +160,4 @@ private UnfilteredRowIterator filterIfStale(UnfilteredRowIterator iterator, return iterator; } } -} \ No newline at end of file +} diff --git a/src/java/org/apache/cassandra/index/sasi/SASIIndexBuilder.java b/src/java/org/apache/cassandra/index/sasi/SASIIndexBuilder.java index bb42dc2d178a..21716537279a 100644 --- a/src/java/org/apache/cassandra/index/sasi/SASIIndexBuilder.java +++ b/src/java/org/apache/cassandra/index/sasi/SASIIndexBuilder.java @@ -20,10 +20,10 @@ */ package org.apache.cassandra.index.sasi; -import java.io.File; import java.io.IOException; import java.util.*; +import org.apache.cassandra.io.util.File; import org.apache.cassandra.schema.ColumnMetadata; import org.apache.cassandra.db.ColumnFamilyStore; import org.apache.cassandra.db.DecoratedKey; diff --git 
a/src/java/org/apache/cassandra/index/sasi/SSTableIndex.java b/src/java/org/apache/cassandra/index/sasi/SSTableIndex.java index c67c39c645bc..d756737856ba 100644 --- a/src/java/org/apache/cassandra/index/sasi/SSTableIndex.java +++ b/src/java/org/apache/cassandra/index/sasi/SSTableIndex.java @@ -17,7 +17,6 @@ */ package org.apache.cassandra.index.sasi; -import java.io.File; import java.io.IOException; import java.nio.ByteBuffer; import java.util.concurrent.atomic.AtomicBoolean; @@ -33,6 +32,7 @@ import org.apache.cassandra.index.sasi.utils.RangeIterator; import org.apache.cassandra.io.FSReadError; import org.apache.cassandra.io.sstable.format.SSTableReader; +import org.apache.cassandra.io.util.File; import org.apache.cassandra.io.util.FileUtils; import org.apache.cassandra.utils.concurrent.Ref; diff --git a/src/java/org/apache/cassandra/index/sasi/analyzer/DelimiterAnalyzer.java b/src/java/org/apache/cassandra/index/sasi/analyzer/DelimiterAnalyzer.java index 05dfedc6c49f..b7f297b774df 100644 --- a/src/java/org/apache/cassandra/index/sasi/analyzer/DelimiterAnalyzer.java +++ b/src/java/org/apache/cassandra/index/sasi/analyzer/DelimiterAnalyzer.java @@ -108,4 +108,4 @@ public boolean isCompatibleWith(AbstractType validator) { return VALID_ANALYZABLE_TYPES.containsKey(validator); } -} \ No newline at end of file +} diff --git a/src/java/org/apache/cassandra/index/sasi/analyzer/filter/StopWordFactory.java b/src/java/org/apache/cassandra/index/sasi/analyzer/filter/StopWordFactory.java index 1548a6a63380..56c07f310759 100644 --- a/src/java/org/apache/cassandra/index/sasi/analyzer/filter/StopWordFactory.java +++ b/src/java/org/apache/cassandra/index/sasi/analyzer/filter/StopWordFactory.java @@ -18,7 +18,6 @@ package org.apache.cassandra.index.sasi.analyzer.filter; import java.io.BufferedReader; -import java.io.File; import java.io.InputStream; import java.io.InputStreamReader; import java.nio.charset.StandardCharsets; @@ -33,6 +32,7 @@ import com.github.benmanes.caffeine.cache.Caffeine; import com.github.benmanes.caffeine.cache.LoadingCache; +import org.apache.cassandra.io.util.File; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -45,7 +45,7 @@ public class StopWordFactory private static final String DEFAULT_RESOURCE_EXT = "_ST.txt"; private static final String DEFAULT_RESOURCE_PREFIX = StopWordFactory.class.getPackage() - .getName().replace(".", File.separator); + .getName().replace(".", File.pathSeparator()); private static final Set SUPPORTED_LANGUAGES = new HashSet<>( Arrays.asList("ar","bg","cs","de","en","es","fi","fr","hi","hu","it", "pl","pt","ro","ru","sv")); @@ -74,7 +74,7 @@ public static Set getStopWordsForLanguage(Locale locale) private static Set getStopWordsFromResource(String language) { Set stopWords = new HashSet<>(); - String resourceName = DEFAULT_RESOURCE_PREFIX + File.separator + language + DEFAULT_RESOURCE_EXT; + String resourceName = DEFAULT_RESOURCE_PREFIX + File.pathSeparator() + language + DEFAULT_RESOURCE_EXT; try (InputStream is = StopWordFactory.class.getClassLoader().getResourceAsStream(resourceName); BufferedReader r = new BufferedReader(new InputStreamReader(is, StandardCharsets.UTF_8))) { diff --git a/src/java/org/apache/cassandra/index/sasi/conf/DataTracker.java b/src/java/org/apache/cassandra/index/sasi/conf/DataTracker.java index 99516b8e98ba..bf2293f4874d 100644 --- a/src/java/org/apache/cassandra/index/sasi/conf/DataTracker.java +++ b/src/java/org/apache/cassandra/index/sasi/conf/DataTracker.java @@ -17,7 +17,6 @@ */ package 
org.apache.cassandra.index.sasi.conf; -import java.io.File; import java.util.Collection; import java.util.Collections; import java.util.HashSet; @@ -29,6 +28,7 @@ import org.apache.cassandra.index.sasi.SSTableIndex; import org.apache.cassandra.index.sasi.conf.view.View; import org.apache.cassandra.io.sstable.format.SSTableReader; +import org.apache.cassandra.io.util.File; import org.apache.cassandra.utils.Pair; import org.slf4j.Logger; @@ -181,7 +181,7 @@ private Pair, Set> getBuiltIndexes(Collection cmp, Function keyFetcher = keyReader; comparator = cmp; - indexPath = index.getAbsolutePath(); + indexPath = index.absolutePath(); - RandomAccessFile backingFile = null; - try - { - backingFile = new RandomAccessFile(index, "r"); + try (FileInputStreamPlus backingFile = new FileInputStreamPlus(index)) + { descriptor = new Descriptor(backingFile.readUTF()); termSize = OnDiskIndexBuilder.TermSize.of(backingFile.readShort()); @@ -141,32 +142,29 @@ public OnDiskIndex(File index, AbstractType cmp, Function mode = OnDiskIndexBuilder.Mode.mode(backingFile.readUTF()); hasMarkedPartials = backingFile.readBoolean(); - indexSize = backingFile.length(); - indexFile = new MappedBuffer(new ChannelProxy(indexPath, backingFile.getChannel())); - - // start of the levels - indexFile.position(indexFile.getLong(indexSize - 8)); - - int numLevels = indexFile.getInt(); - levels = new PointerLevel[numLevels]; - for (int i = 0; i < levels.length; i++) - { - int blockCount = indexFile.getInt(); - levels[i] = new PointerLevel(indexFile.position(), blockCount); - indexFile.position(indexFile.position() + blockCount * 8); - } - - int blockCount = indexFile.getInt(); - dataLevel = new DataLevel(indexFile.position(), blockCount); + FileChannel channel = index.newReadChannel(); + indexSize = channel.size(); + indexFile = new MappedBuffer(new ChannelProxy(indexPath, channel)); } catch (IOException e) { throw new FSReadError(e, index); } - finally + + // start of the levels + indexFile.position(indexFile.getLong(indexSize - 8)); + + int numLevels = indexFile.getInt(); + levels = new PointerLevel[numLevels]; + for (int i = 0; i < levels.length; i++) { - FileUtils.closeQuietly(backingFile); + int blockCount = indexFile.getInt(); + levels[i] = new PointerLevel(indexFile.position(), blockCount); + indexFile.position(indexFile.position() + blockCount * 8); } + + int blockCount = indexFile.getInt(); + dataLevel = new DataLevel(indexFile.position(), blockCount); } public boolean hasMarkedPartials() diff --git a/src/java/org/apache/cassandra/index/sasi/disk/OnDiskIndexBuilder.java b/src/java/org/apache/cassandra/index/sasi/disk/OnDiskIndexBuilder.java index 02985394c16a..9ba9f9c72951 100644 --- a/src/java/org/apache/cassandra/index/sasi/disk/OnDiskIndexBuilder.java +++ b/src/java/org/apache/cassandra/index/sasi/disk/OnDiskIndexBuilder.java @@ -17,7 +17,6 @@ */ package org.apache.cassandra.index.sasi.disk; -import java.io.File; import java.io.IOException; import java.nio.ByteBuffer; import java.util.*; @@ -32,6 +31,7 @@ import org.apache.cassandra.db.marshal.*; import org.apache.cassandra.io.FSWriteError; import org.apache.cassandra.io.util.*; +import org.apache.cassandra.io.util.File; import org.apache.cassandra.utils.ByteBufferUtil; import org.apache.cassandra.utils.FBUtilities; import org.apache.cassandra.utils.Pair; @@ -247,15 +247,7 @@ protected boolean finish(Descriptor descriptor, File file) throws FSWriteError // no terms means there is nothing to build if (terms.isEmpty()) { - try - { - file.createNewFile(); - } - catch 
(IOException e) - { - throw new FSWriteError(e, file); - } - + file.createFileIfNotExists(); return false; } diff --git a/src/java/org/apache/cassandra/index/sasi/disk/PerSSTableIndexWriter.java b/src/java/org/apache/cassandra/index/sasi/disk/PerSSTableIndexWriter.java index 444db8370398..fb5e9b9e7f83 100644 --- a/src/java/org/apache/cassandra/index/sasi/disk/PerSSTableIndexWriter.java +++ b/src/java/org/apache/cassandra/index/sasi/disk/PerSSTableIndexWriter.java @@ -17,13 +17,13 @@ */ package org.apache.cassandra.index.sasi.disk; -import java.io.File; import java.nio.ByteBuffer; import java.util.HashSet; import java.util.Map; import java.util.Set; import java.util.concurrent.*; +import org.apache.cassandra.io.util.File; import org.apache.cassandra.concurrent.ExecutorPlus; import org.apache.cassandra.schema.ColumnMetadata; import org.apache.cassandra.db.DecoratedKey; diff --git a/src/java/org/apache/cassandra/index/sasi/disk/TokenTree.java b/src/java/org/apache/cassandra/index/sasi/disk/TokenTree.java index e510cdd80d4b..3a401cae8a2f 100644 --- a/src/java/org/apache/cassandra/index/sasi/disk/TokenTree.java +++ b/src/java/org/apache/cassandra/index/sasi/disk/TokenTree.java @@ -520,4 +520,4 @@ public DecoratedKey computeNext() return index < offsets.length ? keyFetcher.apply(offsets[index++]) : endOfData(); } } -} \ No newline at end of file +} diff --git a/src/java/org/apache/cassandra/index/sasi/disk/TokenTreeBuilder.java b/src/java/org/apache/cassandra/index/sasi/disk/TokenTreeBuilder.java index 29cecc81a00e..01a536ccdab9 100644 --- a/src/java/org/apache/cassandra/index/sasi/disk/TokenTreeBuilder.java +++ b/src/java/org/apache/cassandra/index/sasi/disk/TokenTreeBuilder.java @@ -75,4 +75,4 @@ public static EntryType of(int ordinal) int serializedSize(); void write(DataOutputPlus out) throws IOException; -} \ No newline at end of file +} diff --git a/src/java/org/apache/cassandra/index/sasi/memory/KeyRangeIterator.java b/src/java/org/apache/cassandra/index/sasi/memory/KeyRangeIterator.java index 0f681b7fe09d..d60914ddcf59 100644 --- a/src/java/org/apache/cassandra/index/sasi/memory/KeyRangeIterator.java +++ b/src/java/org/apache/cassandra/index/sasi/memory/KeyRangeIterator.java @@ -126,4 +126,4 @@ public Iterator iterator() return keys.iterator(); } } -} \ No newline at end of file +} diff --git a/src/java/org/apache/cassandra/index/sasi/utils/CombinedTerm.java b/src/java/org/apache/cassandra/index/sasi/utils/CombinedTerm.java index 81e535def06f..cc327bcfdd58 100644 --- a/src/java/org/apache/cassandra/index/sasi/utils/CombinedTerm.java +++ b/src/java/org/apache/cassandra/index/sasi/utils/CombinedTerm.java @@ -81,4 +81,4 @@ public int compareTo(CombinedValue o) { return term.compareTo(comparator, o.get().getTerm()); } -} \ No newline at end of file +} diff --git a/src/java/org/apache/cassandra/index/sasi/utils/trie/Cursor.java b/src/java/org/apache/cassandra/index/sasi/utils/trie/Cursor.java index dde3c8a56c9c..7c86b9709ef5 100644 --- a/src/java/org/apache/cassandra/index/sasi/utils/trie/Cursor.java +++ b/src/java/org/apache/cassandra/index/sasi/utils/trie/Cursor.java @@ -80,4 +80,4 @@ enum Decision * Note: Not all operations support {@link Decision#REMOVE}. 
*/ Decision select(Map.Entry entry); -} \ No newline at end of file +} diff --git a/src/java/org/apache/cassandra/index/sasi/utils/trie/PatriciaTrie.java b/src/java/org/apache/cassandra/index/sasi/utils/trie/PatriciaTrie.java index 9187894a8e57..a36af9828c8b 100644 --- a/src/java/org/apache/cassandra/index/sasi/utils/trie/PatriciaTrie.java +++ b/src/java/org/apache/cassandra/index/sasi/utils/trie/PatriciaTrie.java @@ -1258,4 +1258,4 @@ public void remove() } } } -} \ No newline at end of file +} diff --git a/src/java/org/apache/cassandra/io/FSError.java b/src/java/org/apache/cassandra/io/FSError.java index e09bac7a429b..65313947a126 100644 --- a/src/java/org/apache/cassandra/io/FSError.java +++ b/src/java/org/apache/cassandra/io/FSError.java @@ -17,17 +17,25 @@ */ package org.apache.cassandra.io; -import java.io.File; import java.io.IOError; +import java.nio.file.Path; + +import org.apache.cassandra.io.util.File; public abstract class FSError extends IOError { - public final File path; + public final String path; public FSError(Throwable cause, File path) { super(cause); - this.path = path; + this.path = path.toString(); + } + + public FSError(Throwable cause, Path path) + { + super(cause); + this.path = path.toString(); } /** diff --git a/src/java/org/apache/cassandra/io/FSReadError.java b/src/java/org/apache/cassandra/io/FSReadError.java index c557fc568b79..688182a324a0 100644 --- a/src/java/org/apache/cassandra/io/FSReadError.java +++ b/src/java/org/apache/cassandra/io/FSReadError.java @@ -17,10 +17,18 @@ */ package org.apache.cassandra.io; -import java.io.File; + +import java.nio.file.Path; + +import org.apache.cassandra.io.util.File; public class FSReadError extends FSError { + public FSReadError(Throwable cause, Path path) + { + super(cause, path); + } + public FSReadError(Throwable cause, File path) { super(cause, path); diff --git a/src/java/org/apache/cassandra/io/FSWriteError.java b/src/java/org/apache/cassandra/io/FSWriteError.java index b419086be095..481795db0e47 100644 --- a/src/java/org/apache/cassandra/io/FSWriteError.java +++ b/src/java/org/apache/cassandra/io/FSWriteError.java @@ -17,10 +17,18 @@ */ package org.apache.cassandra.io; -import java.io.File; + +import java.nio.file.Path; + +import org.apache.cassandra.io.util.File; public class FSWriteError extends FSError { + public FSWriteError(Throwable cause, Path path) + { + super(cause, path); + } + public FSWriteError(Throwable cause, File path) { super(cause, path); diff --git a/src/java/org/apache/cassandra/io/compress/CompressedSequentialWriter.java b/src/java/org/apache/cassandra/io/compress/CompressedSequentialWriter.java index 219082482a19..8321345876a6 100644 --- a/src/java/org/apache/cassandra/io/compress/CompressedSequentialWriter.java +++ b/src/java/org/apache/cassandra/io/compress/CompressedSequentialWriter.java @@ -19,7 +19,6 @@ import java.io.DataOutputStream; import java.io.EOFException; -import java.io.File; import java.io.IOException; import java.nio.ByteBuffer; import java.nio.channels.Channels; @@ -31,6 +30,7 @@ import org.apache.cassandra.io.sstable.CorruptSSTableException; import org.apache.cassandra.io.sstable.metadata.MetadataCollector; import org.apache.cassandra.io.util.*; +import org.apache.cassandra.io.util.File; import org.apache.cassandra.schema.CompressionParams; import org.apache.cassandra.utils.ByteBufferUtil; diff --git a/src/java/org/apache/cassandra/io/compress/CompressionMetadata.java b/src/java/org/apache/cassandra/io/compress/CompressionMetadata.java index e472ca886cc9..cc6ce6bc8b74 
100644 --- a/src/java/org/apache/cassandra/io/compress/CompressionMetadata.java +++ b/src/java/org/apache/cassandra/io/compress/CompressionMetadata.java @@ -18,16 +18,15 @@ package org.apache.cassandra.io.compress; import java.nio.file.Files; +import java.nio.file.NoSuchFileException; import java.nio.file.Paths; -import java.io.BufferedOutputStream; import java.io.DataInput; import java.io.DataInputStream; import java.io.DataOutput; -import java.io.DataOutputStream; import java.io.EOFException; -import java.io.File; + +import org.apache.cassandra.io.util.*; import java.io.FileNotFoundException; -import java.io.FileOutputStream; import java.io.IOException; import java.util.Collection; import java.util.HashMap; @@ -48,12 +47,7 @@ import org.apache.cassandra.io.sstable.CorruptSSTableException; import org.apache.cassandra.io.sstable.Descriptor; import org.apache.cassandra.io.sstable.format.SSTableReader; -import org.apache.cassandra.io.util.DataInputPlus; -import org.apache.cassandra.io.util.DataOutputPlus; -import org.apache.cassandra.io.util.Memory; -import org.apache.cassandra.io.util.SafeMemory; import org.apache.cassandra.schema.CompressionParams; -import org.apache.cassandra.utils.SyncUtil; import org.apache.cassandra.utils.concurrent.Transactional; import org.apache.cassandra.utils.concurrent.Ref; @@ -104,7 +98,7 @@ public CompressionMetadata(String indexFilePath, long compressedLength, boolean { this.indexFilePath = indexFilePath; - try (DataInputStream stream = new DataInputStream(Files.newInputStream(Paths.get(indexFilePath)))) + try (FileInputStreamPlus stream = new File(indexFilePath).newInputStream()) { String compressorName = stream.readUTF(); int optionCount = stream.readInt(); @@ -132,7 +126,7 @@ public CompressionMetadata(String indexFilePath, long compressedLength, boolean compressedFileLength = compressedLength; chunkOffsets = readChunkOffsets(stream); } - catch (FileNotFoundException e) + catch (FileNotFoundException | NoSuchFileException e) { throw new RuntimeException(e); } @@ -410,17 +404,16 @@ public void doPrepare() } // flush the data to disk - try (FileOutputStream fos = new FileOutputStream(filePath); - DataOutputStream out = new DataOutputStream(new BufferedOutputStream(fos))) + try (FileOutputStreamPlus out = new FileOutputStreamPlus(filePath)) { writeHeader(out, dataLength, count); for (int i = 0; i < count; i++) out.writeLong(offsets.getLong(i * 8L)); out.flush(); - SyncUtil.sync(fos); + out.sync(); } - catch (FileNotFoundException fnfe) + catch (FileNotFoundException | NoSuchFileException fnfe) { throw Throwables.propagate(fnfe); } diff --git a/src/java/org/apache/cassandra/io/sstable/AbstractSSTableSimpleWriter.java b/src/java/org/apache/cassandra/io/sstable/AbstractSSTableSimpleWriter.java index 4eaf1fe64a01..0e086e656c75 100644 --- a/src/java/org/apache/cassandra/io/sstable/AbstractSSTableSimpleWriter.java +++ b/src/java/org/apache/cassandra/io/sstable/AbstractSSTableSimpleWriter.java @@ -17,8 +17,7 @@ */ package org.apache.cassandra.io.sstable; -import java.io.File; -import java.io.FileFilter; + import java.io.IOException; import java.io.Closeable; import java.nio.ByteBuffer; @@ -31,6 +30,7 @@ import org.apache.cassandra.db.partitions.PartitionUpdate; import org.apache.cassandra.db.rows.EncodingStats; import org.apache.cassandra.io.sstable.format.SSTableFormat; +import org.apache.cassandra.io.util.File; import org.apache.cassandra.schema.TableMetadataRef; import org.apache.cassandra.service.ActiveRepairService; @@ -91,19 +91,15 @@ private static Descriptor 
createDescriptor(File directory, final String keyspace private static int getNextGeneration(File directory, final String columnFamily) { final Set existing = new HashSet<>(); - directory.listFiles(new FileFilter() - { - public boolean accept(File file) - { - Descriptor desc = SSTable.tryDescriptorFromFilename(file); - if (desc == null) - return false; + directory.tryList(file -> { + Descriptor desc = SSTable.tryDescriptorFromFilename(file); + if (desc == null) + return false; - if (desc.cfname.equals(columnFamily)) - existing.add(desc); + if (desc.cfname.equals(columnFamily)) + existing.add(desc); - return false; - } + return false; }); int maxGen = generation.getAndIncrement(); for (Descriptor desc : existing) diff --git a/src/java/org/apache/cassandra/io/sstable/CQLSSTableWriter.java b/src/java/org/apache/cassandra/io/sstable/CQLSSTableWriter.java index fa8a60fa9b59..6c0703273b16 100644 --- a/src/java/org/apache/cassandra/io/sstable/CQLSSTableWriter.java +++ b/src/java/org/apache/cassandra/io/sstable/CQLSSTableWriter.java @@ -18,7 +18,6 @@ package org.apache.cassandra.io.sstable; import java.io.Closeable; -import java.io.File; import java.io.IOException; import java.nio.ByteBuffer; import java.util.ArrayList; @@ -49,6 +48,7 @@ import org.apache.cassandra.exceptions.InvalidRequestException; import org.apache.cassandra.exceptions.SyntaxException; import org.apache.cassandra.io.sstable.format.SSTableFormat; +import org.apache.cassandra.io.util.File; import org.apache.cassandra.schema.*; import org.apache.cassandra.service.ClientState; import org.apache.cassandra.transport.ProtocolVersion; @@ -379,7 +379,7 @@ public Builder inDirectory(File directory) { if (!directory.exists()) throw new IllegalArgumentException(directory + " doesn't exists"); - if (!directory.canWrite()) + if (!directory.isWritable()) throw new IllegalArgumentException(directory + " exists but is not writable"); this.directory = directory; diff --git a/src/java/org/apache/cassandra/io/sstable/CorruptSSTableException.java b/src/java/org/apache/cassandra/io/sstable/CorruptSSTableException.java index 93be2eec6a7e..991a91d904f8 100644 --- a/src/java/org/apache/cassandra/io/sstable/CorruptSSTableException.java +++ b/src/java/org/apache/cassandra/io/sstable/CorruptSSTableException.java @@ -17,7 +17,8 @@ */ package org.apache.cassandra.io.sstable; -import java.io.File; + +import org.apache.cassandra.io.util.File; public class CorruptSSTableException extends RuntimeException { diff --git a/src/java/org/apache/cassandra/io/sstable/Descriptor.java b/src/java/org/apache/cassandra/io/sstable/Descriptor.java index b781ebf50cd5..faca4873ecff 100644 --- a/src/java/org/apache/cassandra/io/sstable/Descriptor.java +++ b/src/java/org/apache/cassandra/io/sstable/Descriptor.java @@ -17,7 +17,6 @@ */ package org.apache.cassandra.io.sstable; -import java.io.File; import java.io.IOError; import java.io.IOException; import java.util.*; @@ -32,6 +31,7 @@ import org.apache.cassandra.io.sstable.format.Version; import org.apache.cassandra.io.sstable.metadata.IMetadataSerializer; import org.apache.cassandra.io.sstable.metadata.MetadataSerializer; +import org.apache.cassandra.io.util.File; import org.apache.cassandra.utils.Pair; import org.apache.cassandra.utils.UUIDGen; @@ -90,14 +90,7 @@ public Descriptor(Version version, File directory, String ksname, String cfname, { assert version != null && directory != null && ksname != null && cfname != null && formatType.info.getLatestVersion().getClass().equals(version.getClass()); this.version = version; - try 
- { - this.directory = directory.getCanonicalFile(); - } - catch (IOException e) - { - throw new IOError(e); - } + this.directory = directory.toCanonical(); this.ksname = ksname; this.cfname = cfname; this.generation = generation; @@ -139,7 +132,7 @@ public String filenameFor(Component component) public String baseFilename() { StringBuilder buff = new StringBuilder(); - buff.append(directory).append(File.separatorChar); + buff.append(directory).append(File.pathSeparator()); appendFileName(buff); return buff.toString(); } @@ -156,7 +149,7 @@ public String relativeFilenameFor(Component component) final StringBuilder buff = new StringBuilder(); if (Directories.isSecondaryIndexFolder(directory)) { - buff.append(directory.getName()).append(File.separator); + buff.append(directory.name()).append(File.pathSeparator()); } appendFileName(buff); @@ -172,7 +165,7 @@ public SSTableFormat getFormat() /** Return any temporary files found in the directory */ public List getTemporaryFiles() { - File[] tmpFiles = directory.listFiles((dir, name) -> + File[] tmpFiles = directory.tryList((dir, name) -> name.endsWith(Descriptor.TMP_EXT)); List ret = new ArrayList<>(tmpFiles.length); @@ -184,7 +177,7 @@ public List getTemporaryFiles() public static boolean isValidFile(File file) { - String filename = file.getName(); + String filename = file.name(); return filename.endsWith(".db") && !LEGACY_TMP_REGEX.matcher(filename).matches(); } @@ -242,9 +235,9 @@ public static Pair fromFilenameWithComponent(File file) // We need to extract the keyspace and table names from the parent directories, so make sure we deal with the // absolute path. if (!file.isAbsolute()) - file = file.getAbsoluteFile(); + file = file.toAbsolute(); - String name = file.getName(); + String name = file.name(); List tokens = filenameSplitter.splitToList(name); int size = tokens.size(); @@ -300,25 +293,25 @@ public static Pair fromFilenameWithComponent(File file) String indexName = ""; if (Directories.isSecondaryIndexFolder(tableDir)) { - indexName = tableDir.getName(); + indexName = tableDir.name(); tableDir = parentOf(name, tableDir); } // Then it can be a backup or a snapshot - if (tableDir.getName().equals(Directories.BACKUPS_SUBDIR)) - tableDir = tableDir.getParentFile(); - else if (parentOf(name, tableDir).getName().equals(Directories.SNAPSHOT_SUBDIR)) + if (tableDir.name().equals(Directories.BACKUPS_SUBDIR)) + tableDir = tableDir.parent(); + else if (parentOf(name, tableDir).name().equals(Directories.SNAPSHOT_SUBDIR)) tableDir = parentOf(name, parentOf(name, tableDir)); - String table = tableDir.getName().split("-")[0] + indexName; - String keyspace = parentOf(name, tableDir).getName(); + String table = tableDir.name().split("-")[0] + indexName; + String keyspace = parentOf(name, tableDir).name(); return Pair.create(new Descriptor(version, directory, keyspace, table, generation, format), component); } private static File parentOf(String name, File file) { - File parent = file.getParentFile(); + File parent = file.parent(); if (parent == null) throw invalidSSTable(name, "cannot extract keyspace and table name; make sure the sstable is in the proper sub-directories"); return parent; diff --git a/src/java/org/apache/cassandra/io/sstable/IndexSummaryManagerMBean.java b/src/java/org/apache/cassandra/io/sstable/IndexSummaryManagerMBean.java index 338235052d10..ad791855bd9d 100644 --- a/src/java/org/apache/cassandra/io/sstable/IndexSummaryManagerMBean.java +++ b/src/java/org/apache/cassandra/io/sstable/IndexSummaryManagerMBean.java @@ -42,4 +42,4 @@ 
public interface IndexSummaryManagerMBean public int getResizeIntervalInMinutes(); public void setResizeIntervalInMinutes(int resizeIntervalInMinutes); -} \ No newline at end of file +} diff --git a/src/java/org/apache/cassandra/io/sstable/KeyIterator.java b/src/java/org/apache/cassandra/io/sstable/KeyIterator.java index 1a5792c4fad2..ceacf87cb766 100644 --- a/src/java/org/apache/cassandra/io/sstable/KeyIterator.java +++ b/src/java/org/apache/cassandra/io/sstable/KeyIterator.java @@ -17,7 +17,6 @@ */ package org.apache.cassandra.io.sstable; -import java.io.File; import java.io.IOException; import java.util.concurrent.locks.ReadWriteLock; import java.util.concurrent.locks.ReentrantReadWriteLock; @@ -26,6 +25,7 @@ import org.apache.cassandra.db.RowIndexEntry; import org.apache.cassandra.dht.IPartitioner; import org.apache.cassandra.io.util.DataInputPlus; +import org.apache.cassandra.io.util.File; import org.apache.cassandra.io.util.RandomAccessReader; import org.apache.cassandra.schema.TableMetadata; import org.apache.cassandra.utils.AbstractIterator; diff --git a/src/java/org/apache/cassandra/io/sstable/SSTable.java b/src/java/org/apache/cassandra/io/sstable/SSTable.java index 0471be3238cf..b56a286c6aa2 100644 --- a/src/java/org/apache/cassandra/io/sstable/SSTable.java +++ b/src/java/org/apache/cassandra/io/sstable/SSTable.java @@ -17,9 +17,14 @@ */ package org.apache.cassandra.io.sstable; -import java.io.*; + +import java.io.FileNotFoundException; +import java.io.IOError; +import java.io.IOException; +import java.io.PrintWriter; import java.nio.ByteBuffer; -import java.nio.charset.Charset; +import java.nio.file.Files; +import java.nio.file.NoSuchFileException; import java.util.*; import java.util.concurrent.CopyOnWriteArraySet; @@ -27,7 +32,7 @@ import com.google.common.base.Predicates; import com.google.common.collect.Collections2; import com.google.common.collect.Sets; -import com.google.common.io.Files; +import org.apache.cassandra.io.util.File; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -47,6 +52,7 @@ import org.apache.cassandra.utils.Pair; import org.apache.cassandra.utils.memory.HeapAllocator; +import static org.apache.cassandra.io.util.File.WriteMode.APPEND; import static org.apache.cassandra.service.ActiveRepairService.NO_PENDING_REPAIR; import static org.apache.cassandra.service.ActiveRepairService.UNREPAIRED_SSTABLE; @@ -233,7 +239,7 @@ public static Set componentsFor(final Descriptor desc) { return readTOC(desc); } - catch (FileNotFoundException e) + catch (FileNotFoundException | NoSuchFileException e) { Set components = discoverComponentsFor(desc); if (components.isEmpty()) @@ -317,7 +323,7 @@ protected static Set readTOC(Descriptor descriptor) throws IOExceptio protected static Set readTOC(Descriptor descriptor, boolean skipMissing) throws IOException { File tocFile = new File(descriptor.filenameFor(Component.TOC)); - List componentNames = Files.readLines(tocFile, Charset.defaultCharset()); + List componentNames = Files.readAllLines(tocFile.toPath()); Set components = Sets.newHashSetWithExpectedSize(componentNames.size()); for (String componentName : componentNames) { @@ -336,7 +342,7 @@ protected static Set readTOC(Descriptor descriptor, boolean skipMissi protected static void appendTOC(Descriptor descriptor, Collection components) { File tocFile = new File(descriptor.filenameFor(Component.TOC)); - try (PrintWriter w = new PrintWriter(new FileWriter(tocFile, true))) + try (PrintWriter w = new PrintWriter(tocFile.newWriter(APPEND))) { for (Component component 
: components) w.println(component.name); diff --git a/src/java/org/apache/cassandra/io/sstable/SSTableHeaderFix.java b/src/java/org/apache/cassandra/io/sstable/SSTableHeaderFix.java index 35772599b8e3..ad0a72246616 100644 --- a/src/java/org/apache/cassandra/io/sstable/SSTableHeaderFix.java +++ b/src/java/org/apache/cassandra/io/sstable/SSTableHeaderFix.java @@ -17,7 +17,6 @@ */ package org.apache.cassandra.io.sstable; -import java.io.File; import java.io.IOException; import java.nio.ByteBuffer; import java.nio.charset.CharacterCodingException; @@ -36,6 +35,7 @@ import java.util.stream.Collectors; import java.util.stream.Stream; +import org.apache.cassandra.io.util.File; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -298,7 +298,7 @@ void processFileOrDirectory(Path path) { Stream.of(path) .flatMap(SSTableHeaderFix::maybeExpandDirectory) - .filter(p -> Descriptor.fromFilenameWithComponent(p.toFile()).right.type == Component.Type.DATA) + .filter(p -> Descriptor.fromFilenameWithComponent(new File(p)).right.type == Component.Type.DATA) .map(Path::toString) .map(Descriptor::fromFilename) .forEach(descriptors::add); @@ -888,26 +888,26 @@ public void prepare() private void scanDataDirectory(Directories.DataDirectory dataDirectory) { info.accept(String.format("Scanning data directory %s", dataDirectory.location)); - File[] ksDirs = dataDirectory.location.listFiles(); + File[] ksDirs = dataDirectory.location.tryList(); if (ksDirs == null) return; for (File ksDir : ksDirs) { - if (!ksDir.isDirectory() || !ksDir.canRead()) + if (!ksDir.isDirectory() || !ksDir.isReadable()) continue; - String name = ksDir.getName(); + String name = ksDir.name(); // silently ignore all system keyspaces if (SchemaConstants.isLocalSystemKeyspace(name) || SchemaConstants.isReplicatedSystemKeyspace(name)) continue; - File[] tabDirs = ksDir.listFiles(); + File[] tabDirs = ksDir.tryList(); if (tabDirs == null) continue; for (File tabDir : tabDirs) { - if (!tabDir.isDirectory() || !tabDir.canRead()) + if (!tabDir.isDirectory() || !tabDir.isReadable()) continue; processFileOrDirectory(tabDir.toPath()); diff --git a/src/java/org/apache/cassandra/io/sstable/SSTableIdentityIterator.java b/src/java/org/apache/cassandra/io/sstable/SSTableIdentityIterator.java index 76e12c891ada..2b322780962a 100644 --- a/src/java/org/apache/cassandra/io/sstable/SSTableIdentityIterator.java +++ b/src/java/org/apache/cassandra/io/sstable/SSTableIdentityIterator.java @@ -19,6 +19,7 @@ import java.io.*; +import org.apache.cassandra.io.util.File; import org.apache.cassandra.schema.TableMetadata; import org.apache.cassandra.db.*; import org.apache.cassandra.db.rows.*; diff --git a/src/java/org/apache/cassandra/io/sstable/SSTableLoader.java b/src/java/org/apache/cassandra/io/sstable/SSTableLoader.java index 47de00c6a21a..7df9258c5298 100644 --- a/src/java/org/apache/cassandra/io/sstable/SSTableLoader.java +++ b/src/java/org/apache/cassandra/io/sstable/SSTableLoader.java @@ -17,13 +17,13 @@ */ package org.apache.cassandra.io.sstable; -import java.io.File; import java.util.*; import com.google.common.collect.HashMultimap; import com.google.common.collect.Multimap; import org.apache.cassandra.db.streaming.CassandraOutgoingFile; +import org.apache.cassandra.io.util.File; import org.apache.cassandra.locator.InetAddressAndPort; import org.apache.cassandra.io.FSError; import org.apache.cassandra.schema.TableMetadataRef; @@ -62,7 +62,7 @@ public SSTableLoader(File directory, Client client, OutputHandler outputHandler) public SSTableLoader(File 
directory, Client client, OutputHandler outputHandler, int connectionsPerHost, String targetKeyspace) { this.directory = directory; - this.keyspace = targetKeyspace != null ? targetKeyspace : directory.getParentFile().getName(); + this.keyspace = targetKeyspace != null ? targetKeyspace : directory.parent().name(); this.client = client; this.outputHandler = outputHandler; this.connectionsPerHost = connectionsPerHost; @@ -76,8 +76,8 @@ protected Collection openSSTables(final Map { - File dir = file.getParentFile(); - String name = file.getName(); + File dir = file.parent(); + String name = file.name(); if (type != Directories.FileType.FINAL) { @@ -99,15 +99,15 @@ protected Collection openSSTables(final Map components, File sourceFile = new File(descriptor.filenameFor(component)); if (!sourceFile.exists()) continue; - File targetLink = new File(snapshotDirectoryPath, sourceFile.getName()); + File targetLink = new File(snapshotDirectoryPath, sourceFile.name()); FileUtils.createHardLink(sourceFile, targetLink); } } diff --git a/src/java/org/apache/cassandra/io/sstable/format/SSTableReaderBuilder.java b/src/java/org/apache/cassandra/io/sstable/format/SSTableReaderBuilder.java index e8266f8edae0..3386c23d5394 100644 --- a/src/java/org/apache/cassandra/io/sstable/format/SSTableReaderBuilder.java +++ b/src/java/org/apache/cassandra/io/sstable/format/SSTableReaderBuilder.java @@ -28,7 +28,9 @@ import org.apache.cassandra.io.sstable.metadata.StatsMetadata; import org.apache.cassandra.io.sstable.metadata.ValidationMetadata; import org.apache.cassandra.io.util.DiskOptimizationStrategy; +import org.apache.cassandra.io.util.File; import org.apache.cassandra.io.util.FileHandle; +import org.apache.cassandra.io.util.FileInputStreamPlus; import org.apache.cassandra.io.util.FileUtils; import org.apache.cassandra.io.util.RandomAccessReader; import org.apache.cassandra.schema.TableMetadata; @@ -39,7 +41,6 @@ import java.io.BufferedInputStream; import java.io.DataInputStream; -import java.io.File; import java.io.IOException; import java.nio.ByteBuffer; import java.nio.file.Files; @@ -129,7 +130,7 @@ void loadSummary() if (!summariesFile.exists()) { if (logger.isDebugEnabled()) - logger.debug("SSTable Summary File {} does not exist", summariesFile.getAbsolutePath()); + logger.debug("SSTable Summary File {} does not exist", summariesFile.absolutePath()); return; } @@ -148,7 +149,7 @@ void loadSummary() { if (summary != null) summary.close(); - logger.trace("Cannot deserialize SSTable Summary File {}: {}", summariesFile.getPath(), e.getMessage()); + logger.trace("Cannot deserialize SSTable Summary File {}: {}", summariesFile.path(), e.getMessage()); // corrupted; delete it and fall back to creating a new summary FileUtils.closeQuietly(iStream); // delete it and fall back to creating a new summary @@ -237,7 +238,7 @@ void buildSummaryAndBloomFilter(boolean recreateBloomFilter, */ IFilter loadBloomFilter() throws IOException { - try (DataInputStream stream = new DataInputStream(new BufferedInputStream(Files.newInputStream(Paths.get(descriptor.filenameFor(Component.FILTER)))))) + try (FileInputStreamPlus stream = new File(descriptor.filenameFor(Component.FILTER)).newInputStream()) { return BloomFilterSerializer.deserialize(stream, descriptor.version.hasOldBfFormat()); } diff --git a/src/java/org/apache/cassandra/io/sstable/format/big/BigTableWriter.java b/src/java/org/apache/cassandra/io/sstable/format/big/BigTableWriter.java index 281a11d704f0..79290595f13a 100644 --- 
a/src/java/org/apache/cassandra/io/sstable/format/big/BigTableWriter.java +++ b/src/java/org/apache/cassandra/io/sstable/format/big/BigTableWriter.java @@ -17,13 +17,15 @@ */ package org.apache.cassandra.io.sstable.format.big; -import java.io.*; + +import java.io.IOException; import java.nio.BufferOverflowException; import java.nio.ByteBuffer; import java.util.*; import org.apache.cassandra.db.compaction.OperationType; import org.apache.cassandra.db.lifecycle.LifecycleNewTracker; +import org.apache.cassandra.io.util.File; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -470,7 +472,7 @@ private void writeMetadata(Descriptor desc, Map } catch (IOException e) { - throw new FSWriteError(e, file.getPath()); + throw new FSWriteError(e, file.path()); } } @@ -547,13 +549,12 @@ void flushBf() if (components.contains(Component.FILTER)) { String path = descriptor.filenameFor(Component.FILTER); - try (FileOutputStream fos = new FileOutputStream(path); - DataOutputStreamPlus stream = new BufferedDataOutputStreamPlus(fos)) + try (FileOutputStreamPlus stream = new FileOutputStreamPlus(path)) { // bloom filter BloomFilterSerializer.serialize((BloomFilter) bf, stream); stream.flush(); - SyncUtil.sync(fos); + stream.sync(); } catch (IOException e) { diff --git a/src/java/org/apache/cassandra/io/sstable/format/big/BigTableZeroCopyWriter.java b/src/java/org/apache/cassandra/io/sstable/format/big/BigTableZeroCopyWriter.java index f05ea94cb7ea..717e9d90f170 100644 --- a/src/java/org/apache/cassandra/io/sstable/format/big/BigTableZeroCopyWriter.java +++ b/src/java/org/apache/cassandra/io/sstable/format/big/BigTableZeroCopyWriter.java @@ -18,7 +18,6 @@ package org.apache.cassandra.io.sstable.format.big; import java.io.EOFException; -import java.io.File; import java.io.IOException; import java.util.Collection; import java.util.EnumMap; @@ -28,6 +27,7 @@ import com.google.common.collect.ImmutableSet; import com.google.common.collect.Sets; import org.apache.cassandra.db.lifecycle.LifecycleNewTracker; +import org.apache.cassandra.io.util.File; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -227,4 +227,4 @@ private void write(AsyncStreamingInputPlus in, long size, SequentialWriter write throw new FSWriteError(e, writer.getPath()); } } -} \ No newline at end of file +} diff --git a/src/java/org/apache/cassandra/io/sstable/metadata/MetadataCollector.java b/src/java/org/apache/cassandra/io/sstable/metadata/MetadataCollector.java old mode 100755 new mode 100644 diff --git a/src/java/org/apache/cassandra/io/sstable/metadata/MetadataSerializer.java b/src/java/org/apache/cassandra/io/sstable/metadata/MetadataSerializer.java index 042103e26798..91889a70d9e9 100644 --- a/src/java/org/apache/cassandra/io/sstable/metadata/MetadataSerializer.java +++ b/src/java/org/apache/cassandra/io/sstable/metadata/MetadataSerializer.java @@ -17,7 +17,10 @@ */ package org.apache.cassandra.io.sstable.metadata; -import java.io.*; +import org.apache.cassandra.io.util.*; + +import java.io.FileNotFoundException; +import java.io.IOException; import java.util.*; import java.util.function.UnaryOperator; import java.util.zip.CRC32; @@ -25,6 +28,7 @@ import com.google.common.base.Throwables; import com.google.common.collect.Lists; +import org.apache.cassandra.io.util.File; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -33,14 +37,6 @@ import org.apache.cassandra.io.sstable.CorruptSSTableException; import org.apache.cassandra.io.sstable.Descriptor; import org.apache.cassandra.io.sstable.format.Version; -import 
org.apache.cassandra.io.util.DataInputBuffer; -import org.apache.cassandra.io.util.DataOutputBuffer; -import org.apache.cassandra.io.util.DataOutputPlus; -import org.apache.cassandra.io.util.DataOutputStreamPlus; -import org.apache.cassandra.io.util.FileDataInput; -import org.apache.cassandra.io.util.FileUtils; -import org.apache.cassandra.io.util.BufferedDataOutputStreamPlus; -import org.apache.cassandra.io.util.RandomAccessReader; import org.apache.cassandra.utils.FBUtilities; import static org.apache.cassandra.utils.FBUtilities.updateChecksumInt; @@ -263,7 +259,7 @@ private void mutate(Descriptor descriptor, UnaryOperator transfor public void rewriteSSTableMetadata(Descriptor descriptor, Map currentComponents) throws IOException { String filePath = descriptor.tmpFilenameFor(Component.STATS); - try (DataOutputStreamPlus out = new BufferedDataOutputStreamPlus(new FileOutputStream(filePath))) + try (DataOutputStreamPlus out = new FileOutputStreamPlus(filePath)) { serialize(currentComponents, out, descriptor.version); out.flush(); diff --git a/src/java/org/apache/cassandra/io/sstable/metadata/StatsMetadata.java b/src/java/org/apache/cassandra/io/sstable/metadata/StatsMetadata.java old mode 100755 new mode 100644 diff --git a/src/java/org/apache/cassandra/io/util/AbstractReaderFileProxy.java b/src/java/org/apache/cassandra/io/util/AbstractReaderFileProxy.java index 7962c0f26647..a8b61dd193ca 100644 --- a/src/java/org/apache/cassandra/io/util/AbstractReaderFileProxy.java +++ b/src/java/org/apache/cassandra/io/util/AbstractReaderFileProxy.java @@ -58,4 +58,4 @@ public double getCrcCheckChance() { return 0; // Only valid for compressed files. } -} \ No newline at end of file +} diff --git a/src/java/org/apache/cassandra/io/util/BufferedDataOutputStreamPlus.java b/src/java/org/apache/cassandra/io/util/BufferedDataOutputStreamPlus.java index 516508ed5ab6..a490ff6f5269 100644 --- a/src/java/org/apache/cassandra/io/util/BufferedDataOutputStreamPlus.java +++ b/src/java/org/apache/cassandra/io/util/BufferedDataOutputStreamPlus.java @@ -17,9 +17,7 @@ */ package org.apache.cassandra.io.util; -import java.io.FileOutputStream; import java.io.IOException; -import java.io.RandomAccessFile; import java.nio.ByteBuffer; import java.nio.ByteOrder; import java.nio.channels.WritableByteChannel; @@ -42,26 +40,6 @@ public class BufferedDataOutputStreamPlus extends DataOutputStreamPlus protected ByteBuffer buffer; - public BufferedDataOutputStreamPlus(RandomAccessFile ras) - { - this(ras.getChannel()); - } - - public BufferedDataOutputStreamPlus(RandomAccessFile ras, int bufferSize) - { - this(ras.getChannel(), bufferSize); - } - - public BufferedDataOutputStreamPlus(FileOutputStream fos) - { - this(fos.getChannel()); - } - - public BufferedDataOutputStreamPlus(FileOutputStream fos, int bufferSize) - { - this(fos.getChannel(), bufferSize); - } - public BufferedDataOutputStreamPlus(WritableByteChannel wbc) { this(wbc, DEFAULT_BUFFER_SIZE); diff --git a/src/java/org/apache/cassandra/io/util/ChannelProxy.java b/src/java/org/apache/cassandra/io/util/ChannelProxy.java index 9ff46b7977f8..717def75d69d 100644 --- a/src/java/org/apache/cassandra/io/util/ChannelProxy.java +++ b/src/java/org/apache/cassandra/io/util/ChannelProxy.java @@ -17,7 +17,6 @@ */ package org.apache.cassandra.io.util; -import java.io.File; import java.io.IOException; import java.nio.ByteBuffer; import java.nio.MappedByteBuffer; @@ -63,7 +62,7 @@ public ChannelProxy(String path) public ChannelProxy(File file) { - this(file.getPath(), 
openChannel(file)); + this(file.path(), openChannel(file)); } public ChannelProxy(String filePath, FileChannel channel) diff --git a/src/java/org/apache/cassandra/io/util/ChecksumWriter.java b/src/java/org/apache/cassandra/io/util/ChecksumWriter.java index dc5eaea62d38..d8ba360bb3fa 100644 --- a/src/java/org/apache/cassandra/io/util/ChecksumWriter.java +++ b/src/java/org/apache/cassandra/io/util/ChecksumWriter.java @@ -29,6 +29,8 @@ import org.apache.cassandra.io.FSWriteError; +import org.apache.cassandra.io.util.File; + public class ChecksumWriter { private final CRC32 incrementalChecksum = new CRC32(); diff --git a/src/java/org/apache/cassandra/io/util/ChecksummedRandomAccessReader.java b/src/java/org/apache/cassandra/io/util/ChecksummedRandomAccessReader.java index 2e59e3bcce48..62927f5eeab3 100644 --- a/src/java/org/apache/cassandra/io/util/ChecksummedRandomAccessReader.java +++ b/src/java/org/apache/cassandra/io/util/ChecksummedRandomAccessReader.java @@ -17,7 +17,6 @@ */ package org.apache.cassandra.io.util; -import java.io.File; import java.io.IOException; import org.apache.cassandra.utils.ChecksumType; @@ -32,7 +31,7 @@ public static RandomAccessReader open(File file, File crcFile) throws IOExceptio { DataIntegrityMetadata.ChecksumValidator validator = new DataIntegrityMetadata.ChecksumValidator(ChecksumType.CRC32, RandomAccessReader.open(crcFile), - file.getPath()); + file.path()); Rebufferer rebufferer = new ChecksummedRebufferer(channel, validator); return new RandomAccessReader.RandomAccessReaderWithOwnChannel(rebufferer); } diff --git a/src/java/org/apache/cassandra/io/util/ChecksummedSequentialWriter.java b/src/java/org/apache/cassandra/io/util/ChecksummedSequentialWriter.java index f89e7cc4125b..1477c5c08d36 100644 --- a/src/java/org/apache/cassandra/io/util/ChecksummedSequentialWriter.java +++ b/src/java/org/apache/cassandra/io/util/ChecksummedSequentialWriter.java @@ -17,10 +17,11 @@ */ package org.apache.cassandra.io.util; -import java.io.File; import java.nio.ByteBuffer; import java.util.Optional; +import org.apache.cassandra.io.util.File; + public class ChecksummedSequentialWriter extends SequentialWriter { private static final SequentialWriterOption CRC_WRITER_OPTION = SequentialWriterOption.newBuilder() diff --git a/src/java/org/apache/cassandra/io/util/ChunkReader.java b/src/java/org/apache/cassandra/io/util/ChunkReader.java index 1d3439ef0d69..33bf7921edd6 100644 --- a/src/java/org/apache/cassandra/io/util/ChunkReader.java +++ b/src/java/org/apache/cassandra/io/util/ChunkReader.java @@ -48,4 +48,4 @@ public interface ChunkReader extends RebuffererFactory * This is not guaranteed to be fulfilled. 
*/ BufferType preferredBufferType(); -} \ No newline at end of file +} diff --git a/src/java/org/apache/cassandra/io/util/DataInputPlus.java b/src/java/org/apache/cassandra/io/util/DataInputPlus.java index 7c29ee18a617..41b422afb39e 100644 --- a/src/java/org/apache/cassandra/io/util/DataInputPlus.java +++ b/src/java/org/apache/cassandra/io/util/DataInputPlus.java @@ -60,6 +60,8 @@ public default void skipBytesFully(int n) throws IOException /** * Wrapper around an InputStream that provides no buffering but can decode varints + * + * TODO: probably shouldn't use DataInputStream as a parent */ public class DataInputStreamPlus extends DataInputStream implements DataInputPlus { diff --git a/src/java/org/apache/cassandra/io/util/DataIntegrityMetadata.java b/src/java/org/apache/cassandra/io/util/DataIntegrityMetadata.java index 277b35979b25..f611f91bcb2c 100644 --- a/src/java/org/apache/cassandra/io/util/DataIntegrityMetadata.java +++ b/src/java/org/apache/cassandra/io/util/DataIntegrityMetadata.java @@ -18,7 +18,6 @@ package org.apache.cassandra.io.util; import java.io.Closeable; -import java.io.File; import java.io.IOException; import java.nio.ByteBuffer; import java.util.zip.CheckedInputStream; @@ -28,6 +27,7 @@ import org.apache.cassandra.io.sstable.Component; import org.apache.cassandra.io.sstable.Descriptor; +import org.apache.cassandra.io.util.File; import org.apache.cassandra.utils.ChecksumType; import org.apache.cassandra.utils.Throwables; diff --git a/src/java/org/apache/cassandra/io/util/File.java b/src/java/org/apache/cassandra/io/util/File.java new file mode 100644 index 000000000000..2b609043354e --- /dev/null +++ b/src/java/org/apache/cassandra/io/util/File.java @@ -0,0 +1,608 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.cassandra.io.util; + +import java.io.IOException; +import java.net.URI; +import java.nio.channels.FileChannel; +import java.nio.file.*; +import java.util.Objects; +import java.util.function.BiPredicate; +import java.util.function.Consumer; +import java.util.function.Function; +import java.util.function.IntFunction; +import java.util.function.Predicate; +import java.util.stream.Stream; + +import javax.annotation.Nullable; + +import com.google.common.util.concurrent.RateLimiter; + +import org.apache.cassandra.io.FSWriteError; + +import static org.apache.cassandra.io.util.PathUtils.filename; +import static org.apache.cassandra.utils.Throwables.maybeFail; + +/** + * A thin wrapper around java.nio.file.Path to provide more ergonomic functionality. + * + * TODO codebase probably should not use tryList, as unexpected exceptions are hidden; + * probably want to introduce e.g. 
listIfExists + * TODO codebase probably should not use Paths.get() to ensure we can override the filesystem + */ +public class File implements Comparable +{ + private static final FileSystem filesystem = FileSystems.getDefault(); + public enum WriteMode { OVERWRITE, APPEND } + + public static String pathSeparator() + { + return filesystem.getSeparator(); + } + + @Nullable final Path path; // nullable to support concept of empty path, that resolves to the working directory if converted to an absolute path + + /** + * Construct a File representing the child {@code child} of {@code parent} + */ + public File(String parent, String child) + { + this(parent.isEmpty() ? null : filesystem.getPath(parent), child); + } + + /** + * Construct a File representing the child {@code child} of {@code parent} + */ + public File(File parent, String child) + { + this(parent.path, child); + } + + /** + * Construct a File representing the child {@code child} of {@code parent} + */ + public File(Path parent, String child) + { + // if "empty abstract path" (a la java.io.File) is provided, we should behave as though resolving relative path + if (child.startsWith(pathSeparator())) + child = child.substring(pathSeparator().length()); + this.path = parent == null ? filesystem.getPath(child) : parent.resolve(child); + } + + /** + * Construct a File representing the provided {@code path} + */ + public File(String path) + { + this(path.isEmpty() ? null : filesystem.getPath(path)); + } + + /** + * Create a File equivalent to the java.io.File provided + */ + public File(java.io.File file) + { + this(file.getPath().isEmpty() ? null : file.toPath()); + } + + /** + * Construct a File representing the child {@code child} of {@code parent} + */ + public File(java.io.File parent, String child) + { + this(new File(parent), child); + } + + /** + * Convenience constructor equivalent to {@code new File(Paths.get(path))} + */ + public File(URI path) + { + this(Paths.get(path)); + if (!path.isAbsolute() || path.isOpaque()) throw new IllegalArgumentException(); + } + + /** + * @param path the path to wrap + */ + public File(Path path) + { + if (path != null && path.getFileSystem() != filesystem) + throw new IllegalArgumentException("Incompatible file system"); + + this.path = path; + } + + /** + * Try to delete the file, returning true iff it was deleted by us. Does not ordinarily throw exceptions. + */ + public boolean tryDelete() + { + return path != null && PathUtils.tryDelete(path); + } + + /** + * This file will be deleted, and any exceptions encountered merged with {@code accumulate} to the return value + */ + public Throwable delete(Throwable accumulate) + { + return delete(accumulate, null); + } + + /** + * This file will be deleted, obeying the provided rate limiter. + * Any exceptions encountered will be merged with {@code accumulate} to the return value + */ + public Throwable delete(Throwable accumulate, RateLimiter rateLimiter) + { + return PathUtils.delete(toPathForWrite(), accumulate, rateLimiter); + } + + /** + * This file will be deleted, with any failures being reported with an FSError + * @throws FSWriteError if cannot be deleted + */ + public void delete() + { + maybeFail(delete(null, null)); + } + + /** + * This file will be deleted, obeying the provided rate limiter. + * @throws FSWriteError if cannot be deleted + */ + public void delete(RateLimiter rateLimiter) + { + maybeFail(delete(null, rateLimiter)); + } + + /** + * Deletes all files and subdirectories under "dir". 
+ * @throws FSWriteError if any part of the tree cannot be deleted + */ + public void deleteRecursive(RateLimiter rateLimiter) + { + PathUtils.deleteRecursive(toPathForWrite(), rateLimiter); + } + + /** + * Deletes all files and subdirectories under "dir". + * @throws FSWriteError if any part of the tree cannot be deleted + */ + public void deleteRecursive() + { + PathUtils.deleteRecursive(toPathForWrite()); + } + + /** + * Try to delete the file on process exit. + */ + public void deleteOnExit() + { + if (path != null) PathUtils.deleteOnExit(path); + } + + /** + * This file will be deleted on clean shutdown; if it is a directory, its entire contents + * at the time of shutdown will be deleted + */ + public void deleteRecursiveOnExit() + { + if (path != null) + PathUtils.deleteRecursiveOnExit(path); + } + + /** + * Try to rename the file atomically, if the system supports it. + * @return true iff successful, false if it fails for any reason. + */ + public boolean tryMove(File to) + { + return path != null && PathUtils.tryRename(path, to.path); + } + + /** + * Atomically (if supported) rename/move this file to {@code to} + * @throws FSWriteError if any part of the tree cannot be deleted + */ + public void move(File to) + { + PathUtils.rename(toPathForRead(), to.toPathForWrite()); + } + + /** + * @return the length of the file if it exists and if we can read it; 0 otherwise. + */ + public long length() + { + return path == null ? 0L : PathUtils.tryGetLength(path); + } + + /** + * @return the last modified time in millis of the path if it exists and we can read it; 0 otherwise. + */ + public long lastModified() + { + return path == null ? 0L : PathUtils.tryGetLastModified(path); + } + + /** + * Try to set the last modified time in millis of the path + * @return true if it exists and we can write it; return false otherwise. + */ + public boolean trySetLastModified(long value) + { + return path != null && PathUtils.trySetLastModified(path, value); + } + + /** + * Try to set if the path is readable by its owner + * @return true if it exists and we can write it; return false otherwise. + */ + public boolean trySetReadable(boolean value) + { + return path != null && PathUtils.trySetReadable(path, value); + } + + /** + * Try to set if the path is writable by its owner + * @return true if it exists and we can write it; return false otherwise. + */ + public boolean trySetWritable(boolean value) + { + return path != null && PathUtils.trySetWritable(path, value); + } + + /** + * Try to set if the path is executable by its owner + * @return true if it exists and we can write it; return false otherwise. 
+ */ + public boolean trySetExecutable(boolean value) + { + return path != null && PathUtils.trySetExecutable(path, value); + } + + /** + * @return true if the path exists, false if it does not, or we cannot determine due to some exception + */ + public boolean exists() + { + return path != null && PathUtils.exists(path); + } + + /** + * @return true if the path refers to a directory + */ + public boolean isDirectory() + { + return path != null && PathUtils.isDirectory(path); + } + + /** + * @return true if the path refers to a regular file + */ + public boolean isFile() + { + return path != null && PathUtils.isFile(path); + } + + /** + * @return true if the path can be read by us + */ + public boolean isReadable() + { + return path != null && Files.isReadable(path); + } + + /** + * @return true if the path can be written by us + */ + public boolean isWritable() + { + return path != null && Files.isWritable(path); + } + + /** + * @return true if the path can be executed by us + */ + public boolean isExecutable() + { + return path != null && Files.isExecutable(path); + } + + /** + * Try to create a new regular file at this path. + * @return true if successful, false if it already exists + */ + public boolean createFileIfNotExists() + { + return PathUtils.createFileIfNotExists(toPathForWrite()); + } + + /** + * Try to create a directory at this path. + * Return true if a new directory was created at this path, and false otherwise. + */ + public boolean tryCreateDirectory() + { + return path != null && PathUtils.tryCreateDirectory(path); + } + + /** + * Try to create a directory at this path, creating any parent directories as necessary. + * @return true if a new directory was created at this path, and false otherwise. + */ + public boolean tryCreateDirectories() + { + return path != null && PathUtils.tryCreateDirectories(path); + } + + /** + * @return the parent file, or null if none + */ + public File parent() + { + if (path == null) return null; + Path parent = path.getParent(); + if (parent == null) return null; + return new File(parent); + } + + /** + * @return the parent file's path, or null if none + */ + public String parentPath() + { + File parent = parent(); + return parent == null ? null : parent.toString(); + } + + /** + * @return true if the path has no relative path elements + */ + public boolean isAbsolute() + { + return path != null && path.isAbsolute(); + } + + public boolean isAncestorOf(File child) + { + return PathUtils.isContained(toPath(), child.toPath()); + } + + /** + * @return a File that represents the same path as this File with any relative path elements resolved. + * If this is the empty File, returns the working directory. + */ + public File toAbsolute() + { + return new File(toPath().toAbsolutePath()); + } + + /** {@link #toAbsolute} */ + public String absolutePath() + { + return toPath().toAbsolutePath().toString(); + } + + /** + * @return a File that represents the same path as this File with any relative path elements and links resolved. + * If this is the empty File, returns the working directory. + */ + public File toCanonical() + { + Path canonical = PathUtils.toCanonicalPath(toPath()); + return canonical == path ? this : new File(canonical); + } + + /** {@link #toCanonical} */ + public String canonicalPath() + { + return toCanonical().toString(); + } + + /** + * @return the last path element for this file + */ + public String name() + { + return path == null ? 
"" : filename(path); + } + + public void forEach(Consumer forEach) + { + PathUtils.forEach(path, path -> forEach.accept(new File(path))); + } + + public void forEachRecursive(Consumer forEach) + { + PathUtils.forEachRecursive(path, path -> forEach.accept(new File(path))); + } + + /** + * @return if a directory, the names of the files within; null otherwise + */ + public String[] tryListNames() + { + return tryListNames(path, Function.identity()); + } + + /** + * @return if a directory, the names of the files within, filtered by the provided predicate; null otherwise + */ + public String[] tryListNames(BiPredicate filter) + { + return tryList(path, stream -> stream.map(PathUtils::filename).filter(filename -> filter.test(this, filename)), String[]::new); + } + + /** + * @return if a directory, the files within; null otherwise + */ + public File[] tryList() + { + return tryList(path, Function.identity()); + } + + /** + * @return if a directory, the files within, filtered by the provided predicate; null otherwise + */ + public File[] tryList(Predicate filter) + { + return tryList(path, stream -> stream.filter(filter)); + } + + /** + * @return if a directory, the files within, filtered by the provided predicate; null otherwise + */ + public File[] tryList(BiPredicate filter) + { + return tryList(path, stream -> stream.filter(file -> filter.test(this, file.name()))); + } + + private static String[] tryListNames(Path path, Function, Stream> toFiles) + { + if (path == null) + return null; + return PathUtils.tryList(path, stream -> toFiles.apply(stream.map(File::new)).map(File::name), String[]::new); + } + + private static T[] tryList(Path path, Function, Stream> transformation, IntFunction constructor) + { + if (path == null) + return null; + return PathUtils.tryList(path, transformation, constructor); + } + + private static File[] tryList(Path path, Function, Stream> toFiles) + { + if (path == null) + return null; + return PathUtils.tryList(path, stream -> toFiles.apply(stream.map(File::new)), File[]::new); + } + + /** + * @return the path of this file + */ + public String path() + { + return toString(); + } + + /** + * @return the {@link Path} of this file + */ + public Path toPath() + { + return path == null ? filesystem.getPath("") : path; + } + + /** + * @return the path of this file + */ + @Override + public String toString() + { + return path == null ? "" : path.toString(); + } + + @Override + public int hashCode() + { + return path == null ? 0 : path.hashCode(); + } + + @Override + public boolean equals(Object obj) + { + return obj instanceof File && Objects.equals(path, ((File) obj).path); + } + + @Override + public int compareTo(File that) + { + if (this.path == null || that.path == null) + return this.path == null && that.path == null ? 0 : this.path == null ? -1 : 1; + return this.path.compareTo(that.path); + } + + public java.io.File toJavaIOFile() + { + return path == null ? 
new java.io.File("") : path.toFile(); + } + + /** + * @return a new {@link FileChannel} for reading + */ + public FileChannel newReadChannel() throws NoSuchFileException + { + return PathUtils.newReadChannel(toPathForRead()); + } + + /** + * @return a new {@link FileChannel} for reading or writing; file will be created if it doesn't exist + */ + public FileChannel newReadWriteChannel() throws NoSuchFileException + { + return PathUtils.newReadWriteChannel(toPathForRead()); + } + + /** + * @param mode whether or not the channel appends to the underlying file + * @return a new {@link FileChannel} for writing; file will be created if it doesn't exist + */ + public FileChannel newWriteChannel(WriteMode mode) throws NoSuchFileException + { + switch (mode) + { + default: throw new AssertionError(); + case APPEND: return PathUtils.newWriteAppendChannel(toPathForWrite()); + case OVERWRITE: return PathUtils.newWriteOverwriteChannel(toPathForWrite()); + } + } + + public FileWriter newWriter(WriteMode mode) throws IOException + { + return new FileWriter(this, mode); + } + + public FileOutputStreamPlus newOutputStream(WriteMode mode) throws NoSuchFileException + { + return new FileOutputStreamPlus(this, mode); + } + + public FileInputStreamPlus newInputStream() throws NoSuchFileException + { + return new FileInputStreamPlus(this); + } + + private Path toPathForWrite() + { + if (path == null) + throw new IllegalStateException("Cannot write to an empty path"); + return path; + } + + private Path toPathForRead() + { + if (path == null) + throw new IllegalStateException("Cannot read from an empty path"); + return path; + } +} + diff --git a/src/java/org/apache/cassandra/io/util/FileInputStreamPlus.java b/src/java/org/apache/cassandra/io/util/FileInputStreamPlus.java new file mode 100644 index 000000000000..79e8438108e8 --- /dev/null +++ b/src/java/org/apache/cassandra/io/util/FileInputStreamPlus.java @@ -0,0 +1,95 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
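For illustration, a minimal usage sketch of the File wrapper defined above (illustrative only, not part of this patch; the paths and names are placeholders):

    static void listAndCleanUp()
    {
        File dir = new File("/var/lib/cassandra/data/ks/tbl"); // placeholder path
        File[] children = dir.tryList(); // null rather than an exception if dir is not a listable directory
        if (children != null)
        {
            for (File child : children)
                System.out.println(child.name() + " (parent: " + child.parentPath() + ')');
        }

        File scratch = new File(dir, "scratch.tmp"); // hypothetical child file
        if (scratch.createFileIfNotExists())
            scratch.tryDelete(); // best-effort delete: returns false on failure instead of throwing
    }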
+ */ + +package org.apache.cassandra.io.util; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.channels.FileChannel; +import java.nio.file.NoSuchFileException; +import java.nio.file.Path; + +public class FileInputStreamPlus extends RebufferingInputStream +{ + final FileChannel channel; + + public FileInputStreamPlus(String file) throws NoSuchFileException + { + this(new File(file)); + } + + public FileInputStreamPlus(File file) throws NoSuchFileException + { + this(file.newReadChannel()); + } + + public FileInputStreamPlus(Path path) throws NoSuchFileException + { + this(PathUtils.newReadChannel(path)); + } + + public FileInputStreamPlus(Path path, int bufferSize) throws NoSuchFileException + { + this(PathUtils.newReadChannel(path), bufferSize); + } + + private FileInputStreamPlus(FileChannel channel) + { + this(channel, 1 << 14); + } + + private FileInputStreamPlus(FileChannel channel, int bufferSize) + { + super(ByteBuffer.allocateDirect(bufferSize)); + this.channel = channel; + this.buffer.limit(0); + } + + @Override + protected void reBuffer() throws IOException + { + buffer.clear(); + channel.read(buffer); + buffer.flip(); + } + + public FileChannel getChannel() + { + return channel; + } + + @Override + public void close() throws IOException + { + try + { + super.close(); + } + finally + { + try + { + FileUtils.clean(buffer); + } + finally + { + channel.close(); + } + } + } +} diff --git a/src/java/org/apache/cassandra/io/util/FileOutputStreamPlus.java b/src/java/org/apache/cassandra/io/util/FileOutputStreamPlus.java new file mode 100644 index 000000000000..0cfd3e3828b1 --- /dev/null +++ b/src/java/org/apache/cassandra/io/util/FileOutputStreamPlus.java @@ -0,0 +1,69 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
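A sketch of the read path this enables, mirroring the loadBloomFilter() change above: File.newInputStream() returns a FileInputStreamPlus, which buffers off a FileChannel and must be closed to free its direct buffer (illustrative only, not part of this patch):

    static byte[] readWholeFile(File file) throws IOException
    {
        try (FileInputStreamPlus in = file.newInputStream())
        {
            byte[] bytes = new byte[(int) file.length()]; // assumes a small file; length() is 0 if unreadable
            in.readFully(bytes);
            return bytes;
        }
    }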
+ */ + +package org.apache.cassandra.io.util; + +import java.io.IOException; +import java.nio.channels.FileChannel; +import java.nio.file.NoSuchFileException; +import java.nio.file.Path; + +import static org.apache.cassandra.io.util.File.WriteMode.OVERWRITE; + +public class FileOutputStreamPlus extends BufferedDataOutputStreamPlus +{ + public FileOutputStreamPlus(String path) throws NoSuchFileException + { + this(path, OVERWRITE); + } + + public FileOutputStreamPlus(String path, File.WriteMode mode) throws NoSuchFileException + { + this(new File(path), mode); + } + + public FileOutputStreamPlus(File file) throws NoSuchFileException + { + this(file, OVERWRITE); + } + + public FileOutputStreamPlus(File file, File.WriteMode mode) throws NoSuchFileException + { + super(file.newWriteChannel(mode)); + } + + public FileOutputStreamPlus(Path path) throws NoSuchFileException + { + this(path, OVERWRITE); + } + + public FileOutputStreamPlus(Path path, File.WriteMode mode) throws NoSuchFileException + { + this(new File(path), mode); + } + + public void sync() throws IOException + { + ((FileChannel)channel).force(true); + } + + public FileChannel getChannel() + { + return (FileChannel) channel; + } +} diff --git a/src/java/org/apache/cassandra/io/util/FileReader.java b/src/java/org/apache/cassandra/io/util/FileReader.java new file mode 100644 index 000000000000..55b8fbbcc8c6 --- /dev/null +++ b/src/java/org/apache/cassandra/io/util/FileReader.java @@ -0,0 +1,38 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
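A sketch of the write-and-sync pattern used in the flushBf() hunk of BigTableWriter above, where FileOutputStreamPlus replaces the old FileOutputStream plus SyncUtil.sync() pairing (illustrative only, not part of this patch):

    static void writeDurably(File file, byte[] bytes) throws IOException
    {
        try (FileOutputStreamPlus out = file.newOutputStream(File.WriteMode.OVERWRITE)) // OVERWRITE is also the default mode
        {
            out.write(bytes);
            out.flush();
            out.sync(); // forces the underlying FileChannel, i.e. fsync
        }
    }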
+ */ + +package org.apache.cassandra.io.util; + +import java.io.IOException; +import java.io.InputStreamReader; +import java.io.OutputStreamWriter; + +public class FileReader extends InputStreamReader +{ + @SuppressWarnings("resource") // FISP is closed by ISR::close + public FileReader(String file) throws IOException + { + super(new FileInputStreamPlus(file)); + } + + @SuppressWarnings("resource") // FISP is closed by ISR::close + public FileReader(File file) throws IOException + { + super(new FileInputStreamPlus(file)); + } +} diff --git a/src/java/org/apache/cassandra/io/util/FileUtils.java b/src/java/org/apache/cassandra/io/util/FileUtils.java index 7798bd785d4d..45abd7b024e8 100644 --- a/src/java/org/apache/cassandra/io/util/FileUtils.java +++ b/src/java/org/apache/cassandra/io/util/FileUtils.java @@ -17,7 +17,10 @@ */ package org.apache.cassandra.io.util; -import java.io.*; +import java.io.BufferedWriter; +import java.io.Closeable; +import java.io.IOException; +import java.io.OutputStreamWriter; import java.lang.invoke.MethodHandle; import java.lang.invoke.MethodHandles; import java.lang.reflect.Method; @@ -27,18 +30,23 @@ import java.nio.channels.FileChannel; import java.nio.charset.Charset; import java.nio.charset.StandardCharsets; -import java.nio.file.*; +import java.nio.file.DirectoryNotEmptyException; +import java.nio.file.FileVisitResult; +import java.nio.file.Files; +import java.nio.file.NoSuchFileException; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.nio.file.SimpleFileVisitor; +import java.nio.file.StandardCopyOption; +import java.nio.file.StandardOpenOption; import java.nio.file.attribute.BasicFileAttributes; -import java.nio.file.attribute.FileAttributeView; -import java.nio.file.attribute.FileStoreAttributeView; import java.text.DecimalFormat; import java.util.Arrays; import java.util.Collections; -import java.util.HashSet; +import java.util.EnumSet; import java.util.List; import java.util.Optional; import java.util.Set; -import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.atomic.AtomicReference; import java.util.stream.Collectors; @@ -50,28 +58,22 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.cassandra.concurrent.ScheduledExecutors; import org.apache.cassandra.io.FSError; import org.apache.cassandra.io.FSErrorHandler; -import org.apache.cassandra.io.FSReadError; import org.apache.cassandra.io.FSWriteError; import org.apache.cassandra.io.sstable.CorruptSSTableException; -import org.apache.cassandra.service.StorageService; import org.apache.cassandra.utils.JVMStabilityInspector; -import org.apache.cassandra.utils.NoSpamLogger; import org.apache.cassandra.utils.SyncUtil; import static com.google.common.base.Throwables.propagate; import static org.apache.cassandra.config.CassandraRelevantProperties.JAVA_IO_TMPDIR; import static org.apache.cassandra.utils.Throwables.maybeFail; -import static org.apache.cassandra.utils.Throwables.merge; public final class FileUtils { public static final Charset CHARSET = StandardCharsets.UTF_8; private static final Logger logger = LoggerFactory.getLogger(FileUtils.class); - private static final NoSpamLogger nospam1m = NoSpamLogger.getLogger(logger, 1, TimeUnit.MINUTES); public static final long ONE_KB = 1024; public static final long ONE_MB = 1024 * ONE_KB; @@ -81,9 +83,9 @@ public final class FileUtils private static final DecimalFormat df = new DecimalFormat("#.##"); private static final AtomicReference> fsErrorHandler = 
new AtomicReference<>(Optional.empty()); - private static Class clsDirectBuffer; - private static MethodHandle mhDirectBufferCleaner; - private static MethodHandle mhCleanerClean; + private static final Class clsDirectBuffer; + private static final MethodHandle mhDirectBufferCleaner; + private static final MethodHandle mhCleanerClean; static { @@ -123,39 +125,32 @@ public static File getTempDir() } /** - * Pretty much like {@link File#createTempFile(String, String, File)}, but with + * Pretty much like {@link java.io.File#createTempFile(String, String, java.io.File)}, but with * the guarantee that the "random" part of the generated file name between * {@code prefix} and {@code suffix} is a positive, increasing {@code long} value. */ public static File createTempFile(String prefix, String suffix, File directory) { - try - { - // Do not use java.io.File.createTempFile(), because some tests rely on the - // behavior that the "random" part in the temp file name is a positive 'long'. - // However, at least since Java 9 the code to generate the "random" part - // uses an _unsigned_ random long generated like this: - // Long.toUnsignedString(new java.util.Random.nextLong()) + // Do not use java.io.File.createTempFile(), because some tests rely on the + // behavior that the "random" part in the temp file name is a positive 'long'. + // However, at least since Java 9 the code to generate the "random" part + // uses an _unsigned_ random long generated like this: + // Long.toUnsignedString(new java.util.Random.nextLong()) - while (true) - { - // The contract of File.createTempFile() says, that it must not return - // the same file name again. We do that here in a very simple way, - // that probably doesn't cover all edge cases. Just rely on system - // wall clock and return strictly increasing values from that. - long num = tempFileNum.getAndIncrement(); - - // We have a positive long here, which is safe to use for example - // for CommitLogTest. - String fileName = prefix + Long.toString(num) + suffix; - File candidate = new File(directory, fileName); - if (candidate.createNewFile()) - return candidate; - } - } - catch (IOException e) + while (true) { - throw new FSWriteError(e, directory); + // The contract of File.createTempFile() says, that it must not return + // the same file name again. We do that here in a very simple way, + // that probably doesn't cover all edge cases. Just rely on system + // wall clock and return strictly increasing values from that. + long num = tempFileNum.getAndIncrement(); + + // We have a positive long here, which is safe to use for example + // for CommitLogTest. 
+ String fileName = prefix + num + suffix; + File candidate = new File(directory, fileName); + if (candidate.createFileIfNotExists()) + return candidate; } } @@ -193,6 +188,11 @@ public static void createHardLink(File from, File to) } } + public static void createHardLinkWithConfirm(String from, String to) + { + createHardLinkWithConfirm(new File(from), new File(to)); + } + public static void createHardLinkWithConfirm(File from, File to) { try @@ -209,11 +209,6 @@ public static void createHardLinkWithConfirm(File from, File to) } } - public static void createHardLinkWithConfirm(String from, String to) - { - createHardLinkWithConfirm(new File(from), new File(to)); - } - public static void createHardLinkWithoutConfirm(String from, String to) { try @@ -227,57 +222,6 @@ public static void createHardLinkWithoutConfirm(String from, String to) } } - public static Throwable deleteWithConfirm(String filePath, Throwable accumulate) - { - return deleteWithConfirm(new File(filePath), accumulate, null); - } - - public static Throwable deleteWithConfirm(File file, Throwable accumulate) - { - return deleteWithConfirm(file, accumulate, null); - } - - public static Throwable deleteWithConfirm(File file, Throwable accumulate, RateLimiter rateLimiter) - { - try - { - if (rateLimiter != null) - { - double throttled = rateLimiter.acquire(); - if (throttled > 0.0) - nospam1m.warn("Throttling file deletion: waited {} seconds to delete {}", throttled, file); - } - Files.delete(file.toPath()); - } - catch (Throwable t) - { - try - { - throw new FSWriteError(t, file); - } - catch (Throwable t2) - { - accumulate = merge(accumulate, t2); - } - } - return accumulate; - } - - public static void deleteWithConfirm(String file) - { - deleteWithConfirm(new File(file)); - } - - public static void deleteWithConfirm(File file) - { - maybeFail(deleteWithConfirm(file, null, null)); - } - - public static void deleteWithConfirmWithThrottle(File file, RateLimiter rateLimiter) - { - maybeFail(deleteWithConfirm(file, null, rateLimiter)); - } - public static void copyWithOutConfirm(String from, String to) { try @@ -300,7 +244,7 @@ public static void copyWithConfirm(File from, File to) { assert from.exists(); if (logger.isTraceEnabled()) - logger.trace("Copying {} to {}", from.getPath(), to.getPath()); + logger.trace("Copying {} to {}", from.path(), to.path()); try { @@ -312,74 +256,16 @@ public static void copyWithConfirm(File from, File to) } } - public static void renameWithOutConfirm(String from, String to) - { - try - { - atomicMoveWithFallback(new File(from).toPath(), new File(to).toPath()); - } - catch (IOException e) - { - if (logger.isTraceEnabled()) - logger.trace("Could not move file "+from+" to "+to, e); - } - } - - public static void renameWithConfirm(String from, String to) - { - renameWithConfirm(new File(from), new File(to)); - } - - public static void renameWithConfirm(File from, File to) - { - assert from.exists(); - if (logger.isTraceEnabled()) - logger.trace("Renaming {} to {}", from.getPath(), to.getPath()); - // this is not FSWE because usually when we see it it's because we didn't close the file before renaming it, - // and Windows is picky about that. 
- try - { - atomicMoveWithFallback(from.toPath(), to.toPath()); - } - catch (IOException e) - { - throw new RuntimeException(String.format("Failed to rename %s to %s", from.getPath(), to.getPath()), e); - } - } - - /** - * Move a file atomically, if it fails, it falls back to a non-atomic operation - * @param from - * @param to - * @throws IOException - */ - private static void atomicMoveWithFallback(Path from, Path to) throws IOException - { - try - { - Files.move(from, to, StandardCopyOption.REPLACE_EXISTING, StandardCopyOption.ATOMIC_MOVE); - } - catch (AtomicMoveNotSupportedException e) - { - logger.trace("Could not do an atomic move", e); - Files.move(from, to, StandardCopyOption.REPLACE_EXISTING); - } - - } - public static void truncate(String path, long size) { - try(FileChannel channel = FileChannel.open(Paths.get(path), StandardOpenOption.READ, StandardOpenOption.WRITE)) + File file = new File(path); + try (FileChannel channel = file.newReadWriteChannel()) { channel.truncate(size); } - catch (NoSuchFileException | FileNotFoundException nfe) - { - throw new RuntimeException(nfe); - } catch (IOException e) { - throw new FSWriteError(e, path); + throw PathUtils.propagateUnchecked(e, file.toPath(), true); } } @@ -452,49 +338,18 @@ public static void closeQuietly(Iterable cs) public static String getCanonicalPath(String filename) { - try - { - return new File(filename).getCanonicalPath(); - } - catch (IOException e) - { - throw new FSReadError(e, filename); - } + return new File(filename).canonicalPath(); } public static String getCanonicalPath(File file) { - try - { - return file.getCanonicalPath(); - } - catch (IOException e) - { - throw new FSReadError(e, file); - } + return file.canonicalPath(); } /** Return true if file is contained in folder */ public static boolean isContained(File folder, File file) { - Path folderPath = Paths.get(getCanonicalPath(folder)); - Path filePath = Paths.get(getCanonicalPath(file)); - - return filePath.startsWith(folderPath); - } - - /** Convert absolute path into a path relative to the base path */ - public static String getRelativePath(String basePath, String path) - { - try - { - return Paths.get(basePath).relativize(Paths.get(path)).toString(); - } - catch(Exception ex) - { - String absDataPath = FileUtils.getCanonicalPath(basePath); - return Paths.get(absDataPath).relativize(Paths.get(path)).toString(); - } + return folder.isAncestorOf(file); } public static void clean(ByteBuffer buffer) @@ -525,52 +380,6 @@ public static void clean(ByteBuffer buffer) } } - public static void createDirectory(String directory) - { - createDirectory(new File(directory)); - } - - public static void createDirectory(File directory) - { - if (!directory.exists()) - { - if (!directory.mkdirs()) - throw new FSWriteError(new IOException("Failed to mkdirs " + directory), directory); - } - } - - public static boolean delete(String file) - { - if (!StorageService.instance.isDaemonSetupCompleted()) - logger.info("Deleting file during startup: {}", file); - - File f = new File(file); - return f.delete(); - } - - public static void delete(File... 
files) - { - for ( File file : files ) - { - if (!StorageService.instance.isDaemonSetupCompleted()) - logger.info("Deleting file during startup: {}", file); - - file.delete(); - } - } - - public static void deleteAsync(final String file) - { - Runnable runnable = new Runnable() - { - public void run() - { - deleteWithConfirm(new File(file)); - } - }; - ScheduledExecutors.nonPeriodicTasks.execute(runnable); - } - public static long parseFileSize(String value) { long result; @@ -644,72 +453,6 @@ else if ( value >= ONE_KB ) } } - /** - * Deletes all files and subdirectories under "dir". - * @param dir Directory to be deleted - * @throws FSWriteError if any part of the tree cannot be deleted - */ - public static void deleteRecursiveWithThrottle(File dir, RateLimiter rateLimiter) - { - if (dir.isDirectory()) - { - String[] children = dir.list(); - for (String child : children) - deleteRecursiveWithThrottle(new File(dir, child), rateLimiter); - } - - // The directory is now empty so now it can be smoked - deleteWithConfirmWithThrottle(dir, rateLimiter); - } - - - /** - * Deletes the specified directory after having deleted its content. - * - * @param dir Directory to be deleted - * @throws FSWriteError if any part of the tree cannot be deleted - */ - public static void deleteRecursive(File dir) - { - deleteChildrenRecursive(dir); - - // The directory is now empty so now it can be smoked - deleteWithConfirm(dir); - } - - /** - * Deletes all files and subdirectories under "dir". - * - * @param dir Directory to be deleted - * @throws FSWriteError if any part of the tree cannot be deleted - */ - public static void deleteChildrenRecursive(File dir) - { - if (dir.isDirectory()) - { - String[] children = dir.list(); - for (String child : children) - deleteRecursive(new File(dir, child)); - } - } - - /** - * Schedules deletion of all file and subdirectories under "dir" on JVM shutdown. - * @param dir Directory to be deleted - */ - public static void deleteRecursiveOnExit(File dir) - { - if (dir.isDirectory()) - { - String[] children = dir.list(); - for (String child : children) - deleteRecursiveOnExit(new File(dir, child)); - } - - logger.trace("Scheduling deferred deletion of file: {}", dir); - dir.deleteOnExit(); - } - public static void handleCorruptSSTable(CorruptSSTableException e) { fsErrorHandler.get().ifPresent(handler -> handler.handleCorruptSSTable(e)); @@ -761,41 +504,6 @@ public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) return sizeArr[0]; } - public static void copyTo(DataInput in, OutputStream out, int length) throws IOException - { - byte[] buffer = new byte[64 * 1024]; - int copiedBytes = 0; - - while (copiedBytes + buffer.length < length) - { - in.readFully(buffer); - out.write(buffer); - copiedBytes += buffer.length; - } - - if (copiedBytes < length) - { - int left = length - copiedBytes; - in.readFully(buffer, 0, left); - out.write(buffer, 0, left); - } - } - - public static boolean isSubDirectory(File parent, File child) throws IOException - { - parent = parent.getCanonicalFile(); - child = child.getCanonicalFile(); - - File toCheck = child; - while (toCheck != null) - { - if (parent.equals(toCheck)) - return true; - toCheck = toCheck.getParentFile(); - } - return false; - } - public static void append(File file, String ... lines) { if (file.exists()) @@ -829,7 +537,10 @@ public static void replace(File file, String ... lines) */ public static void write(File file, List lines, StandardOpenOption ... 
options) { - Set optionsSet = new HashSet<>(Arrays.asList(options)); + Set optionsSet = EnumSet.noneOf(StandardOpenOption.class); + for (StandardOpenOption option : options) + optionsSet.add(option); + //Emulate the old FileSystemProvider.newOutputStream behavior for open options. if (optionsSet.isEmpty()) { @@ -888,159 +599,116 @@ public static void setFSErrorHandler(FSErrorHandler handler) fsErrorHandler.getAndSet(Optional.ofNullable(handler)); } - /** - * Returns the size of the specified partition. - *

This method handles large file system by returning {@code Long.MAX_VALUE} if the size overflow. - * See JDK-8179320 for more information.

- * - * @param file the partition - * @return the size, in bytes, of the partition or {@code 0L} if the abstract pathname does not name a partition - */ - public static long getTotalSpace(File file) + @Deprecated + public static void createDirectory(String directory) { - return handleLargeFileSystem(file.getTotalSpace()); + createDirectory(new File(directory)); } - /** - * Returns the number of unallocated bytes on the specified partition. - *

This method handles large file system by returning {@code Long.MAX_VALUE} if the number of unallocated bytes - * overflow. See JDK-8179320 for more information

- * - * @param file the partition - * @return the number of unallocated bytes on the partition or {@code 0L} - * if the abstract pathname does not name a partition. - */ - public static long getFreeSpace(File file) + @Deprecated + public static void createDirectory(File directory) { - return handleLargeFileSystem(file.getFreeSpace()); + PathUtils.createDirectoriesIfNotExists(directory.toPath()); } - /** - * Returns the number of available bytes on the specified partition. - *

This method handles large file system by returning {@code Long.MAX_VALUE} if the number of available bytes - * overflow. See JDK-8179320 for more information

- * - * @param file the partition - * @return the number of available bytes on the partition or {@code 0L} - * if the abstract pathname does not name a partition. - */ - public static long getUsableSpace(File file) + @Deprecated + public static boolean delete(String file) { - return handleLargeFileSystem(file.getUsableSpace()); + return new File(file).tryDelete(); } - /** - * Returns the {@link FileStore} representing the file store where a file - * is located. This {@link FileStore} handles large file system by returning {@code Long.MAX_VALUE} - * from {@code FileStore#getTotalSpace()}, {@code FileStore#getUnallocatedSpace()} and {@code FileStore#getUsableSpace()} - * it the value is bigger than {@code Long.MAX_VALUE}. See JDK-8162520 - * for more information. - * - * @param path the path to the file - * @return the file store where the file is stored - */ - public static FileStore getFileStore(Path path) throws IOException + @Deprecated + public static void delete(File... files) { - return new SafeFileStore(Files.getFileStore(path)); + for (File file : files) + file.tryDelete(); } /** - * Handle large file system by returning {@code Long.MAX_VALUE} when the size overflows. - * @param size returned by the Java's FileStore methods - * @return the size or {@code Long.MAX_VALUE} if the size was bigger than {@code Long.MAX_VALUE} + * Deletes all files and subdirectories under "dir". + * @param dir Directory to be deleted + * @throws FSWriteError if any part of the tree cannot be deleted */ - private static long handleLargeFileSystem(long size) + @Deprecated + public static void deleteRecursiveWithThrottle(File dir, RateLimiter rateLimiter) { - return size < 0 ? Long.MAX_VALUE : size; + dir.deleteRecursive(rateLimiter); } /** - * Private constructor as the class contains only static methods. + * Deletes all files and subdirectories under "dir". + * @param dir Directory to be deleted + * @throws FSWriteError if any part of the tree cannot be deleted */ - private FileUtils() + @Deprecated + public static void deleteRecursive(File dir) { + dir.deleteRecursive(); } /** - * FileStore decorator used to safely handle large file system. - * - *

Java's FileStore methods (getTotalSpace/getUnallocatedSpace/getUsableSpace) are limited to reporting bytes as - * signed long (2^63-1), if the filesystem is any bigger, then the size overflows. {@code SafeFileStore} will - * return {@code Long.MAX_VALUE} if the size overflow.

- * - * @see JDK-8162520. + * Schedules deletion of all file and subdirectories under "dir" on JVM shutdown. + * @param dir Directory to be deleted */ - private static final class SafeFileStore extends FileStore + @Deprecated + public static void deleteRecursiveOnExit(File dir) { - /** - * The decorated {@code FileStore} - */ - private final FileStore fileStore; - - public SafeFileStore(FileStore fileStore) - { - this.fileStore = fileStore; - } - - @Override - public String name() - { - return fileStore.name(); - } + dir.deleteRecursiveOnExit(); + } - @Override - public String type() - { - return fileStore.type(); - } + @Deprecated + public static boolean isSubDirectory(File parent, File child) + { + return parent.isAncestorOf(child); + } - @Override - public boolean isReadOnly() - { - return fileStore.isReadOnly(); - } + @Deprecated + public static Throwable deleteWithConfirm(File file, Throwable accumulate) + { + return file.delete(accumulate, null); + } - @Override - public long getTotalSpace() throws IOException - { - return handleLargeFileSystem(fileStore.getTotalSpace()); - } + @Deprecated + public static Throwable deleteWithConfirm(File file, Throwable accumulate, RateLimiter rateLimiter) + { + return file.delete(accumulate, rateLimiter); + } - @Override - public long getUsableSpace() throws IOException - { - return handleLargeFileSystem(fileStore.getUsableSpace()); - } + @Deprecated + public static void deleteWithConfirm(String file) + { + deleteWithConfirm(new File(file)); + } - @Override - public long getUnallocatedSpace() throws IOException - { - return handleLargeFileSystem(fileStore.getUnallocatedSpace()); - } + @Deprecated + public static void deleteWithConfirm(File file) + { + file.delete(); + } - @Override - public boolean supportsFileAttributeView(Class type) - { - return fileStore.supportsFileAttributeView(type); - } + @Deprecated + public static void renameWithOutConfirm(String from, String to) + { + new File(from).tryMove(new File(to)); + } - @Override - public boolean supportsFileAttributeView(String name) - { - return fileStore.supportsFileAttributeView(name); - } + @Deprecated + public static void renameWithConfirm(String from, String to) + { + renameWithConfirm(new File(from), new File(to)); + } - @Override - public V getFileStoreAttributeView(Class type) - { - return fileStore.getFileStoreAttributeView(type); - } + @Deprecated + public static void renameWithConfirm(File from, File to) + { + from.move(to); + } - @Override - public Object getAttribute(String attribute) throws IOException - { - return fileStore.getAttribute(attribute); - } + /** + * Private constructor as the class contains only static methods. + */ + private FileUtils() + { } /** @@ -1060,9 +728,9 @@ public static void moveRecursively(Path source, Path target) throws IOException { Files.createDirectories(target); - for (File f : source.toFile().listFiles()) + for (File f : new File(source).tryList()) { - String fileName = f.getName(); + String fileName = f.name(); moveRecursively(source.resolve(fileName), target.resolve(fileName)); } diff --git a/src/java/org/apache/cassandra/io/util/FileWriter.java b/src/java/org/apache/cassandra/io/util/FileWriter.java new file mode 100644 index 000000000000..bbfb59543140 --- /dev/null +++ b/src/java/org/apache/cassandra/io/util/FileWriter.java @@ -0,0 +1,39 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
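For reference, a sketch of how call sites migrate from the now-deprecated FileUtils statics to the File instance methods they delegate to (illustrative only; file, dir, from, to, accumulate and limiter are placeholders):

    // FileUtils.deleteWithConfirm(file)                      ->  file.delete()
    // FileUtils.deleteWithConfirm(file, accumulate)          ->  file.delete(accumulate)
    // FileUtils.deleteRecursive(dir)                         ->  dir.deleteRecursive()
    // FileUtils.deleteRecursiveWithThrottle(dir, limiter)    ->  dir.deleteRecursive(limiter)
    // FileUtils.deleteRecursiveOnExit(dir)                   ->  dir.deleteRecursiveOnExit()
    // FileUtils.renameWithConfirm(from, to)                  ->  from.move(to)
    // FileUtils.renameWithOutConfirm(from, to)               ->  new File(from).tryMove(new File(to))
    // FileUtils.isSubDirectory(parent, child)                ->  parent.isAncestorOf(child)
    // FileUtils.delete(path) /* String overload */           ->  new File(path).tryDelete()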
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.cassandra.io.util; + +import java.io.IOException; +import java.io.OutputStreamWriter; + +import org.apache.cassandra.io.util.File.WriteMode; + +public class FileWriter extends OutputStreamWriter +{ + @SuppressWarnings("resource") // FOSP is closed by OSW::close + public FileWriter(File file) throws IOException + { + super(new FileOutputStreamPlus(file)); + } + + @SuppressWarnings("resource") // FOSP is closed by OSW::close + public FileWriter(File file, WriteMode mode) throws IOException + { + super(new FileOutputStreamPlus(file, mode)); + } +} diff --git a/src/java/org/apache/cassandra/io/util/PathUtils.java b/src/java/org/apache/cassandra/io/util/PathUtils.java new file mode 100644 index 000000000000..9eef8a429435 --- /dev/null +++ b/src/java/org/apache/cassandra/io/util/PathUtils.java @@ -0,0 +1,727 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
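The new FileReader and FileWriter above simply layer Reader/Writer semantics over FileInputStreamPlus and FileOutputStreamPlus; a usage sketch (illustrative only, not part of this patch; java.io.BufferedReader is plain JDK):

    static void appendLine(File file, String line) throws IOException
    {
        try (FileWriter writer = new FileWriter(file, File.WriteMode.APPEND))
        {
            writer.write(line);
            writer.write(System.lineSeparator());
        }
    }

    static String firstLine(File file) throws IOException
    {
        try (BufferedReader reader = new BufferedReader(new FileReader(file)))
        {
            return reader.readLine();
        }
    }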
+ */ +package org.apache.cassandra.io.util; + +import java.io.*; +import java.nio.channels.FileChannel; +import java.nio.file.*; +import java.nio.file.attribute.*; +import java.util.*; +import java.util.concurrent.TimeUnit; +import java.util.function.*; +import java.util.stream.Stream; + +import javax.annotation.Nullable; + +import com.google.common.base.Preconditions; +import com.google.common.util.concurrent.RateLimiter; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.cassandra.config.CassandraRelevantProperties; +import org.apache.cassandra.io.FSError; +import org.apache.cassandra.io.FSReadError; +import org.apache.cassandra.io.FSWriteError; +import org.apache.cassandra.service.StorageService; +import org.apache.cassandra.utils.NoSpamLogger; + +import static java.nio.file.StandardOpenOption.*; +import static java.nio.file.StandardOpenOption.CREATE; +import static java.nio.file.StandardOpenOption.WRITE; +import static java.util.Collections.unmodifiableSet; +import static org.apache.cassandra.utils.Throwables.merge; + +/** + * Vernacular: tryX means return false or 0L on any failure; XIfNotY means propagate any exceptions besides those caused by Y + * + * This class tries to apply uniform IOException handling, and does not propagate IOException except for NoSuchFileException. + * Any harmless/application error exceptions are propagated as UncheckedIOException, and anything else as an FSReadError or FSWriteError. + * Semantically this is a little incoherent throughout the codebase, as we intercept IOException haphazardly and treaat + * it inconsistently - we should ideally migrate to using {@link #propagate(IOException, Path, boolean)} et al globally. + */ +public final class PathUtils +{ + private static final boolean consistentDirectoryListings = CassandraRelevantProperties.CONSISTENT_DIRECTORY_LISTINGS.getBoolean(); + + private static final Set READ_OPTIONS = unmodifiableSet(EnumSet.of(READ)); + private static final Set WRITE_OPTIONS = unmodifiableSet(EnumSet.of(WRITE, CREATE)); + private static final Set WRITE_APPEND_OPTIONS = unmodifiableSet(EnumSet.of(WRITE, CREATE, APPEND)); + private static final Set READ_WRITE_OPTIONS = unmodifiableSet(EnumSet.of(READ, WRITE, CREATE)); + private static final FileAttribute[] NO_ATTRIBUTES = new FileAttribute[0]; + + private static final Logger logger = LoggerFactory.getLogger(PathUtils.class); + private static final NoSpamLogger nospam1m = NoSpamLogger.getLogger(logger, 1, TimeUnit.MINUTES); + + private static Consumer onDeletion = path -> { + if (StorageService.instance.isDaemonSetupCompleted()) + setDeletionListener(ignore -> {}); + else + logger.info("Deleting file during startup: {}", path); + }; + + public static FileChannel newReadChannel(Path path) throws NoSuchFileException + { + return newFileChannel(path, READ_OPTIONS); + } + + public static FileChannel newReadWriteChannel(Path path) throws NoSuchFileException + { + return newFileChannel(path, READ_WRITE_OPTIONS); + } + + public static FileChannel newWriteOverwriteChannel(Path path) throws NoSuchFileException + { + return newFileChannel(path, WRITE_OPTIONS); + } + + public static FileChannel newWriteAppendChannel(Path path) throws NoSuchFileException + { + return newFileChannel(path, WRITE_APPEND_OPTIONS); + } + + private static FileChannel newFileChannel(Path path, Set options) throws NoSuchFileException + { + try + { + return FileChannel.open(path, options, PathUtils.NO_ATTRIBUTES); + } + catch (IOException e) + { + throw 
propagateUncheckedOrNoSuchFileException(e, path, options.contains(WRITE)); + } + } + + public static void setDeletionListener(Consumer newOnDeletion) + { + onDeletion = newOnDeletion; + } + + public static String filename(Path path) + { + return path.getFileName().toString(); + } + + public static T[] list(Path path, Function, Stream> transform, IntFunction arrayFactory) + { + try (Stream stream = Files.list(path)) + { + return transform.apply(consistentDirectoryListings ? stream.sorted() : stream) + .toArray(arrayFactory); + } + catch (NoSuchFileException e) + { + return null; + } + catch (IOException e) + { + throw propagateUnchecked(e, path, false); + } + } + + public static T[] tryList(Path path, Function, Stream> transform, IntFunction arrayFactory) + { + try (Stream stream = Files.list(path)) + { + return transform.apply(consistentDirectoryListings ? stream.sorted() : stream) + .toArray(arrayFactory); + } + catch (IOException e) + { + return null; + } + } + + public static void forEach(Path path, Consumer forEach) + { + try (Stream stream = Files.list(path)) + { + (consistentDirectoryListings ? stream.sorted() : stream).forEach(forEach); + } + catch (IOException e) + { + throw propagateUnchecked(e, path, false); + } + } + + public static void forEachRecursive(Path path, Consumer forEach) + { + Consumer forEachRecursive = new Consumer() + { + @Override + public void accept(Path child) + { + forEach.accept(child); + forEach(child, this); + } + }; + forEach(path, forEachRecursive); + } + + public static long tryGetLength(Path path) + { + return tryOnPath(path, Files::size); + } + + public static long tryGetLastModified(Path path) + { + return tryOnPath(path, p -> Files.getLastModifiedTime(p).toMillis()); + } + + public static boolean trySetLastModified(Path path, long lastModified) + { + try + { + Files.setLastModifiedTime(path, FileTime.fromMillis(lastModified)); + return true; + } + catch (IOException e) + { + return false; + } + } + + public static boolean trySetReadable(Path path, boolean readable) + { + return trySet(path, PosixFilePermission.OWNER_READ, readable); + } + + public static boolean trySetWritable(Path path, boolean writeable) + { + return trySet(path, PosixFilePermission.OWNER_WRITE, writeable); + } + + public static boolean trySetExecutable(Path path, boolean executable) + { + return trySet(path, PosixFilePermission.OWNER_EXECUTE, executable); + } + + public static boolean trySet(Path path, PosixFilePermission permission, boolean set) + { + try + { + PosixFileAttributeView view = path.getFileSystem().provider().getFileAttributeView(path, PosixFileAttributeView.class); + PosixFileAttributes attributes = view.readAttributes(); + Set permissions = attributes.permissions(); + if (set == permissions.contains(permission)) + return true; + if (set) permissions.add(permission); + else permissions.remove(permission); + view.setPermissions(permissions); + return true; + } + catch (IOException e) + { + return false; + } + } + + public static Throwable delete(Path file, Throwable accumulate) + { + try + { + delete(file); + } + catch (FSError t) + { + accumulate = merge(accumulate, t); + } + return accumulate; + } + + public static void delete(Path file) + { + try + { + Files.delete(file); + onDeletion.accept(file); + } + catch (IOException e) + { + throw propagateUnchecked(e, file, true); + } + } + + public static boolean tryDelete(Path file) + { + try + { + Files.delete(file); + onDeletion.accept(file); + return true; + } + catch (IOException e) + { + return false; + } + } + + 
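+    /*
+     * Usage sketch (illustrative only; names such as "exampleUsage" and "example.txt" are
+     * hypothetical): how a caller might follow the tryX / propagate convention described in
+     * the class javadoc above.
+     */
+    @SuppressWarnings("unused")
+    private static void exampleUsage(Path dir)
+    {
+        // tryX variants swallow IOExceptions and signal failure through the return value
+        if (!tryCreateDirectories(dir))
+            logger.warn("Could not create directory {}", dir);
+
+        // non-try variants propagate failures as FSWriteError, FSReadError or UncheckedIOException
+        forEach(dir, child -> logger.trace("Found {}", child));
+
+        // channel factories surface only NoSuchFileException as a checked exception
+        try (FileChannel channel = newReadChannel(dir.resolve("example.txt")))
+        {
+            logger.trace("Opened {} ({} bytes)", dir.resolve("example.txt"), channel.size());
+        }
+        catch (NoSuchFileException e)
+        {
+            logger.trace("File does not exist", e);
+        }
+        catch (IOException e)
+        {
+            throw propagateUnchecked(e, dir, false);
+        }
+    }
+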
public static void delete(Path file, @Nullable RateLimiter rateLimiter) + { + if (rateLimiter != null) + { + double throttled = rateLimiter.acquire(); + if (throttled > 0.0) + nospam1m.warn("Throttling file deletion: waited {} seconds to delete {}", throttled, file); + } + delete(file); + } + + public static Throwable delete(Path file, Throwable accumulate, @Nullable RateLimiter rateLimiter) + { + try + { + delete(file, rateLimiter); + } + catch (Throwable t) + { + accumulate = merge(accumulate, t); + } + return accumulate; + } + + /** + * Deletes all files and subdirectories under "path". + * @param path file to be deleted + * @throws FSWriteError if any part of the tree cannot be deleted + */ + public static void deleteRecursive(Path path) + { + if (isDirectory(path)) + forEach(path, PathUtils::deleteRecursive); + + // The directory is now empty so now it can be smoked + delete(path); + } + + /** + * Deletes all files and subdirectories under "path". + * @param path file to be deleted + * @throws FSWriteError if any part of the tree cannot be deleted + */ + public static void deleteRecursive(Path path, RateLimiter rateLimiter) + { + deleteRecursive(path, rateLimiter, p -> deleteRecursive(p, rateLimiter)); + } + + /** + * Deletes all files and subdirectories under "path". + * @param path file to be deleted + * @throws FSWriteError if any part of the tree cannot be deleted + */ + private static void deleteRecursive(Path path, RateLimiter rateLimiter, Consumer deleteRecursive) + { + if (isDirectory(path)) + forEach(path, deleteRecursive); + + // The directory is now empty so now it can be smoked + delete(path, rateLimiter); + } + + /** + * Schedules deletion of all file and subdirectories under "dir" on JVM shutdown. + * @param dir Directory to be deleted + */ + public synchronized static void deleteRecursiveOnExit(Path dir) + { + ON_EXIT.add(dir, true); + } + + /** + * Schedules deletion of the file only on JVM shutdown. + * @param file File to be deleted + */ + public synchronized static void deleteOnExit(Path file) + { + ON_EXIT.add(file, false); + } + + public static boolean tryRename(Path from, Path to) + { + logger.trace("Renaming {} to {}", from, to); + // this is not FSWE because usually when we see it it's because we didn't close the file before renaming it, + // and Windows is picky about that. + try + { + atomicMoveWithFallback(from, to); + return true; + } + catch (IOException e) + { + logger.trace("Could not move file {} to {}", from, to, e); + return false; + } + } + + public static void rename(Path from, Path to) + { + logger.trace("Renaming {} to {}", from, to); + // this is not FSWE because usually when we see it it's because we didn't close the file before renaming it, + // and Windows is picky about that. + try + { + atomicMoveWithFallback(from, to); + } + catch (IOException e) + { + logger.trace("Could not move file {} to {}", from, to, e); + + // TODO: this should be an FSError (either read or write)? 
+ // (but for now this is maintaining legacy semantics) + throw new RuntimeException(String.format("Failed to rename %s to %s", from, to), e); + } + } + + /** + * Move a file atomically, if it fails, it falls back to a non-atomic operation + */ + private static void atomicMoveWithFallback(Path from, Path to) throws IOException + { + try + { + Files.move(from, to, StandardCopyOption.REPLACE_EXISTING, StandardCopyOption.ATOMIC_MOVE); + } + catch (AtomicMoveNotSupportedException e) + { + logger.trace("Could not do an atomic move", e); + Files.move(from, to, StandardCopyOption.REPLACE_EXISTING); + } + } + + // true if can determine exists, false if any exception occurs + public static boolean exists(Path path) + { + return Files.exists(path); + } + + // true if can determine is a directory, false if any exception occurs + public static boolean isDirectory(Path path) + { + return Files.isDirectory(path); + } + + // true if can determine is a regular file, false if any exception occurs + public static boolean isFile(Path path) + { + return Files.isRegularFile(path); + } + + /** + * @param path create file if not exists + * @throws IOError if cannot perform the operation + * @return true if a new file was created + */ + public static boolean createFileIfNotExists(Path path) + { + return ifNotExists(path, Files::createFile); + } + + /** + * @param path create directory if not exists + * @throws IOError if cannot perform the operation + * @return true if a new directory was created + */ + public static boolean createDirectoryIfNotExists(Path path) + { + return ifNotExists(path, Files::createDirectory); + } + + /** + * @param path create directory (and parents) if not exists + * @throws IOError if cannot perform the operation + * @return true if a new directory was created + */ + public static boolean createDirectoriesIfNotExists(Path path) + { + return ifNotExists(path, Files::createDirectories); + } + + /** + * @param path create directory if not exists and action can be performed + * @return true if a new directory was created, false otherwise (for any reason) + */ + public static boolean tryCreateDirectory(Path path) + { + return tryConsume(path, Files::createDirectory); + } + + /** + * @param path create directory (and parents) if not exists and action can be performed + * @return true if the new directory was created, false otherwise (for any reason) + */ + public static boolean tryCreateDirectories(Path path) + { + if (exists(path)) + return false; + + tryCreateDirectories(path.toAbsolutePath().getParent()); + return tryCreateDirectory(path); + } + + /** + * @return file if exists, otherwise nearest parent that exists; null if nothing in path exists + */ + public static Path findExistingAncestor(Path file) + { + if (!file.equals(file.normalize())) + throw new IllegalArgumentException("Must be invoked on a path without redundant elements"); + + Path parent = file; + while (parent != null && !Files.exists(parent)) + parent = parent.getParent(); + return parent; + } + + /** + * 1) Convert to an absolute path without redundant path elements; + * 2) If the file exists, resolve any links to the underlying fille; + * 3) If the file does not exist, find the first ancestor that does and resolve the path from there + */ + public static Path toCanonicalPath(Path file) + { + Preconditions.checkNotNull(file); + + file = file.toAbsolutePath().normalize(); + Path parent = findExistingAncestor(file); + + if (parent == null) + return file; + if (parent == file) + return toRealPath(file); + return 
toRealPath(parent).resolve(parent.relativize(file)); + } + + private static Path toRealPath(Path path) + { + try + { + return path.toRealPath(); + } + catch (IOException e) + { + throw propagateUnchecked(e, path, false); + } + } + + /** + * Return true if file's canonical path is contained in folder's canonical path. + * + * Propagates any exceptions encountered finding canonical paths. + */ + public static boolean isContained(Path folder, Path file) + { + Path realFolder = toCanonicalPath(folder), realFile = toCanonicalPath(file); + return realFile.startsWith(realFolder); + } + + private static final class DeleteOnExit implements Runnable + { + private boolean isRegistered; + private final Set deleteRecursivelyOnExit = new HashSet<>(); + private final Set deleteOnExit = new HashSet<>(); + + synchronized void add(Path path, boolean recursive) + { + if (!isRegistered) + { + Runtime.getRuntime().addShutdownHook(new Thread(this)); + isRegistered = true; + } + logger.trace("Scheduling deferred {}deletion of file: {}", recursive ? "recursive " : "", path); + (recursive ? deleteRecursivelyOnExit : deleteOnExit).add(path); + } + + public void run() + { + for (Path path : deleteOnExit) + { + try + { + if (exists(path)) + delete(path); + } + catch (Throwable t) + { + logger.warn("Failed to delete {} on exit", path, t); + } + } + for (Path path : deleteRecursivelyOnExit) + { + try + { + if (exists(path)) + deleteRecursive(path); + } + catch (Throwable t) + { + logger.warn("Failed to delete {} on exit", path, t); + } + } + } + } + private static final DeleteOnExit ON_EXIT = new DeleteOnExit(); + + public interface IOConsumer { void accept(Path path) throws IOException; } + public interface IOToLongFunction { long apply(V path) throws IOException; } + + private static boolean ifNotExists(Path path, IOConsumer consumer) + { + try + { + consumer.accept(path); + return true; + } + catch (FileAlreadyExistsException fae) + { + return false; + } + catch (IOException e) + { + throw propagateUnchecked(e, path, true); + } + } + + private static boolean tryConsume(Path path, IOConsumer function) + { + try + { + function.accept(path); + return true; + } + catch (IOException e) + { + return false; + } + } + + private static long tryOnPath(Path path, IOToLongFunction function) + { + try + { + return function.apply(path); + } + catch (IOException e) + { + return 0L; + } + } + + private static long tryOnFileStore(Path path, IOToLongFunction function) + { + return tryOnFileStore(path, function, ignore -> {}); + } + + private static long tryOnFileStore(Path path, IOToLongFunction function, Consumer orElse) + { + try + { + Path ancestor = findExistingAncestor(path); + if (ancestor == null) + { + orElse.accept(new NoSuchFileException(path.toString())); + return 0L; + } + return function.apply(Files.getFileStore(ancestor)); + } + catch (IOException e) + { + orElse.accept(e); + return 0L; + } + } + + /** + * Returns the number of bytes (determined by the provided MethodHandle) on the specified partition. + *

<p>This method handles large file systems by returning {@code Long.MAX_VALUE} if the number of available bytes + * overflows. See JDK-8179320 for more information.</p>

+ * + * @param path the partition (or a file within it) + */ + public static long tryGetSpace(Path path, IOToLongFunction getSpace) + { + return handleLargeFileSystem(tryOnFileStore(path, getSpace)); + } + + public static long tryGetSpace(Path path, IOToLongFunction getSpace, Consumer orElse) + { + return handleLargeFileSystem(tryOnFileStore(path, getSpace, orElse)); + } + + /** + * Handle large file system by returning {@code Long.MAX_VALUE} when the size overflows. + * @param size returned by the Java's FileStore methods + * @return the size or {@code Long.MAX_VALUE} if the size was bigger than {@code Long.MAX_VALUE} + */ + private static long handleLargeFileSystem(long size) + { + return size < 0 ? Long.MAX_VALUE : size; + } + + /** + * Private constructor as the class contains only static methods. + */ + private PathUtils() + { + } + + /** + * propagate an IOException as an FSWriteError, FSReadError or UncheckedIOException + */ + public static RuntimeException propagateUnchecked(IOException ioe, Path path, boolean write) + { + if (ioe instanceof FileAlreadyExistsException + || ioe instanceof NoSuchFileException + || ioe instanceof AtomicMoveNotSupportedException + || ioe instanceof java.nio.file.DirectoryNotEmptyException + || ioe instanceof java.nio.file.FileSystemLoopException + || ioe instanceof java.nio.file.NotDirectoryException + || ioe instanceof java.nio.file.NotLinkException) + throw new UncheckedIOException(ioe); + + if (write) throw new FSWriteError(ioe, path); + else throw new FSReadError(ioe, path); + } + + /** + * propagate an IOException as an FSWriteError, FSReadError or UncheckedIOException - except for NoSuchFileException + */ + public static NoSuchFileException propagateUncheckedOrNoSuchFileException(IOException ioe, Path path, boolean write) throws NoSuchFileException + { + if (ioe instanceof NoSuchFileException) + throw (NoSuchFileException) ioe; + + throw propagateUnchecked(ioe, path, write); + } + + /** + * propagate an IOException either as itself or an FSWriteError or FSReadError + */ + public static E propagate(E ioe, Path path, boolean write) throws E + { + if (ioe instanceof FileAlreadyExistsException + || ioe instanceof NoSuchFileException + || ioe instanceof AtomicMoveNotSupportedException + || ioe instanceof java.nio.file.DirectoryNotEmptyException + || ioe instanceof java.nio.file.FileSystemLoopException + || ioe instanceof java.nio.file.NotDirectoryException + || ioe instanceof java.nio.file.NotLinkException) + throw ioe; + + if (write) throw new FSWriteError(ioe, path); + else throw new FSReadError(ioe, path); + } +} diff --git a/src/java/org/apache/cassandra/io/util/RandomAccessReader.java b/src/java/org/apache/cassandra/io/util/RandomAccessReader.java index 33d01276ce98..4118bb3afd82 100644 --- a/src/java/org/apache/cassandra/io/util/RandomAccessReader.java +++ b/src/java/org/apache/cassandra/io/util/RandomAccessReader.java @@ -17,7 +17,6 @@ */ package org.apache.cassandra.io.util; -import java.io.File; import java.io.IOException; import java.nio.ByteOrder; @@ -26,6 +25,7 @@ import com.google.common.primitives.Ints; import org.apache.cassandra.io.compress.BufferType; +import org.apache.cassandra.io.util.File; import org.apache.cassandra.io.util.Rebufferer.BufferHolder; @NotThreadSafe diff --git a/src/java/org/apache/cassandra/io/util/ReaderFileProxy.java b/src/java/org/apache/cassandra/io/util/ReaderFileProxy.java index 3ddb1437c786..5d38e80e3fb0 100644 --- a/src/java/org/apache/cassandra/io/util/ReaderFileProxy.java +++ 
b/src/java/org/apache/cassandra/io/util/ReaderFileProxy.java @@ -33,4 +33,4 @@ public interface ReaderFileProxy extends AutoCloseable * Needed for tests. Returns the table's CRC check chance, which is only set for compressed tables. */ double getCrcCheckChance(); -} \ No newline at end of file +} diff --git a/src/java/org/apache/cassandra/io/util/Rebufferer.java b/src/java/org/apache/cassandra/io/util/Rebufferer.java index 2fc7ffa8aec3..9920de90d93b 100644 --- a/src/java/org/apache/cassandra/io/util/Rebufferer.java +++ b/src/java/org/apache/cassandra/io/util/Rebufferer.java @@ -81,4 +81,4 @@ public void release() // nothing to do } }; -} \ No newline at end of file +} diff --git a/src/java/org/apache/cassandra/io/util/RewindableDataInput.java b/src/java/org/apache/cassandra/io/util/RewindableDataInput.java index c202f601ac48..0a0eee4caf22 100644 --- a/src/java/org/apache/cassandra/io/util/RewindableDataInput.java +++ b/src/java/org/apache/cassandra/io/util/RewindableDataInput.java @@ -27,4 +27,4 @@ public interface RewindableDataInput extends DataInputPlus void reset(DataPosition mark) throws IOException; long bytesPastMark(DataPosition mark); -} \ No newline at end of file +} diff --git a/src/java/org/apache/cassandra/io/util/RewindableDataInputStreamPlus.java b/src/java/org/apache/cassandra/io/util/RewindableDataInputStreamPlus.java deleted file mode 100644 index a1842bcfc9a6..000000000000 --- a/src/java/org/apache/cassandra/io/util/RewindableDataInputStreamPlus.java +++ /dev/null @@ -1,571 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.cassandra.io.util; - -import java.io.Closeable; -import java.io.File; -import java.io.FilterInputStream; -import java.io.IOException; -import java.io.InputStream; -import java.io.RandomAccessFile; -import java.util.concurrent.atomic.AtomicBoolean; - -import static org.apache.cassandra.utils.Throwables.maybeFail; -import static org.apache.cassandra.utils.Throwables.merge; - -/** - * Adds mark/reset functionality to another input stream by caching read bytes to a memory buffer and - * spilling to disk if necessary. - * - * When the stream is marked via {@link #mark()} or {@link #mark(int)}, up to - * maxMemBufferSize will be cached in memory (heap). If more than - * maxMemBufferSize bytes are read while the stream is marked, the - * following bytes are cached on the spillFile for up to maxDiskBufferSize. - * - * Please note that successive calls to {@link #mark()} and {@link #reset()} will write - * sequentially to the same spillFile until maxDiskBufferSize is reached. 
- * At this point, if less than maxDiskBufferSize bytes are currently cached on the - * spillFile, the remaining bytes are written to the beginning of the file, - * treating the spillFile as a circular buffer. - * - * If more than maxMemBufferSize + maxDiskBufferSize are cached while the stream is marked, - * the following {@link #reset()} invocation will throw a {@link IllegalStateException}. - * - */ -public class RewindableDataInputStreamPlus extends FilterInputStream implements RewindableDataInput, Closeable -{ - private boolean marked = false; - private boolean exhausted = false; - private AtomicBoolean closed = new AtomicBoolean(false); - - protected int memAvailable = 0; - protected int diskTailAvailable = 0; - protected int diskHeadAvailable = 0; - - private final File spillFile; - private final int initialMemBufferSize; - private final int maxMemBufferSize; - private final int maxDiskBufferSize; - - private volatile byte memBuffer[]; - private int memBufferSize; - private RandomAccessFile spillBuffer; - - private final DataInputPlus dataReader; - - public RewindableDataInputStreamPlus(InputStream in, int initialMemBufferSize, int maxMemBufferSize, - File spillFile, int maxDiskBufferSize) - { - super(in); - dataReader = new DataInputStreamPlus(this); - this.initialMemBufferSize = initialMemBufferSize; - this.maxMemBufferSize = maxMemBufferSize; - this.spillFile = spillFile; - this.maxDiskBufferSize = maxDiskBufferSize; - } - - /* RewindableDataInput methods */ - - /** - * Marks the current position of a stream to return to this position later via the {@link #reset(DataPosition)} method. - * @return An empty @link{DataPosition} object - */ - public DataPosition mark() - { - mark(0); - return new RewindableDataInputPlusMark(); - } - - /** - * Rewinds to the previously marked position via the {@link #mark()} method. - * @param mark it's not possible to return to a custom position, so this parameter is ignored. - * @throws IOException if an error ocurs while resetting - */ - public void reset(DataPosition mark) throws IOException - { - reset(); - } - - public long bytesPastMark(DataPosition mark) - { - return maxMemBufferSize - memAvailable + (diskTailAvailable == -1? 0 : maxDiskBufferSize - diskHeadAvailable - diskTailAvailable); - } - - - protected static class RewindableDataInputPlusMark implements DataPosition - { - } - - /* InputStream methods */ - - public boolean markSupported() - { - return true; - } - - /** - * Marks the current position of a stream to return to this position - * later via the {@link #reset()} method. 
- * @param readlimit the maximum amount of bytes to cache - */ - public synchronized void mark(int readlimit) - { - if (marked) - throw new IllegalStateException("Cannot mark already marked stream."); - - if (memAvailable > 0 || diskHeadAvailable > 0 || diskTailAvailable > 0) - throw new IllegalStateException("Can only mark stream after reading previously marked data."); - - marked = true; - memAvailable = maxMemBufferSize; - diskHeadAvailable = -1; - diskTailAvailable = -1; - } - - public synchronized void reset() throws IOException - { - if (!marked) - throw new IOException("Must call mark() before calling reset()."); - - if (exhausted) - throw new IOException(String.format("Read more than capacity: %d bytes.", maxMemBufferSize + maxDiskBufferSize)); - - memAvailable = maxMemBufferSize - memAvailable; - memBufferSize = memAvailable; - - if (diskTailAvailable == -1) - { - diskHeadAvailable = 0; - diskTailAvailable = 0; - } - else - { - int initialPos = diskTailAvailable > 0 ? 0 : (int)getIfNotClosed(spillBuffer).getFilePointer(); - int diskMarkpos = initialPos + diskHeadAvailable; - getIfNotClosed(spillBuffer).seek(diskMarkpos); - - diskHeadAvailable = diskMarkpos - diskHeadAvailable; - diskTailAvailable = (maxDiskBufferSize - diskTailAvailable) - diskMarkpos; - } - - marked = false; - } - - public int available() throws IOException - { - - return super.available() + (marked? 0 : memAvailable + diskHeadAvailable + diskTailAvailable); - } - - public int read() throws IOException - { - int read = readOne(); - if (read == -1) - return read; - - if (marked) - { - //mark exhausted - if (isExhausted(1)) - { - exhausted = true; - return read; - } - - writeOne(read); - } - - return read; - } - - public int read(byte[] b, int off, int len) throws IOException - { - int readBytes = readMulti(b, off, len); - if (readBytes == -1) - return readBytes; - - if (marked) - { - //check we have space on buffer - if (isExhausted(readBytes)) - { - exhausted = true; - return readBytes; - } - - writeMulti(b, off, readBytes); - } - - return readBytes; - } - - private void maybeCreateDiskBuffer() throws IOException - { - if (spillBuffer == null) - { - if (!spillFile.getParentFile().exists()) - spillFile.getParentFile().mkdirs(); - spillFile.createNewFile(); - - this.spillBuffer = new RandomAccessFile(spillFile, "rw"); - } - } - - - private int readOne() throws IOException - { - if (!marked) - { - if (memAvailable > 0) - { - int pos = memBufferSize - memAvailable; - memAvailable--; - return getIfNotClosed(memBuffer)[pos] & 0xff; - } - - if (diskTailAvailable > 0 || diskHeadAvailable > 0) - { - int read = getIfNotClosed(spillBuffer).read(); - if (diskTailAvailable > 0) - diskTailAvailable--; - else if (diskHeadAvailable > 0) - diskHeadAvailable++; - if (diskTailAvailable == 0) - spillBuffer.seek(0); - return read; - } - } - - return getIfNotClosed(in).read(); - } - - private boolean isExhausted(int readBytes) - { - return exhausted || readBytes > memAvailable + (long)(diskTailAvailable == -1? maxDiskBufferSize : diskTailAvailable + diskHeadAvailable); - } - - private int readMulti(byte[] b, int off, int len) throws IOException - { - int readBytes = 0; - if (!marked) - { - if (memAvailable > 0) - { - readBytes += memAvailable < len ? memAvailable : len; - int pos = memBufferSize - memAvailable; - System.arraycopy(memBuffer, pos, b, off, readBytes); - memAvailable -= readBytes; - off += readBytes; - len -= readBytes; - } - if (len > 0 && diskTailAvailable > 0) - { - int readFromTail = diskTailAvailable < len? 
diskTailAvailable : len; - readFromTail = getIfNotClosed(spillBuffer).read(b, off, readFromTail); - readBytes += readFromTail; - diskTailAvailable -= readFromTail; - off += readFromTail; - len -= readFromTail; - if (diskTailAvailable == 0) - spillBuffer.seek(0); - } - if (len > 0 && diskHeadAvailable > 0) - { - int readFromHead = diskHeadAvailable < len? diskHeadAvailable : len; - readFromHead = getIfNotClosed(spillBuffer).read(b, off, readFromHead); - readBytes += readFromHead; - diskHeadAvailable -= readFromHead; - off += readFromHead; - len -= readFromHead; - } - } - - if (len > 0) - readBytes += getIfNotClosed(in).read(b, off, len); - - return readBytes; - } - - private void writeMulti(byte[] b, int off, int len) throws IOException - { - if (memAvailable > 0) - { - if (memBuffer == null) - memBuffer = new byte[initialMemBufferSize]; - int pos = maxMemBufferSize - memAvailable; - int memWritten = memAvailable < len? memAvailable : len; - if (pos + memWritten >= getIfNotClosed(memBuffer).length) - growMemBuffer(pos, memWritten); - System.arraycopy(b, off, memBuffer, pos, memWritten); - off += memWritten; - len -= memWritten; - memAvailable -= memWritten; - } - - if (len > 0) - { - if (diskTailAvailable == -1) - { - maybeCreateDiskBuffer(); - diskHeadAvailable = (int)spillBuffer.getFilePointer(); - diskTailAvailable = maxDiskBufferSize - diskHeadAvailable; - } - - if (len > 0 && diskTailAvailable > 0) - { - int diskTailWritten = diskTailAvailable < len? diskTailAvailable : len; - getIfNotClosed(spillBuffer).write(b, off, diskTailWritten); - off += diskTailWritten; - len -= diskTailWritten; - diskTailAvailable -= diskTailWritten; - if (diskTailAvailable == 0) - spillBuffer.seek(0); - } - - if (len > 0 && diskTailAvailable > 0) - { - int diskHeadWritten = diskHeadAvailable < len? diskHeadAvailable : len; - getIfNotClosed(spillBuffer).write(b, off, diskHeadWritten); - } - } - } - - private void writeOne(int value) throws IOException - { - if (memAvailable > 0) - { - if (memBuffer == null) - memBuffer = new byte[initialMemBufferSize]; - int pos = maxMemBufferSize - memAvailable; - if (pos == getIfNotClosed(memBuffer).length) - growMemBuffer(pos, 1); - getIfNotClosed(memBuffer)[pos] = (byte)value; - memAvailable--; - return; - } - - if (diskTailAvailable == -1) - { - maybeCreateDiskBuffer(); - diskHeadAvailable = (int)spillBuffer.getFilePointer(); - diskTailAvailable = maxDiskBufferSize - diskHeadAvailable; - } - - if (diskTailAvailable > 0 || diskHeadAvailable > 0) - { - getIfNotClosed(spillBuffer).write(value); - if (diskTailAvailable > 0) - diskTailAvailable--; - else if (diskHeadAvailable > 0) - diskHeadAvailable--; - if (diskTailAvailable == 0) - spillBuffer.seek(0); - return; - } - } - - public int read(byte[] b) throws IOException - { - return read(b, 0, b.length); - } - - private void growMemBuffer(int pos, int writeSize) - { - int newSize = Math.min(2 * (pos + writeSize), maxMemBufferSize); - byte newBuffer[] = new byte[newSize]; - System.arraycopy(memBuffer, 0, newBuffer, 0, pos); - memBuffer = newBuffer; - } - - public long skip(long n) throws IOException - { - long skipped = 0; - - if (marked) - { - //if marked, we need to cache skipped bytes - while (n-- > 0 && read() != -1) - { - skipped++; - } - return skipped; - } - - if (memAvailable > 0) - { - skipped += memAvailable < n ? memAvailable : n; - memAvailable -= skipped; - n -= skipped; - } - if (n > 0 && diskTailAvailable > 0) - { - int skipFromTail = diskTailAvailable < n? 
diskTailAvailable : (int)n; - getIfNotClosed(spillBuffer).skipBytes(skipFromTail); - diskTailAvailable -= skipFromTail; - skipped += skipFromTail; - n -= skipFromTail; - if (diskTailAvailable == 0) - spillBuffer.seek(0); - } - if (n > 0 && diskHeadAvailable > 0) - { - int skipFromHead = diskHeadAvailable < n? diskHeadAvailable : (int)n; - getIfNotClosed(spillBuffer).skipBytes(skipFromHead); - diskHeadAvailable -= skipFromHead; - skipped += skipFromHead; - n -= skipFromHead; - } - - if (n > 0) - skipped += getIfNotClosed(in).skip(n); - - return skipped; - } - - private T getIfNotClosed(T in) throws IOException - { - if (closed.get()) - throw new IOException("Stream closed"); - return in; - } - - public void close() throws IOException - { - close(true); - } - - public void close(boolean closeUnderlying) throws IOException - { - if (closed.compareAndSet(false, true)) - { - Throwable fail = null; - if (closeUnderlying) - { - try - { - super.close(); - } - catch (IOException e) - { - fail = merge(fail, e); - } - } - try - { - if (spillBuffer != null) - { - this.spillBuffer.close(); - this.spillBuffer = null; - } - } catch (IOException e) - { - fail = merge(fail, e); - } - try - { - if (spillFile.exists()) - { - spillFile.delete(); - } - } - catch (Throwable e) - { - fail = merge(fail, e); - } - maybeFail(fail, IOException.class); - } - } - - /* DataInputPlus methods */ - - public void readFully(byte[] b) throws IOException - { - dataReader.readFully(b); - } - - public void readFully(byte[] b, int off, int len) throws IOException - { - dataReader.readFully(b, off, len); - } - - public int skipBytes(int n) throws IOException - { - return dataReader.skipBytes(n); - } - - public boolean readBoolean() throws IOException - { - return dataReader.readBoolean(); - } - - public byte readByte() throws IOException - { - return dataReader.readByte(); - } - - public int readUnsignedByte() throws IOException - { - return dataReader.readUnsignedByte(); - } - - public short readShort() throws IOException - { - return dataReader.readShort(); - } - - public int readUnsignedShort() throws IOException - { - return dataReader.readUnsignedShort(); - } - - public char readChar() throws IOException - { - return dataReader.readChar(); - } - - public int readInt() throws IOException - { - return dataReader.readInt(); - } - - public long readLong() throws IOException - { - return dataReader.readLong(); - } - - public float readFloat() throws IOException - { - return dataReader.readFloat(); - } - - public double readDouble() throws IOException - { - return dataReader.readDouble(); - } - - public String readLine() throws IOException - { - return dataReader.readLine(); - } - - public String readUTF() throws IOException - { - return dataReader.readUTF(); - } -} diff --git a/src/java/org/apache/cassandra/io/util/SequentialWriter.java b/src/java/org/apache/cassandra/io/util/SequentialWriter.java index 9ad944be3bc0..431ece397614 100644 --- a/src/java/org/apache/cassandra/io/util/SequentialWriter.java +++ b/src/java/org/apache/cassandra/io/util/SequentialWriter.java @@ -17,7 +17,6 @@ */ package org.apache.cassandra.io.util; -import java.io.File; import java.io.IOException; import java.nio.ByteBuffer; import java.nio.channels.FileChannel; @@ -114,7 +113,7 @@ private static FileChannel openChannel(File file) FileChannel channel = FileChannel.open(file.toPath(), StandardOpenOption.READ, StandardOpenOption.WRITE, StandardOpenOption.CREATE_NEW); try { - SyncUtil.trySyncDir(file.getParentFile()); + SyncUtil.trySyncDir(file.parent()); } 
catch (Throwable t) { @@ -163,7 +162,7 @@ public SequentialWriter(File file, SequentialWriterOption option, boolean strict this.strictFlushing = strictFlushing; this.fchannel = (FileChannel)channel; - this.filePath = file.getAbsolutePath(); + this.filePath = file.absolutePath(); this.option = option; } diff --git a/src/java/org/apache/cassandra/io/util/SimpleChunkReader.java b/src/java/org/apache/cassandra/io/util/SimpleChunkReader.java index bc1a5297a9bc..05fdb6b0aff7 100644 --- a/src/java/org/apache/cassandra/io/util/SimpleChunkReader.java +++ b/src/java/org/apache/cassandra/io/util/SimpleChunkReader.java @@ -69,4 +69,4 @@ public String toString() bufferSize, fileLength()); } -} \ No newline at end of file +} diff --git a/src/java/org/apache/cassandra/locator/CloudstackSnitch.java b/src/java/org/apache/cassandra/locator/CloudstackSnitch.java index be6d3c432e14..d8579534fb58 100644 --- a/src/java/org/apache/cassandra/locator/CloudstackSnitch.java +++ b/src/java/org/apache/cassandra/locator/CloudstackSnitch.java @@ -20,9 +20,7 @@ import java.io.DataInputStream; import java.io.BufferedInputStream; import java.io.BufferedReader; -import java.io.FileReader; import java.io.IOException; -import java.io.File; import java.net.HttpURLConnection; import java.net.URL; import java.net.URI; @@ -31,6 +29,8 @@ import java.util.regex.Pattern; import java.util.regex.Matcher; +import org.apache.cassandra.io.util.File; +import org.apache.cassandra.io.util.FileReader; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.cassandra.db.SystemKeyspace; diff --git a/src/java/org/apache/cassandra/locator/RangesByEndpoint.java b/src/java/org/apache/cassandra/locator/RangesByEndpoint.java index cbf5a637cd57..023d7ee2b437 100644 --- a/src/java/org/apache/cassandra/locator/RangesByEndpoint.java +++ b/src/java/org/apache/cassandra/locator/RangesByEndpoint.java @@ -53,4 +53,4 @@ public RangesByEndpoint build() } } -} \ No newline at end of file +} diff --git a/src/java/org/apache/cassandra/net/AsyncChannelOutputPlus.java b/src/java/org/apache/cassandra/net/AsyncChannelOutputPlus.java index 163981c901d3..7fcdc051e31a 100644 --- a/src/java/org/apache/cassandra/net/AsyncChannelOutputPlus.java +++ b/src/java/org/apache/cassandra/net/AsyncChannelOutputPlus.java @@ -265,4 +265,4 @@ protected WritableByteChannel newDefaultChannel() throw new UnsupportedOperationException(); } -} \ No newline at end of file +} diff --git a/src/java/org/apache/cassandra/net/AsyncMessageOutputPlus.java b/src/java/org/apache/cassandra/net/AsyncMessageOutputPlus.java index 8ef0a8f0e631..e1bcfed19394 100644 --- a/src/java/org/apache/cassandra/net/AsyncMessageOutputPlus.java +++ b/src/java/org/apache/cassandra/net/AsyncMessageOutputPlus.java @@ -128,4 +128,4 @@ public void discard() buffer = null; } } -} \ No newline at end of file +} diff --git a/src/java/org/apache/cassandra/net/AsyncStreamingOutputPlus.java b/src/java/org/apache/cassandra/net/AsyncStreamingOutputPlus.java index 3a9c07500cc7..9c0f265cdf46 100644 --- a/src/java/org/apache/cassandra/net/AsyncStreamingOutputPlus.java +++ b/src/java/org/apache/cassandra/net/AsyncStreamingOutputPlus.java @@ -267,4 +267,4 @@ public void discard() buffer = null; } } -} \ No newline at end of file +} diff --git a/src/java/org/apache/cassandra/net/Verb.java b/src/java/org/apache/cassandra/net/Verb.java index dd7fab39dc0a..9e3f5f60d1cc 100644 --- a/src/java/org/apache/cassandra/net/Verb.java +++ b/src/java/org/apache/cassandra/net/Verb.java @@ -448,4 +448,4 @@ class VerbTimeouts static 
final ToLongFunction pingTimeout = DatabaseDescriptor::getPingTimeout; static final ToLongFunction longTimeout = units -> Math.max(DatabaseDescriptor.getRpcTimeout(units), units.convert(5L, TimeUnit.MINUTES)); static final ToLongFunction noTimeout = units -> { throw new IllegalStateException(); }; -} \ No newline at end of file +} diff --git a/src/java/org/apache/cassandra/repair/asymmetric/HostDifferences.java b/src/java/org/apache/cassandra/repair/asymmetric/HostDifferences.java index 8b123a7bbc23..76ff5aac896b 100644 --- a/src/java/org/apache/cassandra/repair/asymmetric/HostDifferences.java +++ b/src/java/org/apache/cassandra/repair/asymmetric/HostDifferences.java @@ -99,4 +99,4 @@ public String toString() "perHostDifferences=" + perHostDifferences + '}'; } -} \ No newline at end of file +} diff --git a/src/java/org/apache/cassandra/repair/asymmetric/PreferedNodeFilter.java b/src/java/org/apache/cassandra/repair/asymmetric/PreferedNodeFilter.java index e8ca85d02e14..b2622ef365ac 100644 --- a/src/java/org/apache/cassandra/repair/asymmetric/PreferedNodeFilter.java +++ b/src/java/org/apache/cassandra/repair/asymmetric/PreferedNodeFilter.java @@ -25,4 +25,4 @@ public interface PreferedNodeFilter { public Set apply(InetAddressAndPort streamingNode, Set toStream); -} \ No newline at end of file +} diff --git a/src/java/org/apache/cassandra/schema/SchemaPullVerbHandler.java b/src/java/org/apache/cassandra/schema/SchemaPullVerbHandler.java index ed30792fd490..863c68c4bc92 100644 --- a/src/java/org/apache/cassandra/schema/SchemaPullVerbHandler.java +++ b/src/java/org/apache/cassandra/schema/SchemaPullVerbHandler.java @@ -44,4 +44,4 @@ public void doVerb(Message message) Message> response = message.responseWith(SchemaKeyspace.convertSchemaToMutations()); MessagingService.instance().send(response, message.from()); } -} \ No newline at end of file +} diff --git a/src/java/org/apache/cassandra/schema/TableMetadataRef.java b/src/java/org/apache/cassandra/schema/TableMetadataRef.java index 3325510b8d6e..dc4ff1da8ae5 100644 --- a/src/java/org/apache/cassandra/schema/TableMetadataRef.java +++ b/src/java/org/apache/cassandra/schema/TableMetadataRef.java @@ -96,4 +96,4 @@ public String toString() { return get().toString(); } -} \ No newline at end of file +} diff --git a/src/java/org/apache/cassandra/schema/Views.java b/src/java/org/apache/cassandra/schema/Views.java index f926c07f0497..15d13f35afe8 100644 --- a/src/java/org/apache/cassandra/schema/Views.java +++ b/src/java/org/apache/cassandra/schema/Views.java @@ -252,4 +252,4 @@ private static ViewsDiff diff(Views before, Views after) return new ViewsDiff(created, dropped, altered.build()); } } -} \ No newline at end of file +} diff --git a/src/java/org/apache/cassandra/security/SSLFactory.java b/src/java/org/apache/cassandra/security/SSLFactory.java index 22f0a9da7239..215a90641489 100644 --- a/src/java/org/apache/cassandra/security/SSLFactory.java +++ b/src/java/org/apache/cassandra/security/SSLFactory.java @@ -18,7 +18,6 @@ package org.apache.cassandra.security; -import java.io.File; import java.io.IOException; import java.io.InputStream; import java.nio.file.Files; @@ -45,6 +44,7 @@ import com.google.common.annotations.VisibleForTesting; import com.google.common.collect.ImmutableList; +import org.apache.cassandra.io.util.File; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/src/java/org/apache/cassandra/service/CassandraDaemon.java b/src/java/org/apache/cassandra/service/CassandraDaemon.java index 2bf1646d41cb..bce1f3a13cfa 
100644 --- a/src/java/org/apache/cassandra/service/CassandraDaemon.java +++ b/src/java/org/apache/cassandra/service/CassandraDaemon.java @@ -17,7 +17,6 @@ */ package org.apache.cassandra.service; -import java.io.File; import java.io.IOException; import java.lang.management.ManagementFactory; import java.lang.management.MemoryPoolMXBean; @@ -37,6 +36,7 @@ import javax.management.remote.JMXConnectorServer; import com.google.common.annotations.VisibleForTesting; +import org.apache.cassandra.io.util.File; import com.google.common.collect.ImmutableList; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/src/java/org/apache/cassandra/service/DefaultFSErrorHandler.java b/src/java/org/apache/cassandra/service/DefaultFSErrorHandler.java index d5e3e531c1c5..04cb11c6ee83 100644 --- a/src/java/org/apache/cassandra/service/DefaultFSErrorHandler.java +++ b/src/java/org/apache/cassandra/service/DefaultFSErrorHandler.java @@ -18,8 +18,8 @@ package org.apache.cassandra.service; -import java.io.File; +import org.apache.cassandra.io.util.File; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -78,10 +78,10 @@ public void handleFSError(FSError e) } // for both read and write errors mark the path as unwritable. - DisallowedDirectories.maybeMarkUnwritable(e.path); + DisallowedDirectories.maybeMarkUnwritable(new File(e.path)); if (e instanceof FSReadError) { - File directory = DisallowedDirectories.maybeMarkUnreadable(e.path); + File directory = DisallowedDirectories.maybeMarkUnreadable(new File(e.path)); if (directory != null) Keyspace.removeUnreadableSSTables(directory); } diff --git a/src/java/org/apache/cassandra/service/StartupChecks.java b/src/java/org/apache/cassandra/service/StartupChecks.java index 4f9b82f9d3d0..5cb938b24449 100644 --- a/src/java/org/apache/cassandra/service/StartupChecks.java +++ b/src/java/org/apache/cassandra/service/StartupChecks.java @@ -18,7 +18,6 @@ package org.apache.cassandra.service; import java.io.BufferedReader; -import java.io.File; import java.io.IOException; import java.lang.management.ManagementFactory; import java.lang.management.RuntimeMXBean; @@ -32,12 +31,14 @@ import com.google.common.base.Throwables; import com.google.common.collect.ImmutableList; import com.google.common.collect.Iterables; +import org.apache.cassandra.io.util.File; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import net.jpountz.lz4.LZ4Factory; import org.apache.cassandra.cql3.QueryProcessor; import org.apache.cassandra.cql3.UntypedResultSet; +import org.apache.cassandra.io.util.PathUtils; import org.apache.cassandra.schema.TableMetadata; import org.apache.cassandra.config.Config; import org.apache.cassandra.config.DatabaseDescriptor; @@ -339,8 +340,7 @@ public void execute() Iterable dirs = Iterables.concat(Arrays.asList(DatabaseDescriptor.getAllDataFileLocations()), Arrays.asList(DatabaseDescriptor.getCommitLogLocation(), DatabaseDescriptor.getSavedCachesLocation(), - DatabaseDescriptor.getHintsDirectory().getAbsolutePath())); - + DatabaseDescriptor.getHintsDirectory().absolutePath())); for (String dataDir : dirs) { logger.debug("Checking directory {}", dataDir); @@ -351,7 +351,7 @@ public void execute() { logger.warn("Directory {} doesn't exist", dataDir); // if they don't, failing their creation, stop cassandra. 
- if (!dir.mkdirs()) + if (!dir.tryCreateDirectories()) throw new StartupException(StartupException.ERR_WRONG_DISK_STATE, "Has no permission to create directory "+ dataDir); } @@ -377,7 +377,7 @@ public void execute() throws StartupException { public FileVisitResult visitFile(Path path, BasicFileAttributes attrs) { - File file = path.toFile(); + File file = new File(path); if (!Descriptor.isValidFile(file)) return FileVisitResult.CONTINUE; @@ -398,7 +398,7 @@ public FileVisitResult preVisitDirectory(Path dir, BasicFileAttributes attrs) th String name = dir.getFileName().toString(); return (name.equals(Directories.SNAPSHOT_SUBDIR) || name.equals(Directories.BACKUPS_SUBDIR) - || nonSSTablePaths.contains(dir.toFile().getCanonicalPath())) + || nonSSTablePaths.contains(PathUtils.toCanonicalPath(dir).toString())) ? FileVisitResult.SKIP_SUBTREE : FileVisitResult.CONTINUE; } @@ -408,7 +408,7 @@ public FileVisitResult preVisitDirectory(Path dir, BasicFileAttributes attrs) th { try { - Files.walkFileTree(Paths.get(dataDir), sstableVisitor); + Files.walkFileTree(new File(dataDir).toPath(), sstableVisitor); } catch (IOException e) { diff --git a/src/java/org/apache/cassandra/service/StorageService.java b/src/java/org/apache/cassandra/service/StorageService.java index 803c5b49f695..5c0b8f475033 100644 --- a/src/java/org/apache/cassandra/service/StorageService.java +++ b/src/java/org/apache/cassandra/service/StorageService.java @@ -17,7 +17,11 @@ */ package org.apache.cassandra.service; -import java.io.*; + +import java.io.ByteArrayInputStream; +import java.io.DataInputStream; +import java.io.IOError; +import java.io.IOException; import java.net.InetAddress; import java.net.UnknownHostException; import java.nio.ByteBuffer; @@ -61,6 +65,7 @@ import org.apache.cassandra.fql.FullQueryLogger; import org.apache.cassandra.fql.FullQueryLoggerOptions; import org.apache.cassandra.fql.FullQueryLoggerOptionsCompositeData; +import org.apache.cassandra.io.util.File; import org.apache.cassandra.locator.ReplicaCollection.Builder.Conflict; import org.apache.cassandra.utils.concurrent.Future; import org.apache.cassandra.utils.concurrent.ImmediateFuture; @@ -3986,7 +3991,7 @@ public void clearSnapshot(String tag, String... keyspaceNames) throws IOExceptio Set keyspaces = new HashSet<>(); for (String dataDir : DatabaseDescriptor.getAllDataFileLocations()) { - for(String keyspaceDir : new File(dataDir).list()) + for(String keyspaceDir : new File(dataDir).tryListNames()) { // Only add a ks if it has been specified as a param, assuming params were actually provided. 
if (keyspaceNames.length > 0 && !Arrays.asList(keyspaceNames).contains(keyspaceDir)) diff --git a/src/java/org/apache/cassandra/service/pager/MultiPartitionPager.java b/src/java/org/apache/cassandra/service/pager/MultiPartitionPager.java index 7e64e036756f..bc2c79e10083 100644 --- a/src/java/org/apache/cassandra/service/pager/MultiPartitionPager.java +++ b/src/java/org/apache/cassandra/service/pager/MultiPartitionPager.java @@ -232,4 +232,4 @@ public int maxRemaining() { return remaining; } -} \ No newline at end of file +} diff --git a/src/java/org/apache/cassandra/service/snapshot/SnapshotManager.java b/src/java/org/apache/cassandra/service/snapshot/SnapshotManager.java index 554bd51e9e15..7f1c4e9a7ed8 100644 --- a/src/java/org/apache/cassandra/service/snapshot/SnapshotManager.java +++ b/src/java/org/apache/cassandra/service/snapshot/SnapshotManager.java @@ -18,7 +18,6 @@ package org.apache.cassandra.service.snapshot; -import java.io.File; import java.time.Instant; import java.util.Collection; import java.util.Comparator; @@ -42,6 +41,7 @@ import com.google.common.annotations.VisibleForTesting; +import org.apache.cassandra.io.util.File; import org.apache.cassandra.utils.ExecutorUtils; import static org.apache.cassandra.concurrent.ExecutorFactory.Global.executorFactory; diff --git a/src/java/org/apache/cassandra/service/snapshot/SnapshotManifest.java b/src/java/org/apache/cassandra/service/snapshot/SnapshotManifest.java index 5d44acbf4f77..d8d900a8a956 100644 --- a/src/java/org/apache/cassandra/service/snapshot/SnapshotManifest.java +++ b/src/java/org/apache/cassandra/service/snapshot/SnapshotManifest.java @@ -18,8 +18,7 @@ package org.apache.cassandra.service.snapshot; -import java.io.File; -import java.io.IOException; +import java.io.*; import java.time.Instant; import java.util.List; import java.util.Objects; @@ -30,6 +29,7 @@ import org.apache.cassandra.config.Duration; import com.fasterxml.jackson.datatype.jsr310.JavaTimeModule; import com.fasterxml.jackson.databind.DeserializationFeature; +import org.apache.cassandra.io.util.File; // Only serialize fields @JsonAutoDetect(fieldVisibility = JsonAutoDetect.Visibility.ANY, @@ -84,12 +84,12 @@ public Instant getExpiresAt() public void serializeToJsonFile(File outputFile) throws IOException { - mapper.writeValue(outputFile, this); + mapper.writeValue(outputFile.toJavaIOFile(), this); } public static SnapshotManifest deserializeFromJsonFile(File file) throws IOException { - return mapper.readValue(file, SnapshotManifest.class); + return mapper.readValue(file.toJavaIOFile(), SnapshotManifest.class); } @Override diff --git a/src/java/org/apache/cassandra/service/snapshot/TableSnapshot.java b/src/java/org/apache/cassandra/service/snapshot/TableSnapshot.java index 7e852ec0a16a..185cd45c4bb9 100644 --- a/src/java/org/apache/cassandra/service/snapshot/TableSnapshot.java +++ b/src/java/org/apache/cassandra/service/snapshot/TableSnapshot.java @@ -17,7 +17,6 @@ */ package org.apache.cassandra.service.snapshot; -import java.io.File; import java.time.Instant; import java.util.Collection; import java.util.Map; @@ -26,6 +25,7 @@ import java.util.function.Function; import java.util.stream.Collectors; +import org.apache.cassandra.io.util.File; import org.apache.cassandra.io.util.FileUtils; public class TableSnapshot diff --git a/src/java/org/apache/cassandra/streaming/messages/PrepareSynMessage.java b/src/java/org/apache/cassandra/streaming/messages/PrepareSynMessage.java index e378af7a17c6..bce47a1dede2 100644 --- 
a/src/java/org/apache/cassandra/streaming/messages/PrepareSynMessage.java +++ b/src/java/org/apache/cassandra/streaming/messages/PrepareSynMessage.java @@ -23,6 +23,7 @@ import org.apache.cassandra.io.util.DataInputPlus; import org.apache.cassandra.io.util.DataOutputStreamPlus; +import org.apache.cassandra.io.util.File; import org.apache.cassandra.streaming.StreamRequest; import org.apache.cassandra.streaming.StreamSession; import org.apache.cassandra.streaming.StreamSummary; diff --git a/src/java/org/apache/cassandra/streaming/messages/ReceivedMessage.java b/src/java/org/apache/cassandra/streaming/messages/ReceivedMessage.java index ff2cdecc9697..a1dd03c787e4 100644 --- a/src/java/org/apache/cassandra/streaming/messages/ReceivedMessage.java +++ b/src/java/org/apache/cassandra/streaming/messages/ReceivedMessage.java @@ -21,6 +21,7 @@ import org.apache.cassandra.io.util.DataInputPlus; import org.apache.cassandra.io.util.DataOutputStreamPlus; +import org.apache.cassandra.io.util.File; import org.apache.cassandra.schema.TableId; import org.apache.cassandra.streaming.StreamSession; diff --git a/src/java/org/apache/cassandra/tools/AuditLogViewer.java b/src/java/org/apache/cassandra/tools/AuditLogViewer.java index dd0e839832e1..f226aa2e706d 100644 --- a/src/java/org/apache/cassandra/tools/AuditLogViewer.java +++ b/src/java/org/apache/cassandra/tools/AuditLogViewer.java @@ -17,12 +17,12 @@ */ package org.apache.cassandra.tools; -import java.io.File; import java.util.Arrays; import java.util.List; import java.util.function.Consumer; import java.util.stream.Collectors; +import org.apache.cassandra.io.util.File; import org.apache.commons.cli.CommandLine; import org.apache.commons.cli.CommandLineParser; import org.apache.commons.cli.GnuParser; @@ -76,7 +76,7 @@ static void dump(List pathList, String rollCycle, boolean follow, boolea Pauser pauser = Pauser.millis(100); List tailers = pathList.stream() .distinct() - .map(path -> SingleChronicleQueueBuilder.single(new File(path)).readOnly(true).rollCycle(RollCycles.valueOf(rollCycle)).build()) + .map(path -> SingleChronicleQueueBuilder.single(new File(path).toJavaIOFile()).readOnly(true).rollCycle(RollCycles.valueOf(rollCycle)).build()) .map(SingleChronicleQueue::createTailer) .collect(Collectors.toList()); boolean hadWork = true; diff --git a/src/java/org/apache/cassandra/tools/BulkLoader.java b/src/java/org/apache/cassandra/tools/BulkLoader.java index bb29bbe1945e..43df49c1de4a 100644 --- a/src/java/org/apache/cassandra/tools/BulkLoader.java +++ b/src/java/org/apache/cassandra/tools/BulkLoader.java @@ -58,7 +58,7 @@ public static void load(LoaderOptions options) throws BulkLoadException DatabaseDescriptor.toolInitialization(); OutputHandler handler = new OutputHandler.SystemOutput(options.verbose, options.debug); SSTableLoader loader = new SSTableLoader( - options.directory.getAbsoluteFile(), + options.directory.toAbsolute(), new ExternalClient( options.hosts, options.storagePort, diff --git a/src/java/org/apache/cassandra/tools/JMXTool.java b/src/java/org/apache/cassandra/tools/JMXTool.java index e9171794df58..d054716ea594 100644 --- a/src/java/org/apache/cassandra/tools/JMXTool.java +++ b/src/java/org/apache/cassandra/tools/JMXTool.java @@ -18,8 +18,6 @@ package org.apache.cassandra.tools; -import java.io.File; -import java.io.FileInputStream; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; @@ -71,6 +69,8 @@ import io.airlift.airline.Help; import io.airlift.airline.HelpOption; import io.airlift.airline.Option; 
+import org.apache.cassandra.io.util.File; +import org.apache.cassandra.io.util.FileInputStreamPlus; import org.yaml.snakeyaml.TypeDescription; import org.yaml.snakeyaml.Yaml; import org.yaml.snakeyaml.constructor.Constructor; @@ -229,8 +229,8 @@ public Void call() throws Exception Preconditions.checkArgument(files.size() == 2, "files requires 2 arguments but given %s", files); Map left; Map right; - try (FileInputStream leftStream = new FileInputStream(files.get(0)); - FileInputStream rightStream = new FileInputStream(files.get(1))) + try (FileInputStreamPlus leftStream = new FileInputStreamPlus(files.get(0)); + FileInputStreamPlus rightStream = new FileInputStreamPlus(files.get(1))) { left = format.load(leftStream); right = format.load(rightStream); diff --git a/src/java/org/apache/cassandra/tools/LoaderOptions.java b/src/java/org/apache/cassandra/tools/LoaderOptions.java index ca1bd40f71da..62f5046e1cc0 100644 --- a/src/java/org/apache/cassandra/tools/LoaderOptions.java +++ b/src/java/org/apache/cassandra/tools/LoaderOptions.java @@ -20,7 +20,6 @@ */ package org.apache.cassandra.tools; -import java.io.File; import java.lang.reflect.Constructor; import java.lang.reflect.InvocationTargetException; import java.net.*; @@ -32,6 +31,7 @@ import org.apache.cassandra.config.*; import org.apache.cassandra.exceptions.ConfigurationException; +import org.apache.cassandra.io.util.File; import org.apache.cassandra.locator.InetAddressAndPort; import org.apache.cassandra.tools.BulkLoader.CmdLineOptions; @@ -376,7 +376,7 @@ public Builder parseArgs(String cmdArgs[]) { errorMsg("Config file not found", options); } - config = new YamlConfigurationLoader().loadConfig(configFile.toURI().toURL()); + config = new YamlConfigurationLoader().loadConfig(configFile.toPath().toUri().toURL()); } else { diff --git a/src/java/org/apache/cassandra/tools/NodeTool.java b/src/java/org/apache/cassandra/tools/NodeTool.java index 91a008e1c730..7d3a3caca8e6 100644 --- a/src/java/org/apache/cassandra/tools/NodeTool.java +++ b/src/java/org/apache/cassandra/tools/NodeTool.java @@ -22,18 +22,20 @@ import static com.google.common.collect.Lists.newArrayList; import static java.lang.Integer.parseInt; import static java.lang.String.format; +import static org.apache.cassandra.io.util.File.WriteMode.APPEND; import static org.apache.commons.lang3.ArrayUtils.EMPTY_STRING_ARRAY; import static org.apache.commons.lang3.StringUtils.EMPTY; import static org.apache.commons.lang3.StringUtils.isEmpty; import static org.apache.commons.lang3.StringUtils.isNotEmpty; import java.io.Console; -import java.io.File; +import org.apache.cassandra.io.util.File; +import org.apache.cassandra.io.util.FileWriter; import java.io.FileNotFoundException; -import java.io.FileWriter; import java.io.IOError; import java.io.IOException; import java.net.UnknownHostException; +import java.nio.file.NoSuchFileException; import java.text.SimpleDateFormat; import java.util.ArrayList; import java.util.Collections; @@ -278,7 +280,7 @@ private static void printHistory(String... 
args) String cmdLine = Joiner.on(" ").skipNulls().join(args); cmdLine = cmdLine.replaceFirst("(?<=(-pw|--password))\\s+\\S+", " "); - try (FileWriter writer = new FileWriter(new File(FBUtilities.getToolsOutputDirectory(), HISTORYFILE), true)) + try (FileWriter writer = new File(FBUtilities.getToolsOutputDirectory(), HISTORYFILE).newWriter(APPEND)) { SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss,SSS"); writer.append(sdf.format(new Date())).append(": ").append(cmdLine).append(System.lineSeparator()); @@ -374,7 +376,7 @@ private String readUserPasswordFromFile(String username, String passwordFilePath String password = EMPTY; File passwordFile = new File(passwordFilePath); - try (Scanner scanner = new Scanner(passwordFile).useDelimiter("\\s+")) + try (Scanner scanner = new Scanner(passwordFile.toJavaIOFile()).useDelimiter("\\s+")) { while (scanner.hasNextLine()) { @@ -389,7 +391,8 @@ private String readUserPasswordFromFile(String username, String passwordFilePath } scanner.nextLine(); } - } catch (FileNotFoundException e) + } + catch (FileNotFoundException e) { throw new RuntimeException(e); } diff --git a/src/java/org/apache/cassandra/tools/SSTableExport.java b/src/java/org/apache/cassandra/tools/SSTableExport.java index 5be67d72c74a..b3000d0a0588 100644 --- a/src/java/org/apache/cassandra/tools/SSTableExport.java +++ b/src/java/org/apache/cassandra/tools/SSTableExport.java @@ -17,7 +17,6 @@ */ package org.apache.cassandra.tools; -import java.io.File; import java.io.IOException; import java.util.Arrays; import java.util.HashSet; @@ -26,6 +25,7 @@ import java.util.stream.Collectors; import java.util.stream.Stream; +import org.apache.cassandra.io.util.File; import org.apache.cassandra.schema.TableMetadata; import org.apache.cassandra.config.DatabaseDescriptor; import org.apache.cassandra.db.DecoratedKey; @@ -135,7 +135,7 @@ public static void main(String[] args) throws ConfigurationException printUsage(); System.exit(1); } - String ssTableFileName = new File(cmd.getArgs()[0]).getAbsolutePath(); + String ssTableFileName = new File(cmd.getArgs()[0]).absolutePath(); if (!new File(ssTableFileName).exists()) { diff --git a/src/java/org/apache/cassandra/tools/SSTableMetadataViewer.java b/src/java/org/apache/cassandra/tools/SSTableMetadataViewer.java old mode 100755 new mode 100644 index 8c1f5db4bcd9..b7164e8d7e9f --- a/src/java/org/apache/cassandra/tools/SSTableMetadataViewer.java +++ b/src/java/org/apache/cassandra/tools/SSTableMetadataViewer.java @@ -25,7 +25,6 @@ import static org.apache.commons.lang3.time.DurationFormatUtils.formatDurationWords; import java.io.DataInputStream; -import java.io.File; import java.io.IOException; import java.io.PrintStream; import java.io.PrintWriter; @@ -60,6 +59,7 @@ import org.apache.cassandra.io.sstable.metadata.MetadataType; import org.apache.cassandra.io.sstable.metadata.StatsMetadata; import org.apache.cassandra.io.sstable.metadata.ValidationMetadata; +import org.apache.cassandra.io.util.File; import org.apache.cassandra.schema.TableMetadata; import org.apache.cassandra.schema.TableMetadataRef; import org.apache.cassandra.tools.Util.TermHistogram; @@ -544,7 +544,7 @@ public static void main(String[] args) throws IOException File sstable = new File(fname); if (sstable.exists()) { - metawriter.printSStableMetadata(sstable.getAbsolutePath(), fullScan); + metawriter.printSStableMetadata(sstable.absolutePath(), fullScan); } else { diff --git a/src/java/org/apache/cassandra/tools/SSTableOfflineRelevel.java 
b/src/java/org/apache/cassandra/tools/SSTableOfflineRelevel.java index 79fec81f345b..72c1e99bcc41 100644 --- a/src/java/org/apache/cassandra/tools/SSTableOfflineRelevel.java +++ b/src/java/org/apache/cassandra/tools/SSTableOfflineRelevel.java @@ -17,7 +17,6 @@ */ package org.apache.cassandra.tools; -import java.io.File; import java.io.IOException; import java.io.PrintStream; import java.util.ArrayList; @@ -35,6 +34,7 @@ import com.google.common.collect.SetMultimap; import org.apache.cassandra.db.lifecycle.LifecycleTransaction; +import org.apache.cassandra.io.util.File; import org.apache.cassandra.schema.Schema; import org.apache.cassandra.db.ColumnFamilyStore; import org.apache.cassandra.db.DecoratedKey; diff --git a/src/java/org/apache/cassandra/tools/SSTableRepairedAtSetter.java b/src/java/org/apache/cassandra/tools/SSTableRepairedAtSetter.java index 31d80facc65d..62dd76ee567c 100644 --- a/src/java/org/apache/cassandra/tools/SSTableRepairedAtSetter.java +++ b/src/java/org/apache/cassandra/tools/SSTableRepairedAtSetter.java @@ -17,7 +17,9 @@ */ package org.apache.cassandra.tools; -import java.io.*; + +import java.io.IOException; +import java.io.PrintStream; import java.nio.charset.Charset; import java.nio.file.Files; import java.nio.file.Paths; @@ -39,6 +41,8 @@ * sstablerepairset --is-repaired -f <(find /var/lib/cassandra/data/.../ -iname "*Data.db*" -mtime +14) * } */ +import org.apache.cassandra.io.util.File; + public class SSTableRepairedAtSetter { /** diff --git a/src/java/org/apache/cassandra/tools/StandaloneSSTableUtil.java b/src/java/org/apache/cassandra/tools/StandaloneSSTableUtil.java index cca48fc395e9..06618b39b4cb 100644 --- a/src/java/org/apache/cassandra/tools/StandaloneSSTableUtil.java +++ b/src/java/org/apache/cassandra/tools/StandaloneSSTableUtil.java @@ -25,10 +25,10 @@ import org.apache.cassandra.utils.OutputHandler; import org.apache.commons.cli.*; -import java.io.File; import java.io.IOException; import java.util.function.BiPredicate; +import org.apache.cassandra.io.util.File; import static org.apache.cassandra.tools.BulkLoader.CmdLineOptions; public class StandaloneSSTableUtil @@ -87,7 +87,7 @@ private static void listFiles(Options options, TableMetadata metadata, OutputHan for (File dir : directories.getCFDirectories()) { for (File file : LifecycleTransaction.getFiles(dir.toPath(), getFilter(options), Directories.OnTxnErr.THROW)) - handler.output(file.getCanonicalPath()); + handler.output(file.canonicalPath()); } } diff --git a/src/java/org/apache/cassandra/tools/StandaloneScrubber.java b/src/java/org/apache/cassandra/tools/StandaloneScrubber.java index 4dfa4abdce91..6ee320e1073a 100644 --- a/src/java/org/apache/cassandra/tools/StandaloneScrubber.java +++ b/src/java/org/apache/cassandra/tools/StandaloneScrubber.java @@ -18,7 +18,6 @@ */ package org.apache.cassandra.tools; -import java.io.File; import java.nio.file.Paths; import java.util.ArrayList; import java.util.Collection; @@ -27,6 +26,7 @@ import java.util.Set; import java.util.concurrent.TimeUnit; +import org.apache.cassandra.io.util.File; import org.apache.commons.cli.CommandLine; import org.apache.commons.cli.CommandLineParser; import org.apache.commons.cli.GnuParser; @@ -127,7 +127,7 @@ public static void main(String args[]) listResult.add(Pair.create(descriptor, components)); File snapshotDirectory = Directories.getSnapshotDirectory(descriptor, snapshotName); - SSTableReader.createLinks(descriptor, components, snapshotDirectory.getPath()); + SSTableReader.createLinks(descriptor, components, 
snapshotDirectory.path()); } System.out.println(String.format("Pre-scrub sstables snapshotted into snapshot %s", snapshotName)); diff --git a/src/java/org/apache/cassandra/tools/StandaloneSplitter.java b/src/java/org/apache/cassandra/tools/StandaloneSplitter.java index e3c80f1243ec..cd3affa755da 100644 --- a/src/java/org/apache/cassandra/tools/StandaloneSplitter.java +++ b/src/java/org/apache/cassandra/tools/StandaloneSplitter.java @@ -18,10 +18,10 @@ */ package org.apache.cassandra.tools; -import java.io.File; import java.util.*; import java.util.concurrent.TimeUnit; +import org.apache.cassandra.io.util.File; import org.apache.cassandra.schema.Schema; import org.apache.cassandra.io.sstable.format.SSTableReader; import org.apache.commons.cli.*; @@ -133,7 +133,7 @@ else if (!cfName.equals(desc.cfname)) if (options.snapshot) { File snapshotDirectory = Directories.getSnapshotDirectory(sstable.descriptor, snapshotName); - sstable.createLinks(snapshotDirectory.getPath()); + sstable.createLinks(snapshotDirectory.path()); } } diff --git a/src/java/org/apache/cassandra/tools/nodetool/Assassinate.java b/src/java/org/apache/cassandra/tools/nodetool/Assassinate.java index a075ded04dad..2639ec81b32f 100644 --- a/src/java/org/apache/cassandra/tools/nodetool/Assassinate.java +++ b/src/java/org/apache/cassandra/tools/nodetool/Assassinate.java @@ -44,4 +44,4 @@ public void execute(NodeProbe probe) throw new RuntimeException(e); } } -} \ No newline at end of file +} diff --git a/src/java/org/apache/cassandra/tools/nodetool/CfHistograms.java b/src/java/org/apache/cassandra/tools/nodetool/CfHistograms.java index 8fdf803c6745..6a06fd4f2d6d 100644 --- a/src/java/org/apache/cassandra/tools/nodetool/CfHistograms.java +++ b/src/java/org/apache/cassandra/tools/nodetool/CfHistograms.java @@ -26,4 +26,4 @@ @Deprecated public class CfHistograms extends TableHistograms { -} \ No newline at end of file +} diff --git a/src/java/org/apache/cassandra/tools/nodetool/CfStats.java b/src/java/org/apache/cassandra/tools/nodetool/CfStats.java index 2d27ea0f1dcb..42e2bc3023ad 100644 --- a/src/java/org/apache/cassandra/tools/nodetool/CfStats.java +++ b/src/java/org/apache/cassandra/tools/nodetool/CfStats.java @@ -26,4 +26,4 @@ @Deprecated public class CfStats extends TableStats { -} \ No newline at end of file +} diff --git a/src/java/org/apache/cassandra/tools/nodetool/Compact.java b/src/java/org/apache/cassandra/tools/nodetool/Compact.java index 7278eada333e..ca560cd3bba1 100644 --- a/src/java/org/apache/cassandra/tools/nodetool/Compact.java +++ b/src/java/org/apache/cassandra/tools/nodetool/Compact.java @@ -94,4 +94,4 @@ public void execute(NodeProbe probe) } } } -} \ No newline at end of file +} diff --git a/src/java/org/apache/cassandra/tools/nodetool/Decommission.java b/src/java/org/apache/cassandra/tools/nodetool/Decommission.java index 0e58687ff169..98b6d5846c5f 100644 --- a/src/java/org/apache/cassandra/tools/nodetool/Decommission.java +++ b/src/java/org/apache/cassandra/tools/nodetool/Decommission.java @@ -46,4 +46,4 @@ public void execute(NodeProbe probe) throw new IllegalStateException("Unsupported operation: " + e.getMessage(), e); } } -} \ No newline at end of file +} diff --git a/src/java/org/apache/cassandra/tools/nodetool/DisableAuditLog.java b/src/java/org/apache/cassandra/tools/nodetool/DisableAuditLog.java index 35653aef7f95..6d878a0b1655 100644 --- a/src/java/org/apache/cassandra/tools/nodetool/DisableAuditLog.java +++ b/src/java/org/apache/cassandra/tools/nodetool/DisableAuditLog.java @@ -30,4 +30,4 @@ 
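The tool-side hunks above all apply the same substitution: java.io.File accessors are replaced by the wrapper's equivalents (getAbsolutePath() -> absolutePath(), getPath() -> path(), getCanonicalPath() -> canonicalPath()), and append-mode java.io.FileWriter construction moves behind File.newWriter(WriteMode.APPEND). A minimal sketch of that pattern, using only methods that appear in the surrounding hunks; the class name and "history.log" are illustrative, not part of the patch:

import static org.apache.cassandra.io.util.File.WriteMode.APPEND;

import java.io.IOException;

import org.apache.cassandra.io.util.File;
import org.apache.cassandra.io.util.FileWriter;

public final class HistoryAppendSketch
{
    // Mirrors the NodeTool.printHistory hunk: open <outputDir>/history.log for append via the
    // wrapper instead of new java.io.FileWriter(file, true), then report its resolved path.
    static String append(File outputDir, String line) throws IOException
    {
        File history = new File(outputDir, "history.log");   // illustrative file name
        try (FileWriter writer = history.newWriter(APPEND))   // wrapper replaces append-mode java.io.FileWriter
        {
            writer.append(line).append(System.lineSeparator());
        }
        return history.absolutePath();                        // wrapper equivalent of getAbsolutePath()
    }
}

The WriteMode.APPEND constant used here is the one statically imported at the top of the NodeTool hunk.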
public void execute(NodeProbe probe) { probe.disableAuditLog(); } -} \ No newline at end of file +} diff --git a/src/java/org/apache/cassandra/tools/nodetool/DisableAutoCompaction.java b/src/java/org/apache/cassandra/tools/nodetool/DisableAutoCompaction.java index b9fc7d67d95d..39a4c76352d4 100644 --- a/src/java/org/apache/cassandra/tools/nodetool/DisableAutoCompaction.java +++ b/src/java/org/apache/cassandra/tools/nodetool/DisableAutoCompaction.java @@ -50,4 +50,4 @@ public void execute(NodeProbe probe) } } } -} \ No newline at end of file +} diff --git a/src/java/org/apache/cassandra/tools/nodetool/DisableBackup.java b/src/java/org/apache/cassandra/tools/nodetool/DisableBackup.java index 4b0bfbea8440..4ee6340da883 100644 --- a/src/java/org/apache/cassandra/tools/nodetool/DisableBackup.java +++ b/src/java/org/apache/cassandra/tools/nodetool/DisableBackup.java @@ -30,4 +30,4 @@ public void execute(NodeProbe probe) { probe.setIncrementalBackupsEnabled(false); } -} \ No newline at end of file +} diff --git a/src/java/org/apache/cassandra/tools/nodetool/DisableBinary.java b/src/java/org/apache/cassandra/tools/nodetool/DisableBinary.java index 463f2b0a626a..79b921908240 100644 --- a/src/java/org/apache/cassandra/tools/nodetool/DisableBinary.java +++ b/src/java/org/apache/cassandra/tools/nodetool/DisableBinary.java @@ -30,4 +30,4 @@ public void execute(NodeProbe probe) { probe.stopNativeTransport(); } -} \ No newline at end of file +} diff --git a/src/java/org/apache/cassandra/tools/nodetool/DisableFullQueryLog.java b/src/java/org/apache/cassandra/tools/nodetool/DisableFullQueryLog.java index 8820e5f0cbe2..aa5d0b709241 100644 --- a/src/java/org/apache/cassandra/tools/nodetool/DisableFullQueryLog.java +++ b/src/java/org/apache/cassandra/tools/nodetool/DisableFullQueryLog.java @@ -30,4 +30,4 @@ public void execute(NodeProbe probe) { probe.stopFullQueryLogger(); } -} \ No newline at end of file +} diff --git a/src/java/org/apache/cassandra/tools/nodetool/DisableGossip.java b/src/java/org/apache/cassandra/tools/nodetool/DisableGossip.java index 6f950bbc0593..7b6c348549e4 100644 --- a/src/java/org/apache/cassandra/tools/nodetool/DisableGossip.java +++ b/src/java/org/apache/cassandra/tools/nodetool/DisableGossip.java @@ -30,4 +30,4 @@ public void execute(NodeProbe probe) { probe.stopGossiping(); } -} \ No newline at end of file +} diff --git a/src/java/org/apache/cassandra/tools/nodetool/DisableHandoff.java b/src/java/org/apache/cassandra/tools/nodetool/DisableHandoff.java index d7ec35fae881..62465a395b09 100644 --- a/src/java/org/apache/cassandra/tools/nodetool/DisableHandoff.java +++ b/src/java/org/apache/cassandra/tools/nodetool/DisableHandoff.java @@ -30,4 +30,4 @@ public void execute(NodeProbe probe) { probe.disableHintedHandoff(); } -} \ No newline at end of file +} diff --git a/src/java/org/apache/cassandra/tools/nodetool/DisableHintsForDC.java b/src/java/org/apache/cassandra/tools/nodetool/DisableHintsForDC.java index d65c70bd43ea..3615a997303c 100644 --- a/src/java/org/apache/cassandra/tools/nodetool/DisableHintsForDC.java +++ b/src/java/org/apache/cassandra/tools/nodetool/DisableHintsForDC.java @@ -39,4 +39,4 @@ public void execute(NodeProbe probe) probe.disableHintsForDC(args.get(0)); } -} \ No newline at end of file +} diff --git a/src/java/org/apache/cassandra/tools/nodetool/DisableOldProtocolVersions.java b/src/java/org/apache/cassandra/tools/nodetool/DisableOldProtocolVersions.java index 20830629fef5..875647127c4f 100644 --- 
a/src/java/org/apache/cassandra/tools/nodetool/DisableOldProtocolVersions.java +++ b/src/java/org/apache/cassandra/tools/nodetool/DisableOldProtocolVersions.java @@ -30,4 +30,4 @@ public void execute(NodeProbe probe) { probe.disableOldProtocolVersions(); } -} \ No newline at end of file +} diff --git a/src/java/org/apache/cassandra/tools/nodetool/Drain.java b/src/java/org/apache/cassandra/tools/nodetool/Drain.java index eaa537aa411b..a152057798e6 100644 --- a/src/java/org/apache/cassandra/tools/nodetool/Drain.java +++ b/src/java/org/apache/cassandra/tools/nodetool/Drain.java @@ -39,4 +39,4 @@ public void execute(NodeProbe probe) throw new RuntimeException("Error occurred during flushing", e); } } -} \ No newline at end of file +} diff --git a/src/java/org/apache/cassandra/tools/nodetool/EnableAuditLog.java b/src/java/org/apache/cassandra/tools/nodetool/EnableAuditLog.java index 51498769cd1f..ae0bb42d3d6a 100644 --- a/src/java/org/apache/cassandra/tools/nodetool/EnableAuditLog.java +++ b/src/java/org/apache/cassandra/tools/nodetool/EnableAuditLog.java @@ -82,4 +82,4 @@ public void execute(NodeProbe probe) probe.enableAuditLog(logger, Collections.EMPTY_MAP, included_keyspaces, excluded_keyspaces, included_categories, excluded_categories, included_users, excluded_users, archiveRetries, bblocking, rollCycle, maxLogSize, maxQueueWeight, archiveCommand); } -} \ No newline at end of file +} diff --git a/src/java/org/apache/cassandra/tools/nodetool/EnableAutoCompaction.java b/src/java/org/apache/cassandra/tools/nodetool/EnableAutoCompaction.java index 795ab133c2bb..f8b98ff2899f 100644 --- a/src/java/org/apache/cassandra/tools/nodetool/EnableAutoCompaction.java +++ b/src/java/org/apache/cassandra/tools/nodetool/EnableAutoCompaction.java @@ -50,4 +50,4 @@ public void execute(NodeProbe probe) } } } -} \ No newline at end of file +} diff --git a/src/java/org/apache/cassandra/tools/nodetool/EnableBackup.java b/src/java/org/apache/cassandra/tools/nodetool/EnableBackup.java index d1773d9c42c0..7ebad8a4d887 100644 --- a/src/java/org/apache/cassandra/tools/nodetool/EnableBackup.java +++ b/src/java/org/apache/cassandra/tools/nodetool/EnableBackup.java @@ -30,4 +30,4 @@ public void execute(NodeProbe probe) { probe.setIncrementalBackupsEnabled(true); } -} \ No newline at end of file +} diff --git a/src/java/org/apache/cassandra/tools/nodetool/EnableBinary.java b/src/java/org/apache/cassandra/tools/nodetool/EnableBinary.java index 506945fd25d8..2e37e6ff4c12 100644 --- a/src/java/org/apache/cassandra/tools/nodetool/EnableBinary.java +++ b/src/java/org/apache/cassandra/tools/nodetool/EnableBinary.java @@ -30,4 +30,4 @@ public void execute(NodeProbe probe) { probe.startNativeTransport(); } -} \ No newline at end of file +} diff --git a/src/java/org/apache/cassandra/tools/nodetool/EnableFullQueryLog.java b/src/java/org/apache/cassandra/tools/nodetool/EnableFullQueryLog.java index 9873e5a01aed..50848946e0df 100644 --- a/src/java/org/apache/cassandra/tools/nodetool/EnableFullQueryLog.java +++ b/src/java/org/apache/cassandra/tools/nodetool/EnableFullQueryLog.java @@ -61,4 +61,4 @@ public void execute(NodeProbe probe) } probe.enableFullQueryLogger(path, rollCycle, bblocking, maxQueueWeight, maxLogSize, archiveCommand, archiveRetries); } -} \ No newline at end of file +} diff --git a/src/java/org/apache/cassandra/tools/nodetool/EnableGossip.java b/src/java/org/apache/cassandra/tools/nodetool/EnableGossip.java index 900c427ccea7..3433c3ec610c 100644 --- a/src/java/org/apache/cassandra/tools/nodetool/EnableGossip.java 
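On the read side the pattern is the same: java.io.FileInputStream becomes FileInputStreamPlus, opened from the wrapper File inside try-with-resources, as in the yaml-comparison hunk earlier and the NativeLibrary hunk further down. A minimal sketch under that assumption; the class name and the yaml use are illustrative:

import java.io.IOException;

import org.apache.cassandra.io.util.File;
import org.apache.cassandra.io.util.FileInputStreamPlus;
import org.yaml.snakeyaml.Yaml;

public final class YamlLoadSketch
{
    // Load a yaml document through the mockable wrapper stream; FileInputStreamPlus is an
    // InputStream, so it drops into snakeyaml's load(InputStream) just as FileInputStream did.
    static Object load(File yamlFile) throws IOException
    {
        try (FileInputStreamPlus in = new FileInputStreamPlus(yamlFile))
        {
            return new Yaml().load(in);
        }
    }
}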
+++ b/src/java/org/apache/cassandra/tools/nodetool/EnableGossip.java @@ -30,4 +30,4 @@ public void execute(NodeProbe probe) { probe.startGossiping(); } -} \ No newline at end of file +} diff --git a/src/java/org/apache/cassandra/tools/nodetool/EnableHandoff.java b/src/java/org/apache/cassandra/tools/nodetool/EnableHandoff.java index bccf7e761867..be64e120ebdc 100644 --- a/src/java/org/apache/cassandra/tools/nodetool/EnableHandoff.java +++ b/src/java/org/apache/cassandra/tools/nodetool/EnableHandoff.java @@ -30,4 +30,4 @@ public void execute(NodeProbe probe) { probe.enableHintedHandoff(); } -} \ No newline at end of file +} diff --git a/src/java/org/apache/cassandra/tools/nodetool/EnableOldProtocolVersions.java b/src/java/org/apache/cassandra/tools/nodetool/EnableOldProtocolVersions.java index f6d5be5269af..06c9f8d02f91 100644 --- a/src/java/org/apache/cassandra/tools/nodetool/EnableOldProtocolVersions.java +++ b/src/java/org/apache/cassandra/tools/nodetool/EnableOldProtocolVersions.java @@ -31,4 +31,4 @@ public void execute(NodeProbe probe) { probe.enableOldProtocolVersions(); } -} \ No newline at end of file +} diff --git a/src/java/org/apache/cassandra/tools/nodetool/Flush.java b/src/java/org/apache/cassandra/tools/nodetool/Flush.java index c83e420cbf7f..fb2446d22f7a 100644 --- a/src/java/org/apache/cassandra/tools/nodetool/Flush.java +++ b/src/java/org/apache/cassandra/tools/nodetool/Flush.java @@ -49,4 +49,4 @@ public void execute(NodeProbe probe) } } } -} \ No newline at end of file +} diff --git a/src/java/org/apache/cassandra/tools/nodetool/GetSnapshotThrottle.java b/src/java/org/apache/cassandra/tools/nodetool/GetSnapshotThrottle.java index 0e9bdc134c05..bd98d343064d 100644 --- a/src/java/org/apache/cassandra/tools/nodetool/GetSnapshotThrottle.java +++ b/src/java/org/apache/cassandra/tools/nodetool/GetSnapshotThrottle.java @@ -33,4 +33,4 @@ public void execute(NodeProbe probe) else System.out.println("Snapshot throttle is disabled"); } -} \ No newline at end of file +} diff --git a/src/java/org/apache/cassandra/tools/nodetool/HostStat.java b/src/java/org/apache/cassandra/tools/nodetool/HostStat.java index 19c0448e8bf7..56c46ee83d0e 100644 --- a/src/java/org/apache/cassandra/tools/nodetool/HostStat.java +++ b/src/java/org/apache/cassandra/tools/nodetool/HostStat.java @@ -38,4 +38,4 @@ public String ipOrDns() { return resolveIp ? 
endpoint.getHostName() : endpoint.getHostAddress(); } -} \ No newline at end of file +} diff --git a/src/java/org/apache/cassandra/tools/nodetool/InvalidateCounterCache.java b/src/java/org/apache/cassandra/tools/nodetool/InvalidateCounterCache.java index aef77bdb7370..3cba8e0cd200 100644 --- a/src/java/org/apache/cassandra/tools/nodetool/InvalidateCounterCache.java +++ b/src/java/org/apache/cassandra/tools/nodetool/InvalidateCounterCache.java @@ -30,4 +30,4 @@ public void execute(NodeProbe probe) { probe.invalidateCounterCache(); } -} \ No newline at end of file +} diff --git a/src/java/org/apache/cassandra/tools/nodetool/InvalidateKeyCache.java b/src/java/org/apache/cassandra/tools/nodetool/InvalidateKeyCache.java index cfe7d2f582ab..4414b42cc996 100644 --- a/src/java/org/apache/cassandra/tools/nodetool/InvalidateKeyCache.java +++ b/src/java/org/apache/cassandra/tools/nodetool/InvalidateKeyCache.java @@ -30,4 +30,4 @@ public void execute(NodeProbe probe) { probe.invalidateKeyCache(); } -} \ No newline at end of file +} diff --git a/src/java/org/apache/cassandra/tools/nodetool/InvalidateRowCache.java b/src/java/org/apache/cassandra/tools/nodetool/InvalidateRowCache.java index 7357e2785787..1a10ed00f29d 100644 --- a/src/java/org/apache/cassandra/tools/nodetool/InvalidateRowCache.java +++ b/src/java/org/apache/cassandra/tools/nodetool/InvalidateRowCache.java @@ -30,4 +30,4 @@ public void execute(NodeProbe probe) { probe.invalidateRowCache(); } -} \ No newline at end of file +} diff --git a/src/java/org/apache/cassandra/tools/nodetool/Move.java b/src/java/org/apache/cassandra/tools/nodetool/Move.java index 8654d25d8642..075e00850391 100644 --- a/src/java/org/apache/cassandra/tools/nodetool/Move.java +++ b/src/java/org/apache/cassandra/tools/nodetool/Move.java @@ -43,4 +43,4 @@ public void execute(NodeProbe probe) throw new RuntimeException("Error during moving node", e); } } -} \ No newline at end of file +} diff --git a/src/java/org/apache/cassandra/tools/nodetool/PauseHandoff.java b/src/java/org/apache/cassandra/tools/nodetool/PauseHandoff.java index 4ec70d8201b2..fde9eef271df 100644 --- a/src/java/org/apache/cassandra/tools/nodetool/PauseHandoff.java +++ b/src/java/org/apache/cassandra/tools/nodetool/PauseHandoff.java @@ -30,4 +30,4 @@ public void execute(NodeProbe probe) { probe.pauseHintsDelivery(); } -} \ No newline at end of file +} diff --git a/src/java/org/apache/cassandra/tools/nodetool/Rebuild.java b/src/java/org/apache/cassandra/tools/nodetool/Rebuild.java index a083cde99b76..a16e8f22f141 100644 --- a/src/java/org/apache/cassandra/tools/nodetool/Rebuild.java +++ b/src/java/org/apache/cassandra/tools/nodetool/Rebuild.java @@ -57,4 +57,4 @@ public void execute(NodeProbe probe) probe.rebuild(sourceDataCenterName, keyspace, tokens, specificSources); } -} \ No newline at end of file +} diff --git a/src/java/org/apache/cassandra/tools/nodetool/RebuildIndex.java b/src/java/org/apache/cassandra/tools/nodetool/RebuildIndex.java index 4a6b071510ee..f7a3b6f1eeea 100644 --- a/src/java/org/apache/cassandra/tools/nodetool/RebuildIndex.java +++ b/src/java/org/apache/cassandra/tools/nodetool/RebuildIndex.java @@ -40,4 +40,4 @@ public void execute(NodeProbe probe) checkArgument(args.size() >= 3, "rebuild_index requires ks, cf and idx args"); probe.rebuildIndex(args.get(0), args.get(1), toArray(args.subList(2, args.size()), String.class)); } -} \ No newline at end of file +} diff --git a/src/java/org/apache/cassandra/tools/nodetool/ReloadTriggers.java 
b/src/java/org/apache/cassandra/tools/nodetool/ReloadTriggers.java index 6ca90fbecb38..8727a610838e 100644 --- a/src/java/org/apache/cassandra/tools/nodetool/ReloadTriggers.java +++ b/src/java/org/apache/cassandra/tools/nodetool/ReloadTriggers.java @@ -30,4 +30,4 @@ public void execute(NodeProbe probe) { probe.reloadTriggers(); } -} \ No newline at end of file +} diff --git a/src/java/org/apache/cassandra/tools/nodetool/ResetFullQueryLog.java b/src/java/org/apache/cassandra/tools/nodetool/ResetFullQueryLog.java index 786852d96f54..d7ac30107bda 100644 --- a/src/java/org/apache/cassandra/tools/nodetool/ResetFullQueryLog.java +++ b/src/java/org/apache/cassandra/tools/nodetool/ResetFullQueryLog.java @@ -30,4 +30,4 @@ public void execute(NodeProbe probe) { probe.resetFullQueryLogger(); } -} \ No newline at end of file +} diff --git a/src/java/org/apache/cassandra/tools/nodetool/ResetLocalSchema.java b/src/java/org/apache/cassandra/tools/nodetool/ResetLocalSchema.java index 708636f9d072..62775a481d7b 100644 --- a/src/java/org/apache/cassandra/tools/nodetool/ResetLocalSchema.java +++ b/src/java/org/apache/cassandra/tools/nodetool/ResetLocalSchema.java @@ -38,4 +38,4 @@ public void execute(NodeProbe probe) throw new RuntimeException(e); } } -} \ No newline at end of file +} diff --git a/src/java/org/apache/cassandra/tools/nodetool/ResumeHandoff.java b/src/java/org/apache/cassandra/tools/nodetool/ResumeHandoff.java index a3984f87e4e1..bda98aaec033 100644 --- a/src/java/org/apache/cassandra/tools/nodetool/ResumeHandoff.java +++ b/src/java/org/apache/cassandra/tools/nodetool/ResumeHandoff.java @@ -30,4 +30,4 @@ public void execute(NodeProbe probe) { probe.resumeHintsDelivery(); } -} \ No newline at end of file +} diff --git a/src/java/org/apache/cassandra/tools/nodetool/SetBatchlogReplayThrottle.java b/src/java/org/apache/cassandra/tools/nodetool/SetBatchlogReplayThrottle.java index 65bb8f50d20c..3c6370c4e1c7 100644 --- a/src/java/org/apache/cassandra/tools/nodetool/SetBatchlogReplayThrottle.java +++ b/src/java/org/apache/cassandra/tools/nodetool/SetBatchlogReplayThrottle.java @@ -34,4 +34,4 @@ public void execute(NodeProbe probe) { probe.setBatchlogReplayThrottle(batchlogReplayThrottle); } -} \ No newline at end of file +} diff --git a/src/java/org/apache/cassandra/tools/nodetool/SetCacheCapacity.java b/src/java/org/apache/cassandra/tools/nodetool/SetCacheCapacity.java index 461f6aeed257..b07eb9e00892 100644 --- a/src/java/org/apache/cassandra/tools/nodetool/SetCacheCapacity.java +++ b/src/java/org/apache/cassandra/tools/nodetool/SetCacheCapacity.java @@ -42,4 +42,4 @@ public void execute(NodeProbe probe) checkArgument(args.size() == 3, "setcachecapacity requires key-cache-capacity, row-cache-capacity, and counter-cache-capacity args."); probe.setCacheCapacities(args.get(0), args.get(1), args.get(2)); } -} \ No newline at end of file +} diff --git a/src/java/org/apache/cassandra/tools/nodetool/SetCacheKeysToSave.java b/src/java/org/apache/cassandra/tools/nodetool/SetCacheKeysToSave.java index 18197e6ee3dc..de9bab58c8bf 100644 --- a/src/java/org/apache/cassandra/tools/nodetool/SetCacheKeysToSave.java +++ b/src/java/org/apache/cassandra/tools/nodetool/SetCacheKeysToSave.java @@ -42,4 +42,4 @@ public void execute(NodeProbe probe) checkArgument(args.size() == 3, "setcachekeystosave requires key-cache-keys-to-save, row-cache-keys-to-save, and counter-cache-keys-to-save args."); probe.setCacheKeysToSave(args.get(0), args.get(1), args.get(2)); } -} \ No newline at end of file +} diff --git 
a/src/java/org/apache/cassandra/tools/nodetool/SetCompactionThreshold.java b/src/java/org/apache/cassandra/tools/nodetool/SetCompactionThreshold.java index 56e558ff470a..52bb5bc2906b 100644 --- a/src/java/org/apache/cassandra/tools/nodetool/SetCompactionThreshold.java +++ b/src/java/org/apache/cassandra/tools/nodetool/SetCompactionThreshold.java @@ -47,4 +47,4 @@ public void execute(NodeProbe probe) probe.setCompactionThreshold(args.get(0), args.get(1), minthreshold, maxthreshold); } -} \ No newline at end of file +} diff --git a/src/java/org/apache/cassandra/tools/nodetool/SetCompactionThroughput.java b/src/java/org/apache/cassandra/tools/nodetool/SetCompactionThroughput.java index 80e722259257..4d01f619db35 100644 --- a/src/java/org/apache/cassandra/tools/nodetool/SetCompactionThroughput.java +++ b/src/java/org/apache/cassandra/tools/nodetool/SetCompactionThroughput.java @@ -34,4 +34,4 @@ public void execute(NodeProbe probe) { probe.setCompactionThroughput(compactionThroughput); } -} \ No newline at end of file +} diff --git a/src/java/org/apache/cassandra/tools/nodetool/SetHintedHandoffThrottleInKB.java b/src/java/org/apache/cassandra/tools/nodetool/SetHintedHandoffThrottleInKB.java index feb945b9b1d8..96f1bdf588e7 100644 --- a/src/java/org/apache/cassandra/tools/nodetool/SetHintedHandoffThrottleInKB.java +++ b/src/java/org/apache/cassandra/tools/nodetool/SetHintedHandoffThrottleInKB.java @@ -34,4 +34,4 @@ public void execute(NodeProbe probe) { probe.setHintedHandoffThrottleInKB(throttleInKB); } -} \ No newline at end of file +} diff --git a/src/java/org/apache/cassandra/tools/nodetool/SetHostStat.java b/src/java/org/apache/cassandra/tools/nodetool/SetHostStat.java index c43abe1869d6..116087610356 100644 --- a/src/java/org/apache/cassandra/tools/nodetool/SetHostStat.java +++ b/src/java/org/apache/cassandra/tools/nodetool/SetHostStat.java @@ -51,4 +51,4 @@ public void add(String token, String host, Map ownerships) t Float owns = ownerships.get(endpoint); hostStats.add(new HostStat(token, endpoint, resolveIp, owns)); } -} \ No newline at end of file +} diff --git a/src/java/org/apache/cassandra/tools/nodetool/SetLoggingLevel.java b/src/java/org/apache/cassandra/tools/nodetool/SetLoggingLevel.java index 8d9ad90cde49..66d6283cc7de 100644 --- a/src/java/org/apache/cassandra/tools/nodetool/SetLoggingLevel.java +++ b/src/java/org/apache/cassandra/tools/nodetool/SetLoggingLevel.java @@ -100,4 +100,4 @@ else if (target.equals("ring")) for (String classQualifier : classQualifiers) probe.setLoggingLevel(classQualifier, level); } -} \ No newline at end of file +} diff --git a/src/java/org/apache/cassandra/tools/nodetool/SetSnapshotThrottle.java b/src/java/org/apache/cassandra/tools/nodetool/SetSnapshotThrottle.java index a4c49b812250..045ccc171370 100644 --- a/src/java/org/apache/cassandra/tools/nodetool/SetSnapshotThrottle.java +++ b/src/java/org/apache/cassandra/tools/nodetool/SetSnapshotThrottle.java @@ -33,4 +33,4 @@ public void execute(NodeProbe probe) { probe.setSnapshotLinksPerSecond(snapshotThrottle); } -} \ No newline at end of file +} diff --git a/src/java/org/apache/cassandra/tools/nodetool/SetStreamThroughput.java b/src/java/org/apache/cassandra/tools/nodetool/SetStreamThroughput.java index 069a6e98b40f..672d5fe05d74 100644 --- a/src/java/org/apache/cassandra/tools/nodetool/SetStreamThroughput.java +++ b/src/java/org/apache/cassandra/tools/nodetool/SetStreamThroughput.java @@ -34,4 +34,4 @@ public void execute(NodeProbe probe) { probe.setStreamThroughput(streamThroughput); } -} \ No 
newline at end of file +} diff --git a/src/java/org/apache/cassandra/tools/nodetool/SetTraceProbability.java b/src/java/org/apache/cassandra/tools/nodetool/SetTraceProbability.java index e08198026111..ef9f4980baed 100644 --- a/src/java/org/apache/cassandra/tools/nodetool/SetTraceProbability.java +++ b/src/java/org/apache/cassandra/tools/nodetool/SetTraceProbability.java @@ -36,4 +36,4 @@ public void execute(NodeProbe probe) checkArgument(traceProbability >= 0 && traceProbability <= 1, "Trace probability must be between 0 and 1"); probe.setTraceProbability(traceProbability); } -} \ No newline at end of file +} diff --git a/src/java/org/apache/cassandra/tools/nodetool/Sjk.java b/src/java/org/apache/cassandra/tools/nodetool/Sjk.java index 3ad2c94c6e42..d7f7a043f606 100644 --- a/src/java/org/apache/cassandra/tools/nodetool/Sjk.java +++ b/src/java/org/apache/cassandra/tools/nodetool/Sjk.java @@ -17,7 +17,6 @@ */ package org.apache.cassandra.tools.nodetool; -import java.io.File; import java.io.IOException; import java.io.PrintStream; import java.lang.reflect.Field; @@ -57,6 +56,7 @@ import com.beust.jcommander.Parameterized; import io.airlift.airline.Arguments; import io.airlift.airline.Command; +import org.apache.cassandra.io.util.File; import org.apache.cassandra.tools.Output; import org.gridkit.jvmtool.JmxConnectionInfo; import org.gridkit.jvmtool.cli.CommandLauncher; @@ -464,15 +464,15 @@ static void listFiles(List results, URL packageURL, String path) throws { // loop through files in classpath File dir = new File(packageURL.getFile()); - String cp = dir.getCanonicalPath(); + String cp = dir.canonicalPath(); File root = dir; while (true) { - if (cp.equals(new File(root, path).getCanonicalPath())) + if (cp.equals(new File(root, path).canonicalPath())) { break; } - root = root.getParentFile(); + root = root.parent(); } listFiles(results, root, dir); } @@ -480,10 +480,10 @@ static void listFiles(List results, URL packageURL, String path) throws static void listFiles(List names, File root, File dir) { - String rootPath = root.getAbsolutePath(); + String rootPath = root.absolutePath(); if (dir.exists() && dir.isDirectory()) { - for (File file : dir.listFiles()) + for (File file : dir.tryList()) { if (file.isDirectory()) { @@ -491,7 +491,7 @@ static void listFiles(List names, File root, File dir) } else { - String name = file.getAbsolutePath().substring(rootPath.length() + 1); + String name = file.absolutePath().substring(rootPath.length() + 1); name = name.replace('\\', '/'); names.add(name); } diff --git a/src/java/org/apache/cassandra/tools/nodetool/TruncateHints.java b/src/java/org/apache/cassandra/tools/nodetool/TruncateHints.java index a3a004963625..2a19d3a62978 100644 --- a/src/java/org/apache/cassandra/tools/nodetool/TruncateHints.java +++ b/src/java/org/apache/cassandra/tools/nodetool/TruncateHints.java @@ -38,4 +38,4 @@ public void execute(NodeProbe probe) else probe.truncateHints(endpoint); } -} \ No newline at end of file +} diff --git a/src/java/org/apache/cassandra/tools/nodetool/stats/StatsHolder.java b/src/java/org/apache/cassandra/tools/nodetool/stats/StatsHolder.java index c35e1fed7ed9..a345ce0018d2 100644 --- a/src/java/org/apache/cassandra/tools/nodetool/stats/StatsHolder.java +++ b/src/java/org/apache/cassandra/tools/nodetool/stats/StatsHolder.java @@ -26,4 +26,4 @@ public interface StatsHolder { public Map convert2Map(); -} \ No newline at end of file +} diff --git a/src/java/org/apache/cassandra/tools/nodetool/stats/StatsKeyspace.java 
b/src/java/org/apache/cassandra/tools/nodetool/stats/StatsKeyspace.java index dc153325c84e..89d77059f559 100644 --- a/src/java/org/apache/cassandra/tools/nodetool/stats/StatsKeyspace.java +++ b/src/java/org/apache/cassandra/tools/nodetool/stats/StatsKeyspace.java @@ -75,4 +75,4 @@ public double writeLatency() ? totalWriteTime / writeCount / 1000 : Double.NaN; } -} \ No newline at end of file +} diff --git a/src/java/org/apache/cassandra/tools/nodetool/stats/StatsPrinter.java b/src/java/org/apache/cassandra/tools/nodetool/stats/StatsPrinter.java index 389efba9ace6..037227bfe4aa 100644 --- a/src/java/org/apache/cassandra/tools/nodetool/stats/StatsPrinter.java +++ b/src/java/org/apache/cassandra/tools/nodetool/stats/StatsPrinter.java @@ -66,4 +66,4 @@ public void print(T data, PrintStream out) out.println(yaml.dump(data.convert2Map())); } } -} \ No newline at end of file +} diff --git a/src/java/org/apache/cassandra/triggers/CustomClassLoader.java b/src/java/org/apache/cassandra/triggers/CustomClassLoader.java index 6948c2dc942b..16b182ecf4b4 100644 --- a/src/java/org/apache/cassandra/triggers/CustomClassLoader.java +++ b/src/java/org/apache/cassandra/triggers/CustomClassLoader.java @@ -21,22 +21,22 @@ */ -import java.io.File; -import java.io.FilenameFilter; import java.io.IOException; import java.net.URL; import java.net.URLClassLoader; import java.util.Map; import java.util.concurrent.ConcurrentHashMap; +import java.util.function.BiPredicate; +import org.apache.cassandra.io.util.File; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.io.Files; - import org.apache.cassandra.io.FSWriteError; import org.apache.cassandra.io.util.FileUtils; +import static java.nio.file.Files.*; + /** * Custom class loader will load the classes from the class path, CCL will load * the classes from the the URL first, if it cannot find the required class it @@ -69,28 +69,22 @@ public void addClassPath(File dir) { if (dir == null || !dir.exists()) return; - FilenameFilter filter = new FilenameFilter() - { - public boolean accept(File dir, String name) - { - return name.endsWith(".jar"); - } - }; - for (File inputJar : dir.listFiles(filter)) + BiPredicate filter = (ignore, name) -> name.endsWith(".jar"); + for (File inputJar : dir.tryList(filter)) { File lib = new File(FileUtils.getTempDir(), "lib"); if (!lib.exists()) { - lib.mkdir(); + lib.tryCreateDirectory(); lib.deleteOnExit(); } File out = FileUtils.createTempFile("cassandra-", ".jar", lib); out.deleteOnExit(); - logger.info("Loading new jar {}", inputJar.getAbsolutePath()); + logger.info("Loading new jar {}", inputJar.absolutePath()); try { - Files.copy(inputJar, out); - addURL(out.toURI().toURL()); + copy(inputJar.toPath(), out.toPath()); + addURL(out.toPath().toUri().toURL()); } catch (IOException ex) { diff --git a/src/java/org/apache/cassandra/triggers/TriggerExecutor.java b/src/java/org/apache/cassandra/triggers/TriggerExecutor.java index 295003fff930..298ac5693293 100644 --- a/src/java/org/apache/cassandra/triggers/TriggerExecutor.java +++ b/src/java/org/apache/cassandra/triggers/TriggerExecutor.java @@ -18,7 +18,6 @@ */ package org.apache.cassandra.triggers; -import java.io.File; import java.nio.ByteBuffer; import java.util.*; @@ -33,6 +32,7 @@ import org.apache.cassandra.db.partitions.PartitionUpdate; import org.apache.cassandra.exceptions.CassandraException; import org.apache.cassandra.exceptions.InvalidRequestException; +import org.apache.cassandra.io.util.File; import org.apache.cassandra.schema.TableId; import 
org.apache.cassandra.schema.TriggerMetadata; import org.apache.cassandra.schema.Triggers; diff --git a/src/java/org/apache/cassandra/utils/BloomFilterSerializer.java b/src/java/org/apache/cassandra/utils/BloomFilterSerializer.java index d3c08b53cbed..8506ce5b98d3 100644 --- a/src/java/org/apache/cassandra/utils/BloomFilterSerializer.java +++ b/src/java/org/apache/cassandra/utils/BloomFilterSerializer.java @@ -17,10 +17,13 @@ */ package org.apache.cassandra.utils; +import java.io.DataInput; import java.io.DataInputStream; import java.io.IOException; +import java.io.InputStream; import org.apache.cassandra.db.TypeSizes; +import org.apache.cassandra.io.util.DataInputPlus; import org.apache.cassandra.io.util.DataOutputPlus; import org.apache.cassandra.utils.obs.IBitSet; import org.apache.cassandra.utils.obs.OffHeapBitSet; @@ -38,7 +41,7 @@ public static void serialize(BloomFilter bf, DataOutputPlus out) throws IOExcept } @SuppressWarnings("resource") - public static BloomFilter deserialize(DataInputStream in, boolean oldBfFormat) throws IOException + public static BloomFilter deserialize(I in, boolean oldBfFormat) throws IOException { int hashes = in.readInt(); IBitSet bs = OffHeapBitSet.deserialize(in, oldBfFormat); diff --git a/src/java/org/apache/cassandra/utils/ByteArrayUtil.java b/src/java/org/apache/cassandra/utils/ByteArrayUtil.java index 75734ada9690..3673f6760436 100644 --- a/src/java/org/apache/cassandra/utils/ByteArrayUtil.java +++ b/src/java/org/apache/cassandra/utils/ByteArrayUtil.java @@ -253,4 +253,4 @@ public static void copyBytes(byte[] src, int srcPos, ByteBuffer dst, int dstPos, { FastByteOperations.copy(src, srcPos, dst, dstPos, length); } -} \ No newline at end of file +} diff --git a/src/java/org/apache/cassandra/utils/DirectorySizeCalculator.java b/src/java/org/apache/cassandra/utils/DirectorySizeCalculator.java index c1fb6e040c8b..97fc22ea83ad 100644 --- a/src/java/org/apache/cassandra/utils/DirectorySizeCalculator.java +++ b/src/java/org/apache/cassandra/utils/DirectorySizeCalculator.java @@ -18,7 +18,6 @@ package org.apache.cassandra.utils; -import java.io.File; import java.io.IOException; import java.nio.file.FileVisitResult; import java.nio.file.Path; @@ -28,6 +27,8 @@ /** * Walks directory recursively, summing up total contents of files within. */ +import org.apache.cassandra.io.util.File; + public class DirectorySizeCalculator extends SimpleFileVisitor { protected volatile long size = 0; diff --git a/src/java/org/apache/cassandra/utils/ExecutorUtils.java b/src/java/org/apache/cassandra/utils/ExecutorUtils.java index e26d776e0083..5bb841f32bdd 100644 --- a/src/java/org/apache/cassandra/utils/ExecutorUtils.java +++ b/src/java/org/apache/cassandra/utils/ExecutorUtils.java @@ -152,4 +152,4 @@ public static void shutdownNowAndWait(long timeout, TimeUnit unit, Object ... 
ex { shutdownNowAndWait(timeout, unit, Arrays.asList(executors)); } -} \ No newline at end of file +} diff --git a/src/java/org/apache/cassandra/utils/FBUtilities.java b/src/java/org/apache/cassandra/utils/FBUtilities.java index e3161e8e215d..93f951c53fff 100644 --- a/src/java/org/apache/cassandra/utils/FBUtilities.java +++ b/src/java/org/apache/cassandra/utils/FBUtilities.java @@ -17,7 +17,14 @@ */ package org.apache.cassandra.utils; -import java.io.*; + +import java.io.BufferedReader; +import java.io.ByteArrayOutputStream; +import java.io.DataOutputStream; +import java.io.IOException; +import java.io.InputStream; +import java.io.InputStreamReader; +import java.io.OutputStream; import java.lang.reflect.Field; import java.math.BigInteger; import java.net.*; @@ -38,6 +45,7 @@ import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Joiner; import com.google.common.base.Strings; +import org.apache.cassandra.io.util.File; import org.apache.cassandra.utils.concurrent.*; import org.apache.commons.lang3.StringUtils; import org.slf4j.Logger; @@ -364,7 +372,7 @@ public static String resourceToFile(String filename) throws ConfigurationExcepti if (scpurl == null) throw new ConfigurationException("unable to locate " + filename); - return new File(scpurl.getFile()).getAbsolutePath(); + return new File(scpurl.getFile()).absolutePath(); } public static File cassandraTriggerDir() diff --git a/src/java/org/apache/cassandra/utils/HeapUtils.java b/src/java/org/apache/cassandra/utils/HeapUtils.java index 4dd0d46b43d2..c0910d87fc86 100644 --- a/src/java/org/apache/cassandra/utils/HeapUtils.java +++ b/src/java/org/apache/cassandra/utils/HeapUtils.java @@ -17,9 +17,13 @@ */ package org.apache.cassandra.utils; -import java.io.*; + +import java.io.BufferedReader; +import java.io.IOException; +import java.io.InputStreamReader; import java.lang.management.ManagementFactory; +import org.apache.cassandra.io.util.File; import org.apache.commons.lang3.ArrayUtils; import org.apache.commons.lang3.text.StrBuilder; @@ -81,14 +85,8 @@ private static String getJcmdPath() if (javaHome == null) return null; File javaBinDirectory = new File(javaHome, "bin"); - File[] files = javaBinDirectory.listFiles(new FilenameFilter() - { - public boolean accept(File dir, String name) - { - return name.startsWith("jcmd"); - } - }); - return ArrayUtils.isEmpty(files) ? null : files[0].getPath(); + File[] files = javaBinDirectory.tryList((dir, name) -> name.startsWith("jcmd")); + return ArrayUtils.isEmpty(files) ? 
null : files[0].path(); } /** diff --git a/src/java/org/apache/cassandra/utils/IndexedSearchIterator.java b/src/java/org/apache/cassandra/utils/IndexedSearchIterator.java index 597e5bbb2353..bd2f70a7417b 100644 --- a/src/java/org/apache/cassandra/utils/IndexedSearchIterator.java +++ b/src/java/org/apache/cassandra/utils/IndexedSearchIterator.java @@ -35,4 +35,4 @@ public interface IndexedSearchIterator extends SearchIterator * @throws java.util.NoSuchElementException if next() returned null */ public int indexOfCurrent(); -} \ No newline at end of file +} diff --git a/src/java/org/apache/cassandra/utils/JVMStabilityInspector.java b/src/java/org/apache/cassandra/utils/JVMStabilityInspector.java index 158baf2556ad..6470f1ff3d8b 100644 --- a/src/java/org/apache/cassandra/utils/JVMStabilityInspector.java +++ b/src/java/org/apache/cassandra/utils/JVMStabilityInspector.java @@ -19,6 +19,7 @@ import java.io.FileNotFoundException; import java.net.SocketException; +import java.nio.file.FileSystemException; import java.util.ArrayList; import java.util.Arrays; import java.util.List; @@ -141,7 +142,7 @@ else if (t instanceof UnrecoverableIllegalStateException) fn.accept(t); // Check for file handle exhaustion - if (t instanceof FileNotFoundException || t instanceof SocketException) + if (t instanceof FileNotFoundException || t instanceof FileSystemException || t instanceof SocketException) if (t.getMessage() != null && t.getMessage().contains("Too many open files")) isUnstable = true; diff --git a/src/java/org/apache/cassandra/utils/NativeLibrary.java b/src/java/org/apache/cassandra/utils/NativeLibrary.java index e5b5da7308ff..01225aa37e4f 100644 --- a/src/java/org/apache/cassandra/utils/NativeLibrary.java +++ b/src/java/org/apache/cassandra/utils/NativeLibrary.java @@ -17,14 +17,14 @@ */ package org.apache.cassandra.utils; -import java.io.File; import java.io.FileDescriptor; -import java.io.FileInputStream; import java.io.IOException; import java.lang.reflect.Field; import java.nio.channels.FileChannel; import java.util.concurrent.TimeUnit; +import org.apache.cassandra.io.util.File; +import org.apache.cassandra.io.util.FileInputStreamPlus; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -216,7 +216,7 @@ public static void trySkipCache(String path, long offset, long len) if (!f.exists()) return; - try (FileInputStream fis = new FileInputStream(f)) + try (FileInputStreamPlus fis = new FileInputStreamPlus(f)) { trySkipCache(getfd(fis.getChannel()), offset, len, path); } diff --git a/src/java/org/apache/cassandra/utils/ResourceWatcher.java b/src/java/org/apache/cassandra/utils/ResourceWatcher.java index 5e7cbdd4f3da..e8dcb8574372 100644 --- a/src/java/org/apache/cassandra/utils/ResourceWatcher.java +++ b/src/java/org/apache/cassandra/utils/ResourceWatcher.java @@ -17,9 +17,9 @@ */ package org.apache.cassandra.utils; -import java.io.File; import java.util.concurrent.TimeUnit; +import org.apache.cassandra.io.util.File; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/src/java/org/apache/cassandra/utils/SyncUtil.java b/src/java/org/apache/cassandra/utils/SyncUtil.java index b4a4bee2be4a..6055859531d1 100644 --- a/src/java/org/apache/cassandra/utils/SyncUtil.java +++ b/src/java/org/apache/cassandra/utils/SyncUtil.java @@ -30,6 +30,7 @@ import org.apache.cassandra.config.Config; import com.google.common.base.Preconditions; +import org.apache.cassandra.io.util.File; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -172,12 +173,6 @@ public static void 
force(FileChannel fc, boolean metaData) throws IOException } } - public static void sync(RandomAccessFile ras) throws IOException - { - Preconditions.checkNotNull(ras); - sync(ras.getFD()); - } - public static void sync(FileOutputStream fos) throws IOException { Preconditions.checkNotNull(fos); @@ -197,7 +192,7 @@ public static void trySyncDir(File dir) if (SKIP_SYNC) return; - int directoryFD = NativeLibrary.tryOpenDirectory(dir.getPath()); + int directoryFD = NativeLibrary.tryOpenDirectory(dir.path()); try { trySync(directoryFD); diff --git a/src/java/org/apache/cassandra/utils/Throwables.java b/src/java/org/apache/cassandra/utils/Throwables.java index 73e7d24ee5f7..7012132c0362 100644 --- a/src/java/org/apache/cassandra/utils/Throwables.java +++ b/src/java/org/apache/cassandra/utils/Throwables.java @@ -18,7 +18,7 @@ */ package org.apache.cassandra.utils; -import java.io.File; +import org.apache.cassandra.io.util.File; import java.io.IOException; import java.lang.reflect.InvocationTargetException; import java.util.Arrays; @@ -152,7 +152,7 @@ public static Throwable perform(Throwable accumulate, Iterator ... actions) { - perform(against.getPath(), opType, actions); + perform(against.path(), opType, actions); } @SafeVarargs diff --git a/src/java/org/apache/cassandra/utils/binlog/BinLog.java b/src/java/org/apache/cassandra/utils/binlog/BinLog.java index 8b8588afcaf4..a9bb55ac8d84 100644 --- a/src/java/org/apache/cassandra/utils/binlog/BinLog.java +++ b/src/java/org/apache/cassandra/utils/binlog/BinLog.java @@ -18,7 +18,6 @@ package org.apache.cassandra.utils.binlog; -import java.io.File; import java.nio.file.Path; import java.util.ArrayList; import java.util.Collections; @@ -31,6 +30,7 @@ import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; import com.google.common.base.Strings; +import org.apache.cassandra.io.util.File; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -363,11 +363,11 @@ public static class Builder public Builder path(Path path) { Preconditions.checkNotNull(path, "path was null"); - File pathAsFile = path.toFile(); + File pathAsFile = new File(path); //Exists and is a directory or can be created Preconditions.checkArgument(!pathAsFile.toString().isEmpty(), "you might have forgotten to specify a directory to save logs"); - Preconditions.checkArgument((pathAsFile.exists() && pathAsFile.isDirectory()) || (!pathAsFile.exists() && pathAsFile.mkdirs()), "path exists and is not a directory or couldn't be created"); - Preconditions.checkArgument(pathAsFile.canRead() && pathAsFile.canWrite() && pathAsFile.canExecute(), "path is not readable, writable, and executable"); + Preconditions.checkArgument((pathAsFile.exists() && pathAsFile.isDirectory()) || (!pathAsFile.exists() && pathAsFile.tryCreateDirectories()), "path exists and is not a directory or couldn't be created"); + Preconditions.checkArgument(pathAsFile.isReadable() && pathAsFile.isWritable() && pathAsFile.isExecutable(), "path is not readable, writable, and executable"); this.path = path; return this; } @@ -432,7 +432,7 @@ public BinLog build(boolean cleanDirectory) logger.info("Cleaning directory: {} as requested", path); if (path.toFile().exists()) { - Throwable error = cleanDirectory(path.toFile(), null); + Throwable error = cleanDirectory(new File(path), null); if (error != null) { throw new RuntimeException(error); @@ -471,7 +471,7 @@ public static Throwable cleanDirectory(File directory, Throwable accumulate) { return Throwables.merge(accumulate, new 
RuntimeException(String.format("%s is not a directory", directory))); } - for (File f : directory.listFiles()) + for (File f : directory.tryList()) { accumulate = deleteRecursively(f, accumulate); } @@ -486,7 +486,7 @@ private static Throwable deleteRecursively(File fileOrDirectory, Throwable accum { if (fileOrDirectory.isDirectory()) { - for (File f : fileOrDirectory.listFiles()) + for (File f : fileOrDirectory.tryList()) { accumulate = FileUtils.deleteWithConfirm(f, accumulate); } diff --git a/src/java/org/apache/cassandra/utils/btree/LeafBTreeSearchIterator.java b/src/java/org/apache/cassandra/utils/btree/LeafBTreeSearchIterator.java index a23f460812b4..3c1991afd123 100644 --- a/src/java/org/apache/cassandra/utils/btree/LeafBTreeSearchIterator.java +++ b/src/java/org/apache/cassandra/utils/btree/LeafBTreeSearchIterator.java @@ -133,4 +133,4 @@ public int indexOfCurrent() int current = forwards ? nextPos - 1 : nextPos + 1; return forwards ? current - lowerBound : upperBound - current; } -} \ No newline at end of file +} diff --git a/src/java/org/apache/cassandra/utils/btree/NodeCursor.java b/src/java/org/apache/cassandra/utils/btree/NodeCursor.java index e9fa89e9317f..4c7e9935181b 100644 --- a/src/java/org/apache/cassandra/utils/btree/NodeCursor.java +++ b/src/java/org/apache/cassandra/utils/btree/NodeCursor.java @@ -195,4 +195,4 @@ K value() { return (K) node[position]; } -} \ No newline at end of file +} diff --git a/src/java/org/apache/cassandra/utils/obs/OffHeapBitSet.java b/src/java/org/apache/cassandra/utils/obs/OffHeapBitSet.java index 8d118e7f300f..8b0550f278c3 100644 --- a/src/java/org/apache/cassandra/utils/obs/OffHeapBitSet.java +++ b/src/java/org/apache/cassandra/utils/obs/OffHeapBitSet.java @@ -17,7 +17,7 @@ */ package org.apache.cassandra.utils.obs; -import java.io.DataInputStream; +import java.io.*; import java.io.IOException; import com.google.common.annotations.VisibleForTesting; @@ -143,7 +143,7 @@ public long serializedSize() } @SuppressWarnings("resource") - public static OffHeapBitSet deserialize(DataInputStream in, boolean oldBfFormat) throws IOException + public static OffHeapBitSet deserialize(I in, boolean oldBfFormat) throws IOException { long byteCount = in.readInt() * 8L; Memory memory = Memory.allocate(byteCount); diff --git a/src/java/org/apache/cassandra/utils/streamhist/HistogramDataConsumer.java b/src/java/org/apache/cassandra/utils/streamhist/HistogramDataConsumer.java old mode 100755 new mode 100644 diff --git a/src/java/org/apache/cassandra/utils/streamhist/StreamingTombstoneHistogramBuilder.java b/src/java/org/apache/cassandra/utils/streamhist/StreamingTombstoneHistogramBuilder.java old mode 100755 new mode 100644 diff --git a/src/java/org/apache/cassandra/utils/streamhist/TombstoneHistogram.java b/src/java/org/apache/cassandra/utils/streamhist/TombstoneHistogram.java old mode 100755 new mode 100644 diff --git a/test/burn/org/apache/cassandra/net/GlobalInboundSettings.java b/test/burn/org/apache/cassandra/net/GlobalInboundSettings.java index 9b23041ae41c..da453fed4e30 100644 --- a/test/burn/org/apache/cassandra/net/GlobalInboundSettings.java +++ b/test/burn/org/apache/cassandra/net/GlobalInboundSettings.java @@ -54,4 +54,4 @@ GlobalInboundSettings withTemplate(InboundConnectionSettings template) { return new GlobalInboundSettings(queueCapacity, endpointReserveLimit, globalReserveLimit, template); } -} \ No newline at end of file +} diff --git a/test/burn/org/apache/cassandra/transport/DriverBurnTest.java 
b/test/burn/org/apache/cassandra/transport/DriverBurnTest.java index 56558672b9cf..7856b1b40d35 100644 --- a/test/burn/org/apache/cassandra/transport/DriverBurnTest.java +++ b/test/burn/org/apache/cassandra/transport/DriverBurnTest.java @@ -438,4 +438,4 @@ public int encodedSize(QueryMessage queryMessage, ProtocolVersion version) System.out.println("99p: " + stats.getPercentile(0.99)); } } -// TODO: test disconnecting and reconnecting constantly \ No newline at end of file +// TODO: test disconnecting and reconnecting constantly diff --git a/test/burn/org/apache/cassandra/transport/SimpleClientBurnTest.java b/test/burn/org/apache/cassandra/transport/SimpleClientBurnTest.java index 7e57916eb1a4..2d863cf02049 100644 --- a/test/burn/org/apache/cassandra/transport/SimpleClientBurnTest.java +++ b/test/burn/org/apache/cassandra/transport/SimpleClientBurnTest.java @@ -212,4 +212,4 @@ public int encodedSize(QueryMessage queryMessage, ProtocolVersion version) server.stop(); } -} \ No newline at end of file +} diff --git a/test/distributed/org/apache/cassandra/distributed/impl/AbstractCluster.java b/test/distributed/org/apache/cassandra/distributed/impl/AbstractCluster.java index 201555f70c2e..217dcd5d6c91 100644 --- a/test/distributed/org/apache/cassandra/distributed/impl/AbstractCluster.java +++ b/test/distributed/org/apache/cassandra/distributed/impl/AbstractCluster.java @@ -18,7 +18,6 @@ package org.apache.cassandra.distributed.impl; -import java.io.File; import java.lang.annotation.Annotation; import java.net.InetSocketAddress; import java.util.ArrayList; @@ -45,7 +44,6 @@ import javax.annotation.concurrent.GuardedBy; import com.google.common.collect.Sets; -import org.apache.cassandra.utils.concurrent.Condition; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -78,7 +76,10 @@ import org.apache.cassandra.distributed.shared.ShutdownException; import org.apache.cassandra.distributed.shared.Versions; import org.apache.cassandra.io.util.FileUtils; +import org.apache.cassandra.io.util.File; +import org.apache.cassandra.io.util.PathUtils; import org.apache.cassandra.net.Verb; +import org.apache.cassandra.utils.concurrent.Condition; import org.apache.cassandra.utils.FBUtilities; import org.reflections.Reflections; import org.reflections.util.ConfigurationBuilder; @@ -388,7 +389,7 @@ public String toString() protected AbstractCluster(AbstractBuilder, ?> builder) { - this.root = builder.getRoot(); + this.root = new File(builder.getRoot()); this.sharedClassLoader = builder.getSharedClassLoader(); this.subnet = builder.getSubnet(); this.tokenSupplier = builder.getTokenSupplier(); @@ -857,6 +858,7 @@ public void close() instances.clear(); instanceMap.clear(); + PathUtils.setDeletionListener(ignore -> {}); // Make sure to only delete directory when threads are stopped if (root.exists()) FileUtils.deleteRecursive(root); diff --git a/test/distributed/org/apache/cassandra/distributed/impl/FileLogAction.java b/test/distributed/org/apache/cassandra/distributed/impl/FileLogAction.java index 0f48a234fa94..14db56183da3 100644 --- a/test/distributed/org/apache/cassandra/distributed/impl/FileLogAction.java +++ b/test/distributed/org/apache/cassandra/distributed/impl/FileLogAction.java @@ -18,7 +18,6 @@ package org.apache.cassandra.distributed.impl; -import java.io.File; import java.io.FileNotFoundException; import java.io.IOException; import java.io.RandomAccessFile; @@ -28,6 +27,7 @@ import com.google.common.io.Closeables; +import org.apache.cassandra.io.util.File; import 
org.apache.cassandra.utils.AbstractIterator; import org.apache.cassandra.distributed.api.LogAction; import org.apache.cassandra.distributed.api.LineIterator; @@ -53,7 +53,7 @@ public LineIterator match(long startPosition, Predicate fn) RandomAccessFile reader; try { - reader = new RandomAccessFile(file, "r"); + reader = new RandomAccessFile(file.toJavaIOFile(), "r"); } catch (FileNotFoundException e) { diff --git a/test/distributed/org/apache/cassandra/distributed/impl/Instance.java b/test/distributed/org/apache/cassandra/distributed/impl/Instance.java index c980f3f2040f..cd5bb4af8dce 100644 --- a/test/distributed/org/apache/cassandra/distributed/impl/Instance.java +++ b/test/distributed/org/apache/cassandra/distributed/impl/Instance.java @@ -20,7 +20,6 @@ import java.io.ByteArrayOutputStream; import java.io.Closeable; -import java.io.File; import java.io.IOException; import java.io.PrintStream; import java.net.InetSocketAddress; @@ -97,6 +96,7 @@ import org.apache.cassandra.io.util.DataInputBuffer; import org.apache.cassandra.io.util.DataOutputBuffer; import org.apache.cassandra.io.util.DataOutputPlus; +import org.apache.cassandra.io.util.File; import org.apache.cassandra.io.util.FileUtils; import org.apache.cassandra.locator.InetAddressAndPort; import org.apache.cassandra.metrics.CassandraMetricsRegistry; @@ -200,7 +200,7 @@ public LogAction logs() if (!f.exists()) f = new File(String.format("build/test/logs/%s/%s/%s/system.log", tag, clusterId, instanceId)); if (!f.exists()) - throw new AssertionError("Unable to locate system.log under " + new File("build/test/logs").getAbsolutePath() + "; make sure ICluster.setup() is called or extend TestBaseImpl and do not define a static beforeClass function with @BeforeClass"); + throw new AssertionError("Unable to locate system.log under " + new File("build/test/logs").absolutePath() + "; make sure ICluster.setup() is called or extend TestBaseImpl and do not define a static beforeClass function with @BeforeClass"); return new FileLogAction(f); } @@ -606,11 +606,11 @@ public void startup(ICluster cluster) private void mkdirs() { - new File(config.getString("saved_caches_directory")).mkdirs(); - new File(config.getString("hints_directory")).mkdirs(); - new File(config.getString("commitlog_directory")).mkdirs(); + new File(config.getString("saved_caches_directory")).tryCreateDirectories(); + new File(config.getString("hints_directory")).tryCreateDirectories(); + new File(config.getString("commitlog_directory")).tryCreateDirectories(); for (String dir : (String[]) config.get("data_file_directories")) - new File(dir).mkdirs(); + new File(dir).tryCreateDirectories(); } private Config loadConfig(IInstanceConfig overrides) diff --git a/test/distributed/org/apache/cassandra/distributed/impl/InstanceConfig.java b/test/distributed/org/apache/cassandra/distributed/impl/InstanceConfig.java index 1bbdd0be1ee3..81e254d3e5b2 100644 --- a/test/distributed/org/apache/cassandra/distributed/impl/InstanceConfig.java +++ b/test/distributed/org/apache/cassandra/distributed/impl/InstanceConfig.java @@ -18,7 +18,6 @@ package org.apache.cassandra.distributed.impl; -import java.io.File; import java.net.InetSocketAddress; import java.net.UnknownHostException; import java.util.Collections; @@ -37,6 +36,7 @@ import org.apache.cassandra.distributed.shared.NetworkTopology; import org.apache.cassandra.distributed.shared.Shared; import org.apache.cassandra.distributed.upgrade.UpgradeTestBase; +import org.apache.cassandra.io.util.File; import 
org.apache.cassandra.locator.InetAddressAndPort; import org.apache.cassandra.locator.SimpleSeedProvider; @@ -275,7 +275,7 @@ public static InstanceConfig generate(int nodeNum, private static String[] datadirs(int datadirCount, File root, int nodeNum) { - String datadirFormat = String.format("%s/node%d/data%%d", root.getPath(), nodeNum); + String datadirFormat = String.format("%s/node%d/data%%d", root.path(), nodeNum); String [] datadirs = new String[datadirCount]; for (int i = 0; i < datadirs.length; i++) datadirs[i] = String.format(datadirFormat, i); diff --git a/test/distributed/org/apache/cassandra/distributed/shared/Byteman.java b/test/distributed/org/apache/cassandra/distributed/shared/Byteman.java index bc27ec765e21..b4dd10c3f46f 100644 --- a/test/distributed/org/apache/cassandra/distributed/shared/Byteman.java +++ b/test/distributed/org/apache/cassandra/distributed/shared/Byteman.java @@ -18,7 +18,6 @@ package org.apache.cassandra.distributed.shared; -import java.io.File; import java.io.IOException; import java.io.UncheckedIOException; import java.lang.reflect.Method; @@ -42,6 +41,7 @@ import com.google.common.base.StandardSystemProperty; import com.google.common.io.ByteStreams; import com.google.common.io.Files; +import org.apache.cassandra.io.util.File; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -96,7 +96,7 @@ public static Byteman createFromScripts(String... scripts) List texts = Stream.of(scripts).map(p -> { try { - return Files.toString(new File(p), StandardCharsets.UTF_8); + return Files.toString(new File(p).toJavaIOFile(), StandardCharsets.UTF_8); } catch (IOException e) { @@ -155,11 +155,11 @@ public void install(ClassLoader cl) if (DEBUG_TRANSFORMATIONS) { File f = new File(StandardSystemProperty.JAVA_IO_TMPDIR.value(), "byteman/" + details.klassPath + ".class"); - f.getParentFile().mkdirs(); - File original = new File(f.getParentFile(), "original-" + f.getName()); - logger.info("Writing class file for {} to {}", details.klassPath, f.getAbsolutePath()); - Files.asByteSink(f).write(newBytes); - Files.asByteSink(original).write(details.bytes); + f.parent().tryCreateDirectories(); + File original = new File(f.parent(), "original-" + f.name()); + logger.info("Writing class file for {} to {}", details.klassPath, f.absolutePath()); + Files.asByteSink(f.toJavaIOFile()).write(newBytes); + Files.asByteSink(original.toJavaIOFile()).write(details.bytes); } } } diff --git a/test/distributed/org/apache/cassandra/distributed/shared/ClusterUtils.java b/test/distributed/org/apache/cassandra/distributed/shared/ClusterUtils.java index 382f5a7f968b..1821f9c87807 100644 --- a/test/distributed/org/apache/cassandra/distributed/shared/ClusterUtils.java +++ b/test/distributed/org/apache/cassandra/distributed/shared/ClusterUtils.java @@ -18,7 +18,6 @@ package org.apache.cassandra.distributed.shared; -import java.io.File; import java.lang.reflect.Field; import java.net.InetSocketAddress; import java.util.ArrayList; @@ -40,6 +39,7 @@ import java.util.stream.Collectors; import com.google.common.util.concurrent.Futures; +import org.apache.cassandra.io.util.File; import org.junit.Assert; import org.apache.cassandra.dht.Token; diff --git a/test/distributed/org/apache/cassandra/distributed/shared/ShutdownException.java b/test/distributed/org/apache/cassandra/distributed/shared/ShutdownException.java index d2b5bf7d35cf..973835704c47 100644 --- a/test/distributed/org/apache/cassandra/distributed/shared/ShutdownException.java +++ 
b/test/distributed/org/apache/cassandra/distributed/shared/ShutdownException.java @@ -27,4 +27,4 @@ public ShutdownException(List uncaughtExceptions) super("Uncaught exceptions were thrown during test"); uncaughtExceptions.forEach(super::addSuppressed); } -} \ No newline at end of file +} diff --git a/test/distributed/org/apache/cassandra/distributed/test/FrozenUDTTest.java b/test/distributed/org/apache/cassandra/distributed/test/FrozenUDTTest.java index 2a45b86dc9e8..3b54398406d6 100644 --- a/test/distributed/org/apache/cassandra/distributed/test/FrozenUDTTest.java +++ b/test/distributed/org/apache/cassandra/distributed/test/FrozenUDTTest.java @@ -150,4 +150,4 @@ private String json(int i, int j) { return String.format("system.fromjson('{\"foo\":\"%d\", \"bar\":\"%d\"}')", i, j); } -} \ No newline at end of file +} diff --git a/test/distributed/org/apache/cassandra/distributed/test/LargeColumnTest.java b/test/distributed/org/apache/cassandra/distributed/test/LargeColumnTest.java index 0a81359dcff9..e1733ce00eff 100644 --- a/test/distributed/org/apache/cassandra/distributed/test/LargeColumnTest.java +++ b/test/distributed/org/apache/cassandra/distributed/test/LargeColumnTest.java @@ -94,4 +94,4 @@ public void test() throws Throwable { testLargeColumns(2, 16 << 20, 5); } -} \ No newline at end of file +} diff --git a/test/distributed/org/apache/cassandra/distributed/test/MessageFiltersTest.java b/test/distributed/org/apache/cassandra/distributed/test/MessageFiltersTest.java index 6ea186c6eec5..9cae1bdc4ff1 100644 --- a/test/distributed/org/apache/cassandra/distributed/test/MessageFiltersTest.java +++ b/test/distributed/org/apache/cassandra/distributed/test/MessageFiltersTest.java @@ -335,4 +335,4 @@ private static void assertTimeOut(Runnable r) // ignore } } -} \ No newline at end of file +} diff --git a/test/distributed/org/apache/cassandra/distributed/test/MessageForwardingTest.java b/test/distributed/org/apache/cassandra/distributed/test/MessageForwardingTest.java index 153d7de70647..f92a3157e8e3 100644 --- a/test/distributed/org/apache/cassandra/distributed/test/MessageForwardingTest.java +++ b/test/distributed/org/apache/cassandra/distributed/test/MessageForwardingTest.java @@ -100,4 +100,4 @@ else if (traceEntry.activity.contains("Enqueuing forwarded write to ")) TracingUtil.setWaitForTracingEventTimeoutSecs(originalTraceTimeout); } } -} \ No newline at end of file +} diff --git a/test/distributed/org/apache/cassandra/distributed/test/NativeProtocolTest.java b/test/distributed/org/apache/cassandra/distributed/test/NativeProtocolTest.java index 0905b92f538f..d2febdb6b297 100644 --- a/test/distributed/org/apache/cassandra/distributed/test/NativeProtocolTest.java +++ b/test/distributed/org/apache/cassandra/distributed/test/NativeProtocolTest.java @@ -78,4 +78,4 @@ public void withCounters() throws Throwable cluster.close(); } } -} \ No newline at end of file +} diff --git a/test/distributed/org/apache/cassandra/distributed/test/NetstatsRepairStreamingTest.java b/test/distributed/org/apache/cassandra/distributed/test/NetstatsRepairStreamingTest.java index 5f74c7773b9a..36bde630a795 100644 --- a/test/distributed/org/apache/cassandra/distributed/test/NetstatsRepairStreamingTest.java +++ b/test/distributed/org/apache/cassandra/distributed/test/NetstatsRepairStreamingTest.java @@ -85,4 +85,4 @@ private void executeTest(boolean compressionEnabled) throws Exception NetstatsOutputParser.validate(NetstatsOutputParser.parse(results)); } } -} \ No newline at end of file +} diff --git 
a/test/distributed/org/apache/cassandra/distributed/test/ReadRepairEmptyRangeTombstonesTest.java b/test/distributed/org/apache/cassandra/distributed/test/ReadRepairEmptyRangeTombstonesTest.java index c07b128e89cb..44ee07ebdc62 100644 --- a/test/distributed/org/apache/cassandra/distributed/test/ReadRepairEmptyRangeTombstonesTest.java +++ b/test/distributed/org/apache/cassandra/distributed/test/ReadRepairEmptyRangeTombstonesTest.java @@ -286,4 +286,4 @@ else if (reverse) return this; } } -} \ No newline at end of file +} diff --git a/test/distributed/org/apache/cassandra/distributed/test/ReadRepairTester.java b/test/distributed/org/apache/cassandra/distributed/test/ReadRepairTester.java index c3a36cbb58d2..a73d9687d36c 100644 --- a/test/distributed/org/apache/cassandra/distributed/test/ReadRepairTester.java +++ b/test/distributed/org/apache/cassandra/distributed/test/ReadRepairTester.java @@ -159,4 +159,4 @@ static long readRepairRequestsCount(IInvokableInstance node, String table) return cfs.metric.readRepairRequests.getCount(); }); } -} \ No newline at end of file +} diff --git a/test/distributed/org/apache/cassandra/distributed/test/ReplicaFilteringProtectionTest.java b/test/distributed/org/apache/cassandra/distributed/test/ReplicaFilteringProtectionTest.java index f891dfee5c60..2e39f18ad0bb 100644 --- a/test/distributed/org/apache/cassandra/distributed/test/ReplicaFilteringProtectionTest.java +++ b/test/distributed/org/apache/cassandra/distributed/test/ReplicaFilteringProtectionTest.java @@ -241,4 +241,4 @@ private long rowsCachedPerQueryCount(IInvokableInstance instance, String tableNa .getColumnFamilyStore(tableName) .metric.rfpRowsCachedPerQuery.getCount()); } -} \ No newline at end of file +} diff --git a/test/distributed/org/apache/cassandra/distributed/test/ResourceLeakTest.java b/test/distributed/org/apache/cassandra/distributed/test/ResourceLeakTest.java index 5430800c02aa..8358c2e27aed 100644 --- a/test/distributed/org/apache/cassandra/distributed/test/ResourceLeakTest.java +++ b/test/distributed/org/apache/cassandra/distributed/test/ResourceLeakTest.java @@ -18,7 +18,6 @@ package org.apache.cassandra.distributed.test; -import java.io.File; import java.io.IOException; import java.lang.management.ManagementFactory; import java.nio.file.FileSystems; @@ -29,6 +28,7 @@ import java.util.function.Consumer; import javax.management.MBeanServer; +import org.apache.cassandra.io.util.File; import org.junit.Ignore; import org.junit.Test; @@ -123,7 +123,7 @@ static void dumpOpenFiles(String description) throws IOException, InterruptedExc long pid = getProcessId(); ProcessBuilder map = new ProcessBuilder("/usr/sbin/lsof", "-p", Long.toString(pid)); File output = new File(outputFilename("lsof", description, ".txt")); - map.redirectOutput(output); + map.redirectOutput(output.toJavaIOFile()); map.redirectErrorStream(true); map.start().waitFor(); } diff --git a/test/distributed/org/apache/cassandra/distributed/test/ShortReadProtectionTest.java b/test/distributed/org/apache/cassandra/distributed/test/ShortReadProtectionTest.java index 69b074f5a1b4..2e26659243a6 100644 --- a/test/distributed/org/apache/cassandra/distributed/test/ShortReadProtectionTest.java +++ b/test/distributed/org/apache/cassandra/distributed/test/ShortReadProtectionTest.java @@ -544,4 +544,4 @@ private void dropTable() cluster.schemaChange(format("DROP TABLE IF EXISTS %s")); } } -} \ No newline at end of file +} diff --git a/test/distributed/org/apache/cassandra/distributed/test/ring/BootstrapTest.java 
b/test/distributed/org/apache/cassandra/distributed/test/ring/BootstrapTest.java index 81861abc80c1..fd62d30e3ba5 100644 --- a/test/distributed/org/apache/cassandra/distributed/test/ring/BootstrapTest.java +++ b/test/distributed/org/apache/cassandra/distributed/test/ring/BootstrapTest.java @@ -120,4 +120,4 @@ public static Map count(ICluster cluster) .collect(Collectors.toMap(nodeId -> nodeId, nodeId -> (Long) cluster.get(nodeId).executeInternal("SELECT count(*) FROM " + KEYSPACE + ".tbl")[0][0])); } -} \ No newline at end of file +} diff --git a/test/distributed/org/apache/cassandra/distributed/test/ring/CommunicationDuringDecommissionTest.java b/test/distributed/org/apache/cassandra/distributed/test/ring/CommunicationDuringDecommissionTest.java index d6825d9adac5..848318c95dfa 100644 --- a/test/distributed/org/apache/cassandra/distributed/test/ring/CommunicationDuringDecommissionTest.java +++ b/test/distributed/org/apache/cassandra/distributed/test/ring/CommunicationDuringDecommissionTest.java @@ -73,4 +73,4 @@ public void internodeConnectionsDuringDecom() throws Throwable } } } -} \ No newline at end of file +} diff --git a/test/distributed/org/apache/cassandra/distributed/test/ring/PendingWritesTest.java b/test/distributed/org/apache/cassandra/distributed/test/ring/PendingWritesTest.java index 8daa58adedf1..2e702b219b05 100644 --- a/test/distributed/org/apache/cassandra/distributed/test/ring/PendingWritesTest.java +++ b/test/distributed/org/apache/cassandra/distributed/test/ring/PendingWritesTest.java @@ -106,4 +106,4 @@ public void testPendingWrites() throws Throwable Assert.assertEquals("Node " + e.getKey() + " has incorrect row state", e.getValue().longValue(), 150L); } } -} \ No newline at end of file +} diff --git a/test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeReadRepairWriteTest.java b/test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeReadRepairWriteTest.java index 4e3074d67100..fcb04824c2fd 100644 --- a/test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeReadRepairWriteTest.java +++ b/test/distributed/org/apache/cassandra/distributed/upgrade/MixedModeReadRepairWriteTest.java @@ -100,4 +100,4 @@ public void mixedModeReadRepairUpdate() throws Throwable }) .run(); } -} \ No newline at end of file +} diff --git a/test/long/org/apache/cassandra/cql3/CorruptionTest.java b/test/long/org/apache/cassandra/cql3/CorruptionTest.java index f2ed36a6f937..8068a2df8b14 100644 --- a/test/long/org/apache/cassandra/cql3/CorruptionTest.java +++ b/test/long/org/apache/cassandra/cql3/CorruptionTest.java @@ -18,7 +18,6 @@ package org.apache.cassandra.cql3; -import java.io.File; import java.io.FileWriter; import java.io.IOException; import java.nio.ByteBuffer; @@ -28,6 +27,7 @@ import java.util.concurrent.Executors; import java.util.concurrent.TimeUnit; +import org.apache.cassandra.io.util.File; import org.junit.BeforeClass; import org.junit.Test; @@ -145,10 +145,10 @@ private void dumpKeys(byte[] putdata, byte[] getdata) throws IOException { String basename = "bad-data-tid" + Thread.currentThread().getId(); File put = new File(basename+"-put"); File get = new File(basename+"-get"); - try(FileWriter pw = new FileWriter(put)) { + try(FileWriter pw = new FileWriter(put.toJavaIOFile())) { pw.write(new String(putdata)); } - try(FileWriter pw = new FileWriter(get)) { + try(FileWriter pw = new FileWriter(get.toJavaIOFile())) { pw.write(new String(getdata)); } } diff --git a/test/long/org/apache/cassandra/db/commitlog/CommitLogStressTest.java 
b/test/long/org/apache/cassandra/db/commitlog/CommitLogStressTest.java index a4f98e9945d2..93faf252e184 100644 --- a/test/long/org/apache/cassandra/db/commitlog/CommitLogStressTest.java +++ b/test/long/org/apache/cassandra/db/commitlog/CommitLogStressTest.java @@ -30,6 +30,8 @@ import com.google.common.util.concurrent.RateLimiter; +import org.apache.cassandra.io.util.File; +import org.apache.cassandra.io.util.FileInputStreamPlus; import org.junit.Assert; import org.junit.Before; import org.junit.BeforeClass; @@ -101,7 +103,7 @@ public CommitLogStressTest(ParameterizedClass commitLogCompression, EncryptionCo @BeforeClass static public void initialize() throws IOException { - try (FileInputStream fis = new FileInputStream("CHANGES.txt")) + try (FileInputStreamPlus fis = new FileInputStreamPlus("CHANGES.txt")) { dataSource = ByteBuffer.allocateDirect((int) fis.getChannel().size()); while (dataSource.hasRemaining()) @@ -123,15 +125,15 @@ public void cleanDir() throws IOException File dir = new File(location); if (dir.isDirectory()) { - File[] files = dir.listFiles(); + File[] files = dir.tryList(); for (File f : files) - if (!f.delete()) + if (!f.tryDelete()) Assert.fail("Failed to delete " + f); } else { - dir.mkdir(); + dir.tryCreateDirectory(); } } @@ -245,13 +247,13 @@ private void testLog(CommitLog commitLog) throws IOException, InterruptedExcepti System.out.println("Stopped. Replaying... "); System.out.flush(); Reader reader = new Reader(); - File[] files = new File(location).listFiles(); + File[] files = new File(location).tryList(); DummyHandler handler = new DummyHandler(); reader.readAllFiles(handler, files); for (File f : files) - if (!f.delete()) + if (!f.tryDelete()) Assert.fail("Failed to delete " + f); if (hash == reader.hash && cells == reader.cells) @@ -278,7 +280,7 @@ private void verifySizes(CommitLog commitLog) commitLog.segmentManager.awaitManagementTasksCompletion(); long combinedSize = 0; - for (File f : new File(commitLog.segmentManager.storageDirectory).listFiles()) + for (File f : new File(commitLog.segmentManager.storageDirectory).tryList()) combinedSize += f.length(); Assert.assertEquals(combinedSize, commitLog.getActiveOnDiskSize()); diff --git a/test/long/org/apache/cassandra/dht/tokenallocator/AbstractReplicationAwareTokenAllocatorTest.java b/test/long/org/apache/cassandra/dht/tokenallocator/AbstractReplicationAwareTokenAllocatorTest.java index 5f9aa31467af..420981d19d99 100644 --- a/test/long/org/apache/cassandra/dht/tokenallocator/AbstractReplicationAwareTokenAllocatorTest.java +++ b/test/long/org/apache/cassandra/dht/tokenallocator/AbstractReplicationAwareTokenAllocatorTest.java @@ -540,4 +540,4 @@ private void updateSummary(ReplicationAwareTokenAllocator t, Summary su, S System.out.format("Worst intermediate unit\t%s token %s\n", su, st); } } -} \ No newline at end of file +} diff --git a/test/long/org/apache/cassandra/hints/HintsWriteThenReadTest.java b/test/long/org/apache/cassandra/hints/HintsWriteThenReadTest.java index 24a9a78a7abf..8b43c8e076a8 100644 --- a/test/long/org/apache/cassandra/hints/HintsWriteThenReadTest.java +++ b/test/long/org/apache/cassandra/hints/HintsWriteThenReadTest.java @@ -17,7 +17,6 @@ */ package org.apache.cassandra.hints; -import java.io.File; import java.io.IOException; import java.io.InputStream; import java.nio.ByteBuffer; @@ -29,6 +28,7 @@ import com.google.common.collect.Iterables; +import org.apache.cassandra.io.util.File; import org.junit.Test; import org.apache.cassandra.SchemaLoader; @@ -63,7 +63,7 @@ public void 
testWriteReadCycle() throws IOException HintsDescriptor descriptor = new HintsDescriptor(UUID.randomUUID(), System.currentTimeMillis()); - File directory = Files.createTempDirectory(null).toFile(); + File directory = new File(Files.createTempDirectory(null)); try { testWriteReadCycle(directory, descriptor); diff --git a/test/long/org/apache/cassandra/io/compress/CompressorPerformance.java b/test/long/org/apache/cassandra/io/compress/CompressorPerformance.java index 19a8ec36f2da..9499401026e2 100644 --- a/test/long/org/apache/cassandra/io/compress/CompressorPerformance.java +++ b/test/long/org/apache/cassandra/io/compress/CompressorPerformance.java @@ -20,6 +20,8 @@ */ package org.apache.cassandra.io.compress; +import org.apache.cassandra.io.util.FileInputStreamPlus; + import java.io.FileInputStream; import java.io.IOException; import java.nio.ByteBuffer; @@ -116,7 +118,7 @@ private static void testPerformance(ICompressor compressor, BufferType in, Buffe public static void main(String[] args) throws IOException { - try (FileInputStream fis = new FileInputStream("CHANGES.txt")) + try (FileInputStreamPlus fis = new FileInputStreamPlus("CHANGES.txt")) { int len = (int)fis.getChannel().size(); dataSource = ByteBuffer.allocateDirect(len); diff --git a/test/long/org/apache/cassandra/io/sstable/CQLSSTableWriterLongTest.java b/test/long/org/apache/cassandra/io/sstable/CQLSSTableWriterLongTest.java index a6f428a22396..f2bbfa6a6023 100644 --- a/test/long/org/apache/cassandra/io/sstable/CQLSSTableWriterLongTest.java +++ b/test/long/org/apache/cassandra/io/sstable/CQLSSTableWriterLongTest.java @@ -18,13 +18,13 @@ package org.apache.cassandra.io.sstable; -import java.io.File; import java.util.ArrayList; import java.util.List; import java.util.Random; import com.google.common.io.Files; +import org.apache.cassandra.io.util.File; import org.junit.BeforeClass; import org.junit.Test; import org.apache.cassandra.SchemaLoader; @@ -46,9 +46,9 @@ public void testWideRow() throws Exception String TABLE = "table1"; int size = 30000; - File tempdir = Files.createTempDir(); - File dataDir = new File(tempdir.getAbsolutePath() + File.separator + KS + File.separator + TABLE); - assert dataDir.mkdirs(); + File tempdir = new File(Files.createTempDir()); + File dataDir = new File(tempdir.absolutePath() + File.pathSeparator() + KS + File.pathSeparator() + TABLE); + assert dataDir.tryCreateDirectories(); StringBuilder schemaColumns = new StringBuilder(); StringBuilder queryColumns = new StringBuilder(); diff --git a/test/long/org/apache/cassandra/streaming/LongStreamingTest.java b/test/long/org/apache/cassandra/streaming/LongStreamingTest.java index 186476078cf4..c4259fbbc070 100644 --- a/test/long/org/apache/cassandra/streaming/LongStreamingTest.java +++ b/test/long/org/apache/cassandra/streaming/LongStreamingTest.java @@ -18,12 +18,12 @@ package org.apache.cassandra.streaming; -import java.io.File; import java.io.IOException; import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; import com.google.common.io.Files; +import org.apache.cassandra.io.util.File; import org.junit.Assert; import org.junit.BeforeClass; import org.junit.Test; @@ -80,9 +80,9 @@ private void testStream(boolean useSstableCompression) throws InvalidRequestExce String KS = useSstableCompression ? 
"sstable_compression_ks" : "stream_compression_ks"; String TABLE = "table1"; - File tempdir = Files.createTempDir(); - File dataDir = new File(tempdir.getAbsolutePath() + File.separator + KS + File.separator + TABLE); - assert dataDir.mkdirs(); + File tempdir = new File(Files.createTempDir()); + File dataDir = new File(tempdir.absolutePath() + File.pathSeparator() + KS + File.pathSeparator() + TABLE); + assert dataDir.tryCreateDirectories(); String schema = "CREATE TABLE " + KS + '.' + TABLE + " (" + " k int PRIMARY KEY," @@ -108,11 +108,11 @@ private void testStream(boolean useSstableCompression) throws InvalidRequestExce writer.close(); System.err.println(String.format("Writer finished after %d seconds....", TimeUnit.NANOSECONDS.toSeconds(nanoTime() - start))); - File[] dataFiles = dataDir.listFiles((dir, name) -> name.endsWith("-Data.db")); + File[] dataFiles = dataDir.tryList((dir, name) -> name.endsWith("-Data.db")); long dataSize = 0l; for (File file : dataFiles) { - System.err.println("File : "+file.getAbsolutePath()); + System.err.println("File : "+file.absolutePath()); dataSize += file.length(); } diff --git a/test/microbench/org/apache/cassandra/test/microbench/BloomFilterSerializerBench.java b/test/microbench/org/apache/cassandra/test/microbench/BloomFilterSerializerBench.java index 922281145f52..32e048dca7b2 100644 --- a/test/microbench/org/apache/cassandra/test/microbench/BloomFilterSerializerBench.java +++ b/test/microbench/org/apache/cassandra/test/microbench/BloomFilterSerializerBench.java @@ -18,18 +18,16 @@ package org.apache.cassandra.test.microbench; -import java.io.DataInputStream; -import java.io.File; -import java.io.FileInputStream; -import java.io.FileOutputStream; +import org.apache.cassandra.io.util.DataOutputStreamPlus; import java.io.IOException; import java.nio.ByteBuffer; import java.util.concurrent.TimeUnit; import org.apache.cassandra.db.BufferDecoratedKey; import org.apache.cassandra.dht.Murmur3Partitioner; -import org.apache.cassandra.io.util.BufferedDataOutputStreamPlus; -import org.apache.cassandra.io.util.DataOutputStreamPlus; +import org.apache.cassandra.io.util.File; +import org.apache.cassandra.io.util.FileInputStreamPlus; +import org.apache.cassandra.io.util.FileOutputStreamPlus; import org.apache.cassandra.io.util.FileUtils; import org.apache.cassandra.utils.BloomFilter; import org.apache.cassandra.utils.BloomFilterSerializer; @@ -77,7 +75,7 @@ public void serializationTest() throws IOException { BloomFilter filter = (BloomFilter) FilterFactory.getFilter(numElemsInK * 1024, 0.01d); filter.add(wrap(testVal)); - DataOutputStreamPlus out = new BufferedDataOutputStreamPlus(new FileOutputStream(file)); + DataOutputStreamPlus out = new FileOutputStreamPlus(file); if (oldBfFormat) SerializationsTest.serializeOldBfFormat(filter, out); else @@ -85,14 +83,14 @@ public void serializationTest() throws IOException out.close(); filter.close(); - DataInputStream in = new DataInputStream(new FileInputStream(file)); + FileInputStreamPlus in = new FileInputStreamPlus(file); BloomFilter filter2 = BloomFilterSerializer.deserialize(in, oldBfFormat); FileUtils.closeQuietly(in); filter2.close(); } finally { - file.delete(); + file.tryDelete(); } } diff --git a/test/microbench/org/apache/cassandra/test/microbench/CompactionBench.java b/test/microbench/org/apache/cassandra/test/microbench/CompactionBench.java index 41220a2a655c..a745054b4209 100644 --- a/test/microbench/org/apache/cassandra/test/microbench/CompactionBench.java +++ 
b/test/microbench/org/apache/cassandra/test/microbench/CompactionBench.java
@@ -19,7 +19,6 @@ package org.apache.cassandra.test.microbench;
-import java.io.File;
 import java.io.IOException;
 import java.util.List;
 import java.util.concurrent.*;
@@ -29,6 +28,7 @@ import org.apache.cassandra.db.Directories;
 import org.apache.cassandra.db.Keyspace;
 import org.apache.cassandra.io.sstable.Descriptor;
+import org.apache.cassandra.io.util.File;
 import org.apache.cassandra.io.util.FileUtils;
 import org.openjdk.jmh.annotations.*;
@@ -108,7 +108,7 @@ public void resetSnapshot()
         for (File file : directories)
         {
-            for (File f : file.listFiles())
+            for (File f : file.tryList())
             {
                 if (f.isDirectory())
                     continue;
@@ -119,7 +119,7 @@ public void resetSnapshot()
         for (File file : snapshotFiles)
-            FileUtils.createHardLink(file, new File(file.toPath().getParent().getParent().getParent().toFile(), file.getName()));
+            FileUtils.createHardLink(file, new File(new File(file.toPath().getParent().getParent().getParent()), file.name()));
         cfs.loadNewSSTables();
     }
diff --git a/test/microbench/org/apache/cassandra/test/microbench/DirectorySizerBench.java b/test/microbench/org/apache/cassandra/test/microbench/DirectorySizerBench.java
index 7272c1f39ec8..a5b5fdd89b95 100644
--- a/test/microbench/org/apache/cassandra/test/microbench/DirectorySizerBench.java
+++ b/test/microbench/org/apache/cassandra/test/microbench/DirectorySizerBench.java
@@ -18,13 +18,13 @@ package org.apache.cassandra.test.microbench;
-import java.io.File;
 import java.io.IOException;
 import java.io.PrintWriter;
 import java.nio.file.Files;
 import java.util.UUID;
 import java.util.concurrent.TimeUnit;
+import org.apache.cassandra.io.util.File;
 import org.apache.cassandra.io.util.FileUtils;
 import org.apache.cassandra.utils.DirectorySizeCalculator;
 import org.openjdk.jmh.annotations.*;
@@ -45,7 +45,7 @@ public class DirectorySizerBench
     @Setup(Level.Trial)
     public void setUp() throws IOException
     {
-        tempDir = Files.createTempDirectory(randString()).toFile();
+        tempDir = new File(Files.createTempDirectory(randString()));
         // Since #'s on laptops and commodity desktops are so useful in considering enterprise virtualized server environments...
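A minimal sketch (not part of the patch) of the migration pattern repeated across these test hunks: wrap a java.nio.file.Path in org.apache.cassandra.io.util.File and call its try* methods in place of the java.io.File equivalents. The class and method names below are hypothetical; the File calls themselves (new File(Path), tryCreateDirectories(), tryList(), absolutePath(), tryDelete()) are only those already exercised in this diff.

import java.io.IOException;
import java.nio.file.Files;
import org.apache.cassandra.io.util.File;

// Hypothetical illustration class: condenses the java.io.File -> o.a.c.io.util.File changes above.
class FileApiMigrationSketch
{
    // old: java.io.File dir = Files.createTempDirectory(prefix).toFile(); dir.mkdirs();
    static File newTempDir(String prefix) throws IOException
    {
        File dir = new File(Files.createTempDirectory(prefix)); // wrap the Path in the mockable File
        dir.tryCreateDirectories();                             // replaces java.io.File#mkdirs()
        return dir;
    }

    // old: for (java.io.File f : dir.listFiles()) { System.out.println(f.getAbsolutePath()); f.delete(); }
    static void listAndRemove(File dir)
    {
        for (File f : dir.tryList())                  // replaces java.io.File#listFiles()
        {
            System.out.println(f.absolutePath());     // replaces getAbsolutePath()
            f.tryDelete();                            // attempts deletion, like java.io.File#delete()
        }
        dir.tryDelete();
    }
}

Other one-for-one renames used elsewhere in this diff: getName() -> name(), getParentFile() -> parent(), getPath() -> path(), renameTo() -> tryMove().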
@@ -84,7 +84,7 @@ private void populateRandomFiles(File dir, int count) throws IOException
     {
         for (int i = 0; i < count; i++)
         {
-            PrintWriter pw = new PrintWriter(dir + File.separator + randString(), "UTF-8");
+            PrintWriter pw = new PrintWriter(dir + File.pathSeparator() + randString(), "UTF-8");
             pw.write(randString());
             pw.close();
         }
     }
diff --git a/test/microbench/org/apache/cassandra/test/microbench/OutputStreamBench.java b/test/microbench/org/apache/cassandra/test/microbench/OutputStreamBench.java
index cd15646471b1..890f74c43a52 100644
--- a/test/microbench/org/apache/cassandra/test/microbench/OutputStreamBench.java
+++ b/test/microbench/org/apache/cassandra/test/microbench/OutputStreamBench.java
@@ -288,4 +288,4 @@ public void testRLargeStringBDOSP() throws IOException {
     public void testRLargeLegacyWriteUTF() throws IOException {
         BufferedDataOutputStreamTest.writeUTFLegacy(large, hole);
     }
-}
\ No newline at end of file
+}
diff --git a/test/microbench/org/apache/cassandra/test/microbench/StreamingTombstoneHistogramBuilderBench.java b/test/microbench/org/apache/cassandra/test/microbench/StreamingTombstoneHistogramBuilderBench.java
old mode 100755
new mode 100644
diff --git a/test/unit/org/apache/cassandra/AbstractSerializationsTester.java b/test/unit/org/apache/cassandra/AbstractSerializationsTester.java
index 3611f0e639b2..f5cdd5a0a4e8 100644
--- a/test/unit/org/apache/cassandra/AbstractSerializationsTester.java
+++ b/test/unit/org/apache/cassandra/AbstractSerializationsTester.java
@@ -20,20 +20,15 @@ package org.apache.cassandra;
 import org.apache.cassandra.io.IVersionedSerializer;
-import org.apache.cassandra.io.util.DataInputPlus;
-import org.apache.cassandra.io.util.DataInputPlus.DataInputStreamPlus;
-import org.apache.cassandra.io.util.DataOutputBuffer;
-import org.apache.cassandra.io.util.DataOutputStreamPlus;
-import org.apache.cassandra.io.util.BufferedDataOutputStreamPlus;
+import org.apache.cassandra.io.util.*;
 import org.apache.cassandra.net.MessagingService;
-import java.io.File;
-import java.io.FileInputStream;
-import java.io.FileOutputStream;
 import java.io.IOException;
 import java.util.HashMap;
 import java.util.Map;
+import org.apache.cassandra.io.util.File;
+
 public class AbstractSerializationsTester
 {
     protected static final String CUR_VER = System.getProperty("cassandra.version", "4.0");
@@ -57,16 +52,16 @@ protected void testSerializedSize(T obj, IVersionedSerializer serializer)
         assert out.getLength() == serializer.serializedSize(obj, getVersion());
     }
-    protected static DataInputStreamPlus getInput(String name) throws IOException
+    protected static FileInputStreamPlus getInput(String name) throws IOException
     {
         return getInput(CUR_VER, name);
     }
-    protected static DataInputStreamPlus getInput(String version, String name) throws IOException
+    protected static FileInputStreamPlus getInput(String version, String name) throws IOException
    {
         File f = new File("test/data/serialization/" + version + '/' + name);
-        assert f.exists() : f.getPath();
-        return new DataInputPlus.DataInputStreamPlus(new FileInputStream(f));
+        assert f.exists() : f.path();
+        return new FileInputStreamPlus(f);
     }
     @SuppressWarnings("resource")
@@ -79,7 +74,7 @@ protected static DataOutputStreamPlus getOutput(String name) throws IOException
     protected static DataOutputStreamPlus getOutput(String version, String name) throws IOException
     {
         File f = new File("test/data/serialization/" + version + '/' + name);
-        f.getParentFile().mkdirs();
-        return new BufferedDataOutputStreamPlus(new
FileOutputStream(f).getChannel()); + f.parent().tryCreateDirectories(); + return new FileOutputStreamPlus(f); } } diff --git a/test/unit/org/apache/cassandra/CassandraBriefJUnitResultFormatter.java b/test/unit/org/apache/cassandra/CassandraBriefJUnitResultFormatter.java index a6c5997fae45..88dbc52941f5 100644 --- a/test/unit/org/apache/cassandra/CassandraBriefJUnitResultFormatter.java +++ b/test/unit/org/apache/cassandra/CassandraBriefJUnitResultFormatter.java @@ -316,4 +316,4 @@ public void formatSkip(Test test, String message) { public void testAssumptionFailure(Test test, Throwable cause) { formatSkip(test, cause.getMessage()); } -} \ No newline at end of file +} diff --git a/test/unit/org/apache/cassandra/CassandraIsolatedJunit4ClassRunner.java b/test/unit/org/apache/cassandra/CassandraIsolatedJunit4ClassRunner.java index 8c37be7688fd..5f65227cba86 100644 --- a/test/unit/org/apache/cassandra/CassandraIsolatedJunit4ClassRunner.java +++ b/test/unit/org/apache/cassandra/CassandraIsolatedJunit4ClassRunner.java @@ -104,4 +104,4 @@ protected void finalize() } } } -} \ No newline at end of file +} diff --git a/test/unit/org/apache/cassandra/CassandraXMLJUnitResultFormatter.java b/test/unit/org/apache/cassandra/CassandraXMLJUnitResultFormatter.java index 5015be926775..da8e4077f350 100644 --- a/test/unit/org/apache/cassandra/CassandraXMLJUnitResultFormatter.java +++ b/test/unit/org/apache/cassandra/CassandraXMLJUnitResultFormatter.java @@ -388,4 +388,4 @@ public void testAssumptionFailure(final Test test, final Throwable failure) { skippedTests.put(createDescription(test), test); } -} // XMLJUnitResultFormatter \ No newline at end of file +} // XMLJUnitResultFormatter diff --git a/test/unit/org/apache/cassandra/SchemaLoader.java b/test/unit/org/apache/cassandra/SchemaLoader.java index 9021c6eda55b..62fcebf5f155 100644 --- a/test/unit/org/apache/cassandra/SchemaLoader.java +++ b/test/unit/org/apache/cassandra/SchemaLoader.java @@ -18,7 +18,12 @@ package org.apache.cassandra; import java.io.IOException; -import java.util.*; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; import org.apache.cassandra.auth.AuthKeyspace; import org.apache.cassandra.auth.AuthSchemaChangeListener; diff --git a/test/unit/org/apache/cassandra/ServerTestUtils.java b/test/unit/org/apache/cassandra/ServerTestUtils.java index 221a23a526eb..fff968de72c1 100644 --- a/test/unit/org/apache/cassandra/ServerTestUtils.java +++ b/test/unit/org/apache/cassandra/ServerTestUtils.java @@ -17,9 +17,9 @@ */ package org.apache.cassandra; -import java.io.File; import java.io.IOException; import java.net.UnknownHostException; +import java.util.Arrays; import java.util.HashSet; import java.util.Set; @@ -31,6 +31,7 @@ import org.apache.cassandra.db.Keyspace; import org.apache.cassandra.db.SystemKeyspace; import org.apache.cassandra.db.commitlog.CommitLog; +import org.apache.cassandra.io.util.File; import org.apache.cassandra.io.util.FileUtils; import org.apache.cassandra.locator.AbstractEndpointSnitch; import org.apache.cassandra.locator.InetAddressAndPort; @@ -167,7 +168,7 @@ private static void cleanupDirectory(File directory) { if (directory.exists()) { - FileUtils.deleteChildrenRecursive(directory); + Arrays.stream(directory.tryList()).forEach(File::deleteRecursive); } } diff --git a/test/unit/org/apache/cassandra/Util.java b/test/unit/org/apache/cassandra/Util.java index 4b7b6eaec908..2f696ba93f05 100644 --- 
a/test/unit/org/apache/cassandra/Util.java +++ b/test/unit/org/apache/cassandra/Util.java @@ -21,7 +21,6 @@ import java.io.Closeable; import java.io.EOFException; -import java.io.File; import java.io.IOError; import java.net.UnknownHostException; import java.nio.ByteBuffer; @@ -38,6 +37,7 @@ import com.google.common.base.Preconditions; import com.google.common.collect.Iterables; import com.google.common.collect.Iterators; +import org.apache.cassandra.io.util.File; import org.apache.commons.lang3.StringUtils; import org.slf4j.Logger; @@ -792,11 +792,11 @@ public static void assertOnDiskState(ColumnFamilyStore cfs, int expectedSSTableC int fileCount = 0; for (File f : cfs.getDirectories().getCFDirectories()) { - for (File sst : f.listFiles()) + for (File sst : f.tryList()) { - if (sst.getName().contains("Data")) + if (sst.name().contains("Data")) { - Descriptor d = Descriptor.fromFilename(sst.getAbsolutePath()); + Descriptor d = Descriptor.fromFilename(sst.absolutePath()); assertTrue(liveGenerations.contains(d.generation)); fileCount++; } diff --git a/test/unit/org/apache/cassandra/audit/AuditLogFilterTest.java b/test/unit/org/apache/cassandra/audit/AuditLogFilterTest.java index b593c2e30884..62bc767d6614 100644 --- a/test/unit/org/apache/cassandra/audit/AuditLogFilterTest.java +++ b/test/unit/org/apache/cassandra/audit/AuditLogFilterTest.java @@ -202,4 +202,4 @@ public void isFiltered_NullInputs() excludeSet.add("b"); Assert.assertFalse(isFiltered(null, includeSet, excludeSet)); } -} \ No newline at end of file +} diff --git a/test/unit/org/apache/cassandra/audit/AuditLoggerAuthTest.java b/test/unit/org/apache/cassandra/audit/AuditLoggerAuthTest.java index c64323f4ce41..ee21340438b0 100644 --- a/test/unit/org/apache/cassandra/audit/AuditLoggerAuthTest.java +++ b/test/unit/org/apache/cassandra/audit/AuditLoggerAuthTest.java @@ -310,4 +310,4 @@ private static void createTestRole() assertLogEntry(logEntry, AuditLogEntryType.CREATE_ROLE, getCreateRoleCql(TEST_ROLE, true, false, true), CASS_USER); assertEquals(0, getInMemAuditLogger().size()); } -} \ No newline at end of file +} diff --git a/test/unit/org/apache/cassandra/audit/BinAuditLoggerTest.java b/test/unit/org/apache/cassandra/audit/BinAuditLoggerTest.java index ea1bbbd41a15..678c11cf2b82 100644 --- a/test/unit/org/apache/cassandra/audit/BinAuditLoggerTest.java +++ b/test/unit/org/apache/cassandra/audit/BinAuditLoggerTest.java @@ -19,6 +19,7 @@ import java.nio.file.Path; +import org.apache.cassandra.io.util.File; import org.junit.BeforeClass; import org.junit.Test; diff --git a/test/unit/org/apache/cassandra/auth/AuthTestUtils.java b/test/unit/org/apache/cassandra/auth/AuthTestUtils.java index a012b6286c59..9e0b737dba8f 100644 --- a/test/unit/org/apache/cassandra/auth/AuthTestUtils.java +++ b/test/unit/org/apache/cassandra/auth/AuthTestUtils.java @@ -139,7 +139,7 @@ public static long getRolesReadCount() return rolesTable.metric.readLatency.latency.getCount(); } - public static RoleOptions getLoginRoleOprions() + public static RoleOptions getLoginRoleOptions() { RoleOptions roleOptions = new RoleOptions(); roleOptions.setOption(IRoleManager.Option.SUPERUSER, false); @@ -147,4 +147,4 @@ public static RoleOptions getLoginRoleOprions() roleOptions.setOption(IRoleManager.Option.PASSWORD, "ignored"); return roleOptions; } -} \ No newline at end of file +} diff --git a/test/unit/org/apache/cassandra/auth/PasswordAuthenticatorTest.java b/test/unit/org/apache/cassandra/auth/PasswordAuthenticatorTest.java index 5dd4ab5559b5..985e58d1d1e5 100644 
--- a/test/unit/org/apache/cassandra/auth/PasswordAuthenticatorTest.java +++ b/test/unit/org/apache/cassandra/auth/PasswordAuthenticatorTest.java @@ -142,4 +142,4 @@ public static void tearDown() { schemaChange("DROP KEYSPACE " + SchemaConstants.AUTH_KEYSPACE_NAME); } -} \ No newline at end of file +} diff --git a/test/unit/org/apache/cassandra/config/CassandraRelevantPropertiesTest.java b/test/unit/org/apache/cassandra/config/CassandraRelevantPropertiesTest.java index 1086521bbfc8..de6e0d21f20a 100644 --- a/test/unit/org/apache/cassandra/config/CassandraRelevantPropertiesTest.java +++ b/test/unit/org/apache/cassandra/config/CassandraRelevantPropertiesTest.java @@ -142,4 +142,4 @@ public void testInteger_null() System.clearProperty(TEST_PROP.getKey()); } } -} \ No newline at end of file +} diff --git a/test/unit/org/apache/cassandra/config/DatabaseDescriptorRefTest.java b/test/unit/org/apache/cassandra/config/DatabaseDescriptorRefTest.java index 3512f32d56f6..c0fd6b27ccfe 100644 --- a/test/unit/org/apache/cassandra/config/DatabaseDescriptorRefTest.java +++ b/test/unit/org/apache/cassandra/config/DatabaseDescriptorRefTest.java @@ -136,12 +136,16 @@ public class DatabaseDescriptorRefTest "org.apache.cassandra.io.compress.LZ4Compressor", "org.apache.cassandra.io.sstable.metadata.MetadataType", "org.apache.cassandra.io.util.BufferedDataOutputStreamPlus", + "org.apache.cassandra.io.util.FileInputStreamPlus", + "org.apache.cassandra.io.util.FileOutputStreamPlus", + "org.apache.cassandra.io.util.File", "org.apache.cassandra.io.util.DataOutputBuffer", "org.apache.cassandra.io.util.DataOutputBufferFixed", "org.apache.cassandra.io.util.DataOutputStreamPlus", "org.apache.cassandra.io.util.DataOutputPlus", "org.apache.cassandra.io.util.DiskOptimizationStrategy", "org.apache.cassandra.io.util.SpinningDiskOptimizationStrategy", + "org.apache.cassandra.io.util.PathUtils$IOToLongFunction", "org.apache.cassandra.locator.Replica", "org.apache.cassandra.locator.SimpleSeedProvider", "org.apache.cassandra.locator.SeedProvider", diff --git a/test/unit/org/apache/cassandra/config/EncryptionOptionsTest.java b/test/unit/org/apache/cassandra/config/EncryptionOptionsTest.java index 23a6c005a314..35c1c0488936 100644 --- a/test/unit/org/apache/cassandra/config/EncryptionOptionsTest.java +++ b/test/unit/org/apache/cassandra/config/EncryptionOptionsTest.java @@ -18,11 +18,11 @@ package org.apache.cassandra.config; -import java.io.File; import java.util.Collections; import java.util.Map; import com.google.common.collect.ImmutableMap; +import org.apache.cassandra.io.util.File; import org.junit.Assert; import org.junit.Test; diff --git a/test/unit/org/apache/cassandra/cql3/CQLTester.java b/test/unit/org/apache/cassandra/cql3/CQLTester.java index 56be6f64b794..af95352764d0 100644 --- a/test/unit/org/apache/cassandra/cql3/CQLTester.java +++ b/test/unit/org/apache/cassandra/cql3/CQLTester.java @@ -17,7 +17,6 @@ */ package org.apache.cassandra.cql3; -import java.io.File; import java.io.IOException; import java.math.BigDecimal; import java.math.BigInteger; @@ -46,6 +45,7 @@ import com.google.common.base.Objects; import com.google.common.base.Strings; import com.google.common.collect.ImmutableSet; +import org.apache.cassandra.io.util.File; import org.junit.*; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -612,7 +612,7 @@ private static void removeAllSSTables(String ks, List tables) // clean up data directory which are stored as data directory/keyspace/data files for (File d : Directories.getKSChildDirectories(ks)) 
{ - if (d.exists() && containsAny(d.getName(), tables)) + if (d.exists() && containsAny(d.name(), tables)) FileUtils.deleteRecursive(d); } } diff --git a/test/unit/org/apache/cassandra/cql3/validation/entities/SecondaryIndexOnStaticColumnTest.java b/test/unit/org/apache/cassandra/cql3/validation/entities/SecondaryIndexOnStaticColumnTest.java index f69d8d54e9e8..1d10eabf7192 100644 --- a/test/unit/org/apache/cassandra/cql3/validation/entities/SecondaryIndexOnStaticColumnTest.java +++ b/test/unit/org/apache/cassandra/cql3/validation/entities/SecondaryIndexOnStaticColumnTest.java @@ -214,4 +214,4 @@ public void testIndexOnUDT() throws Throwable assertEmpty(execute("SELECT id, company FROM %s WHERE home = " + addressString)); assertRows(execute("SELECT id, company FROM %s WHERE home = " + newAddressString), row(1, companyName)); } -} \ No newline at end of file +} diff --git a/test/unit/org/apache/cassandra/cql3/validation/miscellaneous/OverflowTest.java b/test/unit/org/apache/cassandra/cql3/validation/miscellaneous/OverflowTest.java index 71d632dee0af..9fc50531aae2 100644 --- a/test/unit/org/apache/cassandra/cql3/validation/miscellaneous/OverflowTest.java +++ b/test/unit/org/apache/cassandra/cql3/validation/miscellaneous/OverflowTest.java @@ -293,4 +293,4 @@ public void testBlobAsFunction() throws Throwable execute("INSERT INTO %s (k, v) VALUES (0, blobAsInt(0x00000001))"); assertRows(execute("select v from %s where k=0"), row(1)); } -} \ No newline at end of file +} diff --git a/test/unit/org/apache/cassandra/cql3/validation/operations/CompactStorageTest.java b/test/unit/org/apache/cassandra/cql3/validation/operations/CompactStorageTest.java index 70d3fc324ee3..39b76c39aa43 100644 --- a/test/unit/org/apache/cassandra/cql3/validation/operations/CompactStorageTest.java +++ b/test/unit/org/apache/cassandra/cql3/validation/operations/CompactStorageTest.java @@ -5148,4 +5148,4 @@ private void testUpdateWithCompactFormat(String tableQuery) throws Throwable assertInvalidMessage("Undefined column name column1", "UPDATE %s SET column1 = 6 WHERE a = 0"); assertInvalidMessage("Undefined column name value", "UPDATE %s SET value = 6 WHERE a = 0"); } -} \ No newline at end of file +} diff --git a/test/unit/org/apache/cassandra/cql3/validation/operations/DropRecreateAndRestoreTest.java b/test/unit/org/apache/cassandra/cql3/validation/operations/DropRecreateAndRestoreTest.java index 19aba6470aa2..d385639be865 100644 --- a/test/unit/org/apache/cassandra/cql3/validation/operations/DropRecreateAndRestoreTest.java +++ b/test/unit/org/apache/cassandra/cql3/validation/operations/DropRecreateAndRestoreTest.java @@ -17,9 +17,9 @@ */ package org.apache.cassandra.cql3.validation.operations; -import java.io.File; import java.util.List; +import org.apache.cassandra.io.util.File; import org.junit.Test; import org.apache.cassandra.config.DatabaseDescriptor; diff --git a/test/unit/org/apache/cassandra/cql3/validation/operations/TTLTest.java b/test/unit/org/apache/cassandra/cql3/validation/operations/TTLTest.java index 1c1f1161ac0c..97d95a058e81 100644 --- a/test/unit/org/apache/cassandra/cql3/validation/operations/TTLTest.java +++ b/test/unit/org/apache/cassandra/cql3/validation/operations/TTLTest.java @@ -18,11 +18,10 @@ package org.apache.cassandra.cql3.validation.operations; -import java.io.File; -import java.io.FileInputStream; -import java.io.FileOutputStream; + import java.io.IOException; +import org.apache.cassandra.io.util.File; import org.junit.After; import org.junit.Before; import org.junit.Test; @@ -39,6 +38,8 
@@ import org.apache.cassandra.db.Keyspace; import org.apache.cassandra.db.rows.AbstractCell; import org.apache.cassandra.exceptions.InvalidRequestException; +import org.apache.cassandra.io.util.FileInputStreamPlus; +import org.apache.cassandra.io.util.FileOutputStreamPlus; import org.apache.cassandra.tools.StandaloneScrubber; import org.apache.cassandra.tools.ToolRunner; import org.apache.cassandra.tools.ToolRunner.ToolResult; @@ -451,7 +452,7 @@ private void copySSTablesToTableDir(String table, boolean simple, boolean cluste { File destDir = Keyspace.open(keyspace()).getColumnFamilyStore(table).getDirectories().getCFDirectories().iterator().next(); File sourceDir = getTableDir(table, simple, clustering); - for (File file : sourceDir.listFiles()) + for (File file : sourceDir.tryList()) { copyFile(file, destDir); } @@ -467,12 +468,13 @@ private static void copyFile(File src, File dest) throws IOException byte[] buf = new byte[65536]; if (src.isFile()) { - File target = new File(dest, src.getName()); + File target = new File(dest, src.name()); int rd; - FileInputStream is = new FileInputStream(src); - FileOutputStream os = new FileOutputStream(target); + FileInputStreamPlus is = new FileInputStreamPlus(src); + FileOutputStreamPlus os = new FileOutputStreamPlus(target); while ((rd = is.read(buf)) >= 0) os.write(buf, 0, rd); + os.close(); } } diff --git a/test/unit/org/apache/cassandra/db/ColumnFamilyStoreTest.java b/test/unit/org/apache/cassandra/db/ColumnFamilyStoreTest.java index 583c1eb5f346..d26f3dd7fdf1 100644 --- a/test/unit/org/apache/cassandra/db/ColumnFamilyStoreTest.java +++ b/test/unit/org/apache/cassandra/db/ColumnFamilyStoreTest.java @@ -18,7 +18,6 @@ */ package org.apache.cassandra.db; -import java.io.File; import java.io.IOException; import java.nio.ByteBuffer; import java.nio.file.Path; @@ -49,6 +48,7 @@ import org.apache.cassandra.db.rows.*; import org.apache.cassandra.db.partitions.*; import org.apache.cassandra.exceptions.ConfigurationException; +import org.apache.cassandra.io.util.File; import org.apache.cassandra.io.sstable.Component; import org.apache.cassandra.io.sstable.Descriptor; import org.apache.cassandra.io.sstable.format.SSTableFormat; @@ -458,7 +458,7 @@ public void testSnapshotWithoutFlushWithSecondaryIndexes() throws Exception String baseTableFile = manifest.getFiles().get(0); String indexTableFile = manifest.getFiles().get(1); assertThat(baseTableFile).isNotEqualTo(indexTableFile); - assertThat(Directories.isSecondaryIndexFolder(new File(indexTableFile).getParentFile())).isTrue(); + assertThat(Directories.isSecondaryIndexFolder(new File(indexTableFile).parent())).isTrue(); assertThat(indexTableFile).endsWith(baseTableFile); } @@ -547,7 +547,7 @@ public void testScrubDataDirectories() throws Throwable String dataFileName = ssTable.descriptor.filenameFor(Component.DATA); String tmpDataFileName = ssTable.descriptor.tmpFilenameFor(Component.DATA); - new File(dataFileName).renameTo(new File(tmpDataFileName)); + new File(dataFileName).tryMove(new File(tmpDataFileName)); ssTable.selfRef().release(); diff --git a/test/unit/org/apache/cassandra/db/DirectoriesTest.java b/test/unit/org/apache/cassandra/db/DirectoriesTest.java index ba499cee947a..9c1857be0033 100644 --- a/test/unit/org/apache/cassandra/db/DirectoriesTest.java +++ b/test/unit/org/apache/cassandra/db/DirectoriesTest.java @@ -17,8 +17,6 @@ */ package org.apache.cassandra.db; -import java.io.File; -import java.io.FileOutputStream; import java.io.IOException; import java.nio.file.Files; import 
java.nio.file.Path; @@ -29,6 +27,8 @@ import java.util.concurrent.Future; import com.google.common.collect.Sets; +import org.apache.cassandra.io.util.File; +import org.apache.cassandra.io.util.FileOutputStreamPlus; import org.apache.commons.lang3.StringUtils; import org.junit.AfterClass; @@ -102,8 +102,8 @@ public void beforeTest() throws IOException } tempDataDir = FileUtils.createTempFile("cassandra", "unittest"); - tempDataDir.delete(); // hack to create a temp dir - tempDataDir.mkdir(); + tempDataDir.tryDelete(); // hack to create a temp dir + tempDataDir.tryCreateDirectory(); // Create two fake data dir for tests, one using CF directories, one that do not. createTestFiles(); @@ -127,17 +127,17 @@ private static void createTestFiles() throws IOException List allSStables = new ArrayList<>(); sstablesByTableName.put(cfm.name, allSStables); File tableDir = cfDir(cfm); - tableDir.mkdirs(); + tableDir.tryCreateDirectories(); allSStables.addAll(createFakeSSTable(tableDir, cfm.name, 1)); allSStables.addAll(createFakeSSTable(tableDir, cfm.name, 2)); File backupDir = new File(tableDir, Directories.BACKUPS_SUBDIR); - backupDir.mkdir(); + backupDir.tryCreateDirectory(); allSStables.addAll(createFakeSSTable(backupDir, cfm.name, 1)); - File snapshotDir = new File(tableDir, Directories.SNAPSHOT_SUBDIR + File.separator + LEGACY_SNAPSHOT_NAME); - snapshotDir.mkdirs(); + File snapshotDir = new File(tableDir, Directories.SNAPSHOT_SUBDIR + File.pathSeparator() + LEGACY_SNAPSHOT_NAME); + snapshotDir.tryCreateDirectories(); allSStables.addAll(createFakeSSTable(snapshotDir, cfm.name, 1)); } } @@ -175,9 +175,9 @@ private TableMetadata createFakeTable(String table) public FakeSnapshot createFakeSnapshot(TableMetadata table, String tag, boolean createManifest) throws IOException { File tableDir = cfDir(table); - tableDir.mkdirs(); - File snapshotDir = new File(tableDir, Directories.SNAPSHOT_SUBDIR + File.separator + tag); - snapshotDir.mkdirs(); + tableDir.tryCreateDirectories(); + File snapshotDir = new File(tableDir, Directories.SNAPSHOT_SUBDIR + File.pathSeparator() + tag); + snapshotDir.tryCreateDirectories(); Descriptor sstableDesc = new Descriptor(snapshotDir, KS, table.name, 1, SSTableFormat.Type.BIG); createFakeSSTable(sstableDesc); @@ -205,7 +205,7 @@ private static List createFakeSSTable(Descriptor desc) throws IOException for (Component c : new Component[]{ Component.DATA, Component.PRIMARY_INDEX, Component.FILTER }) { File f = new File(desc.filenameFor(c)); - f.createNewFile(); + f.createFileIfNotExists(); components.add(f); } return components; @@ -219,13 +219,13 @@ private static File cfDir(TableMetadata metadata) { // secondary index return new File(tempDataDir, - metadata.keyspace + File.separator + - metadata.name.substring(0, idx) + '-' + tableId + File.separator + + metadata.keyspace + File.pathSeparator() + + metadata.name.substring(0, idx) + '-' + tableId + File.pathSeparator() + metadata.name.substring(idx)); } else { - return new File(tempDataDir, metadata.keyspace + File.separator + metadata.name + '-' + tableId); + return new File(tempDataDir, metadata.keyspace + File.pathSeparator() + metadata.name + '-' + tableId); } } @@ -238,11 +238,11 @@ public void testStandardDirs() throws IOException assertEquals(cfDir(cfm), directories.getDirectoryForNewSSTables()); Descriptor desc = new Descriptor(cfDir(cfm), KS, cfm.name, 1, SSTableFormat.Type.BIG); - File snapshotDir = new File(cfDir(cfm), File.separator + Directories.SNAPSHOT_SUBDIR + File.separator + LEGACY_SNAPSHOT_NAME); - 
assertEquals(snapshotDir.getCanonicalFile(), Directories.getSnapshotDirectory(desc, LEGACY_SNAPSHOT_NAME)); + File snapshotDir = new File(cfDir(cfm), File.pathSeparator() + Directories.SNAPSHOT_SUBDIR + File.pathSeparator() + LEGACY_SNAPSHOT_NAME); + assertEquals(snapshotDir.toCanonical(), Directories.getSnapshotDirectory(desc, LEGACY_SNAPSHOT_NAME)); - File backupsDir = new File(cfDir(cfm), File.separator + Directories.BACKUPS_SUBDIR); - assertEquals(backupsDir.getCanonicalFile(), Directories.getBackupsDirectory(desc)); + File backupsDir = new File(cfDir(cfm), File.pathSeparator() + Directories.BACKUPS_SUBDIR); + assertEquals(backupsDir.toCanonical(), Directories.getBackupsDirectory(desc)); } } @@ -307,7 +307,7 @@ public void testMaybeManifestLoading() throws Exception { File parentSnapshotDirectory = Directories.getSnapshotDirectory(parentDesc, tag); List files = new LinkedList<>(); - files.add(parentSnapshotDirectory.getAbsolutePath()); + files.add(parentSnapshotDirectory.toAbsolute().absolutePath()); File manifestFile = directories.getSnapshotManifestFile(tag); @@ -316,7 +316,7 @@ public void testMaybeManifestLoading() throws Exception { Set dirs = new HashSet<>(); - dirs.add(manifestFile.getParentFile()); + dirs.add(manifestFile.parent()); dirs.add(new File("buzz")); SnapshotManifest loadedManifest = Directories.maybeLoadManifest(KS, cfm.name, tag, dirs); @@ -356,9 +356,10 @@ public void testSecondaryIndexDirectories() // snapshot dir should be created under its parent's File parentSnapshotDirectory = Directories.getSnapshotDirectory(parentDesc, "test"); File indexSnapshotDirectory = Directories.getSnapshotDirectory(indexDesc, "test"); - assertEquals(parentSnapshotDirectory, indexSnapshotDirectory.getParentFile()); + assertEquals(parentSnapshotDirectory, indexSnapshotDirectory.parent()); // check if snapshot directory exists + parentSnapshotDirectory.tryCreateDirectories(); assertTrue(parentDirectories.snapshotExists("test")); assertTrue(indexDirectories.snapshotExists("test")); @@ -383,13 +384,13 @@ public void testSecondaryIndexDirectories() // check backup directory File parentBackupDirectory = Directories.getBackupsDirectory(parentDesc); File indexBackupDirectory = Directories.getBackupsDirectory(indexDesc); - assertEquals(parentBackupDirectory, indexBackupDirectory.getParentFile()); + assertEquals(parentBackupDirectory, indexBackupDirectory.parent()); } private File createFile(String fileName, int size) { File newFile = new File(fileName); - try (FileOutputStream writer = new FileOutputStream(newFile)) + try (FileOutputStreamPlus writer = new FileOutputStreamPlus(newFile);) { writer.write(new byte[size]); writer.flush(); @@ -416,7 +417,7 @@ private void checkFiles(TableMetadata cfm, Directories directories) listed = new HashSet<>(lister.listFiles()); for (File f : sstablesByTableName.get(cfm.name)) { - if (f.getPath().contains(Directories.SNAPSHOT_SUBDIR) || f.getPath().contains(Directories.BACKUPS_SUBDIR)) + if (f.path().contains(Directories.SNAPSHOT_SUBDIR) || f.path().contains(Directories.BACKUPS_SUBDIR)) assertFalse(f + " should not be listed", listed.contains(f)); else assertTrue(f + " is missing", listed.contains(f)); @@ -427,7 +428,7 @@ private void checkFiles(TableMetadata cfm, Directories directories) listed = new HashSet<>(lister.listFiles()); for (File f : sstablesByTableName.get(cfm.name)) { - if (f.getPath().contains(Directories.SNAPSHOT_SUBDIR)) + if (f.path().contains(Directories.SNAPSHOT_SUBDIR)) assertFalse(f + " should not be listed", listed.contains(f)); else 
assertTrue(f + " is missing", listed.contains(f)); @@ -438,9 +439,9 @@ private void checkFiles(TableMetadata cfm, Directories directories) listed = new HashSet<>(lister.listFiles()); for (File f : sstablesByTableName.get(cfm.name)) { - if (f.getPath().contains(Directories.SNAPSHOT_SUBDIR) || f.getPath().contains(Directories.BACKUPS_SUBDIR)) + if (f.path().contains(Directories.SNAPSHOT_SUBDIR) || f.path().contains(Directories.BACKUPS_SUBDIR)) assertFalse(f + " should not be listed", listed.contains(f)); - else if (f.getName().contains("tmp-")) + else if (f.name().contains("tmp-")) assertFalse(f + " should not be listed", listed.contains(f)); else assertTrue(f + " is missing", listed.contains(f)); @@ -455,9 +456,9 @@ public void testTemporaryFile() throws IOException Directories directories = new Directories(cfm, toDataDirectories(tempDataDir)); File tempDir = directories.getTemporaryWriteableDirectoryAsFile(10); - tempDir.mkdir(); + tempDir.tryCreateDirectory(); File tempFile = new File(tempDir, "tempFile"); - tempFile.createNewFile(); + tempFile.createFileIfNotExists(); assertTrue(tempDir.exists()); assertTrue(tempFile.exists()); @@ -491,14 +492,13 @@ public void testDiskFailurePolicy_best_effort() if (!directories.isEmpty()) { String[] path = new String[] {KS, "bad"}; - File dir = new File(first.location, StringUtils.join(path, File.separator)); + File dir = new File(first.location, StringUtils.join(path, File.pathSeparator())); JVMStabilityInspector.inspectThrowable(new FSWriteError(new IOException("Unable to create directory " + dir), dir)); } - File file = new File(first.location, new File(KS, "bad").getPath()); + File file = new File(first.location, new File(KS, "bad").path()); assertTrue(DisallowedDirectories.isUnwritable(file)); - - } + } finally { DatabaseDescriptor.setDiskFailurePolicy(origPolicy); @@ -641,8 +641,8 @@ public void testGetLocationForDisk() Directories dirs = new Directories(cfm, paths); for (DataDirectory dir : paths) { - String p = dirs.getLocationForDisk(dir).getAbsolutePath() + File.separator; - assertTrue(p.startsWith(dir.location.getAbsolutePath() + File.separator)); + String p = dirs.getLocationForDisk(dir).absolutePath() + File.pathSeparator(); + assertTrue(p.startsWith(dir.location.absolutePath() + File.pathSeparator())); } } } @@ -658,11 +658,11 @@ public void testGetLocationWithSymlinks() throws IOException Path p2 = Files.createDirectories(ddir.resolve("p2")); Path l1 = Files.createSymbolicLink(p2.resolve("ks"), symlinktarget); - DataDirectory path1 = new DataDirectory(p1.toFile()); - DataDirectory path2 = new DataDirectory(p2.toFile()); + DataDirectory path1 = new DataDirectory(new File(p1)); + DataDirectory path2 = new DataDirectory(new File(p2)); Directories dirs = new Directories(CFM.iterator().next(), new DataDirectory[] {path1, path2}); - dirs.getLocationForDisk(new DataDirectory(p1.toFile())); - dirs.getLocationForDisk(new DataDirectory(p2.toFile())); + dirs.getLocationForDisk(new DataDirectory(new File(p1))); + dirs.getLocationForDisk(new DataDirectory(new File(p2))); assertTrue(dirs.getLocationForDisk(path2).toPath().startsWith(l1)); assertTrue(dirs.getLocationForDisk(path1).toPath().startsWith(p1)); @@ -682,8 +682,8 @@ public void getDataDirectoryForFile() for (DataDirectory dir : paths) { Descriptor d = Descriptor.fromFilename(new File(dir.location, getNewFilename(cfm, false)).toString()); - String p = dirs.getDataDirectoryForFile(d).location.getAbsolutePath() + File.separator; - assertTrue(p.startsWith(dir.location.getAbsolutePath() + 
File.separator)); + String p = dirs.getDataDirectoryForFile(d).location.absolutePath() + File.pathSeparator(); + assertTrue(p.startsWith(dir.location.absolutePath() + File.pathSeparator())); } } } @@ -704,16 +704,16 @@ public void testDirectoriesSymlinks() throws IOException Path symlinktarget = Files.createDirectories(p.resolve("symlinktarget")); Path ddir1 = Files.createDirectories(p.resolve("datadir1")); Path ddir2 = Files.createSymbolicLink(p.resolve("datadir11"), symlinktarget); - DataDirectory dd1 = new DataDirectory(ddir1.toFile()); - DataDirectory dd2 = new DataDirectory(ddir2.toFile()); + DataDirectory dd1 = new DataDirectory(new File(ddir1)); + DataDirectory dd2 = new DataDirectory(new File(ddir2)); for (TableMetadata tm : CFM) { Directories dirs = new Directories(tm, Sets.newHashSet(dd1, dd2)); - Descriptor desc = Descriptor.fromFilename(ddir1.resolve(getNewFilename(tm, false)).toFile()); - assertEquals(ddir1.toFile(), dirs.getDataDirectoryForFile(desc).location); - desc = Descriptor.fromFilename(ddir2.resolve(getNewFilename(tm, false)).toFile()); - assertEquals(ddir2.toFile(), dirs.getDataDirectoryForFile(desc).location); + Descriptor desc = Descriptor.fromFilename(new File(ddir1.resolve(getNewFilename(tm, false)))); + assertEquals(new File(ddir1), dirs.getDataDirectoryForFile(desc).location); + desc = Descriptor.fromFilename(new File(ddir2.resolve(getNewFilename(tm, false)))); + assertEquals(new File(ddir2), dirs.getDataDirectoryForFile(desc).location); } } @@ -755,15 +755,15 @@ private void testDirectoriesSymlinksHelper(boolean oldStyle) throws IOException Files.createSymbolicLink(keyspacedir.resolve(tabledir), symlinktarget); } - DataDirectory dd1 = new DataDirectory(ddir1.toFile()); - DataDirectory dd2 = new DataDirectory(ddir2.toFile()); + DataDirectory dd1 = new DataDirectory(new File(ddir1)); + DataDirectory dd2 = new DataDirectory(new File(ddir2)); for (TableMetadata tm : CFM) { Directories dirs = new Directories(tm, Sets.newHashSet(dd1, dd2)); - Descriptor desc = Descriptor.fromFilename(ddir1.resolve(getNewFilename(tm, oldStyle)).toFile()); - assertEquals(ddir1.toFile(), dirs.getDataDirectoryForFile(desc).location); - desc = Descriptor.fromFilename(ddir2.resolve(getNewFilename(tm, oldStyle)).toFile()); - assertEquals(ddir2.toFile(), dirs.getDataDirectoryForFile(desc).location); + Descriptor desc = Descriptor.fromFilename(new File(ddir1.resolve(getNewFilename(tm, oldStyle)))); + assertEquals(new File(ddir1), dirs.getDataDirectoryForFile(desc).location); + desc = Descriptor.fromFilename(new File(ddir2.resolve(getNewFilename(tm, oldStyle)))); + assertEquals(new File(ddir2), dirs.getDataDirectoryForFile(desc).location); } } @@ -795,11 +795,11 @@ public void testDataDirectoriesIterator() throws IOException Iterator iter = directories.iterator(); assertTrue(iter.hasNext()); - assertEquals(new DataDirectory(subDir_1.toFile()), iter.next()); + assertEquals(new DataDirectory(new File(subDir_1)), iter.next()); assertTrue(iter.hasNext()); - assertEquals(new DataDirectory(subDir_2.toFile()), iter.next()); + assertEquals(new DataDirectory(new File(subDir_2)), iter.next()); assertTrue(iter.hasNext()); - assertEquals(new DataDirectory(subDir_3.toFile()), iter.next()); + assertEquals(new DataDirectory(new File(subDir_3)), iter.next()); assertFalse(iter.hasNext()); directories = new DataDirectories(new String[]{subDir_1.toString(), subDir_2.toString()}, @@ -807,15 +807,15 @@ public void testDataDirectoriesIterator() throws IOException iter = directories.iterator(); 
assertTrue(iter.hasNext()); - assertEquals(new DataDirectory(subDir_1.toFile()), iter.next()); + assertEquals(new DataDirectory(new File(subDir_1)), iter.next()); assertTrue(iter.hasNext()); - assertEquals(new DataDirectory(subDir_2.toFile()), iter.next()); + assertEquals(new DataDirectory(new File(subDir_2)), iter.next()); assertFalse(iter.hasNext()); } private String getNewFilename(TableMetadata tm, boolean oldStyle) { - return tm.keyspace + File.separator + tm.name + (oldStyle ? "" : Component.separator + tm.id.toHexString()) + "/na-1-big-Data.db"; + return tm.keyspace + File.pathSeparator() + tm.name + (oldStyle ? "" : Component.separator + tm.id.toHexString()) + "/na-1-big-Data.db"; } private List getWriteableDirectories(DataDirectory[] dataDirectories, long writeSize) diff --git a/test/unit/org/apache/cassandra/db/DiskBoundaryManagerTest.java b/test/unit/org/apache/cassandra/db/DiskBoundaryManagerTest.java index a9d69019217b..2138f529a0c2 100644 --- a/test/unit/org/apache/cassandra/db/DiskBoundaryManagerTest.java +++ b/test/unit/org/apache/cassandra/db/DiskBoundaryManagerTest.java @@ -18,11 +18,11 @@ package org.apache.cassandra.db; -import java.io.File; import java.net.UnknownHostException; import java.util.List; import com.google.common.collect.Lists; +import org.apache.cassandra.io.util.File; import org.junit.Assert; import org.junit.Before; import org.junit.Test; diff --git a/test/unit/org/apache/cassandra/db/ImportTest.java b/test/unit/org/apache/cassandra/db/ImportTest.java index c0c3799b858b..85d32516e2ea 100644 --- a/test/unit/org/apache/cassandra/db/ImportTest.java +++ b/test/unit/org/apache/cassandra/db/ImportTest.java @@ -18,7 +18,6 @@ package org.apache.cassandra.db; -import java.io.File; import java.io.IOException; import java.io.RandomAccessFile; import java.nio.file.Files; @@ -42,6 +41,7 @@ import org.apache.cassandra.dht.BootStrapper; import org.apache.cassandra.io.sstable.Component; import org.apache.cassandra.io.sstable.format.SSTableReader; +import org.apache.cassandra.io.util.File; import org.apache.cassandra.locator.InetAddressAndPort; import org.apache.cassandra.locator.TokenMetadata; import org.apache.cassandra.service.CacheService; @@ -227,36 +227,36 @@ private File moveToBackupDir(Set sstables) throws IOException { Path temp = Files.createTempDirectory("importtest"); SSTableReader sst = sstables.iterator().next(); - String tabledir = sst.descriptor.directory.getName(); - String ksdir = sst.descriptor.directory.getParentFile().getName(); + String tabledir = sst.descriptor.directory.name(); + String ksdir = sst.descriptor.directory.parent().name(); Path backupdir = createDirectories(temp.toString(), ksdir, tabledir); for (SSTableReader sstable : sstables) { sstable.selfRef().release(); - for (File f : sstable.descriptor.directory.listFiles()) + for (File f : sstable.descriptor.directory.tryList()) { if (f.toString().contains(sstable.descriptor.baseFilename())) { System.out.println("move " + f.toPath() + " to " + backupdir); - File moveFileTo = new File(backupdir.toFile(), f.getName()); + File moveFileTo = new File(backupdir, f.name()); moveFileTo.deleteOnExit(); Files.move(f.toPath(), moveFileTo.toPath()); } } } - return backupdir.toFile(); + return new File(backupdir); } private Path createDirectories(String base, String ... 
subdirs) { File b = new File(base); - b.mkdir(); + b.tryCreateDirectory(); System.out.println("mkdir "+b); b.deleteOnExit(); for (String subdir : subdirs) { b = new File(b, subdir); - b.mkdir(); + b.tryCreateDirectory(); System.out.println("mkdir "+b); b.deleteOnExit(); } @@ -291,8 +291,8 @@ public void testGetCorrectDirectory() throws Throwable importer.importNewSSTables(SSTableImporter.Options.options(dir.toString()).build()); for (SSTableReader sstable : mock.getLiveSSTables()) { - File movedDir = sstable.descriptor.directory.getCanonicalFile(); - File correctDir = mock.getDiskBoundaries().getCorrectDiskForSSTable(sstable).location.getCanonicalFile(); + File movedDir = sstable.descriptor.directory.toCanonical(); + File correctDir = mock.getDiskBoundaries().getCorrectDiskForSSTable(sstable).location.toCanonical(); assertTrue(movedDir.toString().startsWith(correctDir.toString())); } for (SSTableReader sstable : mock.getLiveSSTables()) @@ -331,7 +331,7 @@ private void testCorruptHelper(boolean verify, boolean copy) throws Throwable getCurrentColumnFamilyStore().clearUnsafe(); File backupdirCorrect = moveToBackupDir(correctSSTables); - Set beforeImport = Sets.newHashSet(backupdir.listFiles()); + Set beforeImport = Sets.newHashSet(backupdir.tryList()); // first we moved out 2 sstables, one correct and one corrupt in to a single directory (backupdir) // then we moved out 1 sstable, a correct one (in backupdirCorrect). // now import should fail import on backupdir, but import the one in backupdirCorrect. @@ -346,7 +346,7 @@ private void testCorruptHelper(boolean verify, boolean copy) throws Throwable assertTrue("pk = "+pk, pk >= 100 && pk < 130); } assertEquals("Data dir should contain one file", 1, countFiles(getCurrentColumnFamilyStore().getDirectories().getDirectoryForNewSSTables())); - assertEquals("backupdir contained 2 files before import, should still contain 2 after failing to import it", beforeImport, Sets.newHashSet(backupdir.listFiles())); + assertEquals("backupdir contained 2 files before import, should still contain 2 after failing to import it", beforeImport, Sets.newHashSet(backupdir.tryList())); if (copy) { assertEquals("backupdirCorrect contained 1 file before import, should contain 1 after import too", 1, countFiles(backupdirCorrect)); @@ -355,14 +355,13 @@ private void testCorruptHelper(boolean verify, boolean copy) throws Throwable { assertEquals("backupdirCorrect contained 1 file before import, should be empty after import", 0, countFiles(backupdirCorrect)); } - } private int countFiles(File dir) { int fileCount = 0; - for (File f : dir.listFiles()) + for (File f : dir.tryList()) { if (f.isFile() && f.toString().contains("-Data.db")) { @@ -618,8 +617,8 @@ public void testRefreshCorrupt() throws Throwable assertTrue(new File(sstable.descriptor.filenameFor(Component.DATA)).exists()); getCurrentColumnFamilyStore().truncateBlocking(); LifecycleTransaction.waitForDeletions(); - for (File f : sstableToCorrupt.descriptor.directory.listFiles()) // clean up the corrupt files which truncate does not handle - f.delete(); + for (File f : sstableToCorrupt.descriptor.directory.tryList()) // clean up the corrupt files which truncate does not handle + f.tryDelete(); } diff --git a/test/unit/org/apache/cassandra/db/MmapFileTest.java b/test/unit/org/apache/cassandra/db/MmapFileTest.java index 71a218eff893..c8338cb20776 100644 --- a/test/unit/org/apache/cassandra/db/MmapFileTest.java +++ b/test/unit/org/apache/cassandra/db/MmapFileTest.java @@ -17,7 +17,6 @@ */ package 
org.apache.cassandra.db; -import java.io.File; import java.io.RandomAccessFile; import java.lang.management.ManagementFactory; import java.nio.MappedByteBuffer; @@ -26,6 +25,7 @@ import javax.management.MBeanServer; import javax.management.ObjectName; +import org.apache.cassandra.io.util.File; import org.junit.Assert; import org.junit.Test; @@ -56,17 +56,17 @@ public void testMmapFile() throws Exception { int size = 1024 * 1024; - try (RandomAccessFile raf = new RandomAccessFile(f1, "rw")) + try (RandomAccessFile raf = new RandomAccessFile(f1.toJavaIOFile(), "rw")) { raf.setLength(size); } - try (RandomAccessFile raf = new RandomAccessFile(f2, "rw")) + try (RandomAccessFile raf = new RandomAccessFile(f2.toJavaIOFile(), "rw")) { raf.setLength(size); } - try (RandomAccessFile raf = new RandomAccessFile(f3, "rw")) + try (RandomAccessFile raf = new RandomAccessFile(f3.toJavaIOFile(), "rw")) { raf.setLength(size); } @@ -148,16 +148,16 @@ public void testMmapFile() throws Exception Assert.assertEquals("# of mapped buffers should be 0", Long.valueOf(0L), mmapCount); Assert.assertEquals("amount of mapped memory should be 0", Long.valueOf(0L), mmapMemoryUsed); - Assert.assertTrue(f1.delete()); - Assert.assertTrue(f2.delete()); - Assert.assertTrue(f3.delete()); + Assert.assertTrue(f1.tryDelete()); + Assert.assertTrue(f2.tryDelete()); + Assert.assertTrue(f3.tryDelete()); } finally { Runtime.getRuntime().gc(); - f1.delete(); - f2.delete(); - f3.delete(); + f1.tryDelete(); + f2.tryDelete(); + f3.tryDelete(); } } } diff --git a/test/unit/org/apache/cassandra/db/MutationExceededMaxSizeExceptionTest.java b/test/unit/org/apache/cassandra/db/MutationExceededMaxSizeExceptionTest.java index 81d97359b29e..09ab8a1e21ec 100644 --- a/test/unit/org/apache/cassandra/db/MutationExceededMaxSizeExceptionTest.java +++ b/test/unit/org/apache/cassandra/db/MutationExceededMaxSizeExceptionTest.java @@ -43,4 +43,4 @@ public void testMakePKString() assertEquals("aaa, bbb and 1 more.", makeTopKeysString(new ArrayList<>(keys), 8)); assertEquals("aaa, bbb and 1 more.", makeTopKeysString(new ArrayList<>(keys), 10)); } -} \ No newline at end of file +} diff --git a/test/unit/org/apache/cassandra/db/ReadMessageTest.java b/test/unit/org/apache/cassandra/db/ReadMessageTest.java index 5b052536fb27..cd003c63a353 100644 --- a/test/unit/org/apache/cassandra/db/ReadMessageTest.java +++ b/test/unit/org/apache/cassandra/db/ReadMessageTest.java @@ -21,6 +21,7 @@ import static org.junit.Assert.*; import java.io.*; +import org.apache.cassandra.io.util.File; import com.google.common.base.Predicate; diff --git a/test/unit/org/apache/cassandra/db/RecoveryManagerMissingHeaderTest.java b/test/unit/org/apache/cassandra/db/RecoveryManagerMissingHeaderTest.java index 4044fff612fc..21058671b8af 100644 --- a/test/unit/org/apache/cassandra/db/RecoveryManagerMissingHeaderTest.java +++ b/test/unit/org/apache/cassandra/db/RecoveryManagerMissingHeaderTest.java @@ -18,12 +18,12 @@ */ package org.apache.cassandra.db; -import java.io.File; import java.io.IOException; import java.util.Arrays; import java.util.Collection; import java.util.Collections; +import org.apache.cassandra.io.util.File; import org.junit.Assert; import org.junit.Before; import org.junit.BeforeClass; @@ -112,9 +112,9 @@ public void testMissingHeader() throws IOException keyspace2.getColumnFamilyStore("Standard3").clearUnsafe(); // nuke the header - for (File file : new File(DatabaseDescriptor.getCommitLogLocation()).listFiles()) + for (File file : new 
File(DatabaseDescriptor.getCommitLogLocation()).tryList()) { - if (file.getName().endsWith(".header")) + if (file.name().endsWith(".header")) FileUtils.deleteWithConfirm(file); } diff --git a/test/unit/org/apache/cassandra/db/RowIndexEntryTest.java b/test/unit/org/apache/cassandra/db/RowIndexEntryTest.java index 7b774eb03793..cac8038e6a33 100644 --- a/test/unit/org/apache/cassandra/db/RowIndexEntryTest.java +++ b/test/unit/org/apache/cassandra/db/RowIndexEntryTest.java @@ -17,7 +17,6 @@ */ package org.apache.cassandra.db; -import java.io.File; import java.io.IOException; import java.nio.ByteBuffer; import java.util.ArrayList; @@ -28,6 +27,7 @@ import java.util.List; import com.google.common.primitives.Ints; +import org.apache.cassandra.io.util.File; import org.junit.Assert; import org.junit.Test; diff --git a/test/unit/org/apache/cassandra/db/SchemaCQLHelperTest.java b/test/unit/org/apache/cassandra/db/SchemaCQLHelperTest.java index b928ebfe6d07..5a6b69ef0fec 100644 --- a/test/unit/org/apache/cassandra/db/SchemaCQLHelperTest.java +++ b/test/unit/org/apache/cassandra/db/SchemaCQLHelperTest.java @@ -22,6 +22,7 @@ import com.google.common.collect.ImmutableMap; import com.google.common.io.Files; +import org.apache.cassandra.io.util.FileReader; import org.junit.Assert; import org.junit.Before; import org.junit.Test; @@ -40,7 +41,6 @@ import org.json.simple.JSONObject; import org.json.simple.parser.JSONParser; -import java.io.FileReader; import java.nio.ByteBuffer; import java.nio.charset.Charset; import java.util.Arrays; @@ -402,7 +402,7 @@ public void testSnapshot() throws Throwable ColumnFamilyStore cfs = Keyspace.open(keyspace()).getColumnFamilyStore(tableName); cfs.snapshot(SNAPSHOT); - String schema = Files.toString(cfs.getDirectories().getSnapshotSchemaFile(SNAPSHOT), Charset.defaultCharset()); + String schema = Files.toString(cfs.getDirectories().getSnapshotSchemaFile(SNAPSHOT).toJavaIOFile(), Charset.defaultCharset()); assertThat(schema, allOf(containsString(String.format("CREATE TYPE IF NOT EXISTS %s.%s (\n" + " a1 varint,\n" + diff --git a/test/unit/org/apache/cassandra/db/ScrubTest.java b/test/unit/org/apache/cassandra/db/ScrubTest.java index 7c24d7ced186..1e2c8e052f45 100644 --- a/test/unit/org/apache/cassandra/db/ScrubTest.java +++ b/test/unit/org/apache/cassandra/db/ScrubTest.java @@ -18,7 +18,6 @@ */ package org.apache.cassandra.db; -import java.io.File; import java.io.IOError; import java.io.IOException; import java.io.RandomAccessFile; @@ -36,6 +35,7 @@ import java.util.concurrent.ExecutionException; import java.util.concurrent.atomic.AtomicInteger; +import org.apache.cassandra.io.util.File; import org.apache.commons.lang3.StringUtils; import org.junit.AfterClass; @@ -336,7 +336,7 @@ public void testScrubNoIndex() throws ExecutionException, InterruptedException, assertOrderedAll(cfs, 10); for (SSTableReader sstable : cfs.getLiveSSTables()) - assertTrue(new File(sstable.descriptor.filenameFor(Component.PRIMARY_INDEX)).delete()); + assertTrue(new File(sstable.descriptor.filenameFor(Component.PRIMARY_INDEX)).tryDelete()); CompactionManager.instance.performScrub(cfs, false, true, 2); @@ -352,10 +352,10 @@ public void testScrubOutOfOrder() DatabaseDescriptor.setPartitionerUnsafe(new ByteOrderedPartitioner()); // Create out-of-order SSTable - File tempDir = FileUtils.createTempFile("ScrubTest.testScrubOutOfOrder", "").getParentFile(); + File tempDir = FileUtils.createTempFile("ScrubTest.testScrubOutOfOrder", "").parent(); // create ks/cf directory - File tempDataDir = new 
File(tempDir, String.join(File.separator, ksName, CF)); - assertTrue(tempDataDir.mkdirs()); + File tempDataDir = new File(tempDir, String.join(File.pathSeparator(), ksName, CF)); + assertTrue(tempDataDir.tryCreateDirectories()); try { CompactionManager.instance.disableAutoCompaction(); diff --git a/test/unit/org/apache/cassandra/db/SerializationHeaderTest.java b/test/unit/org/apache/cassandra/db/SerializationHeaderTest.java index 32a9a6d57d18..34c15bb7391e 100644 --- a/test/unit/org/apache/cassandra/db/SerializationHeaderTest.java +++ b/test/unit/org/apache/cassandra/db/SerializationHeaderTest.java @@ -44,7 +44,6 @@ import org.junit.Assert; import org.junit.Test; -import java.io.File; import java.nio.ByteBuffer; import java.util.Collections; import java.util.concurrent.Callable; @@ -52,6 +51,8 @@ import java.util.function.BiFunction; import java.util.function.Function; +import org.apache.cassandra.io.util.File; + public class SerializationHeaderTest { private static String KEYSPACE = "SerializationHeaderTest"; @@ -84,7 +85,7 @@ public void testWrittenAsDifferentKind() throws Exception schemaWithRegular = schemaWithRegular.unbuild().recordColumnDrop(columnStatic, 0L).build(); final AtomicInteger generation = new AtomicInteger(); - File dir = Files.createTempDir(); + File dir = new File(Files.createTempDir()); try { BiFunction>, Callable> writer = (schema, clusteringFunction) -> () -> { diff --git a/test/unit/org/apache/cassandra/db/VerifyTest.java b/test/unit/org/apache/cassandra/db/VerifyTest.java index 65bff51d2fc9..9593b7bd82f1 100644 --- a/test/unit/org/apache/cassandra/db/VerifyTest.java +++ b/test/unit/org/apache/cassandra/db/VerifyTest.java @@ -39,6 +39,7 @@ import org.apache.cassandra.io.sstable.Component; import org.apache.cassandra.io.sstable.CorruptSSTableException; import org.apache.cassandra.io.sstable.format.SSTableReader; +import org.apache.cassandra.io.util.FileInputStreamPlus; import org.apache.cassandra.io.util.FileUtils; import org.apache.cassandra.locator.InetAddressAndPort; import org.apache.cassandra.locator.TokenMetadata; @@ -63,6 +64,7 @@ import java.util.zip.CRC32; import java.util.zip.CheckedInputStream; +import org.apache.cassandra.io.util.File; import static org.apache.cassandra.SchemaLoader.counterCFMD; import static org.apache.cassandra.SchemaLoader.createKeyspace; import static org.apache.cassandra.SchemaLoader.loadSchema; @@ -769,7 +771,7 @@ protected void fillCounterCF(ColumnFamilyStore cfs, int partitionsPerSSTable) th protected long simpleFullChecksum(String filename) throws IOException { - try (FileInputStream inputStream = new FileInputStream(filename)) + try (FileInputStreamPlus inputStream = new FileInputStreamPlus(filename)) { CRC32 checksum = new CRC32(); CheckedInputStream cinStream = new CheckedInputStream(inputStream, checksum); diff --git a/test/unit/org/apache/cassandra/db/commitlog/CDCTestReplayer.java b/test/unit/org/apache/cassandra/db/commitlog/CDCTestReplayer.java index fa3295a484cc..762459097cc8 100644 --- a/test/unit/org/apache/cassandra/db/commitlog/CDCTestReplayer.java +++ b/test/unit/org/apache/cassandra/db/commitlog/CDCTestReplayer.java @@ -17,9 +17,9 @@ */ package org.apache.cassandra.db.commitlog; -import java.io.File; import java.io.IOException; +import org.apache.cassandra.io.util.File; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -45,7 +45,7 @@ public CDCTestReplayer() throws IOException public void examineCommitLog() throws IOException { - replayFiles(new 
File(DatabaseDescriptor.getCommitLogLocation()).listFiles()); + replayFiles(new File(DatabaseDescriptor.getCommitLogLocation()).tryList()); } private class CommitLogTestReader extends CommitLogReader diff --git a/test/unit/org/apache/cassandra/db/commitlog/CommitLogChainedMarkersTest.java b/test/unit/org/apache/cassandra/db/commitlog/CommitLogChainedMarkersTest.java index fb90d59462be..319e75b34668 100644 --- a/test/unit/org/apache/cassandra/db/commitlog/CommitLogChainedMarkersTest.java +++ b/test/unit/org/apache/cassandra/db/commitlog/CommitLogChainedMarkersTest.java @@ -18,12 +18,12 @@ package org.apache.cassandra.db.commitlog; -import java.io.File; import java.io.IOException; import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.Random; +import org.apache.cassandra.io.util.File; import org.junit.Assert; import org.junit.Test; import org.junit.runner.RunWith; diff --git a/test/unit/org/apache/cassandra/db/commitlog/CommitLogReaderTest.java b/test/unit/org/apache/cassandra/db/commitlog/CommitLogReaderTest.java index 794f99f47b28..83723c5c5bf4 100644 --- a/test/unit/org/apache/cassandra/db/commitlog/CommitLogReaderTest.java +++ b/test/unit/org/apache/cassandra/db/commitlog/CommitLogReaderTest.java @@ -17,11 +17,11 @@ */ package org.apache.cassandra.db.commitlog; -import java.io.File; import java.io.IOException; import java.util.ArrayList; import java.util.List; +import org.apache.cassandra.io.util.File; import org.junit.Assert; import org.junit.Before; import org.junit.BeforeClass; @@ -191,7 +191,7 @@ private void confirmReadOrder(TestCLRHandler handler, int offset) static ArrayList getCommitLogs() { File dir = new File(DatabaseDescriptor.getCommitLogLocation()); - File[] files = dir.listFiles(); + File[] files = dir.tryList(); ArrayList results = new ArrayList<>(); for (File f : files) { diff --git a/test/unit/org/apache/cassandra/db/commitlog/CommitLogSegmentManagerCDCTest.java b/test/unit/org/apache/cassandra/db/commitlog/CommitLogSegmentManagerCDCTest.java index 4128b7122ee6..cbfdadbb5fca 100644 --- a/test/unit/org/apache/cassandra/db/commitlog/CommitLogSegmentManagerCDCTest.java +++ b/test/unit/org/apache/cassandra/db/commitlog/CommitLogSegmentManagerCDCTest.java @@ -24,6 +24,8 @@ import java.nio.file.Path; import java.util.*; +import org.apache.cassandra.io.util.File; +import org.apache.cassandra.io.util.FileReader; import org.junit.Assert; import org.junit.Before; import org.junit.BeforeClass; @@ -101,7 +103,7 @@ public void testCDCWriteFailure() throws Throwable Assert.assertTrue("Expected files to be moved to overflow.", getCDCRawCount() > 0); // Simulate a CDC consumer reading files then deleting them - for (File f : new File(DatabaseDescriptor.getCDCLogLocation()).listFiles()) + for (File f : new File(DatabaseDescriptor.getCDCLogLocation()).tryList()) FileUtils.deleteWithConfirm(f); // Update size tracker to reflect deleted files. Should flip flag on current allocatingFrom to allow. 
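Illustration (not part of the patch): the hunks in these commit log tests repeat one migration, moving directory listings from java.io.File#listFiles() to the new org.apache.cassandra.io.util.File#tryList(). A minimal sketch of that pattern, built only from calls that appear elsewhere in this patch; the class and method names here are invented for the example:

import org.apache.cassandra.config.DatabaseDescriptor;
import org.apache.cassandra.io.util.File;
import org.apache.cassandra.io.util.FileUtils;

// Illustrative only: clear out commit log segments the way several tests above do,
// iterating the result of the new File#tryList() instead of java.io.File#listFiles().
public class CommitLogDirExample
{
    static void clearCommitLogSegments()
    {
        File location = new File(DatabaseDescriptor.getCommitLogLocation());
        for (File f : location.tryList())      // replaces listFiles(); mirrors the tests' direct iteration
            FileUtils.deleteWithConfirm(f);    // same FileUtils call as before, now passed the wrapper File
    }
}
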
@@ -143,14 +145,14 @@ public void testSegmentFlaggingOnCreation() throws Throwable cdcMgr.awaitManagementTasksCompletion(); // Delete all files in cdc_raw - for (File f : new File(DatabaseDescriptor.getCDCLogLocation()).listFiles()) - f.delete(); + for (File f : new File(DatabaseDescriptor.getCDCLogLocation()).tryList()) + f.tryDelete(); cdcMgr.updateCDCTotalSize(); // Confirm cdc update process changes flag on active segment expectCurrentCDCState(CDCState.PERMITTED); // Clear out archived CDC files - for (File f : new File(DatabaseDescriptor.getCDCLogLocation()).listFiles()) { + for (File f : new File(DatabaseDescriptor.getCDCLogLocation()).tryList()) { FileUtils.deleteWithConfirm(f); } } @@ -234,7 +236,7 @@ public void testDeleteLinkOnDiscardNoCDC() throws Throwable CommitLogSegment currentSegment = CommitLog.instance.segmentManager.allocatingFrom(); // Confirm that, with no CDC data present, we've hard-linked but have no index file - Path linked = new File(DatabaseDescriptor.getCDCLogLocation(), currentSegment.logFile.getName()).toPath(); + Path linked = new File(DatabaseDescriptor.getCDCLogLocation(), currentSegment.logFile.name()).toPath(); File cdcIndexFile = currentSegment.getCDCIndexFile(); Assert.assertTrue("File does not exist: " + linked, Files.exists(linked)); Assert.assertFalse("Expected index file to not be created but found: " + cdcIndexFile, cdcIndexFile.exists()); @@ -262,7 +264,7 @@ public void testRetainLinkOnDiscardCDC() throws Throwable .add("data", randomizeBuffer(DatabaseDescriptor.getCommitLogSegmentSize() / 3)) .build().apply(); - Path linked = new File(DatabaseDescriptor.getCDCLogLocation(), currentSegment.logFile.getName()).toPath(); + Path linked = new File(DatabaseDescriptor.getCDCLogLocation(), currentSegment.logFile.name()).toPath(); // Confirm that, with CDC data present but not yet flushed, we've hard-linked but have no index file Assert.assertTrue("File does not exist: " + linked, Files.exists(linked)); @@ -310,13 +312,13 @@ public void testReplayLogic() throws IOException // Build up a list of expected index files after replay and then clear out cdc_raw List oldData = parseCDCIndexData(); - for (File f : new File(DatabaseDescriptor.getCDCLogLocation()).listFiles()) - FileUtils.deleteWithConfirm(f.getAbsolutePath()); + for (File f : new File(DatabaseDescriptor.getCDCLogLocation()).tryList()) + FileUtils.deleteWithConfirm(f.absolutePath()); try { Assert.assertEquals("Expected 0 files in CDC folder after deletion. ", - 0, new File(DatabaseDescriptor.getCDCLogLocation()).listFiles().length); + 0, new File(DatabaseDescriptor.getCDCLogLocation()).tryList().length); } finally { @@ -331,7 +333,7 @@ public void testReplayLogic() throws IOException // Rough sanity check -> should be files there now. Assert.assertTrue("Expected non-zero number of files in CDC folder after restart.", - new File(DatabaseDescriptor.getCDCLogLocation()).listFiles().length > 0); + new File(DatabaseDescriptor.getCDCLogLocation()).tryList().length > 0); // Confirm all the old indexes in old are present and >= the original offset, as we flag the entire segment // as cdc written on a replay. 
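Illustration (not part of the patch): alongside the listFiles()/tryList() change, these tests swap the java.io stream classes for the util wrappers (FileOutputStreamPlus, FileInputStreamPlus, FileReader). A small write-and-read round trip assembled only from constructors and calls visible in this patch; the temp-file prefix and helper name are invented for the example:

import java.io.BufferedReader;
import java.io.IOException;
import java.io.OutputStream;
import java.nio.charset.StandardCharsets;
import org.apache.cassandra.io.util.File;
import org.apache.cassandra.io.util.FileOutputStreamPlus;
import org.apache.cassandra.io.util.FileReader;
import org.apache.cassandra.io.util.FileUtils;

// Illustrative only: write a line with FileOutputStreamPlus and read it back through
// the util FileReader, the same substitutions made throughout these test diffs.
public class StreamPlusExample
{
    static String roundTrip(String line) throws IOException
    {
        File f = FileUtils.createTempFile("example-", ".txt");   // prefix/suffix are made up
        f.deleteOnExit();
        try (OutputStream out = new FileOutputStreamPlus(f))     // replaces java.io.FileOutputStream
        {
            out.write(line.getBytes(StandardCharsets.UTF_8));
        }
        try (BufferedReader in = new BufferedReader(new FileReader(f)))  // replaces InputStreamReader(FileInputStream)
        {
            return in.readLine();
        }
    }
}
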
@@ -377,9 +379,9 @@ private List parseCDCIndexData() List results = new ArrayList<>(); try { - for (File f : new File(DatabaseDescriptor.getCDCLogLocation()).listFiles()) + for (File f : new File(DatabaseDescriptor.getCDCLogLocation()).tryList()) { - if (f.getName().contains("_cdc.idx")) + if (f.name().contains("_cdc.idx")) results.add(new CDCIndexData(f)); } } @@ -398,7 +400,7 @@ private static class CDCIndexData CDCIndexData(File f) throws IOException { String line = ""; - try (BufferedReader br = new BufferedReader(new InputStreamReader(new FileInputStream(f)))) + try (BufferedReader br = new BufferedReader(new FileReader(f))) { line = br.readLine(); } @@ -406,7 +408,7 @@ private static class CDCIndexData { throw e; } - fileName = f.getName(); + fileName = f.name(); offset = Integer.parseInt(line); } @@ -433,7 +435,7 @@ private ByteBuffer randomizeBuffer(int size) private int getCDCRawCount() { - return new File(DatabaseDescriptor.getCDCLogLocation()).listFiles().length; + return new File(DatabaseDescriptor.getCDCLogLocation()).tryList().length; } private void expectCurrentCDCState(CDCState expectedState) diff --git a/test/unit/org/apache/cassandra/db/commitlog/CommitLogTest.java b/test/unit/org/apache/cassandra/db/commitlog/CommitLogTest.java index c6a11aff0b8a..c0a70c66f374 100644 --- a/test/unit/org/apache/cassandra/db/commitlog/CommitLogTest.java +++ b/test/unit/org/apache/cassandra/db/commitlog/CommitLogTest.java @@ -19,6 +19,7 @@ package org.apache.cassandra.db.commitlog; import java.io.*; +import org.apache.cassandra.io.util.File; import java.math.BigInteger; import java.nio.ByteBuffer; import java.util.*; @@ -34,6 +35,7 @@ import com.google.common.collect.Iterables; import com.google.common.io.Files; +import org.apache.cassandra.io.util.FileOutputStreamPlus; import org.junit.*; import org.junit.runner.RunWith; import org.junit.runners.Parameterized; @@ -210,7 +212,7 @@ public void testHeaderOnlyFileFiltering() throws Exception { Assume.assumeTrue(!DatabaseDescriptor.getEncryptionContext().isEnabled()); - File directory = Files.createTempDir(); + File directory = new File(Files.createTempDir()); CommitLogDescriptor desc1 = new CommitLogDescriptor(CommitLogDescriptor.current_version, 1, null, DatabaseDescriptor.getEncryptionContext()); CommitLogDescriptor desc2 = new CommitLogDescriptor(CommitLogDescriptor.current_version, 2, null, DatabaseDescriptor.getEncryptionContext()); @@ -227,7 +229,7 @@ public void testHeaderOnlyFileFiltering() throws Exception buffer.putInt(5); buffer.putInt(6); - try (OutputStream lout = new FileOutputStream(file1)) + try (OutputStream lout = new FileOutputStreamPlus(file1)) { lout.write(buffer.array()); } @@ -236,7 +238,7 @@ public void testHeaderOnlyFileFiltering() throws Exception File file2 = new File(directory, desc2.fileName()); buffer = ByteBuffer.allocate(1024); CommitLogDescriptor.writeHeader(buffer, desc2); - try (OutputStream lout = new FileOutputStream(file2)) + try (OutputStream lout = new FileOutputStreamPlus(file2)) { lout.write(buffer.array()); } @@ -571,7 +573,7 @@ protected Pair tmpFile() throws IOException File logFile = new File(DatabaseDescriptor.getCommitLogLocation(), desc.fileName()); - try (OutputStream lout = new FileOutputStream(logFile)) + try (OutputStream lout = new FileOutputStreamPlus(logFile)) { lout.write(buf.array(), 0, buf.limit()); } @@ -600,11 +602,11 @@ protected File tmpFile(int version) protected Void testRecovery(byte[] logData, int version) throws Exception { File logFile = tmpFile(version); - try (OutputStream 
lout = new FileOutputStream(logFile)) + try (OutputStream lout = new FileOutputStreamPlus(logFile)) { lout.write(logData); //statics make it annoying to test things correctly - CommitLog.instance.recover(logFile.getPath()); //CASSANDRA-1119 / CASSANDRA-1179 throw on failure*/ + CommitLog.instance.recover(logFile.path()); //CASSANDRA-1119 / CASSANDRA-1179 throw on failure*/ } return null; } @@ -612,17 +614,17 @@ protected Void testRecovery(byte[] logData, int version) throws Exception protected Void testRecovery(CommitLogDescriptor desc, byte[] logData) throws Exception { File logFile = tmpFile(desc.version); - CommitLogDescriptor fromFile = CommitLogDescriptor.fromFileName(logFile.getName()); + CommitLogDescriptor fromFile = CommitLogDescriptor.fromFileName(logFile.name()); // Change id to match file. desc = new CommitLogDescriptor(desc.version, fromFile.id, desc.compression, desc.getEncryptionContext()); ByteBuffer buf = ByteBuffer.allocate(1024); CommitLogDescriptor.writeHeader(buf, desc, getAdditionalHeaders(desc.getEncryptionContext())); - try (OutputStream lout = new FileOutputStream(logFile)) + try (OutputStream lout = new FileOutputStreamPlus(logFile)) { lout.write(buf.array(), 0, buf.position()); lout.write(logData); //statics make it annoying to test things correctly - CommitLog.instance.recover(logFile.getPath()); //CASSANDRA-1119 / CASSANDRA-1179 throw on failure*/ + CommitLog.instance.recover(logFile.path()); //CASSANDRA-1119 / CASSANDRA-1179 throw on failure*/ } return null; } @@ -634,12 +636,12 @@ public void testRecoveryWithIdMismatch() throws Exception File logFile = tmpFile(desc.version); ByteBuffer buf = ByteBuffer.allocate(1024); CommitLogDescriptor.writeHeader(buf, desc); - try (OutputStream lout = new FileOutputStream(logFile)) + try (OutputStream lout = new FileOutputStreamPlus(logFile)) { lout.write(buf.array(), 0, buf.position()); runExpecting(() -> { - CommitLog.instance.recover(logFile.getPath()); //CASSANDRA-1119 / CASSANDRA-1179 throw on failure*/ + CommitLog.instance.recover(logFile.path()); //CASSANDRA-1119 / CASSANDRA-1179 throw on failure*/ return null; }, CommitLogReplayException.class); } @@ -770,7 +772,7 @@ public void replaySimple() throws IOException List activeSegments = CommitLog.instance.getActiveSegmentNames(); assertFalse(activeSegments.isEmpty()); - File[] files = new File(CommitLog.instance.segmentManager.storageDirectory).listFiles((file, name) -> activeSegments.contains(name)); + File[] files = new File(CommitLog.instance.segmentManager.storageDirectory).tryList((file, name) -> activeSegments.contains(name)); replayer.replayFiles(files); assertEquals(cellCount, replayer.cells); @@ -791,7 +793,7 @@ public void replayWithBadSyncMarkerCRC() throws IOException assertFalse(activeSegments.isEmpty()); File directory = new File(CommitLog.instance.segmentManager.storageDirectory); - File firstActiveFile = Objects.requireNonNull(directory.listFiles((file, name) -> activeSegments.contains(name)))[0]; + File firstActiveFile = Objects.requireNonNull(directory.tryList((file, name) -> activeSegments.contains(name)))[0]; zeroFirstSyncMarkerCRC(firstActiveFile); CommitLogSegmentReader.setAllowSkipSyncMarkerCrc(true); @@ -838,7 +840,7 @@ private void zeroFirstSyncMarkerCRC(File file) throws IOException buffer.putInt(0); // ...and write the file back out. 
- try (OutputStream out = new FileOutputStream(file)) + try (OutputStream out = new FileOutputStreamPlus(file)) { out.write(buffer.array()); } @@ -875,7 +877,7 @@ public void replayWithDiscard() throws IOException List activeSegments = CommitLog.instance.getActiveSegmentNames(); assertFalse(activeSegments.isEmpty()); - File[] files = new File(CommitLog.instance.segmentManager.storageDirectory).listFiles((file, name) -> activeSegments.contains(name)); + File[] files = new File(CommitLog.instance.segmentManager.storageDirectory).tryList((file, name) -> activeSegments.contains(name)); replayer.replayFiles(files); assertEquals(cellCount, replayer.cells); diff --git a/test/unit/org/apache/cassandra/db/commitlog/CommitLogTestReplayer.java b/test/unit/org/apache/cassandra/db/commitlog/CommitLogTestReplayer.java index 5b87d687813e..0519af925c80 100644 --- a/test/unit/org/apache/cassandra/db/commitlog/CommitLogTestReplayer.java +++ b/test/unit/org/apache/cassandra/db/commitlog/CommitLogTestReplayer.java @@ -18,7 +18,7 @@ */ package org.apache.cassandra.db.commitlog; -import java.io.File; +import org.apache.cassandra.io.util.File; import java.io.IOException; import com.google.common.base.Predicate; @@ -48,7 +48,7 @@ public CommitLogTestReplayer(Predicate processor) throws IOException public void examineCommitLog() throws IOException { - replayFiles(new File(DatabaseDescriptor.getCommitLogLocation()).listFiles()); + replayFiles(new File(DatabaseDescriptor.getCommitLogLocation()).tryList()); } private class CommitLogTestReader extends CommitLogReader diff --git a/test/unit/org/apache/cassandra/db/commitlog/CommitLogUpgradeTest.java b/test/unit/org/apache/cassandra/db/commitlog/CommitLogUpgradeTest.java index 47059193bdd1..5747a381b3ec 100644 --- a/test/unit/org/apache/cassandra/db/commitlog/CommitLogUpgradeTest.java +++ b/test/unit/org/apache/cassandra/db/commitlog/CommitLogUpgradeTest.java @@ -25,6 +25,8 @@ import java.nio.ByteBuffer; import java.util.Properties; +import org.apache.cassandra.io.util.File; +import org.apache.cassandra.io.util.FileInputStreamPlus; import org.junit.Assert; import com.google.common.base.Predicate; @@ -116,7 +118,7 @@ public static void initialize() public void testRestore(String location) throws IOException, InterruptedException { Properties prop = new Properties(); - prop.load(new FileInputStream(new File(location + File.separatorChar + PROPERTIES_FILE))); + prop.load(new FileInputStreamPlus(new File(location + File.pathSeparator() + PROPERTIES_FILE))); int hash = Integer.parseInt(prop.getProperty(HASH_PROPERTY)); int cells = Integer.parseInt(prop.getProperty(CELLS_PROPERTY)); @@ -130,7 +132,7 @@ public void testRestore(String location) throws IOException, InterruptedExceptio Hasher hasher = new Hasher(); CommitLogTestReplayer replayer = new CommitLogTestReplayer(hasher); - File[] files = new File(location).listFiles((file, name) -> name.endsWith(".log")); + File[] files = new File(location).tryList((file, name) -> name.endsWith(".log")); replayer.replayFiles(files); Assert.assertEquals(cells, hasher.cells); diff --git a/test/unit/org/apache/cassandra/db/commitlog/CommitLogUpgradeTestMaker.java b/test/unit/org/apache/cassandra/db/commitlog/CommitLogUpgradeTestMaker.java index 680a0e7f6c4b..b067faa13c37 100644 --- a/test/unit/org/apache/cassandra/db/commitlog/CommitLogUpgradeTestMaker.java +++ b/test/unit/org/apache/cassandra/db/commitlog/CommitLogUpgradeTestMaker.java @@ -33,6 +33,9 @@ import java.util.concurrent.atomic.AtomicLong; import 
com.google.common.util.concurrent.RateLimiter; +import org.apache.cassandra.io.util.File; +import org.apache.cassandra.io.util.FileInputStreamPlus; +import org.apache.cassandra.io.util.FileOutputStreamPlus; import org.junit.Assert; import org.apache.cassandra.SchemaLoader; @@ -82,7 +85,7 @@ public static void main(String[] args) throws Exception static public void initialize() throws IOException, ConfigurationException { - try (FileInputStream fis = new FileInputStream("CHANGES.txt")) + try (FileInputStreamPlus fis = new FileInputStreamPlus("CHANGES.txt")) { dataSource = ByteBuffer.allocateDirect((int) fis.getChannel().size()); while (dataSource.hasRemaining()) @@ -128,15 +131,15 @@ public void makeLog() throws IOException, InterruptedException if (dataDir.exists()) FileUtils.deleteRecursive(dataDir); - dataDir.mkdirs(); - for (File f : new File(DatabaseDescriptor.getCommitLogLocation()).listFiles()) - FileUtils.createHardLink(f, new File(dataDir, f.getName())); + dataDir.tryCreateDirectories(); + for (File f : new File(DatabaseDescriptor.getCommitLogLocation()).tryList()) + FileUtils.createHardLink(f, new File(dataDir, f.name())); Properties prop = new Properties(); prop.setProperty(CFID_PROPERTY, Schema.instance.getTableMetadata(KEYSPACE, TABLE).id.toString()); prop.setProperty(CELLS_PROPERTY, Integer.toString(cells)); prop.setProperty(HASH_PROPERTY, Integer.toString(hash)); - prop.store(new FileOutputStream(new File(dataDir, PROPERTIES_FILE)), + prop.store(new FileOutputStreamPlus(new File(dataDir, PROPERTIES_FILE)), "CommitLog upgrade test, version " + FBUtilities.getReleaseVersionString()); System.out.println("Done"); } diff --git a/test/unit/org/apache/cassandra/db/commitlog/CommitlogShutdownTest.java b/test/unit/org/apache/cassandra/db/commitlog/CommitlogShutdownTest.java index 390a6e97df16..e962450a80b1 100644 --- a/test/unit/org/apache/cassandra/db/commitlog/CommitlogShutdownTest.java +++ b/test/unit/org/apache/cassandra/db/commitlog/CommitlogShutdownTest.java @@ -18,11 +18,11 @@ package org.apache.cassandra.db.commitlog; -import java.io.File; import java.nio.ByteBuffer; import java.util.Random; import com.google.common.collect.ImmutableMap; +import org.apache.cassandra.io.util.File; import org.junit.Assert; import org.junit.Test; import org.junit.runner.RunWith; @@ -93,6 +93,6 @@ public void testShutdownWithPendingTasks() throws Exception CommitLog.instance.shutdownBlocking(); // the shutdown should block until all logs except the currently active one and perhaps a new, empty one are gone - Assert.assertTrue(new File(DatabaseDescriptor.getCommitLogLocation()).listFiles().length <= 2); + Assert.assertTrue(new File(DatabaseDescriptor.getCommitLogLocation()).tryList().length <= 2); } } diff --git a/test/unit/org/apache/cassandra/db/commitlog/SegmentReaderTest.java b/test/unit/org/apache/cassandra/db/commitlog/SegmentReaderTest.java index ce209351fa62..416675907028 100644 --- a/test/unit/org/apache/cassandra/db/commitlog/SegmentReaderTest.java +++ b/test/unit/org/apache/cassandra/db/commitlog/SegmentReaderTest.java @@ -17,10 +17,8 @@ */ package org.apache.cassandra.db.commitlog; -import java.io.File; -import java.io.FileOutputStream; +import org.apache.cassandra.io.util.*; import java.io.IOException; -import java.io.RandomAccessFile; import java.nio.ByteBuffer; import java.nio.channels.FileChannel; import java.util.Collections; @@ -42,9 +40,6 @@ import org.apache.cassandra.io.compress.LZ4Compressor; import org.apache.cassandra.io.compress.SnappyCompressor; import 
org.apache.cassandra.io.compress.ZstdCompressor; -import org.apache.cassandra.io.util.FileDataInput; -import org.apache.cassandra.io.util.FileUtils; -import org.apache.cassandra.io.util.RandomAccessReader; import org.apache.cassandra.security.CipherFactory; import org.apache.cassandra.security.EncryptionUtils; import org.apache.cassandra.security.EncryptionContext; @@ -103,7 +98,7 @@ private void compressedSegmenter(ICompressor compressor) throws IOException File compressedFile = FileUtils.createTempFile("compressed-segment-", ".log"); compressedFile.deleteOnExit(); - FileOutputStream fos = new FileOutputStream(compressedFile); + FileOutputStreamPlus fos = new FileOutputStreamPlus(compressedFile); fos.getChannel().write(compBuffer); fos.close(); @@ -190,7 +185,7 @@ public void underlyingEncryptedSegmenterTest(BiFunction(sstableOld.getAllFilePaths())); + assertFiles(dataFolder.path(), new HashSet<>(sstableOld.getAllFilePaths())); } void assertCommitted() throws Exception { - assertFiles(dataFolder.getPath(), new HashSet<>(sstableNew.getAllFilePaths())); + assertFiles(dataFolder.path(), new HashSet<>(sstableNew.getAllFilePaths())); } } @@ -218,7 +218,7 @@ public void testUntrack() throws Throwable Thread.sleep(1); LogTransaction.waitForDeletions(); - assertFiles(dataFolder.getPath(), Collections.emptySet()); + assertFiles(dataFolder.path(), Collections.emptySet()); } @Test @@ -247,7 +247,7 @@ public void testCommitSameDesc() throws Throwable sstableOld1.selfRef().release(); sstableOld2.selfRef().release(); - assertFiles(dataFolder.getPath(), new HashSet<>(sstableNew.getAllFilePaths())); + assertFiles(dataFolder.path(), new HashSet<>(sstableNew.getAllFilePaths())); sstableNew.selfRef().release(); } @@ -265,7 +265,7 @@ public void testCommitOnlyNew() throws Throwable log.trackNew(sstable); log.finish(); - assertFiles(dataFolder.getPath(), new HashSet<>(sstable.getAllFilePaths())); + assertFiles(dataFolder.path(), new HashSet<>(sstable.getAllFilePaths())); sstable.selfRef().release(); } @@ -287,7 +287,7 @@ public void testCommitOnlyOld() throws Throwable sstable.markObsolete(tidier); sstable.selfRef().release(); - assertFiles(dataFolder.getPath(), new HashSet<>()); + assertFiles(dataFolder.path(), new HashSet<>()); } @Test @@ -323,8 +323,8 @@ public void testCommitMultipleFolders() throws Throwable Arrays.stream(sstables).forEach(s -> s.selfRef().release()); LogTransaction.waitForDeletions(); - assertFiles(dataFolder1.getPath(), new HashSet<>(sstables[1].getAllFilePaths())); - assertFiles(dataFolder2.getPath(), new HashSet<>(sstables[3].getAllFilePaths())); + assertFiles(dataFolder1.path(), new HashSet<>(sstables[1].getAllFilePaths())); + assertFiles(dataFolder2.path(), new HashSet<>(sstables[3].getAllFilePaths())); } @Test @@ -342,7 +342,7 @@ public void testAbortOnlyNew() throws Throwable sstable.selfRef().release(); - assertFiles(dataFolder.getPath(), new HashSet<>()); + assertFiles(dataFolder.path(), new HashSet<>()); } @Test @@ -363,7 +363,7 @@ public void testAbortOnlyOld() throws Throwable sstable.selfRef().release(); - assertFiles(dataFolder.getPath(), new HashSet<>(sstable.getAllFilePaths())); + assertFiles(dataFolder.path(), new HashSet<>(sstable.getAllFilePaths())); } @Test @@ -397,8 +397,8 @@ public void testAbortMultipleFolders() throws Throwable Arrays.stream(sstables).forEach(s -> s.selfRef().release()); LogTransaction.waitForDeletions(); - assertFiles(dataFolder1.getPath(), new HashSet<>(sstables[0].getAllFilePaths())); - assertFiles(dataFolder2.getPath(), new 
HashSet<>(sstables[2].getAllFilePaths())); + assertFiles(dataFolder1.path(), new HashSet<>(sstables[0].getAllFilePaths())); + assertFiles(dataFolder2.path(), new HashSet<>(sstables[2].getAllFilePaths())); } @@ -432,7 +432,7 @@ public void testRemoveUnfinishedLeftovers_abort() throws Throwable Map> sstables = directories.sstableLister(Directories.OnTxnErr.THROW).list(); assertEquals(1, sstables.size()); - assertFiles(dataFolder.getPath(), new HashSet<>(sstableOld.getAllFilePaths())); + assertFiles(dataFolder.path(), new HashSet<>(sstableOld.getAllFilePaths())); // complete the transaction before releasing files tidier.run(); @@ -472,7 +472,7 @@ public void testRemoveUnfinishedLeftovers_commit() throws Throwable Map> sstables = directories.sstableLister(Directories.OnTxnErr.THROW).list(); assertEquals(1, sstables.size()); - assertFiles(dataFolder.getPath(), new HashSet<>(sstableNew.getAllFilePaths())); + assertFiles(dataFolder.path(), new HashSet<>(sstableNew.getAllFilePaths())); // complete the transaction to avoid LEAK errors tidier.run(); @@ -522,8 +522,8 @@ public void testRemoveUnfinishedLeftovers_commit_multipleFolders() throws Throwa assertTrue(LogTransaction.removeUnfinishedLeftovers(Arrays.asList(dataFolder1, dataFolder2))); // new tables should be only table left - assertFiles(dataFolder1.getPath(), new HashSet<>(sstables[1].getAllFilePaths())); - assertFiles(dataFolder2.getPath(), new HashSet<>(sstables[3].getAllFilePaths())); + assertFiles(dataFolder1.path(), new HashSet<>(sstables[1].getAllFilePaths())); + assertFiles(dataFolder2.path(), new HashSet<>(sstables[3].getAllFilePaths())); // complete the transaction to avoid LEAK errors Arrays.stream(tidiers).forEach(LogTransaction.SSTableTidier::run); @@ -573,8 +573,8 @@ public void testRemoveUnfinishedLeftovers_abort_multipleFolders() throws Throwab assertTrue(LogTransaction.removeUnfinishedLeftovers(Arrays.asList(dataFolder1, dataFolder2))); // old tables should be only table left - assertFiles(dataFolder1.getPath(), new HashSet<>(sstables[0].getAllFilePaths())); - assertFiles(dataFolder2.getPath(), new HashSet<>(sstables[2].getAllFilePaths())); + assertFiles(dataFolder1.path(), new HashSet<>(sstables[0].getAllFilePaths())); + assertFiles(dataFolder2.path(), new HashSet<>(sstables[2].getAllFilePaths())); // complete the transaction to avoid LEAK errors Arrays.stream(tidiers).forEach(LogTransaction.SSTableTidier::run); @@ -742,18 +742,18 @@ private static void testRemoveUnfinishedLeftovers_multipleFolders_errorCondition if (shouldCommit) { // only new sstables should still be there - assertFiles(dataFolder1.getPath(), new HashSet<>(sstables[1].getAllFilePaths())); - assertFiles(dataFolder2.getPath(), new HashSet<>(sstables[3].getAllFilePaths())); + assertFiles(dataFolder1.path(), new HashSet<>(sstables[1].getAllFilePaths())); + assertFiles(dataFolder2.path(), new HashSet<>(sstables[3].getAllFilePaths())); } else { // all files should still be there - assertFiles(dataFolder1.getPath(), Sets.newHashSet(Iterables.concat(sstables[0].getAllFilePaths(), - sstables[1].getAllFilePaths(), - Collections.singleton(log.logFilePaths().get(0))))); - assertFiles(dataFolder2.getPath(), Sets.newHashSet(Iterables.concat(sstables[2].getAllFilePaths(), - sstables[3].getAllFilePaths(), - Collections.singleton(log.logFilePaths().get(1))))); + assertFiles(dataFolder1.path(), Sets.newHashSet(Iterables.concat(sstables[0].getAllFilePaths(), + sstables[1].getAllFilePaths(), + Collections.singleton(log.logFilePaths().get(0))))); + 
assertFiles(dataFolder2.path(), Sets.newHashSet(Iterables.concat(sstables[2].getAllFilePaths(), + sstables[3].getAllFilePaths(), + Collections.singleton(log.logFilePaths().get(1))))); } @@ -778,7 +778,7 @@ public void testGetTemporaryFiles() throws IOException { Directories directories = new Directories(cfs.metadata()); - File[] beforeSecondSSTable = dataFolder.listFiles(pathname -> !pathname.isDirectory()); + File[] beforeSecondSSTable = dataFolder.tryList(pathname -> !pathname.isDirectory()); SSTableReader sstable2 = sstable(dataFolder, cfs, 1, 128); log.trackNew(sstable2); @@ -787,7 +787,7 @@ public void testGetTemporaryFiles() throws IOException assertEquals(2, sstables.size()); // this should contain sstable1, sstable2 and the transaction log file - File[] afterSecondSSTable = dataFolder.listFiles(pathname -> !pathname.isDirectory()); + File[] afterSecondSSTable = dataFolder.tryList(pathname -> !pathname.isDirectory()); int numNewFiles = afterSecondSSTable.length - beforeSecondSSTable.length; assertEquals(numNewFiles - 1, sstable2.getAllFilePaths().size()); // new files except for transaction log file @@ -1032,7 +1032,7 @@ private static void testCorruptRecord(BiConsumer LogTransaction.removeUnfinishedLeftovers(cfs.metadata()); // make sure to exclude the old files that were deleted by the modifier - assertFiles(dataFolder.getPath(), oldFiles); + assertFiles(dataFolder.path(), oldFiles); } else { // if an intermediate line was also modified, it should ignore the tx log file @@ -1040,9 +1040,9 @@ private static void testCorruptRecord(BiConsumer //This should not remove any files LogTransaction.removeUnfinishedLeftovers(cfs.metadata()); - assertFiles(dataFolder.getPath(), Sets.newHashSet(Iterables.concat(newFiles, - oldFiles, - log.logFilePaths()))); + assertFiles(dataFolder.path(), Sets.newHashSet(Iterables.concat(newFiles, + oldFiles, + log.logFilePaths()))); } // make sure to run the tidier to avoid any leaks in the logs @@ -1058,7 +1058,7 @@ public void testObsoletedDataFileUpdateTimeChanged() throws IOException for (String filePath : sstable.getAllFilePaths()) { if (filePath.endsWith("Data.db")) - assertTrue(new File(filePath).setLastModified(System.currentTimeMillis() + 60000)); //one minute later + assertTrue(new File(filePath).trySetLastModified(System.currentTimeMillis() + 60000)); //one minute later } }); } @@ -1086,7 +1086,7 @@ private static void testObsoletedFilesChanged(Consumer modifier) //This should not remove the old files LogTransaction.removeUnfinishedLeftovers(cfs.metadata()); - assertFiles(dataFolder.getPath(), Sets.newHashSet(Iterables.concat( + assertFiles(dataFolder.path(), Sets.newHashSet(Iterables.concat( sstableNew.getAllFilePaths(), sstableOld.getAllFilePaths(), log.logFilePaths()))); @@ -1097,9 +1097,9 @@ private static void testObsoletedFilesChanged(Consumer modifier) // complete the transaction to avoid LEAK errors assertNull(log.complete(null)); - assertFiles(dataFolder.getPath(), Sets.newHashSet(Iterables.concat(sstableNew.getAllFilePaths(), - sstableOld.getAllFilePaths(), - log.logFilePaths()))); + assertFiles(dataFolder.path(), Sets.newHashSet(Iterables.concat(sstableNew.getAllFilePaths(), + sstableOld.getAllFilePaths(), + log.logFilePaths()))); // make sure to run the tidier to avoid any leaks in the logs tidier.run(); @@ -1118,7 +1118,7 @@ public void testTruncateFileUpdateTime() throws IOException { File f = new File(filePath); long lastModified = f.lastModified(); - f.setLastModified(lastModified - (lastModified % 1000)); + 
f.trySetLastModified(lastModified - (lastModified % 1000)); } }); } @@ -1146,13 +1146,13 @@ private static void testTruncatedModificationTimesHelper(Consumer LogTransaction.removeUnfinishedLeftovers(cfs.metadata()); // only the new files should be there - assertFiles(dataFolder.getPath(), Sets.newHashSet(sstableNew.getAllFilePaths())); + assertFiles(dataFolder.path(), Sets.newHashSet(sstableNew.getAllFilePaths())); sstableNew.selfRef().release(); // complete the transaction to avoid LEAK errors assertNull(log.complete(null)); - assertFiles(dataFolder.getPath(), Sets.newHashSet(sstableNew.getAllFilePaths())); + assertFiles(dataFolder.path(), Sets.newHashSet(sstableNew.getAllFilePaths())); // make sure to run the tidier to avoid any leaks in the logs tidier.run(); @@ -1224,8 +1224,9 @@ private static SSTableReader sstable(File dataFolder, ColumnFamilyStore cfs, int { File file = new File(descriptor.filenameFor(component)); if (!file.exists()) - assertTrue(file.createNewFile()); - try (RandomAccessFile raf = new RandomAccessFile(file, "rw")) + assertTrue(file.createFileIfNotExists()); + + try (RandomAccessFile raf = new RandomAccessFile(file.toJavaIOFile(), "rw")) { raf.setLength(size); } @@ -1262,8 +1263,8 @@ private static void assertFiles(String dirPath, Set expectedFiles, boole { LogTransaction.waitForDeletions(); - File dir = new File(dirPath).getCanonicalFile(); - File[] files = dir.listFiles(); + File dir = new File(dirPath).toCanonical(); + File[] files = dir.tryList(); if (files != null) { for (File file : files) @@ -1271,7 +1272,7 @@ private static void assertFiles(String dirPath, Set expectedFiles, boole if (file.isDirectory()) continue; - String filePath = file.getPath(); + String filePath = file.path(); assertTrue(String.format("%s not in [%s]", filePath, expectedFiles), expectedFiles.contains(filePath)); expectedFiles.remove(filePath); } @@ -1328,16 +1329,7 @@ static Set listFiles(File folder, Directories.FileType... 
types) (file, type) -> match.contains(type), Directories.OnTxnErr.IGNORE).list() .stream() - .map(f -> { - try - { - return f.getCanonicalFile(); - } - catch (IOException e) - { - throw new IOError(e); - } - }) + .map(File::toCanonical) .collect(Collectors.toSet()); } } diff --git a/test/unit/org/apache/cassandra/db/lifecycle/RealTransactionsTest.java b/test/unit/org/apache/cassandra/db/lifecycle/RealTransactionsTest.java index 01bfaaee7e44..0420957b9dbd 100644 --- a/test/unit/org/apache/cassandra/db/lifecycle/RealTransactionsTest.java +++ b/test/unit/org/apache/cassandra/db/lifecycle/RealTransactionsTest.java @@ -18,13 +18,13 @@ package org.apache.cassandra.db.lifecycle; -import java.io.File; import java.io.IOException; import java.util.HashSet; import java.util.List; import java.util.Set; import java.util.concurrent.TimeUnit; +import org.apache.cassandra.io.util.File; import org.junit.BeforeClass; import org.junit.Test; @@ -85,8 +85,8 @@ public void testRewriteFinished() throws IOException LogTransaction.waitForDeletions(); // both sstables are in the same folder - assertFiles(oldSSTable.descriptor.directory.getPath(), new HashSet<>(newSSTable.getAllFilePaths())); - assertFiles(newSSTable.descriptor.directory.getPath(), new HashSet<>(newSSTable.getAllFilePaths())); + assertFiles(oldSSTable.descriptor.directory.path(), new HashSet<>(newSSTable.getAllFilePaths())); + assertFiles(newSSTable.descriptor.directory.path(), new HashSet<>(newSSTable.getAllFilePaths())); } @Test @@ -101,7 +101,7 @@ public void testRewriteAborted() throws IOException replaceSSTable(cfs, txn, true); LogTransaction.waitForDeletions(); - assertFiles(oldSSTable.descriptor.directory.getPath(), new HashSet<>(oldSSTable.getAllFilePaths())); + assertFiles(oldSSTable.descriptor.directory.path(), new HashSet<>(oldSSTable.getAllFilePaths())); } @Test @@ -112,7 +112,7 @@ public void testFlush() throws IOException SSTableReader ssTableReader = getSSTable(cfs, 100); - String dataFolder = cfs.getLiveSSTables().iterator().next().descriptor.directory.getPath(); + String dataFolder = cfs.getLiveSSTables().iterator().next().descriptor.directory.path(); assertFiles(dataFolder, new HashSet<>(ssTableReader.getAllFilePaths())); } @@ -202,12 +202,12 @@ private SSTableReader replaceSSTable(ColumnFamilyStore cfs, LifecycleTransaction private void assertFiles(String dirPath, Set expectedFiles) { File dir = new File(dirPath); - for (File file : dir.listFiles()) + for (File file : dir.tryList()) { if (file.isDirectory()) continue; - String filePath = file.getPath(); + String filePath = file.path(); assertTrue(filePath, expectedFiles.contains(filePath)); expectedFiles.remove(filePath); } diff --git a/test/unit/org/apache/cassandra/db/marshal/TimestampTypeTest.java b/test/unit/org/apache/cassandra/db/marshal/TimestampTypeTest.java index b34207f203b8..3ec63c04c842 100644 --- a/test/unit/org/apache/cassandra/db/marshal/TimestampTypeTest.java +++ b/test/unit/org/apache/cassandra/db/marshal/TimestampTypeTest.java @@ -42,4 +42,4 @@ public void stringProperty() .isEqualTo(buffer); }); } -} \ No newline at end of file +} diff --git a/test/unit/org/apache/cassandra/db/rows/UnfilteredRowsGenerator.java b/test/unit/org/apache/cassandra/db/rows/UnfilteredRowsGenerator.java index 2804733e3f54..c6deb2440bce 100644 --- a/test/unit/org/apache/cassandra/db/rows/UnfilteredRowsGenerator.java +++ b/test/unit/org/apache/cassandra/db/rows/UnfilteredRowsGenerator.java @@ -337,4 +337,4 @@ public void dumpList(List list) { System.out.println(str(list)); } -} \ No 
newline at end of file +} diff --git a/test/unit/org/apache/cassandra/dht/LengthPartitioner.java b/test/unit/org/apache/cassandra/dht/LengthPartitioner.java index bd6f3d4e0eae..c4e5db82849f 100644 --- a/test/unit/org/apache/cassandra/dht/LengthPartitioner.java +++ b/test/unit/org/apache/cassandra/dht/LengthPartitioner.java @@ -172,4 +172,4 @@ public AbstractType partitionOrdering() { return new PartitionerDefinedOrder(this); } -} \ No newline at end of file +} diff --git a/test/unit/org/apache/cassandra/fql/FullQueryLoggerTest.java b/test/unit/org/apache/cassandra/fql/FullQueryLoggerTest.java index e5a5d86586a2..0b9230bfd128 100644 --- a/test/unit/org/apache/cassandra/fql/FullQueryLoggerTest.java +++ b/test/unit/org/apache/cassandra/fql/FullQueryLoggerTest.java @@ -17,7 +17,6 @@ */ package org.apache.cassandra.fql; -import java.io.File; import java.nio.ByteBuffer; import java.nio.file.Path; import java.util.ArrayList; @@ -30,6 +29,7 @@ import javax.annotation.Nullable; +import org.apache.cassandra.io.util.File; import org.apache.commons.lang3.StringUtils; import org.junit.After; import org.junit.BeforeClass; @@ -132,42 +132,42 @@ public void testConfigureOverExistingFile() @Test(expected = IllegalArgumentException.class) public void testCanRead() throws Exception { - tempDir.toFile().setReadable(false); + new File(tempDir).trySetReadable(false); try { configureFQL(); } finally { - tempDir.toFile().setReadable(true); + new File(tempDir).trySetReadable(true); } } @Test(expected = IllegalArgumentException.class) public void testCanWrite() throws Exception { - tempDir.toFile().setWritable(false); + new File(tempDir).trySetWritable(false); try { configureFQL(); } finally { - tempDir.toFile().setWritable(true); + new File(tempDir).trySetWritable(true); } } @Test(expected = IllegalArgumentException.class) public void testCanExecute() throws Exception { - tempDir.toFile().setExecutable(false); + new File(tempDir).trySetExecutable(false); try { configureFQL(); } finally { - tempDir.toFile().setExecutable(true); + new File(tempDir).trySetExecutable(true); } } @@ -192,10 +192,10 @@ public void stopWithoutConfigure() throws Exception public void testResetCleansPaths() throws Exception { configureFQL(); - File tempA = File.createTempFile("foo", "bar", tempDir.toFile()); + File tempA = FileUtils.createTempFile("foo", "bar", new File(tempDir)); assertTrue(tempA.exists()); - File tempB = File.createTempFile("foo", "bar", BinLogTest.tempDir().toFile()); - FullQueryLogger.instance.reset(tempB.getParent()); + File tempB = FileUtils.createTempFile("foo", "bar", new File(BinLogTest.tempDir())); + FullQueryLogger.instance.reset(tempB.parentPath()); assertFalse(tempA.exists()); assertFalse(tempB.exists()); } @@ -207,9 +207,9 @@ public void testResetCleansPaths() throws Exception public void testResetSamePath() throws Exception { configureFQL(); - File tempA = File.createTempFile("foo", "bar", tempDir.toFile()); + File tempA = FileUtils.createTempFile("foo", "bar", new File(tempDir)); assertTrue(tempA.exists()); - FullQueryLogger.instance.reset(tempA.getParent()); + FullQueryLogger.instance.reset(tempA.parentPath()); assertFalse(tempA.exists()); } @@ -223,10 +223,10 @@ public void testDoubleConfigure() throws Exception @Test public void testCleansDirectory() throws Exception { - assertTrue(new File(tempDir.toFile(), "foobar").createNewFile()); + assertTrue(new File(tempDir, "foobar").createFileIfNotExists()); configureFQL(); - assertEquals(tempDir.toFile().listFiles().length, 1); - assertEquals("metadata.cq4t", 
tempDir.toFile().listFiles()[0].getName()); + assertEquals(new File(tempDir).tryList().length, 1); + assertEquals("metadata.cq4t", new File(tempDir).tryList()[0].name()); } @Test diff --git a/test/unit/org/apache/cassandra/gms/SerializationsTest.java b/test/unit/org/apache/cassandra/gms/SerializationsTest.java index 90ce10ba0dff..d8511fe2a3d8 100644 --- a/test/unit/org/apache/cassandra/gms/SerializationsTest.java +++ b/test/unit/org/apache/cassandra/gms/SerializationsTest.java @@ -24,6 +24,7 @@ import org.apache.cassandra.dht.Token; import org.apache.cassandra.io.util.DataInputPlus.DataInputStreamPlus; import org.apache.cassandra.io.util.DataOutputStreamPlus; +import org.apache.cassandra.io.util.FileInputStreamPlus; import org.apache.cassandra.locator.InetAddressAndPort; import org.apache.cassandra.service.StorageService; import org.apache.cassandra.utils.FBUtilities; @@ -68,7 +69,7 @@ public void testEndpointStateRead() throws IOException if (EXECUTE_WRITES) testEndpointStateWrite(); - DataInputStreamPlus in = getInput("gms.EndpointState.bin"); + FileInputStreamPlus in = getInput("gms.EndpointState.bin"); assert HeartBeatState.serializer.deserialize(in, getVersion()) != null; assert EndpointState.serializer.deserialize(in, getVersion()) != null; assert VersionedValue.serializer.deserialize(in, getVersion()) != null; @@ -110,7 +111,7 @@ public void testGossipDigestRead() throws IOException testGossipDigestWrite(); int count = 0; - DataInputStreamPlus in = getInput("gms.Gossip.bin"); + FileInputStreamPlus in = getInput("gms.Gossip.bin"); while (count < Statics.Digests.size()) assert GossipDigestAck2.serializer.deserialize(in, getVersion()) != null; assert GossipDigestAck.serializer.deserialize(in, getVersion()) != null; diff --git a/test/unit/org/apache/cassandra/hints/AlteredHints.java b/test/unit/org/apache/cassandra/hints/AlteredHints.java index 9b8e32f2fd21..0379c41b0c19 100644 --- a/test/unit/org/apache/cassandra/hints/AlteredHints.java +++ b/test/unit/org/apache/cassandra/hints/AlteredHints.java @@ -17,7 +17,6 @@ */ package org.apache.cassandra.hints; -import java.io.File; import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.Iterator; @@ -28,6 +27,7 @@ import com.google.common.collect.ImmutableMap; import com.google.common.io.Files; +import org.apache.cassandra.io.util.File; import org.junit.Assert; import org.junit.BeforeClass; @@ -85,7 +85,7 @@ public void multiFlushAndDeserializeTest() throws Exception long ts = System.currentTimeMillis(); HintsDescriptor descriptor = new HintsDescriptor(hostId, ts, params()); - File dir = Files.createTempDir(); + File dir = new File(Files.createTempDir()); try (HintsWriter writer = HintsWriter.create(dir, descriptor)) { Assert.assertTrue(looksLegit(writer)); diff --git a/test/unit/org/apache/cassandra/hints/ChecksummedDataInputTest.java b/test/unit/org/apache/cassandra/hints/ChecksummedDataInputTest.java index 9f4cdfb92f5b..4642584005f3 100644 --- a/test/unit/org/apache/cassandra/hints/ChecksummedDataInputTest.java +++ b/test/unit/org/apache/cassandra/hints/ChecksummedDataInputTest.java @@ -17,13 +17,13 @@ */ package org.apache.cassandra.hints; -import java.io.File; import java.io.IOException; import java.nio.ByteBuffer; import java.nio.charset.StandardCharsets; import java.util.Arrays; import java.util.zip.CRC32; +import org.apache.cassandra.io.util.File; import org.junit.BeforeClass; import org.junit.Test; diff --git a/test/unit/org/apache/cassandra/hints/HintWriteTTLTest.java 
b/test/unit/org/apache/cassandra/hints/HintWriteTTLTest.java index 21dbd7e8c377..22f29cb46e3c 100644 --- a/test/unit/org/apache/cassandra/hints/HintWriteTTLTest.java +++ b/test/unit/org/apache/cassandra/hints/HintWriteTTLTest.java @@ -18,7 +18,6 @@ package org.apache.cassandra.hints; -import java.io.File; import java.io.IOException; import java.nio.ByteBuffer; import java.nio.file.Files; @@ -27,6 +26,7 @@ import java.util.List; import java.util.concurrent.TimeUnit; +import org.apache.cassandra.io.util.File; import org.junit.Assert; import org.junit.BeforeClass; import org.junit.Test; @@ -94,7 +94,7 @@ public static void setupClass() throws Exception ttldHint = makeHint(tbm, 2, nowInSeconds - (TTL + 1), GC_GRACE); - File directory = Files.createTempDirectory(null).toFile(); + File directory = new File(Files.createTempDirectory(null)); HintsDescriptor descriptor = new HintsDescriptor(UUIDGen.getTimeUUID(), s2m(nowInSeconds)); try (HintsWriter writer = HintsWriter.create(directory, descriptor); diff --git a/test/unit/org/apache/cassandra/hints/HintsCatalogTest.java b/test/unit/org/apache/cassandra/hints/HintsCatalogTest.java index 92cfc7153bd4..1f8c95d6c788 100644 --- a/test/unit/org/apache/cassandra/hints/HintsCatalogTest.java +++ b/test/unit/org/apache/cassandra/hints/HintsCatalogTest.java @@ -17,7 +17,6 @@ */ package org.apache.cassandra.hints; -import java.io.File; import java.io.IOException; import java.nio.ByteBuffer; import java.nio.file.Files; @@ -26,6 +25,7 @@ import com.google.common.collect.ImmutableMap; import org.apache.cassandra.SchemaLoader; import org.apache.cassandra.db.Mutation; +import org.apache.cassandra.io.util.File; import org.apache.cassandra.schema.KeyspaceParams; import org.apache.cassandra.schema.Schema; import org.apache.cassandra.utils.FBUtilities; @@ -57,7 +57,7 @@ public static void defineSchema() @Test public void loadCompletenessAndOrderTest() throws IOException { - File directory = Files.createTempDirectory(null).toFile(); + File directory = new File(Files.createTempDirectory(null)); try { loadCompletenessAndOrderTest(directory); @@ -107,7 +107,7 @@ private void loadCompletenessAndOrderTest(File directory) throws IOException @Test public void deleteHintsTest() throws IOException { - File directory = Files.createTempDirectory(null).toFile(); + File directory = new File(Files.createTempDirectory(null)); UUID hostId1 = UUID.randomUUID(); UUID hostId2 = UUID.randomUUID(); long now = System.currentTimeMillis(); @@ -138,7 +138,7 @@ public void deleteHintsTest() throws IOException @Test public void exciseHintFiles() throws IOException { - File directory = Files.createTempDirectory(null).toFile(); + File directory = new File(Files.createTempDirectory(null)); try { exciseHintFiles(directory); diff --git a/test/unit/org/apache/cassandra/hints/HintsDescriptorTest.java b/test/unit/org/apache/cassandra/hints/HintsDescriptorTest.java index 2fad7335fd0f..ee79f891c293 100644 --- a/test/unit/org/apache/cassandra/hints/HintsDescriptorTest.java +++ b/test/unit/org/apache/cassandra/hints/HintsDescriptorTest.java @@ -18,7 +18,6 @@ package org.apache.cassandra.hints; import java.io.DataInput; -import java.io.File; import java.io.IOException; import java.nio.file.Files; import java.nio.file.Path; @@ -27,6 +26,7 @@ import com.google.common.collect.ImmutableMap; import com.google.common.io.ByteStreams; +import org.apache.cassandra.io.util.File; import org.junit.Test; import org.apache.cassandra.io.compress.LZ4Compressor; @@ -104,18 +104,18 @@ public void testReadFromFile() throws 
IOException ImmutableMap parameters = ImmutableMap.of(); HintsDescriptor expected = new HintsDescriptor(hostId, version, timestamp, parameters); - Path directory = Files.createTempDirectory("hints"); + File directory = new File(Files.createTempDirectory("hints")); try { - try (HintsWriter ignored = HintsWriter.create(directory.toFile(), expected)) + try (HintsWriter ignored = HintsWriter.create(directory, expected)) { } - HintsDescriptor actual = HintsDescriptor.readFromFile(directory.resolve(expected.fileName())); + HintsDescriptor actual = HintsDescriptor.readFromFile(new File(directory, expected.fileName())); assertEquals(expected, actual); } finally { - directory.toFile().deleteOnExit(); + directory.deleteOnExit(); } } @@ -146,7 +146,7 @@ public void testHandleIOE() throws IOException HintsDescriptor.handleDescriptorIOE(new IOException("test"), p); File newFile = new File(p.getParent().toFile(), p.getFileName().toString().replace(".hints", ".corrupt.hints")); assertThat(p).doesNotExist(); - assertThat(newFile).exists(); + assertThat(newFile.exists()).isTrue(); newFile.deleteOnExit(); } diff --git a/test/unit/org/apache/cassandra/hints/HintsReaderTest.java b/test/unit/org/apache/cassandra/hints/HintsReaderTest.java index f05d4cec44b5..af1c89b62937 100644 --- a/test/unit/org/apache/cassandra/hints/HintsReaderTest.java +++ b/test/unit/org/apache/cassandra/hints/HintsReaderTest.java @@ -18,7 +18,6 @@ package org.apache.cassandra.hints; -import java.io.File; import java.io.IOException; import java.nio.ByteBuffer; import java.nio.file.Files; @@ -29,6 +28,7 @@ import java.util.function.Function; import com.google.common.collect.Iterables; +import org.apache.cassandra.io.util.File; import org.junit.BeforeClass; import org.junit.Test; @@ -196,7 +196,7 @@ private void corruptFileHelper(byte[] toAppend, String ks) throws IOException SchemaLoader.standardCFMD(ks, CF_STANDARD1), SchemaLoader.standardCFMD(ks, CF_STANDARD2)); int numTable = 2; - directory = Files.createTempDirectory(null).toFile(); + directory = new File(Files.createTempDirectory(null)); try { generateHints(3, ks); @@ -206,7 +206,7 @@ private void corruptFileHelper(byte[] toAppend, String ks) throws IOException } finally { - directory.delete(); + directory.deleteRecursive(); } } @@ -219,7 +219,7 @@ public void testNormalRead() throws IOException SchemaLoader.standardCFMD(ks, CF_STANDARD1), SchemaLoader.standardCFMD(ks, CF_STANDARD2)); int numTable = 2; - directory = Files.createTempDirectory(null).toFile(); + directory = new File(Files.createTempDirectory(null)); try { generateHints(3, ks); @@ -227,7 +227,7 @@ public void testNormalRead() throws IOException } finally { - directory.delete(); + directory.tryDelete(); } } @@ -240,7 +240,7 @@ public void testDroppedTableRead() throws IOException SchemaLoader.standardCFMD(ks, CF_STANDARD1), SchemaLoader.standardCFMD(ks, CF_STANDARD2)); - directory = Files.createTempDirectory(null).toFile(); + directory = new File(Files.createTempDirectory(null)); try { generateHints(3, ks); @@ -249,7 +249,7 @@ public void testDroppedTableRead() throws IOException } finally { - directory.delete(); + directory.tryDelete(); } } } diff --git a/test/unit/org/apache/cassandra/hints/HintsStoreTest.java b/test/unit/org/apache/cassandra/hints/HintsStoreTest.java index 0bf9ef43239f..c9a0d57be751 100644 --- a/test/unit/org/apache/cassandra/hints/HintsStoreTest.java +++ b/test/unit/org/apache/cassandra/hints/HintsStoreTest.java @@ -19,7 +19,6 @@ package org.apache.cassandra.hints; import java.io.Closeable; -import 
java.io.File; import java.io.IOException; import java.nio.ByteBuffer; import java.nio.file.Files; @@ -36,6 +35,7 @@ import org.junit.Test; import org.apache.cassandra.SchemaLoader; +import org.apache.cassandra.io.util.File; import org.apache.cassandra.schema.TableMetadata; import org.apache.cassandra.schema.Schema; import org.apache.cassandra.db.Mutation; @@ -56,7 +56,7 @@ public class HintsStoreTest @Before public void testSetup() throws IOException { - directory = Files.createTempDirectory(null).toFile(); + directory = new File(Files.createTempDirectory(null)); directory.deleteOnExit(); hostId = UUID.randomUUID(); } diff --git a/test/unit/org/apache/cassandra/index/CustomIndexTest.java b/test/unit/org/apache/cassandra/index/CustomIndexTest.java index 84a36dfc8eb0..9cf3b105683e 100644 --- a/test/unit/org/apache/cassandra/index/CustomIndexTest.java +++ b/test/unit/org/apache/cassandra/index/CustomIndexTest.java @@ -648,7 +648,7 @@ public void testFailing2iFlush() throws Throwable } // SSTables remain uncommitted. - assertEquals(1, getCurrentColumnFamilyStore().getDirectories().getDirectoryForNewSSTables().listFiles().length); + assertEquals(1, getCurrentColumnFamilyStore().getDirectories().getDirectoryForNewSSTables().tryList().length); } @Test diff --git a/test/unit/org/apache/cassandra/index/sasi/SASIIndexTest.java b/test/unit/org/apache/cassandra/index/sasi/SASIIndexTest.java index 70948fe70c76..e9fb34a5b6bf 100644 --- a/test/unit/org/apache/cassandra/index/sasi/SASIIndexTest.java +++ b/test/unit/org/apache/cassandra/index/sasi/SASIIndexTest.java @@ -17,8 +17,6 @@ */ package org.apache.cassandra.index.sasi; -import java.io.File; -import java.io.FileReader; import java.io.FileWriter; import java.io.IOException; import java.io.Writer; @@ -43,6 +41,7 @@ import org.apache.cassandra.cql3.QueryProcessor; import org.apache.cassandra.cql3.UntypedResultSet; import org.apache.cassandra.index.Index; +import org.apache.cassandra.io.util.File; import org.apache.cassandra.schema.ColumnMetadata; import org.apache.cassandra.schema.Schema; import org.apache.cassandra.schema.TableMetadata; diff --git a/test/unit/org/apache/cassandra/index/sasi/disk/OnDiskIndexTest.java b/test/unit/org/apache/cassandra/index/sasi/disk/OnDiskIndexTest.java index 1afb7b4acdf6..0abddd9193b8 100644 --- a/test/unit/org/apache/cassandra/index/sasi/disk/OnDiskIndexTest.java +++ b/test/unit/org/apache/cassandra/index/sasi/disk/OnDiskIndexTest.java @@ -17,7 +17,6 @@ */ package org.apache.cassandra.index.sasi.disk; -import java.io.File; import java.nio.ByteBuffer; import java.util.*; import java.util.concurrent.ThreadLocalRandom; @@ -38,6 +37,7 @@ import org.apache.cassandra.db.marshal.LongType; import org.apache.cassandra.db.marshal.UTF8Type; import org.apache.cassandra.io.util.DataOutputBuffer; +import org.apache.cassandra.io.util.File; import org.apache.cassandra.io.util.FileUtils; import org.apache.cassandra.utils.MurmurHash; import org.apache.cassandra.utils.Pair; diff --git a/test/unit/org/apache/cassandra/index/sasi/disk/PerSSTableIndexWriterTest.java b/test/unit/org/apache/cassandra/index/sasi/disk/PerSSTableIndexWriterTest.java index 97b3433a8fba..da0dbde7506b 100644 --- a/test/unit/org/apache/cassandra/index/sasi/disk/PerSSTableIndexWriterTest.java +++ b/test/unit/org/apache/cassandra/index/sasi/disk/PerSSTableIndexWriterTest.java @@ -17,7 +17,6 @@ */ package org.apache.cassandra.index.sasi.disk; -import java.io.File; import java.nio.ByteBuffer; import java.util.*; import java.util.concurrent.Callable; @@ -40,6 +39,7 @@ 
import org.apache.cassandra.exceptions.ConfigurationException; import org.apache.cassandra.io.FSError; import org.apache.cassandra.io.sstable.Descriptor; +import org.apache.cassandra.io.util.File; import org.apache.cassandra.io.util.FileUtils; import org.apache.cassandra.schema.ColumnMetadata; import org.apache.cassandra.schema.KeyspaceMetadata; diff --git a/test/unit/org/apache/cassandra/index/sasi/disk/TokenTreeTest.java b/test/unit/org/apache/cassandra/index/sasi/disk/TokenTreeTest.java index 4339a6243826..6d067a1d1459 100644 --- a/test/unit/org/apache/cassandra/index/sasi/disk/TokenTreeTest.java +++ b/test/unit/org/apache/cassandra/index/sasi/disk/TokenTreeTest.java @@ -17,7 +17,6 @@ */ package org.apache.cassandra.index.sasi.disk; -import java.io.File; import java.io.IOException; import java.nio.ByteBuffer; import java.util.*; @@ -36,6 +35,7 @@ import org.apache.cassandra.index.sasi.utils.RangeIterator; import org.apache.cassandra.db.marshal.LongType; import org.apache.cassandra.index.sasi.utils.RangeUnionIterator; +import org.apache.cassandra.io.util.File; import org.apache.cassandra.io.util.FileUtils; import org.apache.cassandra.io.util.SequentialWriterOption; import org.apache.cassandra.utils.MurmurHash; diff --git a/test/unit/org/apache/cassandra/index/sasi/utils/LongIteratorTest.java b/test/unit/org/apache/cassandra/index/sasi/utils/LongIteratorTest.java index 6358355994c2..f61252ac1856 100644 --- a/test/unit/org/apache/cassandra/index/sasi/utils/LongIteratorTest.java +++ b/test/unit/org/apache/cassandra/index/sasi/utils/LongIteratorTest.java @@ -51,4 +51,4 @@ public void testBasicITerator() throws IOException { it.close(); } -} \ No newline at end of file +} diff --git a/test/unit/org/apache/cassandra/index/sasi/utils/MappedBufferTest.java b/test/unit/org/apache/cassandra/index/sasi/utils/MappedBufferTest.java index e55f6bac9112..dcd79b91a465 100644 --- a/test/unit/org/apache/cassandra/index/sasi/utils/MappedBufferTest.java +++ b/test/unit/org/apache/cassandra/index/sasi/utils/MappedBufferTest.java @@ -25,6 +25,7 @@ import org.apache.cassandra.db.marshal.LongType; import org.apache.cassandra.io.util.ChannelProxy; +import org.apache.cassandra.io.util.File; import org.apache.cassandra.io.util.FileUtils; import org.junit.Assert; @@ -452,7 +453,7 @@ public void testOpenWithoutPageBits() throws IOException File tmp = FileUtils.createTempFile("mapped-buffer", "tmp"); tmp.deleteOnExit(); - RandomAccessFile file = new RandomAccessFile(tmp, "rw"); + RandomAccessFile file = new RandomAccessFile(tmp.toJavaIOFile(), "rw"); long numValues = 1000; for (long i = 0; i < numValues; i++) @@ -460,7 +461,7 @@ public void testOpenWithoutPageBits() throws IOException file.getFD().sync(); - try (MappedBuffer buffer = new MappedBuffer(new ChannelProxy(tmp.getAbsolutePath(), file.getChannel()))) + try (MappedBuffer buffer = new MappedBuffer(new ChannelProxy(tmp.absolutePath(), file.getChannel()))) { Assert.assertEquals(numValues * 8, buffer.limit()); Assert.assertEquals(numValues * 8, buffer.capacity()); @@ -493,7 +494,7 @@ private MappedBuffer createTestFile(long numCount, int typeSize, int numPageBits final File testFile = FileUtils.createTempFile("mapped-buffer-test", "db"); testFile.deleteOnExit(); - RandomAccessFile file = new RandomAccessFile(testFile, "rw"); + RandomAccessFile file = new RandomAccessFile(testFile.toJavaIOFile(), "rw"); for (long i = 0; i < numCount; i++) { @@ -529,7 +530,7 @@ private MappedBuffer createTestFile(long numCount, int typeSize, int numPageBits try { - return new 
MappedBuffer(new ChannelProxy(testFile.getAbsolutePath(), file.getChannel()), numPageBits); + return new MappedBuffer(new ChannelProxy(testFile.absolutePath(), file.getChannel()), numPageBits); } finally { diff --git a/test/unit/org/apache/cassandra/index/sasi/utils/RangeUnionIteratorTest.java b/test/unit/org/apache/cassandra/index/sasi/utils/RangeUnionIteratorTest.java index 162b1c6f8eff..581f4e5bd4a1 100644 --- a/test/unit/org/apache/cassandra/index/sasi/utils/RangeUnionIteratorTest.java +++ b/test/unit/org/apache/cassandra/index/sasi/utils/RangeUnionIteratorTest.java @@ -373,4 +373,4 @@ public void emptyRangeTest() { Assert.assertTrue(range.hasNext()); Assert.assertEquals(10, range.getCount()); } -} \ No newline at end of file +} diff --git a/test/unit/org/apache/cassandra/io/compress/CompressedRandomAccessReaderTest.java b/test/unit/org/apache/cassandra/io/compress/CompressedRandomAccessReaderTest.java index d3d81f006e4f..8a2beaea110c 100644 --- a/test/unit/org/apache/cassandra/io/compress/CompressedRandomAccessReaderTest.java +++ b/test/unit/org/apache/cassandra/io/compress/CompressedRandomAccessReaderTest.java @@ -19,12 +19,12 @@ package org.apache.cassandra.io.compress; import java.io.EOFException; -import java.io.File; import java.io.IOException; import java.io.RandomAccessFile; import java.util.Arrays; import java.util.Random; +import org.apache.cassandra.io.util.File; import org.assertj.core.api.Assertions; import org.junit.BeforeClass; import org.junit.Test; @@ -97,7 +97,7 @@ public void testResetAndTruncateCompressedUncompressedChunksMmap() throws IOExce public void test6791() throws IOException, ConfigurationException { File f = FileUtils.createTempFile("compressed6791_", "3"); - String filename = f.getAbsolutePath(); + String filename = f.absolutePath(); MetadataCollector sstableMetadataCollector = new MetadataCollector(new ClusteringComparator(BytesType.instance)); try(CompressedSequentialWriter writer = new CompressedSequentialWriter(f, filename + ".metadata", null, SequentialWriterOption.DEFAULT, @@ -132,10 +132,10 @@ public void test6791() throws IOException, ConfigurationException finally { if (f.exists()) - assertTrue(f.delete()); + assertTrue(f.tryDelete()); File metadata = new File(filename+ ".metadata"); if (metadata.exists()) - metadata.delete(); + metadata.tryDelete(); } } @@ -145,8 +145,8 @@ public void test6791() throws IOException, ConfigurationException @Test public void testChunkIndexOverflow() throws IOException { - File file = File.createTempFile("chunk_idx_overflow", "1"); - String filename = file.getAbsolutePath(); + File file = FileUtils.createTempFile("chunk_idx_overflow", "1"); + String filename = file.absolutePath(); int chunkLength = 4096; // 4k try @@ -166,16 +166,16 @@ public void testChunkIndexOverflow() throws IOException finally { if (file.exists()) - assertTrue(file.delete()); + assertTrue(file.tryDelete()); File metadata = new File(filename + ".metadata"); if (metadata.exists()) - metadata.delete(); + metadata.tryDelete(); } } private static void testResetAndTruncate(File f, boolean compressed, boolean usemmap, int junkSize, double minCompressRatio) throws IOException { - final String filename = f.getAbsolutePath(); + final String filename = f.absolutePath(); writeSSTable(f, compressed ? CompressionParams.snappy() : null, junkSize); CompressionMetadata compressionMetadata = compressed ? 
new CompressionMetadata(filename + ".metadata", f.length(), true) : null; @@ -192,16 +192,16 @@ private static void testResetAndTruncate(File f, boolean compressed, boolean use finally { if (f.exists()) - assertTrue(f.delete()); + assertTrue(f.tryDelete()); File metadata = new File(filename + ".metadata"); if (compressed && metadata.exists()) - metadata.delete(); + metadata.tryDelete(); } } private static void writeSSTable(File f, CompressionParams params, int junkSize) throws IOException { - final String filename = f.getAbsolutePath(); + final String filename = f.absolutePath(); MetadataCollector sstableMetadataCollector = new MetadataCollector(new ClusteringComparator(BytesType.instance)); try(SequentialWriter writer = params != null ? new CompressedSequentialWriter(f, filename + ".metadata", @@ -237,14 +237,14 @@ public void testDataCorruptionDetection() throws IOException File file = new File("testDataCorruptionDetection"); file.deleteOnExit(); - File metadata = new File(file.getPath() + ".meta"); + File metadata = new File(file.path() + ".meta"); metadata.deleteOnExit(); - assertTrue(file.createNewFile()); - assertTrue(metadata.createNewFile()); + assertTrue(file.createFileIfNotExists()); + assertTrue(metadata.createFileIfNotExists()); MetadataCollector sstableMetadataCollector = new MetadataCollector(new ClusteringComparator(BytesType.instance)); - try (SequentialWriter writer = new CompressedSequentialWriter(file, metadata.getPath(), + try (SequentialWriter writer = new CompressedSequentialWriter(file, metadata.path(), null, SequentialWriterOption.DEFAULT, CompressionParams.snappy(), sstableMetadataCollector)) { @@ -253,16 +253,16 @@ public void testDataCorruptionDetection() throws IOException } // open compression metadata and get chunk information - CompressionMetadata meta = new CompressionMetadata(metadata.getPath(), file.length(), true); + CompressionMetadata meta = new CompressionMetadata(metadata.path(), file.length(), true); CompressionMetadata.Chunk chunk = meta.chunkFor(0); - try (FileHandle.Builder builder = new FileHandle.Builder(file.getPath()).withCompressionMetadata(meta); + try (FileHandle.Builder builder = new FileHandle.Builder(file.path()).withCompressionMetadata(meta); FileHandle fh = builder.complete(); RandomAccessReader reader = fh.createReader()) {// read and verify compressed data assertEquals(CONTENT, reader.readLine()); Random random = new Random(); - try(RandomAccessFile checksumModifier = new RandomAccessFile(file, "rw")) + try(RandomAccessFile checksumModifier = new RandomAccessFile(file.toJavaIOFile(), "rw")) { byte[] checksum = new byte[4]; @@ -311,6 +311,6 @@ private static void updateChecksum(RandomAccessFile file, long checksumOffset, b { file.seek(checksumOffset); file.write(checksum); - SyncUtil.sync(file); + SyncUtil.sync(file.getFD()); } } diff --git a/test/unit/org/apache/cassandra/io/compress/CompressedSequentialWriterTest.java b/test/unit/org/apache/cassandra/io/compress/CompressedSequentialWriterTest.java index 57802cbf1525..1e671ddb2d2e 100644 --- a/test/unit/org/apache/cassandra/io/compress/CompressedSequentialWriterTest.java +++ b/test/unit/org/apache/cassandra/io/compress/CompressedSequentialWriterTest.java @@ -19,11 +19,11 @@ import java.io.ByteArrayInputStream; import java.io.DataInputStream; -import java.io.File; import java.io.IOException; import java.nio.ByteBuffer; import java.util.*; +import org.apache.cassandra.io.util.File; import static org.apache.cassandra.schema.CompressionParams.DEFAULT_CHUNK_LENGTH; import static 
org.apache.commons.io.FileUtils.readFileToByteArray; import static org.junit.Assert.assertEquals; @@ -113,7 +113,7 @@ public void testNoopWriter() throws IOException private void testWrite(File f, int bytesToTest, boolean useMemmap) throws IOException { - final String filename = f.getAbsolutePath(); + final String filename = f.absolutePath(); MetadataCollector sstableMetadataCollector = new MetadataCollector(new ClusteringComparator(Collections.singletonList(BytesType.instance))); byte[] dataPre = new byte[bytesToTest]; @@ -171,10 +171,10 @@ private void testWrite(File f, int bytesToTest, boolean useMemmap) throws IOExce finally { if (f.exists()) - f.delete(); + f.tryDelete(); File metadata = new File(f + ".metadata"); if (metadata.exists()) - metadata.delete(); + metadata.tryDelete(); } } @@ -213,12 +213,12 @@ private void testUncompressedChunks(int size, double ratio, int extra) throws IO b.flip(); File f = FileUtils.createTempFile("testUncompressedChunks", "1"); - String filename = f.getPath(); + String filename = f.path(); MetadataCollector sstableMetadataCollector = new MetadataCollector(new ClusteringComparator(Collections.singletonList(BytesType.instance))); compressionParameters = new CompressionParams(MockCompressor.class.getTypeName(), MockCompressor.paramsFor(ratio, extra), DEFAULT_CHUNK_LENGTH, ratio); - try (CompressedSequentialWriter writer = new CompressedSequentialWriter(f, f.getPath() + ".metadata", + try (CompressedSequentialWriter writer = new CompressedSequentialWriter(f, f.path() + ".metadata", null, SequentialWriterOption.DEFAULT, compressionParameters, sstableMetadataCollector)) @@ -244,10 +244,10 @@ private void testUncompressedChunks(int size, double ratio, int extra) throws IO finally { if (f.exists()) - f.delete(); + f.tryDelete(); File metadata = new File(f + ".metadata"); if (metadata.exists()) - metadata.delete(); + metadata.tryDelete(); } } @@ -272,12 +272,12 @@ public void cleanup() @Override public void resetAndTruncateTest() { - File tempFile = new File(Files.createTempDir(), "reset.txt"); + File tempFile = new File(Files.createTempDir().toPath(), "reset.txt"); File offsetsFile = FileUtils.createDeletableTempFile("compressedsequentialwriter.offset", "test"); final int bufferSize = 48; final int writeSize = 64; byte[] toWrite = new byte[writeSize]; - try (SequentialWriter writer = new CompressedSequentialWriter(tempFile, offsetsFile.getPath(), + try (SequentialWriter writer = new CompressedSequentialWriter(tempFile, offsetsFile.path(), null, SequentialWriterOption.DEFAULT, CompressionParams.lz4(bufferSize), new MetadataCollector(new ClusteringComparator(UTF8Type.instance)))) @@ -331,7 +331,7 @@ private TestableCSW() throws IOException private TestableCSW(File file, File offsetsFile) throws IOException { - this(file, offsetsFile, new CompressedSequentialWriter(file, offsetsFile.getPath(), + this(file, offsetsFile, new CompressedSequentialWriter(file, offsetsFile.path(), null, SequentialWriterOption.DEFAULT, CompressionParams.lz4(BUFFER_SIZE, MAX_COMPRESSED), new MetadataCollector(new ClusteringComparator(UTF8Type.instance)))); @@ -348,7 +348,7 @@ protected void assertInProgress() throws Exception { Assert.assertTrue(file.exists()); Assert.assertFalse(offsetsFile.exists()); - byte[] compressed = readFileToByteArray(file); + byte[] compressed = readFileToByteArray(file.toJavaIOFile()); byte[] uncompressed = new byte[partialContents.length]; LZ4Compressor.create(Collections.emptyMap()).uncompress(compressed, 0, compressed.length - 4, uncompressed, 0); 
Assert.assertTrue(Arrays.equals(partialContents, uncompressed)); @@ -358,7 +358,7 @@ protected void assertPrepared() throws Exception { Assert.assertTrue(file.exists()); Assert.assertTrue(offsetsFile.exists()); - DataInputStream offsets = new DataInputStream(new ByteArrayInputStream(readFileToByteArray(offsetsFile))); + DataInputStream offsets = new DataInputStream(new ByteArrayInputStream(readFileToByteArray(offsetsFile.toJavaIOFile()))); Assert.assertTrue(offsets.readUTF().endsWith("LZ4Compressor")); Assert.assertEquals(0, offsets.readInt()); Assert.assertEquals(BUFFER_SIZE, offsets.readInt()); @@ -367,7 +367,7 @@ protected void assertPrepared() throws Exception Assert.assertEquals(2, offsets.readInt()); Assert.assertEquals(0, offsets.readLong()); int offset = (int) offsets.readLong(); - byte[] compressed = readFileToByteArray(file); + byte[] compressed = readFileToByteArray(file.toJavaIOFile()); byte[] uncompressed = new byte[fullContents.length]; LZ4Compressor.create(Collections.emptyMap()).uncompress(compressed, 0, offset - 4, uncompressed, 0); LZ4Compressor.create(Collections.emptyMap()).uncompress(compressed, offset, compressed.length - (4 + offset), uncompressed, partialContents.length); @@ -381,8 +381,8 @@ protected void assertAborted() throws Exception void cleanup() { - file.delete(); - offsetsFile.delete(); + file.tryDelete(); + offsetsFile.tryDelete(); } } diff --git a/test/unit/org/apache/cassandra/io/compress/CompressorTest.java b/test/unit/org/apache/cassandra/io/compress/CompressorTest.java index 29e8453c5105..dad3ae44aece 100644 --- a/test/unit/org/apache/cassandra/io/compress/CompressorTest.java +++ b/test/unit/org/apache/cassandra/io/compress/CompressorTest.java @@ -27,6 +27,7 @@ import java.util.Random; import com.google.common.io.Files; +import org.apache.cassandra.io.util.File; import static org.junit.Assert.*; import org.junit.Assert; import org.junit.Test; @@ -143,7 +144,7 @@ public void testMappedFile() throws IOException dest.clear(); channel.write(dest); - MappedByteBuffer mappedData = Files.map(temp); + MappedByteBuffer mappedData = Files.map(temp.toJavaIOFile()); ByteBuffer result = makeBB(data.length + 100); mappedData.position(outOffset).limit(outOffset + compressedLength); diff --git a/test/unit/org/apache/cassandra/io/sstable/BigTableWriterTest.java b/test/unit/org/apache/cassandra/io/sstable/BigTableWriterTest.java index 9e3594bc0fa5..14d3c5ef6f61 100644 --- a/test/unit/org/apache/cassandra/io/sstable/BigTableWriterTest.java +++ b/test/unit/org/apache/cassandra/io/sstable/BigTableWriterTest.java @@ -18,7 +18,7 @@ */ package org.apache.cassandra.io.sstable; -import java.io.File; +import org.apache.cassandra.io.util.File; import java.io.IOException; import org.junit.BeforeClass; diff --git a/test/unit/org/apache/cassandra/io/sstable/CQLSSTableWriterClientTest.java b/test/unit/org/apache/cassandra/io/sstable/CQLSSTableWriterClientTest.java index 61ac017430e2..778e8a771084 100644 --- a/test/unit/org/apache/cassandra/io/sstable/CQLSSTableWriterClientTest.java +++ b/test/unit/org/apache/cassandra/io/sstable/CQLSSTableWriterClientTest.java @@ -17,11 +17,12 @@ */ package org.apache.cassandra.io.sstable; -import java.io.File; -import java.io.FilenameFilter; + import java.io.IOException; +import java.util.function.BiPredicate; import com.google.common.io.Files; +import org.apache.cassandra.io.util.File; import org.junit.After; import org.junit.Before; import org.junit.Test; @@ -39,7 +40,7 @@ public class CQLSSTableWriterClientTest @Before public void setUp() { - 
this.testDirectory = Files.createTempDir(); + this.testDirectory = new File(Files.createTempDir()); DatabaseDescriptor.daemonInitialization(); } @@ -73,16 +74,9 @@ public void testWriterInClientMode() throws IOException, InvalidRequestException writer.close(); writer2.close(); - FilenameFilter filter = new FilenameFilter() - { - @Override - public boolean accept(File dir, String name) - { - return name.endsWith("-Data.db"); - } - }; + BiPredicate<File, String> filter = (dir, name) -> name.endsWith("-Data.db"); - File[] dataFiles = this.testDirectory.listFiles(filter); + File[] dataFiles = this.testDirectory.tryList(filter); assertEquals(2, dataFiles.length); } diff --git a/test/unit/org/apache/cassandra/io/sstable/CQLSSTableWriterTest.java b/test/unit/org/apache/cassandra/io/sstable/CQLSSTableWriterTest.java index 79c6e7785255..2d04bb953e52 100644 --- a/test/unit/org/apache/cassandra/io/sstable/CQLSSTableWriterTest.java +++ b/test/unit/org/apache/cassandra/io/sstable/CQLSSTableWriterTest.java @@ -17,17 +17,18 @@ */ package org.apache.cassandra.io.sstable; -import java.io.File; -import java.io.FilenameFilter; + import java.io.IOException; import java.nio.ByteBuffer; import java.util.*; import java.util.concurrent.ExecutionException; import java.util.concurrent.atomic.AtomicInteger; +import java.util.function.BiPredicate; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; +import org.apache.cassandra.io.util.File; import org.junit.Before; import org.junit.BeforeClass; import org.junit.Rule; @@ -85,8 +86,8 @@ public void perTestSetup() throws IOException keyspace = "cql_keyspace" + idGen.incrementAndGet(); table = "table" + idGen.incrementAndGet(); qualifiedTable = keyspace + '.' + table; - dataDir = new File(tempFolder.newFolder().getAbsolutePath() + File.separator + keyspace + File.separator + table); - assert dataDir.mkdirs(); + dataDir = new File(tempFolder.newFolder().getAbsolutePath() + File.pathSeparator() + keyspace + File.pathSeparator() + table); + assert dataDir.tryCreateDirectories(); } @Test @@ -190,14 +191,8 @@ public void testSyncWithinPartition() throws Exception writer.addRow(1, val); writer.close(); - FilenameFilter filterDataFiles = new FilenameFilter() - { - public boolean accept(File dir, String name) - { - return name.endsWith("-Data.db"); - } - }; - assert dataDir.list(filterDataFiles).length > 1 : Arrays.toString(dataDir.list(filterDataFiles)); + BiPredicate<File, String> filterDataFiles = (dir, name) -> name.endsWith("-Data.db"); + assert dataDir.tryListNames(filterDataFiles).length > 1 : Arrays.toString(dataDir.tryListNames(filterDataFiles)); } diff --git a/test/unit/org/apache/cassandra/io/sstable/DescriptorTest.java b/test/unit/org/apache/cassandra/io/sstable/DescriptorTest.java index 5f79f570b725..988774e1c475 100644 --- a/test/unit/org/apache/cassandra/io/sstable/DescriptorTest.java +++ b/test/unit/org/apache/cassandra/io/sstable/DescriptorTest.java @@ -17,10 +17,10 @@ */ package org.apache.cassandra.io.sstable; -import java.io.File; import java.io.IOException; import java.util.UUID; +import org.apache.cassandra.io.util.File; import org.apache.commons.lang3.StringUtils; import org.junit.Assert; import org.junit.BeforeClass; @@ -45,7 +45,7 @@ public class DescriptorTest public DescriptorTest() throws IOException { // create CF directories, one without CFID and one with it - tempDataDir = FileUtils.createTempFile("DescriptorTest", null).getParentFile(); + tempDataDir = FileUtils.createTempFile("DescriptorTest", null).parent(); } @BeforeClass @@ 
-57,28 +57,28 @@ public static void setup() @Test public void testFromFilename() throws Exception { - File cfIdDir = new File(tempDataDir.getAbsolutePath() + File.separator + ksname + File.separator + cfname + '-' + cfId); + File cfIdDir = new File(tempDataDir.absolutePath() + File.pathSeparator() + ksname + File.pathSeparator() + cfname + '-' + cfId); testFromFilenameFor(cfIdDir); } @Test public void testFromFilenameInBackup() throws Exception { - File backupDir = new File(StringUtils.join(new String[]{tempDataDir.getAbsolutePath(), ksname, cfname + '-' + cfId, Directories.BACKUPS_SUBDIR}, File.separator)); + File backupDir = new File(StringUtils.join(new String[]{ tempDataDir.absolutePath(), ksname, cfname + '-' + cfId, Directories.BACKUPS_SUBDIR}, File.pathSeparator())); testFromFilenameFor(backupDir); } @Test public void testFromFilenameInSnapshot() throws Exception { - File snapshotDir = new File(StringUtils.join(new String[]{tempDataDir.getAbsolutePath(), ksname, cfname + '-' + cfId, Directories.SNAPSHOT_SUBDIR, "snapshot_name"}, File.separator)); + File snapshotDir = new File(StringUtils.join(new String[]{ tempDataDir.absolutePath(), ksname, cfname + '-' + cfId, Directories.SNAPSHOT_SUBDIR, "snapshot_name"}, File.pathSeparator())); testFromFilenameFor(snapshotDir); } @Test public void testFromFilenameInLegacyDirectory() throws Exception { - File cfDir = new File(tempDataDir.getAbsolutePath() + File.separator + ksname + File.separator + cfname); + File cfDir = new File(tempDataDir.absolutePath() + File.pathSeparator() + ksname + File.pathSeparator() + cfname); testFromFilenameFor(cfDir); } @@ -88,7 +88,7 @@ private void testFromFilenameFor(File dir) // secondary index String idxName = "myidx"; - File idxDir = new File(dir.getAbsolutePath() + File.separator + Directories.SECONDARY_INDEX_NAME_SEPARATOR + idxName); + File idxDir = new File(dir.absolutePath() + File.pathSeparator() + Directories.SECONDARY_INDEX_NAME_SEPARATOR + idxName); checkFromFilename(new Descriptor(idxDir, ksname, cfname + Directories.SECONDARY_INDEX_NAME_SEPARATOR + idxName, 4, SSTableFormat.Type.BIG)); } @@ -113,7 +113,7 @@ public void testEquality() // Descriptor should be equal when parent directory points to the same directory File dir = new File("."); Descriptor desc1 = new Descriptor(dir, "ks", "cf", 1, SSTableFormat.Type.BIG); - Descriptor desc2 = new Descriptor(dir.getAbsoluteFile(), "ks", "cf", 1, SSTableFormat.Type.BIG); + Descriptor desc2 = new Descriptor(dir.toAbsolute(), "ks", "cf", 1, SSTableFormat.Type.BIG); assertEquals(desc1, desc2); assertEquals(desc1.hashCode(), desc2.hashCode()); } @@ -124,7 +124,7 @@ public void validateNames() String[] names = { "ma-1-big-Data.db", // 2ndary index - ".idx1" + File.separator + "ma-1-big-Data.db", + ".idx1" + File.pathSeparator() + "ma-1-big-Data.db", }; for (String name : names) diff --git a/test/unit/org/apache/cassandra/io/sstable/LegacySSTableTest.java b/test/unit/org/apache/cassandra/io/sstable/LegacySSTableTest.java index b96e1f7e3573..2fcc54302ef1 100644 --- a/test/unit/org/apache/cassandra/io/sstable/LegacySSTableTest.java +++ b/test/unit/org/apache/cassandra/io/sstable/LegacySSTableTest.java @@ -17,9 +17,7 @@ */ package org.apache.cassandra.io.sstable; -import java.io.File; -import java.io.FileInputStream; -import java.io.FileOutputStream; + import java.io.IOException; import java.util.ArrayList; import java.util.Collections; @@ -29,6 +27,9 @@ import com.google.common.collect.Lists; import com.google.common.collect.Iterables; +import 
org.apache.cassandra.io.util.File; +import org.apache.cassandra.io.util.FileInputStreamPlus; +import org.apache.cassandra.io.util.FileOutputStreamPlus; import org.junit.After; import org.junit.Assert; import org.junit.BeforeClass; @@ -113,7 +114,7 @@ public static void defineSchema() throws ConfigurationException String scp = System.getProperty(LEGACY_SSTABLE_PROP); Assert.assertNotNull("System property " + LEGACY_SSTABLE_PROP + " not set", scp); - LEGACY_SSTABLE_ROOT = new File(scp).getAbsoluteFile(); + LEGACY_SSTABLE_ROOT = new File(scp).toAbsolute(); Assert.assertTrue("System property " + LEGACY_SSTABLE_ROOT + " does not specify a directory", LEGACY_SSTABLE_ROOT.isDirectory()); SchemaLoader.prepareServer(); @@ -637,7 +638,7 @@ public void testGenerateSstables() throws Throwable StorageService.instance.forceKeyspaceFlush("legacy_tables"); File ksDir = new File(LEGACY_SSTABLE_ROOT, String.format("%s/legacy_tables", BigFormat.latestVersion)); - ksDir.mkdirs(); + ksDir.tryCreateDirectories(); copySstablesFromTestData(String.format("legacy_%s_simple", BigFormat.latestVersion), ksDir); copySstablesFromTestData(String.format("legacy_%s_simple_counter", BigFormat.latestVersion), ksDir); copySstablesFromTestData(String.format("legacy_%s_clust", BigFormat.latestVersion), ksDir); @@ -647,11 +648,11 @@ public void testGenerateSstables() throws Throwable public static void copySstablesFromTestData(String table, File ksDir) throws IOException { File cfDir = new File(ksDir, table); - cfDir.mkdir(); + cfDir.tryCreateDirectory(); for (File srcDir : Keyspace.open("legacy_tables").getColumnFamilyStore(table).getDirectories().getCFDirectories()) { - for (File file : srcDir.listFiles()) + for (File file : srcDir.tryList()) { copyFile(cfDir, file); } @@ -662,7 +663,7 @@ private static void copySstablesToTestData(String legacyVersion, String table, F { File tableDir = getTableDir(legacyVersion, table); Assert.assertTrue("The table directory " + tableDir + " was not found", tableDir.isDirectory()); - for (File file : tableDir.listFiles()) + for (File file : tableDir.tryList()) { copyFile(cfDir, file); } @@ -678,10 +679,10 @@ private static void copyFile(File cfDir, File file) throws IOException byte[] buf = new byte[65536]; if (file.isFile()) { - File target = new File(cfDir, file.getName()); + File target = new File(cfDir, file.name()); int rd; - try (FileInputStream is = new FileInputStream(file); - FileOutputStream os = new FileOutputStream(target);) { + try (FileInputStreamPlus is = new FileInputStreamPlus(file); + FileOutputStreamPlus os = new FileOutputStreamPlus(target);) { while ((rd = is.read(buf)) >= 0) os.write(buf, 0, rd); } diff --git a/test/unit/org/apache/cassandra/io/sstable/SSTableCorruptionDetectionTest.java b/test/unit/org/apache/cassandra/io/sstable/SSTableCorruptionDetectionTest.java index c4e5207accd8..6e4ed52f6c66 100644 --- a/test/unit/org/apache/cassandra/io/sstable/SSTableCorruptionDetectionTest.java +++ b/test/unit/org/apache/cassandra/io/sstable/SSTableCorruptionDetectionTest.java @@ -18,13 +18,13 @@ package org.apache.cassandra.io.sstable; -import java.io.File; import java.io.IOException; import java.io.RandomAccessFile; import java.nio.ByteBuffer; import java.util.*; import java.util.function.*; +import org.apache.cassandra.io.util.File; import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.Test; diff --git a/test/unit/org/apache/cassandra/io/sstable/SSTableHeaderFixTest.java b/test/unit/org/apache/cassandra/io/sstable/SSTableHeaderFixTest.java index 
d07187b22bfc..f578f770877f 100644 --- a/test/unit/org/apache/cassandra/io/sstable/SSTableHeaderFixTest.java +++ b/test/unit/org/apache/cassandra/io/sstable/SSTableHeaderFixTest.java @@ -17,7 +17,6 @@ */ package org.apache.cassandra.io.sstable; -import java.io.File; import java.nio.ByteBuffer; import java.util.Arrays; import java.util.Collections; @@ -32,6 +31,7 @@ import java.util.stream.IntStream; import com.google.common.collect.Sets; +import org.apache.cassandra.io.util.File; import org.junit.After; import org.junit.Before; import org.junit.Test; @@ -89,8 +89,8 @@ public class SSTableHeaderFixTest public void setup() { File f = FileUtils.createTempFile("SSTableUDTFixTest", ""); - f.delete(); - f.mkdirs(); + f.tryDelete(); + f.tryCreateDirectories(); temporaryFolder = f; } @@ -794,7 +794,7 @@ private File buildFakeSSTable(File dir, int generation, TableMetadata.Builder co // Just create the component files - we don't really need those. for (Component component : requiredComponents) - assertTrue(new File(desc.filenameFor(component)).createNewFile()); + assertTrue(new File(desc.filenameFor(component)).createFileIfNotExists()); AbstractType partitionKey = headerMetadata.partitionKeyType; List> clusteringKey = headerMetadata.clusteringColumns() diff --git a/test/unit/org/apache/cassandra/io/sstable/SSTableLoaderTest.java b/test/unit/org/apache/cassandra/io/sstable/SSTableLoaderTest.java index ac0cda1cde80..51489a89af55 100644 --- a/test/unit/org/apache/cassandra/io/sstable/SSTableLoaderTest.java +++ b/test/unit/org/apache/cassandra/io/sstable/SSTableLoaderTest.java @@ -17,7 +17,6 @@ */ package org.apache.cassandra.io.sstable; -import java.io.File; import java.util.Collections; import java.util.List; import java.util.Objects; @@ -25,6 +24,7 @@ import com.google.common.io.Files; +import org.apache.cassandra.io.util.File; import org.junit.After; import org.junit.Before; import org.junit.BeforeClass; @@ -87,7 +87,7 @@ public static void defineSchema() @Before public void setup() throws Exception { - tmpdir = Files.createTempDir(); + tmpdir = new File(Files.createTempDir()); } @After @@ -184,7 +184,7 @@ public void testLoadingIncompleteSSTable() throws Exception cfs.forceBlockingFlush(); // wait for sstables to be on disk else we won't be able to stream them //make sure we have some tables... 
- assertTrue(Objects.requireNonNull(dataDir.listFiles()).length > 0); + assertTrue(Objects.requireNonNull(dataDir.tryList()).length > 0); final CountDownLatch latch = new CountDownLatch(2); //writer is still open so loader should not load anything @@ -212,8 +212,8 @@ public void testLoadingIncompleteSSTable() throws Exception @Test public void testLoadingSSTableToDifferentKeyspace() throws Exception { - File dataDir = new File(tmpdir.getAbsolutePath() + File.separator + KEYSPACE1 + File.separator + CF_STANDARD1); - assert dataDir.mkdirs(); + File dataDir = new File(tmpdir.absolutePath() + File.pathSeparator() + KEYSPACE1 + File.pathSeparator() + CF_STANDARD1); + assert dataDir.tryCreateDirectories(); TableMetadata metadata = Schema.instance.getTableMetadata(KEYSPACE1, CF_STANDARD1); String schema = "CREATE TABLE %s.%s (key ascii, name ascii, val ascii, val1 ascii, PRIMARY KEY (key, name))"; @@ -290,10 +290,10 @@ public void testLoadingBackupsTable() throws Exception private File dataDir(String cf) { - File dataDir = new File(tmpdir.getAbsolutePath() + File.separator + SSTableLoaderTest.KEYSPACE1 + File.separator + cf); - assert dataDir.mkdirs(); + File dataDir = new File(tmpdir.absolutePath() + File.pathSeparator() + SSTableLoaderTest.KEYSPACE1 + File.pathSeparator() + cf); + assert dataDir.tryCreateDirectories(); //make sure we have no tables... - assertEquals(Objects.requireNonNull(dataDir.listFiles()).length, 0); + assertEquals(Objects.requireNonNull(dataDir.tryList()).length, 0); return dataDir; } diff --git a/test/unit/org/apache/cassandra/io/sstable/SSTableReaderTest.java b/test/unit/org/apache/cassandra/io/sstable/SSTableReaderTest.java index f1fc4cb4b9b8..f64ed4eed314 100644 --- a/test/unit/org/apache/cassandra/io/sstable/SSTableReaderTest.java +++ b/test/unit/org/apache/cassandra/io/sstable/SSTableReaderTest.java @@ -17,7 +17,6 @@ */ package org.apache.cassandra.io.sstable; -import java.io.File; import java.io.IOException; import java.nio.ByteBuffer; import java.nio.file.Files; @@ -26,6 +25,7 @@ import java.util.concurrent.*; import com.google.common.collect.Sets; +import org.apache.cassandra.io.util.File; import org.junit.BeforeClass; import org.junit.Rule; import org.junit.Test; @@ -496,7 +496,7 @@ public void testOpeningSSTable() throws Exception // check that only the summary is regenerated when it is deleted components.add(Component.FILTER); summaryModified = Files.getLastModifiedTime(summaryPath).toMillis(); - summaryFile.delete(); + summaryFile.tryDelete(); TimeUnit.MILLISECONDS.sleep(1000); // sleep to ensure modified time will be different bloomModified = Files.getLastModifiedTime(bloomPath).toMillis(); @@ -794,7 +794,7 @@ public void testMoveAndOpenSSTable() throws IOException SSTableReader sstable = getNewSSTable(cfs); cfs.clearUnsafe(); sstable.selfRef().release(); - File tmpdir = Files.createTempDirectory("testMoveAndOpen").toFile(); + File tmpdir = new File(Files.createTempDirectory("testMoveAndOpen")); tmpdir.deleteOnExit(); Descriptor notLiveDesc = new Descriptor(tmpdir, sstable.descriptor.ksname, sstable.descriptor.cfname, 100); // make sure the new directory is empty and that the old files exist: @@ -861,7 +861,7 @@ public void testVerifyCompressionInfoExistenceThrows() // delete the compression info, so it is corrupted. 
File compressionInfoFile = new File(desc.filenameFor(Component.COMPRESSION_INFO)); - compressionInfoFile.delete(); + compressionInfoFile.tryDelete(); assertFalse("CompressionInfo file should not exist", compressionInfoFile.exists()); // discovert the components on disk after deletion @@ -881,7 +881,7 @@ public void testVerifyCompressionInfoExistenceWhenTOCUnableToOpen() // mark the toc file not readable in order to trigger the FSReadError File tocFile = new File(desc.filenameFor(Component.TOC)); - tocFile.setReadable(false); + tocFile.trySetReadable(false); expectedException.expect(FSReadError.class); expectedException.expectMessage("TOC.txt"); diff --git a/test/unit/org/apache/cassandra/io/sstable/SSTableRewriterTest.java b/test/unit/org/apache/cassandra/io/sstable/SSTableRewriterTest.java index 1895653ccd0b..72b4587a5e2e 100644 --- a/test/unit/org/apache/cassandra/io/sstable/SSTableRewriterTest.java +++ b/test/unit/org/apache/cassandra/io/sstable/SSTableRewriterTest.java @@ -18,8 +18,8 @@ package org.apache.cassandra.io.sstable; -import java.io.File; import java.nio.ByteBuffer; +import java.io.IOException; import java.util.*; import java.util.concurrent.ExecutionException; import java.util.concurrent.atomic.AtomicBoolean; @@ -52,6 +52,7 @@ import org.apache.cassandra.db.lifecycle.SSTableSet; import org.apache.cassandra.io.sstable.format.SSTableReader; import org.apache.cassandra.io.sstable.format.SSTableWriter; +import org.apache.cassandra.io.util.File; import org.apache.cassandra.io.util.FileUtils; import org.apache.cassandra.db.lifecycle.LifecycleTransaction; import org.apache.cassandra.metrics.StorageMetrics; @@ -99,7 +100,7 @@ public void basicTest() writer.finish(); } LifecycleTransaction.waitForDeletions(); - assertEquals(1, assertFileCounts(sstables.iterator().next().descriptor.directory.list())); + assertEquals(1, assertFileCounts(sstables.iterator().next().descriptor.directory.tryListNames())); validateCFS(cfs); truncate(cfs); @@ -131,7 +132,7 @@ public void basicTest2() writer.finish(); } LifecycleTransaction.waitForDeletions(); - assertEquals(1, assertFileCounts(sstables.iterator().next().descriptor.directory.list())); + assertEquals(1, assertFileCounts(sstables.iterator().next().descriptor.directory.tryListNames())); validateCFS(cfs); } @@ -186,7 +187,7 @@ public void getPositionsTest() writer.finish(); } LifecycleTransaction.waitForDeletions(); - assertEquals(1, assertFileCounts(sstables.iterator().next().descriptor.directory.list())); + assertEquals(1, assertFileCounts(sstables.iterator().next().descriptor.directory.tryListNames())); validateCFS(cfs); truncate(cfs); @@ -244,7 +245,7 @@ public void testNumberOfFilesAndSizes() // tmplink and tmp files should be gone: assertEquals(sum, cfs.metric.totalDiskSpaceUsed.getCount()); - assertFileCounts(s.descriptor.directory.list()); + assertFileCounts(s.descriptor.directory.tryListNames()); validateCFS(cfs); } @@ -287,7 +288,7 @@ public void testNumberOfFiles_dont_clean_readers() assertEquals(files, cfs.getLiveSSTables().size()); LifecycleTransaction.waitForDeletions(); - assertFileCounts(s.descriptor.directory.list()); + assertFileCounts(s.descriptor.directory.tryListNames()); validateCFS(cfs); } @@ -426,7 +427,7 @@ private void testNumberOfFiles_abort(RewriterTest test) assertEquals(startSize, cfs.metric.liveDiskSpaceUsed.getCount()); assertEquals(1, cfs.getLiveSSTables().size()); - assertFileCounts(s.descriptor.directory.list()); + assertFileCounts(s.descriptor.directory.tryListNames()); 
assertEquals(cfs.getLiveSSTables().iterator().next().first, origFirst); assertEquals(cfs.getLiveSSTables().iterator().next().last, origLast); validateCFS(cfs); @@ -473,7 +474,7 @@ public void testNumberOfFiles_finish_empty_new_writer() LifecycleTransaction.waitForDeletions(); assertEquals(files - 1, cfs.getLiveSSTables().size()); // we never wrote anything to the last file - assertFileCounts(s.descriptor.directory.list()); + assertFileCounts(s.descriptor.directory.tryListNames()); validateCFS(cfs); } @@ -513,7 +514,7 @@ public void testNumberOfFiles_truncate() } LifecycleTransaction.waitForDeletions(); - assertFileCounts(s.descriptor.directory.list()); + assertFileCounts(s.descriptor.directory.tryListNames()); validateCFS(cfs); } @@ -554,7 +555,7 @@ public void testSmallFiles() assertEquals(files, sstables.size()); assertEquals(files, cfs.getLiveSSTables().size()); LifecycleTransaction.waitForDeletions(); - assertFileCounts(s.descriptor.directory.list()); + assertFileCounts(s.descriptor.directory.tryListNames()); validateCFS(cfs); } @@ -572,10 +573,10 @@ public void testSSTableSplit() SSTableSplitter splitter = new SSTableSplitter(cfs, txn, 10); splitter.split(); - assertFileCounts(s.descriptor.directory.list()); + assertFileCounts(s.descriptor.directory.tryListNames()); LifecycleTransaction.waitForDeletions(); - for (File f : s.descriptor.directory.listFiles()) + for (File f : s.descriptor.directory.tryList()) { // we need to clear out the data dir, otherwise tests running after this breaks FileUtils.deleteRecursive(f); @@ -651,7 +652,7 @@ private void testAbortHelper(boolean earlyException, boolean offline) LifecycleTransaction.waitForDeletions(); - int filecount = assertFileCounts(s.descriptor.directory.list()); + int filecount = assertFileCounts(s.descriptor.directory.tryListNames()); assertEquals(filecount, 1); if (!offline) { @@ -664,16 +665,16 @@ private void testAbortHelper(boolean earlyException, boolean offline) assertEquals(0, cfs.getLiveSSTables().size()); cfs.truncateBlocking(); } - filecount = assertFileCounts(s.descriptor.directory.list()); + filecount = assertFileCounts(s.descriptor.directory.tryListNames()); if (offline) { // the file is not added to the CFS, therefore not truncated away above assertEquals(1, filecount); - for (File f : s.descriptor.directory.listFiles()) + for (File f : s.descriptor.directory.tryList()) { FileUtils.deleteRecursive(f); } - filecount = assertFileCounts(s.descriptor.directory.list()); + filecount = assertFileCounts(s.descriptor.directory.tryListNames()); } assertEquals(0, filecount); diff --git a/test/unit/org/apache/cassandra/io/sstable/SSTableUtils.java b/test/unit/org/apache/cassandra/io/sstable/SSTableUtils.java index 731cee2e53fe..d6e4a9eab9d9 100644 --- a/test/unit/org/apache/cassandra/io/sstable/SSTableUtils.java +++ b/test/unit/org/apache/cassandra/io/sstable/SSTableUtils.java @@ -19,7 +19,7 @@ package org.apache.cassandra.io.sstable; -import java.io.File; +import org.apache.cassandra.io.util.File; import java.io.IOException; import java.util.*; @@ -73,14 +73,14 @@ public static File tempSSTableFile(String keyspaceName, String cfname) throws IO public static File tempSSTableFile(String keyspaceName, String cfname, int generation) throws IOException { File tempdir = FileUtils.createTempFile(keyspaceName, cfname); - if(!tempdir.delete() || !tempdir.mkdir()) + if(!tempdir.tryDelete() || !tempdir.tryCreateDirectory()) throw new IOException("Temporary directory creation failed."); tempdir.deleteOnExit(); - File cfDir = new File(tempdir, 
keyspaceName + File.separator + cfname); - cfDir.mkdirs(); + File cfDir = new File(tempdir, keyspaceName + File.pathSeparator() + cfname); + cfDir.tryCreateDirectories(); cfDir.deleteOnExit(); File datafile = new File(new Descriptor(cfDir, keyspaceName, cfname, generation, SSTableFormat.Type.BIG).filenameFor(Component.DATA)); - if (!datafile.createNewFile()) + if (!datafile.createFileIfNotExists()) throw new IOException("unable to create file " + datafile); datafile.deleteOnExit(); return datafile; @@ -219,7 +219,7 @@ public Collection write(int expectedSize, Appender appender) thro TableMetadata metadata = Schema.instance.getTableMetadata(ksname, cfname); ColumnFamilyStore cfs = Schema.instance.getColumnFamilyStoreInstance(metadata.id); SerializationHeader header = appender.header(); - SSTableTxnWriter writer = SSTableTxnWriter.create(cfs, Descriptor.fromFilename(datafile.getAbsolutePath()), expectedSize, UNREPAIRED_SSTABLE, NO_PENDING_REPAIR, false, 0, header); + SSTableTxnWriter writer = SSTableTxnWriter.create(cfs, Descriptor.fromFilename(datafile.absolutePath()), expectedSize, UNREPAIRED_SSTABLE, NO_PENDING_REPAIR, false, 0, header); while (appender.append(writer)) { /* pass */ } Collection readers = writer.finish(true); diff --git a/test/unit/org/apache/cassandra/io/sstable/SSTableWriterTest.java b/test/unit/org/apache/cassandra/io/sstable/SSTableWriterTest.java index e407abc0e7b2..84f80a8ce083 100644 --- a/test/unit/org/apache/cassandra/io/sstable/SSTableWriterTest.java +++ b/test/unit/org/apache/cassandra/io/sstable/SSTableWriterTest.java @@ -18,10 +18,10 @@ package org.apache.cassandra.io.sstable; -import java.io.File; import java.nio.ByteBuffer; import java.util.UUID; +import org.apache.cassandra.io.util.File; import org.junit.Test; import org.apache.cassandra.*; @@ -64,7 +64,7 @@ public void testAbortTxnWithOpenEarlyShouldRemoveSSTable() throws InterruptedExc SSTableReader s = writer.setMaxDataAge(1000).openEarly(); assert s != null; - assertFileCounts(dir.list()); + assertFileCounts(dir.tryListNames()); for (int i = 10000; i < 20000; i++) { UpdateBuilder builder = UpdateBuilder.create(cfs.metadata(), random(i, 10)).withTimestamp(1); @@ -74,11 +74,11 @@ public void testAbortTxnWithOpenEarlyShouldRemoveSSTable() throws InterruptedExc } SSTableReader s2 = writer.setMaxDataAge(1000).openEarly(); assertTrue(s.last.compareTo(s2.last) < 0); - assertFileCounts(dir.list()); + assertFileCounts(dir.tryListNames()); s.selfRef().release(); s2.selfRef().release(); - int datafiles = assertFileCounts(dir.list()); + int datafiles = assertFileCounts(dir.tryListNames()); assertEquals(datafiles, 1); // These checks don't work on Windows because the writer has the channel still @@ -86,12 +86,12 @@ public void testAbortTxnWithOpenEarlyShouldRemoveSSTable() throws InterruptedExc if (!FBUtilities.isWindows) { LifecycleTransaction.waitForDeletions(); - assertFileCounts(dir.list()); + assertFileCounts(dir.tryListNames()); } writer.abort(); txn.abort(); LifecycleTransaction.waitForDeletions(); - datafiles = assertFileCounts(dir.list()); + datafiles = assertFileCounts(dir.tryListNames()); assertEquals(datafiles, 0); validateCFS(cfs); } @@ -117,7 +117,7 @@ public void testAbortTxnWithClosedWriterShouldRemoveSSTable() throws Interrupted writer.append(builder.build().unfilteredIterator()); } - assertFileCounts(dir.list()); + assertFileCounts(dir.tryListNames()); for (int i = 10000; i < 20000; i++) { UpdateBuilder builder = UpdateBuilder.create(cfs.metadata(), random(i, 10)).withTimestamp(1); @@ -126,7 +126,7 
@@ public void testAbortTxnWithClosedWriterShouldRemoveSSTable() throws Interrupted writer.append(builder.build().unfilteredIterator()); } SSTableReader sstable = writer.finish(true); - int datafiles = assertFileCounts(dir.list()); + int datafiles = assertFileCounts(dir.tryListNames()); assertEquals(datafiles, 1); sstable.selfRef().release(); @@ -135,12 +135,12 @@ public void testAbortTxnWithClosedWriterShouldRemoveSSTable() throws Interrupted if (!FBUtilities.isWindows) { LifecycleTransaction.waitForDeletions(); - assertFileCounts(dir.list()); + assertFileCounts(dir.tryListNames()); } txn.abort(); LifecycleTransaction.waitForDeletions(); - datafiles = assertFileCounts(dir.list()); + datafiles = assertFileCounts(dir.tryListNames()); assertEquals(datafiles, 0); validateCFS(cfs); } @@ -168,7 +168,7 @@ public void testAbortTxnWithClosedAndOpenWriterShouldRemoveAllSSTables() throws writer1.append(builder.build().unfilteredIterator()); } - assertFileCounts(dir.list()); + assertFileCounts(dir.tryListNames()); for (int i = 10000; i < 20000; i++) { UpdateBuilder builder = UpdateBuilder.create(cfs.metadata(), random(i, 10)).withTimestamp(1); @@ -179,9 +179,9 @@ public void testAbortTxnWithClosedAndOpenWriterShouldRemoveAllSSTables() throws SSTableReader sstable = writer1.finish(true); txn.update(sstable, false); - assertFileCounts(dir.list()); + assertFileCounts(dir.tryListNames()); - int datafiles = assertFileCounts(dir.list()); + int datafiles = assertFileCounts(dir.tryListNames()); assertEquals(datafiles, 2); // These checks don't work on Windows because the writer has the channel still @@ -189,11 +189,11 @@ public void testAbortTxnWithClosedAndOpenWriterShouldRemoveAllSSTables() throws if (!FBUtilities.isWindows) { LifecycleTransaction.waitForDeletions(); - assertFileCounts(dir.list()); + assertFileCounts(dir.tryListNames()); } txn.abort(); LifecycleTransaction.waitForDeletions(); - datafiles = assertFileCounts(dir.list()); + datafiles = assertFileCounts(dir.tryListNames()); assertEquals(datafiles, 0); validateCFS(cfs); } diff --git a/test/unit/org/apache/cassandra/io/sstable/SSTableWriterTestBase.java b/test/unit/org/apache/cassandra/io/sstable/SSTableWriterTestBase.java index 9e903654d598..41d026f93fed 100644 --- a/test/unit/org/apache/cassandra/io/sstable/SSTableWriterTestBase.java +++ b/test/unit/org/apache/cassandra/io/sstable/SSTableWriterTestBase.java @@ -18,7 +18,6 @@ package org.apache.cassandra.io.sstable; -import java.io.File; import java.nio.ByteBuffer; import java.util.HashSet; import java.util.Set; @@ -27,6 +26,7 @@ import java.util.concurrent.TimeUnit; import com.google.common.util.concurrent.Uninterruptibles; +import org.apache.cassandra.io.util.File; import org.junit.After; import org.junit.AfterClass; import org.junit.BeforeClass; @@ -145,11 +145,11 @@ public static void validateCFS(ColumnFamilyStore cfs) } for (File dir : cfs.getDirectories().getCFDirectories()) { - for (File f : dir.listFiles()) + for (File f : dir.tryList()) { - if (f.getName().contains("Data")) + if (f.name().contains("Data")) { - Descriptor d = Descriptor.fromFilename(f.getAbsolutePath()); + Descriptor d = Descriptor.fromFilename(f.absolutePath()); assertTrue(d.toString(), liveDescriptors.contains(d.generation)); } } diff --git a/test/unit/org/apache/cassandra/io/sstable/format/SSTableFlushObserverTest.java b/test/unit/org/apache/cassandra/io/sstable/format/SSTableFlushObserverTest.java index 5f1920639b14..838f3a69fa64 100644 --- 
a/test/unit/org/apache/cassandra/io/sstable/format/SSTableFlushObserverTest.java +++ b/test/unit/org/apache/cassandra/io/sstable/format/SSTableFlushObserverTest.java @@ -17,7 +17,6 @@ */ package org.apache.cassandra.io.sstable.format; -import java.io.File; import java.io.IOException; import java.nio.ByteBuffer; import java.util.Arrays; @@ -26,6 +25,7 @@ import java.util.Iterator; import org.apache.cassandra.db.commitlog.CommitLog; +import org.apache.cassandra.io.util.File; import org.apache.cassandra.schema.TableMetadata; import org.apache.cassandra.schema.ColumnMetadata; import org.apache.cassandra.config.DatabaseDescriptor; @@ -85,11 +85,11 @@ public void testFlushObserver() FlushObserver observer = new FlushObserver(); String sstableDirectory = DatabaseDescriptor.getAllDataFileLocations()[0]; - File directory = new File(sstableDirectory + File.pathSeparator + KS_NAME + File.pathSeparator + CF_NAME); + File directory = new File(sstableDirectory + File.pathSeparator() + KS_NAME + File.pathSeparator() + CF_NAME); directory.deleteOnExit(); - if (!directory.exists() && !directory.mkdirs()) - throw new FSWriteError(new IOException("failed to create tmp directory"), directory.getAbsolutePath()); + if (!directory.exists() && !directory.tryCreateDirectories()) + throw new FSWriteError(new IOException("failed to create tmp directory"), directory.absolutePath()); SSTableFormat.Type sstableFormat = SSTableFormat.Type.current(); diff --git a/test/unit/org/apache/cassandra/io/sstable/format/big/BigTableZeroCopyWriterTest.java b/test/unit/org/apache/cassandra/io/sstable/format/big/BigTableZeroCopyWriterTest.java index 3cf96f2698f8..d3aed25b511d 100644 --- a/test/unit/org/apache/cassandra/io/sstable/format/big/BigTableZeroCopyWriterTest.java +++ b/test/unit/org/apache/cassandra/io/sstable/format/big/BigTableZeroCopyWriterTest.java @@ -19,7 +19,6 @@ package org.apache.cassandra.io.sstable.format.big; import java.io.ByteArrayInputStream; -import java.io.File; import java.nio.ByteBuffer; import java.nio.file.Files; import java.nio.file.Paths; @@ -28,6 +27,7 @@ import java.util.function.Function; import com.google.common.collect.ImmutableSet; +import org.apache.cassandra.io.util.File; import org.junit.BeforeClass; import org.junit.Test; diff --git a/test/unit/org/apache/cassandra/io/sstable/metadata/MetadataSerializerTest.java b/test/unit/org/apache/cassandra/io/sstable/metadata/MetadataSerializerTest.java index 79cf83162877..6e1bd408ddee 100644 --- a/test/unit/org/apache/cassandra/io/sstable/metadata/MetadataSerializerTest.java +++ b/test/unit/org/apache/cassandra/io/sstable/metadata/MetadataSerializerTest.java @@ -17,8 +17,7 @@ */ package org.apache.cassandra.io.sstable.metadata; -import java.io.File; -import java.io.FileOutputStream; +import org.apache.cassandra.io.util.*; import java.io.IOException; import java.util.Arrays; import java.util.Collections; @@ -29,6 +28,7 @@ import org.junit.Test; import org.apache.cassandra.SchemaLoader; +import org.apache.cassandra.schema.TableMetadata; import org.apache.cassandra.config.DatabaseDescriptor; import org.apache.cassandra.db.SerializationHeader; import org.apache.cassandra.db.commitlog.CommitLogPosition; @@ -39,11 +39,9 @@ import org.apache.cassandra.io.sstable.format.SSTableFormat; import org.apache.cassandra.io.sstable.format.Version; import org.apache.cassandra.io.sstable.format.big.BigFormat; -import org.apache.cassandra.io.util.BufferedDataOutputStreamPlus; import org.apache.cassandra.io.util.DataOutputStreamPlus; import 
org.apache.cassandra.io.util.FileUtils; import org.apache.cassandra.io.util.RandomAccessReader; -import org.apache.cassandra.schema.TableMetadata; import org.apache.cassandra.utils.Throwables; import static org.junit.Assert.assertEquals; @@ -66,7 +64,7 @@ public void testSerialization() throws IOException MetadataSerializer serializer = new MetadataSerializer(); File statsFile = serialize(originalMetadata, serializer, BigFormat.latestVersion); - Descriptor desc = new Descriptor(statsFile.getParentFile(), "", "", 0, SSTableFormat.Type.BIG); + Descriptor desc = new Descriptor(statsFile.parent(), "", "", 0, SSTableFormat.Type.BIG); try (RandomAccessReader in = RandomAccessReader.open(statsFile)) { Map deserialized = serializer.deserialize(desc, in, EnumSet.allOf(MetadataType.class)); @@ -93,7 +91,7 @@ public void testHistogramSterilization() throws IOException // Serialize w/ overflowed histograms: MetadataSerializer serializer = new MetadataSerializer(); File statsFile = serialize(originalMetadata, serializer, BigFormat.latestVersion); - Descriptor desc = new Descriptor(statsFile.getParentFile(), "", "", 0, SSTableFormat.Type.BIG); + Descriptor desc = new Descriptor(statsFile.parent(), "", "", 0, SSTableFormat.Type.BIG); try (RandomAccessReader in = RandomAccessReader.open(statsFile)) { @@ -110,7 +108,7 @@ public File serialize(Map metadata, MetadataSer { // Serialize to tmp file File statsFile = FileUtils.createTempFile(Component.STATS.name, null); - try (DataOutputStreamPlus out = new BufferedDataOutputStreamPlus(new FileOutputStream(statsFile))) + try (DataOutputStreamPlus out = new FileOutputStreamPlus(statsFile)) { serializer.serialize(metadata, out, version); } @@ -176,7 +174,7 @@ public void testOldReadsNew(String oldV, String newV) throws IOException File statsFileLa = serialize(originalMetadata, serializer, BigFormat.instance.getVersion(oldV)); // Reading both as earlier version should yield identical results. 
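In this test the temp-file plumbing changes in two ways: the stats file is now written through FileOutputStreamPlus(statsFile) instead of BufferedDataOutputStreamPlus(new FileOutputStream(statsFile)), and the Descriptor is built from statsFile.parent() rather than getParentFile(). A small sketch of that write-then-describe sequence, assuming (as the hunks imply) that FileOutputStreamPlus is itself a DataOutputStreamPlus and that parent() returns the containing directory as a wrapper File; the "stats" prefix is illustrative, not a name from the patch:

    import java.io.IOException;

    import org.apache.cassandra.io.util.DataOutputStreamPlus;
    import org.apache.cassandra.io.util.File;
    import org.apache.cassandra.io.util.FileOutputStreamPlus;
    import org.apache.cassandra.io.util.FileUtils;

    class StatsFileSketch
    {
        static File writeStats(byte[] payload) throws IOException
        {
            File statsFile = FileUtils.createTempFile("stats", null);
            try (DataOutputStreamPlus out = new FileOutputStreamPlus(statsFile))
            {
                out.write(payload);          // no Buffered* wrapper needed any more
            }
            return statsFile;
        }

        static File statsDirectory(File statsFile)
        {
            return statsFile.parent();       // was statsFile.getParentFile()
        }
    }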
SSTableFormat.Type stype = SSTableFormat.Type.current(); - Descriptor desc = new Descriptor(stype.info.getVersion(oldV), statsFileLb.getParentFile(), "", "", 0, stype); + Descriptor desc = new Descriptor(stype.info.getVersion(oldV), statsFileLb.parent(), "", "", 0, stype); try (RandomAccessReader inLb = RandomAccessReader.open(statsFileLb); RandomAccessReader inLa = RandomAccessReader.open(statsFileLa)) { diff --git a/test/unit/org/apache/cassandra/io/util/BufferedRandomAccessFileTest.java b/test/unit/org/apache/cassandra/io/util/BufferedRandomAccessFileTest.java index 764190c0f112..bb54f25d5dc4 100644 --- a/test/unit/org/apache/cassandra/io/util/BufferedRandomAccessFileTest.java +++ b/test/unit/org/apache/cassandra/io/util/BufferedRandomAccessFileTest.java @@ -21,10 +21,7 @@ import org.apache.cassandra.config.DatabaseDescriptor; import org.apache.cassandra.utils.ByteBufferUtil; -import org.apache.cassandra.utils.SyncUtil; -import java.io.File; -import java.io.FileOutputStream; import java.io.IOException; import java.nio.ByteBuffer; import java.util.Arrays; @@ -193,7 +190,7 @@ public void testLength() throws IOException w.finish(); // will use cachedlength - try (FileHandle.Builder builder = new FileHandle.Builder(tmpFile.getPath()); + try (FileHandle.Builder builder = new FileHandle.Builder(tmpFile.path()); FileHandle fh = builder.complete(); RandomAccessReader r = fh.createReader()) { @@ -354,7 +351,7 @@ public void testIsEOF() throws IOException for (final int offset : Arrays.asList(0, 8)) { File file1 = writeTemporaryFile(new byte[16]); - try (FileHandle.Builder builder = new FileHandle.Builder(file1.getPath()).bufferSize(bufferSize); + try (FileHandle.Builder builder = new FileHandle.Builder(file1.path()).bufferSize(bufferSize); FileHandle fh = builder.complete(); RandomAccessReader file = fh.createReader()) { @@ -366,7 +363,7 @@ public void testIsEOF() throws IOException for (final int n : Arrays.asList(1, 2, 4, 8)) { File file1 = writeTemporaryFile(new byte[16]); - try (FileHandle.Builder builder = new FileHandle.Builder(file1.getPath()).bufferSize(bufferSize); + try (FileHandle.Builder builder = new FileHandle.Builder(file1.path()).bufferSize(bufferSize); FileHandle fh = builder.complete(); RandomAccessReader file = fh.createReader()) { @@ -427,11 +424,11 @@ public void testBytesPastMark() throws IOException tmpFile.deleteOnExit(); // Create the BRAF by filename instead of by file. - try (FileHandle.Builder builder = new FileHandle.Builder(tmpFile.getPath()); + try (FileHandle.Builder builder = new FileHandle.Builder(tmpFile.path()); FileHandle fh = builder.complete(); RandomAccessReader r = fh.createReader()) { - assert tmpFile.getPath().equals(r.getPath()); + assert tmpFile.path().equals(r.getPath()); // Create a mark and move the rw there. 
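The reader-side changes in this file are pure accessor renames: FileHandle.Builder is fed tmpFile.path() instead of tmpFile.getPath(), and the path assertion compares against path(). A compact sketch of the pattern, assuming path() returns the same string its java.io.File counterpart did:

    import org.apache.cassandra.io.util.File;
    import org.apache.cassandra.io.util.FileHandle;
    import org.apache.cassandra.io.util.RandomAccessReader;

    class ReaderPathSketch
    {
        static long openAndMeasure(File f)
        {
            try (FileHandle.Builder builder = new FileHandle.Builder(f.path()); // was f.getPath()
                 FileHandle fh = builder.complete();
                 RandomAccessReader reader = fh.createReader())
            {
                // the tests assert this same round-trip of the path string
                assert f.path().equals(reader.getPath());
                return reader.length();
            }
        }
    }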
final DataPosition mark = r.mark(); @@ -607,9 +604,9 @@ private File writeTemporaryFile(byte[] data) throws IOException { File f = FileUtils.createTempFile("BRAFTestFile", null); f.deleteOnExit(); - FileOutputStream fout = new FileOutputStream(f); + FileOutputStreamPlus fout = new FileOutputStreamPlus(f); fout.write(data); - SyncUtil.sync(fout); + fout.sync(); fout.close(); return f; } diff --git a/test/unit/org/apache/cassandra/io/util/ChecksummedRandomAccessReaderTest.java b/test/unit/org/apache/cassandra/io/util/ChecksummedRandomAccessReaderTest.java index 4963712fc008..91584efe436b 100644 --- a/test/unit/org/apache/cassandra/io/util/ChecksummedRandomAccessReaderTest.java +++ b/test/unit/org/apache/cassandra/io/util/ChecksummedRandomAccessReaderTest.java @@ -18,7 +18,6 @@ package org.apache.cassandra.io.util; -import java.io.File; import java.io.IOException; import java.io.RandomAccessFile; import java.util.Arrays; @@ -119,7 +118,7 @@ public void corruptionDetection() throws IOException assert data.exists(); // simulate corruption of file - try (RandomAccessFile dataFile = new RandomAccessFile(data, "rw")) + try (RandomAccessFile dataFile = new RandomAccessFile(data.toJavaIOFile(), "rw")) { dataFile.seek(1024); dataFile.write((byte) 5); diff --git a/test/unit/org/apache/cassandra/io/util/ChecksummedSequentialWriterTest.java b/test/unit/org/apache/cassandra/io/util/ChecksummedSequentialWriterTest.java index 6837d1df27e5..5d92d4584fa8 100644 --- a/test/unit/org/apache/cassandra/io/util/ChecksummedSequentialWriterTest.java +++ b/test/unit/org/apache/cassandra/io/util/ChecksummedSequentialWriterTest.java @@ -18,7 +18,6 @@ */ package org.apache.cassandra.io.util; -import java.io.File; import java.io.IOException; import java.util.ArrayList; import java.util.List; @@ -44,7 +43,7 @@ public static void setupDD() public void cleanup() { for (TestableSW sw : writers) - sw.file.delete(); + sw.file.tryDelete(); writers.clear(); } diff --git a/test/unit/org/apache/cassandra/io/util/DataOutputTest.java b/test/unit/org/apache/cassandra/io/util/DataOutputTest.java index b6291c0fc435..4c3c6bb6e662 100644 --- a/test/unit/org/apache/cassandra/io/util/DataOutputTest.java +++ b/test/unit/org/apache/cassandra/io/util/DataOutputTest.java @@ -24,9 +24,6 @@ import java.io.DataOutput; import java.io.DataOutputStream; import java.io.EOFException; -import java.io.File; -import java.io.FileInputStream; -import java.io.FileOutputStream; import java.io.IOException; import java.io.RandomAccessFile; import java.nio.BufferOverflowException; @@ -327,16 +324,16 @@ public void testWrappedFileOutputStream() throws IOException File file = FileUtils.createTempFile("dataoutput", "test"); try { - DataOutputStreamPlus write = new WrappedDataOutputStreamPlus(new FileOutputStream(file)); + DataOutputStreamPlus write = new WrappedDataOutputStreamPlus(new FileOutputStreamPlus(file)); DataInput canon = testWrite(write); write.close(); - DataInputStream test = new DataInputStream(new FileInputStream(file)); + DataInputStream test = new DataInputStream(new FileInputStreamPlus(file)); testRead(test, canon); test.close(); } finally { - Assert.assertTrue(file.delete()); + Assert.assertTrue(file.tryDelete()); } } @@ -346,16 +343,16 @@ public void testFileOutputStream() throws IOException File file = FileUtils.createTempFile("dataoutput", "test"); try { - DataOutputStreamPlus write = new BufferedDataOutputStreamPlus(new FileOutputStream(file)); + DataOutputStreamPlus write = new FileOutputStreamPlus(file); DataInput canon = testWrite(write); 
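Two patterns recur in the hunks above: FileOutputStreamPlus replaces FileOutputStream plus SyncUtil.sync(), with the sync now a method on the stream itself, and call sites that must keep using java.io APIs (RandomAccessFile here) bridge through toJavaIOFile(). A sketch of both, assuming sync() forces the written bytes to disk the way SyncUtil.sync(fout) did:

    import java.io.IOException;
    import java.io.RandomAccessFile;

    import org.apache.cassandra.io.util.File;
    import org.apache.cassandra.io.util.FileOutputStreamPlus;
    import org.apache.cassandra.io.util.FileUtils;

    class FileInteropSketch
    {
        static File writeAndSync(byte[] data) throws IOException
        {
            File f = FileUtils.createTempFile("sketch", null);
            f.deleteOnExit();
            try (FileOutputStreamPlus fout = new FileOutputStreamPlus(f))
            {
                fout.write(data);
                fout.sync();            // was SyncUtil.sync(fout) on a plain FileOutputStream
            }
            return f;
        }

        static void flipByte(File data, long position) throws IOException
        {
            // RandomAccessFile still wants a java.io.File, hence toJavaIOFile()
            try (RandomAccessFile raf = new RandomAccessFile(data.toJavaIOFile(), "rw"))
            {
                raf.seek(position);
                raf.write(5);           // mirrors the corruption step in the checksummed-reader test
            }
        }
    }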
write.close(); - DataInputStream test = new DataInputStream(new FileInputStream(file)); + DataInputStream test = new DataInputStream(new FileInputStreamPlus(file)); testRead(test, canon); test.close(); } finally { - Assert.assertTrue(file.delete()); + Assert.assertTrue(file.tryDelete()); } } @@ -366,17 +363,17 @@ public void testRandomAccessFile() throws IOException try { @SuppressWarnings("resource") - final RandomAccessFile raf = new RandomAccessFile(file, "rw"); + final RandomAccessFile raf = new RandomAccessFile(file.toJavaIOFile(), "rw"); DataOutputStreamPlus write = new BufferedDataOutputStreamPlus(raf.getChannel()); DataInput canon = testWrite(write); write.close(); - DataInputStream test = new DataInputStream(new FileInputStream(file)); + DataInputStream test = new DataInputStream(new FileInputStreamPlus(file)); testRead(test, canon); test.close(); } finally { - Assert.assertTrue(file.delete()); + Assert.assertTrue(file.tryDelete()); } } @@ -390,10 +387,10 @@ public void testSequentialWriter() throws IOException DataInput canon = testWrite(write); write.flush(); write.close(); - DataInputStream test = new DataInputStream(new FileInputStream(file)); + DataInputStream test = new DataInputStream(new FileInputStreamPlus(file)); testRead(test, canon); test.close(); - Assert.assertTrue(file.delete()); + Assert.assertTrue(file.tryDelete()); } private DataInput testWrite(DataOutputPlus test) throws IOException diff --git a/test/unit/org/apache/cassandra/io/util/FileTest.java b/test/unit/org/apache/cassandra/io/util/FileTest.java new file mode 100644 index 000000000000..d12565c4df09 --- /dev/null +++ b/test/unit/org/apache/cassandra/io/util/FileTest.java @@ -0,0 +1,326 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.cassandra.io.util; + +import java.io.IOException; +import java.io.PrintWriter; +import java.io.StringWriter; +import java.nio.file.Files; +import java.util.List; +import java.util.concurrent.ThreadLocalRandom; +import java.util.function.BiFunction; +import java.util.function.Function; +import java.util.function.Predicate; + +import com.google.common.collect.ImmutableList; +import com.google.common.util.concurrent.RateLimiter; +import org.junit.Assert; +import org.junit.Test; + +import org.psjava.util.Triple; + +import static java.util.concurrent.TimeUnit.NANOSECONDS; +import static java.util.concurrent.TimeUnit.SECONDS; +import static org.apache.cassandra.config.CassandraRelevantProperties.JAVA_IO_TMPDIR; + +public class FileTest +{ + private static final java.io.File dir; + static + { + java.io.File parent = new java.io.File(JAVA_IO_TMPDIR.getString()); + String dirName = Long.toHexString(ThreadLocalRandom.current().nextLong()); + while (new java.io.File(parent, dirName).exists()) + dirName = Long.toHexString(ThreadLocalRandom.current().nextLong()); + dir = new java.io.File(parent, dirName); + dir.mkdirs(); + new File(dir).deleteRecursiveOnExit(); + } + + @Test + public void testEquivalence() throws IOException + { + java.io.File notExists = new java.io.File(dir, "notExists"); + java.io.File regular = new java.io.File(dir, "regular"); + regular.createNewFile(); + java.io.File regularLink = new java.io.File(dir, "regularLink"); + Files.createSymbolicLink(regularLink.toPath(), regular.toPath()); + java.io.File emptySubdir = new java.io.File(dir, "empty"); + java.io.File emptySubdirLink = new java.io.File(dir, "emptyLink"); + emptySubdir.mkdir(); + Files.createSymbolicLink(emptySubdirLink.toPath(), emptySubdir.toPath()); + java.io.File nonEmptySubdir = new java.io.File(dir, "nonEmpty"); + java.io.File nonEmptySubdirLink = new java.io.File(dir, "nonEmptyLink"); + nonEmptySubdir.mkdir(); + Files.createSymbolicLink(nonEmptySubdirLink.toPath(), nonEmptySubdir.toPath()); + new java.io.File(nonEmptySubdir, "something").createNewFile(); + + testEquivalence(""); + + List setup = ImmutableList.of( + () -> {}, + () -> dir.setWritable(false), + () -> dir.setReadable(false), + () -> dir.setWritable(true) + ); + + for (Runnable run : setup) + { + run.run(); + testEquivalence(notExists.getPath()); + testEquivalence(nonAbsolute(notExists)); + testEquivalence(regular.getPath()); + testEquivalence(nonAbsolute(regular)); + testEquivalence(regularLink.getPath()); + testEquivalence(nonAbsolute(regularLink)); + testEquivalence(emptySubdir.getPath()); + testEquivalence(nonAbsolute(emptySubdir)); + testEquivalence(emptySubdirLink.getPath()); + testEquivalence(nonAbsolute(emptySubdirLink)); + testEquivalence(nonEmptySubdir.getPath()); + testEquivalence(nonAbsolute(nonEmptySubdir)); + testEquivalence(nonEmptySubdirLink.getPath()); + testEquivalence(nonAbsolute(nonEmptySubdirLink)); + } + + emptySubdirLink.delete(); + regularLink.delete(); + regular.delete(); + emptySubdir.delete(); + } + + private static String nonAbsolute(java.io.File file) + { + return file.getParent() + File.pathSeparator() + ".." 
+ File.pathSeparator() + file.getParentFile().getName() + File.pathSeparator() + file.getName(); + } + + private void testEquivalence(String path) throws IOException + { + java.io.File file = new java.io.File(path); + if (file.exists()) testExists(path); + else testNotExists(path); + } + + private void testBasic(String path) throws IOException + { + // TODO: confirm - it seems that accuracy of lastModified may differ between APIs on Linux?? + testEquivalence(path, f -> f.lastModified() / 1000, f -> f.lastModified() / 1000); + testEquivalence(path, java.io.File::length, File::length); + testEquivalence(path, java.io.File::canExecute, File::isExecutable); + testEquivalence(path, java.io.File::canRead, File::isReadable); + testEquivalence(path, java.io.File::canWrite, File::isWritable); + testEquivalence(path, java.io.File::exists, File::exists); + testEquivalence(path, java.io.File::isAbsolute, File::isAbsolute); + testEquivalence(path, java.io.File::isDirectory, File::isDirectory); + testEquivalence(path, java.io.File::isFile, File::isFile); + testEquivalence(path, java.io.File::getPath, File::path); + testEquivalence(path, java.io.File::getAbsolutePath, File::absolutePath); + testEquivalence(path, java.io.File::getCanonicalPath, File::canonicalPath); + testEquivalence(path, java.io.File::getParent, File::parentPath); + testEquivalence(path, java.io.File::toPath, File::toPath); + testEquivalence(path, java.io.File::list, File::tryListNames); + testEquivalence(path, java.io.File::listFiles, File::tryList); + java.io.File file = new java.io.File(path); + if (file.getParentFile() != null) testBasic(file.getParent()); + if (!file.equals(file.getAbsoluteFile())) testBasic(file.getAbsolutePath()); + if (!file.equals(file.getCanonicalFile())) testBasic(file.getCanonicalPath()); + } + + private void testPermissionsEquivalence(String path) + { + ImmutableList, BiFunction, Function>> tests = ImmutableList.of( + Triple.create(java.io.File::setReadable, File::trySetReadable, java.io.File::canRead), + Triple.create(java.io.File::setWritable, File::trySetWritable, java.io.File::canWrite), + Triple.create(java.io.File::setExecutable, File::trySetExecutable, java.io.File::canExecute) + ); + for (Triple, BiFunction, Function> test : tests) + { + java.io.File file = new java.io.File(path); + boolean cur = test.v3.apply(file); + boolean canRead = file.canRead(); + boolean canWrite = file.canWrite(); + boolean canExecute = file.canExecute(); + testEquivalence(path, f -> test.v1.apply(f, !cur), f -> test.v2.apply(f, !cur), (f, success) -> { + testEquivalence(path, java.io.File::canExecute, File::isExecutable); + testEquivalence(path, java.io.File::canRead, File::isReadable); + testEquivalence(path, java.io.File::canWrite, File::isWritable); + Assert.assertEquals(success != cur, test.v3.apply(file)); + test.v1.apply(f, cur); + }); + Assert.assertEquals(canRead, file.canRead()); + Assert.assertEquals(canWrite, file.canWrite()); + Assert.assertEquals(canExecute, file.canExecute()); + } + } + + private void testCreation(String path, IOConsumer afterEach) + { + testEquivalence(path, java.io.File::createNewFile, File::createFileIfNotExists, afterEach); + testEquivalence(path, java.io.File::mkdir, File::tryCreateDirectory, afterEach); + testEquivalence(path, java.io.File::mkdirs, File::tryCreateDirectories, afterEach); + } + + private void testExists(String path) throws IOException + { + testBasic(path); + testPermissionsEquivalence(path); + testCreation(path, ignore -> {}); + testEquivalence(path, 
java.io.File::delete, File::tryDelete, (f, s) -> {if (s) f.createNewFile(); }); + testTryVsConfirm(path, java.io.File::delete, File::delete, (f, s) -> {if (s) f.createNewFile(); }); + } + + private void testNotExists(String path) throws IOException + { + testBasic(path); + testPermissionsEquivalence(path); + testCreation(path, java.io.File::delete); + testEquivalence(path, java.io.File::delete, File::tryDelete); + testTryVsConfirm(path, java.io.File::delete, File::delete); + } + + interface IOFn { O apply(I in) throws IOException; } + interface IOConsumer { void accept(I1 i1) throws IOException; } + interface IOBiConsumer { void accept(I1 i1, I2 i2) throws IOException; } + + private void testEquivalence(String path, IOFn canonical, IOFn test) + { + testEquivalence(path, canonical, test, ignore -> {}); + } + + private void testEquivalence(String path, IOFn canonical, IOFn test, IOConsumer afterEach) + { + testEquivalence(path, canonical, test, (f, ignore) -> afterEach.accept(f)); + } + + private void testEquivalence(String path, IOFn canonical, IOFn test, IOBiConsumer afterEach) + { + java.io.File file = new java.io.File(path); + Object expect; + try + { + expect = canonical.apply(file); + } + catch (Throwable e) + { + expect = new Failed(e); + } + try { afterEach.accept(file, !(expect instanceof Failed) && !Boolean.FALSE.equals(expect)); } catch (IOException e) { throw new AssertionError(e); } + Object actual; + try + { + actual = test.apply(new File(path)); + } + catch (Throwable e) + { + actual = new Failed(e); + } + try { afterEach.accept(file, !(actual instanceof Failed) && !Boolean.FALSE.equals(actual)); } catch (IOException e) { throw new AssertionError(e); } + if (expect instanceof String[] && actual instanceof String[]) Assert.assertArrayEquals((String[])expect, (String[])actual); + else if (expect instanceof java.io.File[] && actual instanceof File[]) assertArrayEquals((java.io.File[]) expect, (File[]) actual); + else Assert.assertEquals(path + "," + canonical.toString(), expect, actual); + } + + private void testTryVsConfirm(String path, Predicate canonical, IOConsumer test) + { + testTryVsConfirm(path, canonical, test, (f, s) -> {}); + } + private void testTryVsConfirm(String path, Predicate canonical, IOConsumer test, IOConsumer afterEach) + { + testTryVsConfirm(path, canonical, test, (f, ignore) -> afterEach.accept(f)); + } + private void testTryVsConfirm(String path, Predicate canonical, IOConsumer test, IOBiConsumer afterEach) + { + java.io.File file = new java.io.File(path); + boolean expect = canonical.test(file); + try { afterEach.accept(file, expect); } catch (IOException e) { throw new AssertionError(e); } + boolean actual; + try + { + test.accept(new File(path)); + actual = true; + } + catch (Throwable e) + { + actual = false; + } + try { afterEach.accept(file, actual); } catch (IOException e) { throw new AssertionError(e); } + Assert.assertEquals(path + "," + canonical.toString(), expect, actual); + } + + private static void assertArrayEquals(java.io.File[] expect, File[] actual) + { + Assert.assertEquals(expect.length, actual.length); + for (int i = 0 ; i < expect.length ; ++i) + Assert.assertEquals(expect[i].getPath(), actual[i].path()); + } + + private static class Failed + { + final Throwable with; + + private Failed(Throwable with) + { + this.with = with; + } + + @Override + public boolean equals(Object obj) + { + return obj instanceof Failed; + } + + @Override + public String toString() + { + StringWriter sw = new StringWriter(); + with.printStackTrace(new 
PrintWriter(sw)); + return sw.toString(); + } + } + + @Test + public void testDeletes() throws IOException + { + File subdir = new File(dir, "deletes"); + File file = new File(dir, "f"); + subdir.tryCreateDirectory(); + Assert.assertTrue(new File(subdir, "subsubdir").tryCreateDirectory()); + subdir.deleteRecursive(); + Assert.assertFalse(subdir.exists()); + + subdir.tryCreateDirectory(); + file.createFileIfNotExists(); + Assert.assertTrue(new File(subdir, "subsubdir").tryCreateDirectory()); + long start = System.nanoTime(); + RateLimiter rateLimiter = RateLimiter.create(2); + subdir.deleteRecursive(rateLimiter); + file.delete(rateLimiter); + long end = System.nanoTime(); + Assert.assertTrue("" + NANOSECONDS.toMillis(end - start), SECONDS.toNanos(1) <= end - start); + Assert.assertFalse(subdir.exists()); + Assert.assertFalse(file.exists()); + } + + @Test + public void testAncestry() + { + Assert.assertTrue(new File("somewhere/../").isAncestorOf(new File("somewhere"))); + Assert.assertTrue(new File("../").isAncestorOf(new File(""))); + } +} diff --git a/test/unit/org/apache/cassandra/io/util/FileUtilsTest.java b/test/unit/org/apache/cassandra/io/util/FileUtilsTest.java index 7d19f516ff24..737434ccabcc 100644 --- a/test/unit/org/apache/cassandra/io/util/FileUtilsTest.java +++ b/test/unit/org/apache/cassandra/io/util/FileUtilsTest.java @@ -18,7 +18,6 @@ */ package org.apache.cassandra.io.util; -import java.io.File; import java.io.IOException; import java.io.RandomAccessFile; import java.nio.charset.StandardCharsets; @@ -88,11 +87,11 @@ public void testTruncate() throws IOException byte[] b = Files.readAllBytes(file.toPath()); assertEquals(expected, new String(b, StandardCharsets.UTF_8)); - FileUtils.truncate(file.getAbsolutePath(), 10); + FileUtils.truncate(file.absolutePath(), 10); b = Files.readAllBytes(file.toPath()); assertEquals("The quick ", new String(b, StandardCharsets.UTF_8)); - FileUtils.truncate(file.getAbsolutePath(), 0); + FileUtils.truncate(file.absolutePath(), 0); b = Files.readAllBytes(file.toPath()); assertEquals(0, b.length); } @@ -103,7 +102,7 @@ public void testFolderSize() throws Exception File folder = createFolder(Paths.get(DatabaseDescriptor.getAllDataFileLocations()[0], "testFolderSize")); folder.deleteOnExit(); - File childFolder = createFolder(Paths.get(folder.getPath(), "child")); + File childFolder = createFolder(Paths.get(folder.path(), "child")); File[] files = { createFile(new File(folder, "001"), 10000), @@ -222,14 +221,14 @@ public void testDeleteDirectoryIfEmpty() throws IOException private File createFolder(Path path) { - File folder = path.toFile(); + File folder = new File(path); FileUtils.createDirectory(folder); return folder; } private File createFile(File file, long size) { - try (RandomAccessFile f = new RandomAccessFile(file, "rw")) + try (RandomAccessFile f = new RandomAccessFile(file.toJavaIOFile(), "rw")) { f.setLength(size); } diff --git a/test/unit/org/apache/cassandra/io/util/MmappedRegionsTest.java b/test/unit/org/apache/cassandra/io/util/MmappedRegionsTest.java index 400bea92b248..5692342d49c1 100644 --- a/test/unit/org/apache/cassandra/io/util/MmappedRegionsTest.java +++ b/test/unit/org/apache/cassandra/io/util/MmappedRegionsTest.java @@ -18,7 +18,6 @@ package org.apache.cassandra.io.util; -import java.io.File; import java.io.IOException; import java.nio.ByteBuffer; import java.util.Random; @@ -302,11 +301,11 @@ public void testMapForCompressionMetadata() throws Exception File f = FileUtils.createTempFile("testMapForCompressionMetadata", 
"1"); f.deleteOnExit(); - File cf = FileUtils.createTempFile(f.getName() + ".metadata", "1"); + File cf = FileUtils.createTempFile(f.name() + ".metadata", "1"); cf.deleteOnExit(); MetadataCollector sstableMetadataCollector = new MetadataCollector(new ClusteringComparator(BytesType.instance)); - try(SequentialWriter writer = new CompressedSequentialWriter(f, cf.getAbsolutePath(), + try(SequentialWriter writer = new CompressedSequentialWriter(f, cf.absolutePath(), null, SequentialWriterOption.DEFAULT, CompressionParams.snappy(), sstableMetadataCollector)) { @@ -314,7 +313,7 @@ public void testMapForCompressionMetadata() throws Exception writer.finish(); } - CompressionMetadata metadata = new CompressionMetadata(cf.getAbsolutePath(), f.length(), true); + CompressionMetadata metadata = new CompressionMetadata(cf.absolutePath(), f.length(), true); try(ChannelProxy channel = new ChannelProxy(f); MmappedRegions regions = MmappedRegions.map(channel, metadata)) { diff --git a/test/unit/org/apache/cassandra/io/util/NIODataInputStreamTest.java b/test/unit/org/apache/cassandra/io/util/NIODataInputStreamTest.java index eba6d8d4fd49..7c42082a61c6 100644 --- a/test/unit/org/apache/cassandra/io/util/NIODataInputStreamTest.java +++ b/test/unit/org/apache/cassandra/io/util/NIODataInputStreamTest.java @@ -25,7 +25,6 @@ import java.io.DataInputStream; import java.io.DataOutputStream; import java.io.EOFException; -import java.io.File; import java.io.IOException; import java.io.RandomAccessFile; import java.nio.ByteBuffer; @@ -224,7 +223,7 @@ public void testAvailable() throws Exception assertEquals(8190 - 10 - 4096, is.available()); File f = FileUtils.createTempFile("foo", "bar"); - RandomAccessFile fos = new RandomAccessFile(f, "rw"); + RandomAccessFile fos = new RandomAccessFile(f.toJavaIOFile(), "rw"); fos.write(new byte[10]); fos.seek(0); diff --git a/test/unit/org/apache/cassandra/io/util/RandomAccessReaderTest.java b/test/unit/org/apache/cassandra/io/util/RandomAccessReaderTest.java index 0d86f0baa8cc..967ddac002b6 100644 --- a/test/unit/org/apache/cassandra/io/util/RandomAccessReaderTest.java +++ b/test/unit/org/apache/cassandra/io/util/RandomAccessReaderTest.java @@ -20,7 +20,6 @@ */ package org.apache.cassandra.io.util; -import java.io.File; import java.io.IOException; import java.nio.ByteBuffer; import java.nio.ByteOrder; @@ -291,14 +290,14 @@ private static File writeFile(Parameters params) throws IOException private static void testReadFully(Parameters params) throws IOException { final File f = writeFile(params); - try (FileHandle.Builder builder = new FileHandle.Builder(f.getPath()) + try (FileHandle.Builder builder = new FileHandle.Builder(f.path()) .bufferType(params.bufferType).bufferSize(params.bufferSize)) { builder.mmapped(params.mmappedRegions); try (FileHandle fh = builder.complete(); RandomAccessReader reader = fh.createReader()) { - assertEquals(f.getAbsolutePath(), reader.getPath()); + assertEquals(f.absolutePath(), reader.getPath()); assertEquals(f.length(), reader.length()); assertEquals(f.length(), reader.bytesRemaining()); assertEquals(Math.min(Integer.MAX_VALUE, f.length()), reader.available()); @@ -332,11 +331,11 @@ public void testReadBytes() throws IOException assert f.exists(); - try (FileHandle.Builder builder = new FileHandle.Builder(f.getPath()); + try (FileHandle.Builder builder = new FileHandle.Builder(f.path()); FileHandle fh = builder.complete(); RandomAccessReader reader = fh.createReader()) { - assertEquals(f.getAbsolutePath(), reader.getPath()); + 
assertEquals(f.absolutePath(), reader.getPath()); assertEquals(expected.length(), reader.length()); ByteBuffer b = ByteBufferUtil.read(reader, expected.length()); @@ -363,7 +362,7 @@ public void testReset() throws IOException assert f.exists(); - try (FileHandle.Builder builder = new FileHandle.Builder(f.getPath()); + try (FileHandle.Builder builder = new FileHandle.Builder(f.path()); FileHandle fh = builder.complete(); RandomAccessReader reader = fh.createReader()) { @@ -443,7 +442,7 @@ private static void testSeek(int numThreads) throws IOException, InterruptedExce assert f.exists(); - try (FileHandle.Builder builder = new FileHandle.Builder(f.getPath())) + try (FileHandle.Builder builder = new FileHandle.Builder(f.path())) { final Runnable worker = () -> { @@ -520,7 +519,7 @@ public void testSkipBytesNonPositive() throws IOException { Parameters params = new Parameters(8192, 4096); final File f = writeFile(params); - try (FileHandle.Builder builder = new FileHandle.Builder(f.getPath()) + try (FileHandle.Builder builder = new FileHandle.Builder(f.path()) .bufferType(params.bufferType).bufferSize(params.bufferSize)) { builder.mmapped(params.mmappedRegions); @@ -538,7 +537,7 @@ public void testSkipBytesClosed() throws IOException { Parameters params = new Parameters(8192, 4096); final File f = writeFile(params); - try (FileHandle.Builder builder = new FileHandle.Builder(f.getPath()) + try (FileHandle.Builder builder = new FileHandle.Builder(f.path()) .bufferType(params.bufferType).bufferSize(params.bufferSize)) { try (FileHandle fh = builder.complete(); @@ -553,7 +552,7 @@ public void testSkipBytesClosed() throws IOException private static void testSkipBytes(Parameters params, int expectationMultiples) throws IOException { final File f = writeFile(params); - try (FileHandle.Builder builder = new FileHandle.Builder(f.getPath()) + try (FileHandle.Builder builder = new FileHandle.Builder(f.path()) .bufferType(params.bufferType).bufferSize(params.bufferSize)) { builder.mmapped(params.mmappedRegions); diff --git a/test/unit/org/apache/cassandra/io/util/RewindableDataInputStreamPlusTest.java b/test/unit/org/apache/cassandra/io/util/RewindableDataInputStreamPlusTest.java deleted file mode 100644 index 08c9ddf4e462..000000000000 --- a/test/unit/org/apache/cassandra/io/util/RewindableDataInputStreamPlusTest.java +++ /dev/null @@ -1,539 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.cassandra.io.util; - -import java.io.ByteArrayInputStream; -import java.io.ByteArrayOutputStream; -import java.io.DataOutputStream; -import java.io.File; -import java.io.IOException; - -import org.junit.Before; -import org.junit.Test; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; - -public class RewindableDataInputStreamPlusTest -{ - - private final int INITIAL_BUFFER_SIZE = 1; - - private File file; - - @Before - public void setup() throws Exception - { - this.file = new File(FileUtils.getTempDir(), "subdir/test.buffer"); - } - - @Test - public void testMarkAndResetSimple() throws Exception - { - byte[] testData; - - ByteArrayOutputStream baos = new ByteArrayOutputStream(); - try (DataOutputStream out = new DataOutputStream(baos)) - { - // boolean - out.writeBoolean(true); - // byte - out.writeByte(0x1); - // char - out.writeChar('a'); - // short - out.writeShort(1); - // int - out.writeInt(1); - // long - out.writeLong(1L); - // float - out.writeFloat(1.0f); - // double - out.writeDouble(1.0d); - - // String - out.writeUTF("abc"); - testData = baos.toByteArray(); - } - - for (int memCapacity = 0; memCapacity <= 16; memCapacity++) - { - int diskCapacity = 16 - memCapacity; - try (RewindableDataInputStreamPlus reader = new RewindableDataInputStreamPlus(new ByteArrayInputStream(testData), - INITIAL_BUFFER_SIZE, memCapacity, file, - diskCapacity)) - { - try { - //should mark before resetting - reader.reset(null); - fail("Should have thrown IOException"); - } catch (IOException e) {} - - assertTrue(reader.readBoolean()); - - reader.mark(); - - try { - //cannot mark already marked stream - reader.mark(); - fail("Should have thrown IllegalStateException"); - } catch (IllegalStateException e) {} - - assertEquals(0x1, reader.readByte()); - assertEquals('a', reader.readChar()); - assertEquals(3, reader.bytesPastMark(null)); - reader.reset(null); - - try { - //cannot mark when reading from cache - reader.mark(); - fail("Should have thrown IllegalStateException"); - } catch (IllegalStateException e) {} - - //read again previous sequence - assertEquals(0x1, reader.readByte()); - assertEquals('a', reader.readChar()); - //finish reading again previous sequence - assertEquals(1, reader.readShort()); - - reader.mark(); - assertEquals(1, reader.readInt()); - assertEquals(1L, reader.readLong()); - assertEquals(1.0f, reader.readFloat(), 0); - assertEquals(16, reader.bytesPastMark(null)); - reader.reset(null); - - //read again previous sequence - assertEquals(1, reader.readInt()); - assertEquals(1L, reader.readLong()); - assertEquals(1.0f, reader.readFloat(), 0); - //finish reading again previous sequence - - //mark again - reader.mark(); - assertEquals(1.0d, reader.readDouble(), 0); - assertEquals(8, reader.bytesPastMark(null)); - reader.reset(null); - - //read again previous sequence - assertEquals(1.0d, reader.readDouble(), 0); - //finish reading again previous sequence - - //mark and reset - reader.mark(); - reader.reset(null); - - assertEquals("abc", reader.readUTF()); - - //check max file size - assertEquals(diskCapacity, file.length()); - } - assertFalse(file.exists()); - } - } - - @Test - public void testVeryLargeCapacity() throws Exception - { - byte[] testData; - - ByteArrayOutputStream baos = new ByteArrayOutputStream(); - try (DataOutputStream out = new DataOutputStream(baos)) - { - out.writeUTF("abc"); - testData = baos.toByteArray(); - } - - 
try (RewindableDataInputStreamPlus reader = new RewindableDataInputStreamPlus(new ByteArrayInputStream(testData), - INITIAL_BUFFER_SIZE, Integer.MAX_VALUE, file, - Integer.MAX_VALUE)) - { - reader.mark(); - assertEquals("abc", reader.readUTF()); - reader.reset(); - assertEquals("abc", reader.readUTF()); - } - assertFalse(file.exists()); - - - baos = new ByteArrayOutputStream(); - try (DataOutputStream out = new DataOutputStream(baos)) - { - out.writeBoolean(true); - out.writeBoolean(true); - testData = baos.toByteArray(); - } - } - - @Test - public void testMarkAndResetBigBuffer() throws Exception - { - byte[] testData; - - ByteArrayOutputStream baos = new ByteArrayOutputStream(); - try (DataOutputStream out = new DataOutputStream(baos)) - { - // boolean - out.writeBoolean(true); - // byte - out.writeByte(0x1); - // char - out.writeChar('a'); - // short - out.writeShort(1); - // int - out.writeInt(1); - // long - out.writeLong(1L); - // float - out.writeFloat(1.0f); - // double - out.writeDouble(1.0d); - - // String - out.writeUTF("abc"); - testData = baos.toByteArray(); - - // 1 (boolean) + 1 (byte) + 2 (char) + 2 (short) + 4 (int) + 8 (long) - // + 4 (float) + 8 (double) + 5 bytes (utf string) - } - - for (int memCapacity = 0; memCapacity <= 18; memCapacity++) - { - int diskCapacity = 18 - memCapacity; - try (RewindableDataInputStreamPlus reader = new RewindableDataInputStreamPlus(new ByteArrayInputStream(testData), - INITIAL_BUFFER_SIZE, memCapacity, file, - diskCapacity)) - { - //read a big amount before resetting - reader.mark(); - assertTrue(reader.readBoolean()); - assertEquals(0x1, reader.readByte()); - assertEquals('a', reader.readChar()); - assertEquals(1, reader.readShort()); - assertEquals(1, reader.readInt()); - assertEquals(1L, reader.readLong()); - reader.reset(); - - //read from buffer - assertTrue(reader.readBoolean()); - assertEquals(0x1, reader.readByte()); - assertEquals('a', reader.readChar()); - assertEquals(1, reader.readShort()); - assertEquals(1, reader.readInt()); - assertEquals(1L, reader.readLong()); - - assertEquals(17, reader.available()); - - //mark again - reader.mark(); - assertEquals(1.0f, reader.readFloat(), 0); - assertEquals(1.0d, reader.readDouble(), 0); - assertEquals("abc", reader.readUTF()); - reader.reset(); - - assertEquals(17, reader.available()); - - assertEquals(1.0f, reader.readFloat(), 0); - assertEquals(1.0d, reader.readDouble(), 0); - assertEquals("abc", reader.readUTF()); - } - assertFalse(file.exists()); - } - } - - - @Test - public void testCircularSpillFile() throws Exception - { - byte[] testData; - - ByteArrayOutputStream baos = new ByteArrayOutputStream(); - try (DataOutputStream out = new DataOutputStream(baos)) - { - // boolean - out.writeBoolean(true); - // byte - out.writeByte(0x1); - // char - out.writeChar('a'); - // short - out.writeShort(1); - // int - out.writeInt(1); - - // String - out.writeUTF("ab"); - testData = baos.toByteArray(); - - // 1 (boolean) + 1 (byte) + 2 (char) + 2 (short) + 4 (int) + 4 bytes (utf string) - } - - //read at most 4 bytes multiple times (and then check file size) - int MEM_SIZE = 0; - int DISK_SIZE = 4; - try (RewindableDataInputStreamPlus reader = new RewindableDataInputStreamPlus(new ByteArrayInputStream(testData), - INITIAL_BUFFER_SIZE, MEM_SIZE, file, - DISK_SIZE)) - { - //read 2 bytes and reset - reader.mark(); - assertTrue(reader.readBoolean()); - assertEquals(0x1, reader.readByte()); - assertEquals(2, reader.bytesPastMark(null)); - reader.reset(); - - //read again previous sequence - 
assertTrue(reader.readBoolean()); - assertEquals(0x1, reader.readByte()); - //finish reading again previous sequence - - //read 4 bytes and reset - reader.mark(); - assertEquals('a', reader.readChar()); - assertEquals(1, reader.readShort()); - assertEquals(4, reader.bytesPastMark(null)); - reader.reset(); - - //read again previous sequence - assertEquals('a', reader.readChar()); - assertEquals(1, reader.readShort()); - //finish reading again previous sequence - - //read 4 bytes and reset - reader.mark(); - assertEquals(1, reader.readInt()); - assertEquals(4, reader.bytesPastMark(null)); - reader.reset(); - - //read again previous sequence - assertEquals(1, reader.readInt()); - - //check max file size - assertEquals(DISK_SIZE, file.length()); - } - assertFalse(file.exists()); - } - - @Test - public void testExhaustCapacity() throws Exception - { - byte[] testData; - - ByteArrayOutputStream baos = new ByteArrayOutputStream(); - try (DataOutputStream out = new DataOutputStream(baos)) - { - // boolean - out.writeBoolean(true); - // byte - out.writeByte(0x1); - // char - out.writeChar('a'); - // short - out.writeShort(1); - testData = baos.toByteArray(); - } - - //test capacity exhausted when reading more than 4 bytes - testCapacityExhausted(testData, 0, 2); - testCapacityExhausted(testData, 2, 0); - testCapacityExhausted(testData, 1, 1); - } - - private void testCapacityExhausted(byte[] testData, int memSize, int diskSize) throws IOException - { - try (RewindableDataInputStreamPlus reader = new RewindableDataInputStreamPlus(new ByteArrayInputStream(testData), - INITIAL_BUFFER_SIZE, memSize, file, - diskSize)) - { - //read 2 bytes and reset - reader.mark(); - assertTrue(reader.readBoolean()); - assertEquals(0x1, reader.readByte()); - assertEquals(2, reader.bytesPastMark(null)); - reader.reset(); - - //read again previous sequence - assertTrue(reader.readBoolean()); - assertEquals(0x1, reader.readByte()); - //finish reading again previous sequence - - reader.mark(); - //read 3 bytes - START - assertEquals('a', reader.readChar()); - //read 1 more bytes - CAPACITY will exhaust when trying to reset :( - assertEquals(1, reader.readShort()); - - try - { - reader.reset(); - fail("Should have thrown IOException"); - } - catch (IOException e) {} - - //check max file size - assertEquals(diskSize, file.length()); - } - assertFalse(file.exists()); - } - - @Test - public void testMarkAndResetUnsignedRead() throws Exception - { - byte[] testData; - - ByteArrayOutputStream baos = new ByteArrayOutputStream(); - try (DataOutputStream out = new DataOutputStream(baos)) - { - // byte - out.writeByte(0x1); - // short - out.writeShort(2); - testData = baos.toByteArray(); - } - - for (int memCapacity = 0; memCapacity <= 1; memCapacity++) - { - int diskCapacity = 1 - memCapacity; - try (RewindableDataInputStreamPlus reader = new RewindableDataInputStreamPlus(new ByteArrayInputStream(testData), - INITIAL_BUFFER_SIZE, memCapacity, file, - diskCapacity)) - { - reader.mark(); - assertEquals(1, reader.readUnsignedByte()); - reader.reset(); - assertEquals(1, reader.readUnsignedByte()); - - //will read first byte of short 2 - reader.mark(); - assertEquals(0, reader.readUnsignedByte()); - reader.reset(); - - assertEquals(2, reader.readUnsignedShort()); - - reader.mark(); - reader.reset(); - assertEquals(0, reader.available()); - } - } - assertFalse(file.exists()); - } - - @Test - public void testMarkAndResetSkipBytes() throws Exception - { - String testStr = "1234567890"; - byte[] testData = testStr.getBytes(); - - for (int 
memCapacity = 0; memCapacity <= 7; memCapacity++) - { - int diskCapacity = 7 - memCapacity; - try (RewindableDataInputStreamPlus reader = new RewindableDataInputStreamPlus(new ByteArrayInputStream(testData), - INITIAL_BUFFER_SIZE, memCapacity, file, - diskCapacity)) - { - reader.mark(); - // read first 5 bytes and rewind - byte[] out = new byte[5]; - reader.readFully(out, 0, 5); - assertEquals("12345", new String(out)); - - // then skip 2 bytes (67) - reader.skipBytes(2); - - assertEquals(7, reader.bytesPastMark(null)); - reader.reset(); - - //now read part of the previously skipped bytes - out = new byte[5]; - reader.readFully(out); - assertEquals("12345", new String(out)); - - //skip 3 bytes (2 from cache, 1 from stream) - reader.skip(3); - - // mark and read 2 more bytes - reader.mark(); - out = new byte[2]; - reader.readFully(out); - assertEquals("90", new String(out)); - assertEquals(0, reader.available()); - reader.reset(); - - //reset and read only the next byte "9" in the third position - reader.readFully(out, 1, 1); - assertEquals("99", new String(out)); - - //now we read the remainder via readline - assertEquals(1, reader.available()); - assertEquals("0", reader.readLine()); - - } - assertFalse(file.exists()); - } - } - - @Test - public void testMarkAndResetReadFully() throws Exception - { - String testStr = "1234567890"; - byte[] testData = testStr.getBytes(); - - for (int memCapacity = 0; memCapacity <= 5; memCapacity++) - { - int diskCapacity = 5 - memCapacity; - try (RewindableDataInputStreamPlus reader = new RewindableDataInputStreamPlus(new ByteArrayInputStream(testData), - INITIAL_BUFFER_SIZE, memCapacity, file, - diskCapacity)) - { - reader.mark(); - // read first 5 bytes and rewind - byte[] out = new byte[5]; - reader.readFully(out, 0, 5); - assertEquals("12345", new String(out)); - reader.reset(); - - // read half from cache, half from parent stream - out = new byte[7]; - reader.readFully(out); - assertEquals("1234567", new String(out)); - - // mark and read 3 more bytes - reader.mark(); - out = new byte[3]; - reader.readFully(out); - assertEquals("890", new String(out)); - assertEquals(0, reader.available()); - reader.reset(); - - //reset and read only the next byte "8" in the third position - reader.readFully(out, 2, 1); - assertEquals("898", new String(out)); - - //now we read the remainder via readline - assertEquals(2, reader.available()); - assertEquals("90", reader.readLine()); - } - assertFalse(file.exists()); - } - } -} diff --git a/test/unit/org/apache/cassandra/io/util/SequentialWriterTest.java b/test/unit/org/apache/cassandra/io/util/SequentialWriterTest.java index c1ffda257af5..80543b02298a 100644 --- a/test/unit/org/apache/cassandra/io/util/SequentialWriterTest.java +++ b/test/unit/org/apache/cassandra/io/util/SequentialWriterTest.java @@ -19,7 +19,6 @@ package org.apache.cassandra.io.util; import java.io.DataOutputStream; -import java.io.File; import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; @@ -55,7 +54,7 @@ public static void setupDD() public void cleanup() { for (TestableSW sw : writers) - sw.file.delete(); + sw.file.tryDelete(); writers.clear(); } @@ -100,14 +99,14 @@ protected TestableSW(File file, SequentialWriter sw) throws IOException protected void assertInProgress() throws Exception { Assert.assertTrue(file.exists()); - byte[] bytes = readFileToByteArray(file); + byte[] bytes = readFileToByteArray(file.toJavaIOFile()); Assert.assertTrue(Arrays.equals(partialContents, bytes)); } protected void assertPrepared() 
throws Exception { Assert.assertTrue(file.exists()); - byte[] bytes = readFileToByteArray(file); + byte[] bytes = readFileToByteArray(file.toJavaIOFile()); Assert.assertTrue(Arrays.equals(fullContents, bytes)); } @@ -125,7 +124,7 @@ protected void assertCommitted() throws Exception protected static File tempFile(String prefix) { File file = FileUtils.createTempFile(prefix, "test"); - file.delete(); + file.tryDelete(); return file; } } @@ -133,7 +132,7 @@ protected static File tempFile(String prefix) @Test public void resetAndTruncateTest() { - File tempFile = new File(Files.createTempDir(), "reset.txt"); + File tempFile = new File(Files.createTempDir().toPath(), "reset.txt"); final int bufferSize = 48; final int writeSize = 64; byte[] toWrite = new byte[writeSize]; @@ -177,7 +176,7 @@ public void resetAndTruncateTest() @Test public void outputStream() { - File tempFile = new File(Files.createTempDir(), "test.txt"); + File tempFile = new File(Files.createTempDir().toPath(), "test.txt"); Assert.assertFalse("temp file shouldn't exist yet", tempFile.exists()); SequentialWriterOption option = SequentialWriterOption.newBuilder().finishOnClose(true).build(); diff --git a/test/unit/org/apache/cassandra/locator/PendingRangesTest.java b/test/unit/org/apache/cassandra/locator/PendingRangesTest.java index 7959366cea32..992c2dd8e09a 100644 --- a/test/unit/org/apache/cassandra/locator/PendingRangesTest.java +++ b/test/unit/org/apache/cassandra/locator/PendingRangesTest.java @@ -542,4 +542,4 @@ private static AbstractReplicationStrategy simpleStrategy(TokenMetadata tokenMet DatabaseDescriptor.getEndpointSnitch(), Collections.singletonMap("replication_factor", Integer.toString(replicationFactor))); } -} \ No newline at end of file +} diff --git a/test/unit/org/apache/cassandra/metrics/HintedHandOffMetricsTest.java b/test/unit/org/apache/cassandra/metrics/HintedHandOffMetricsTest.java index 15feca407bd4..744c5dfc8a44 100644 --- a/test/unit/org/apache/cassandra/metrics/HintedHandOffMetricsTest.java +++ b/test/unit/org/apache/cassandra/metrics/HintedHandOffMetricsTest.java @@ -52,7 +52,7 @@ public static void initDD() @Test public void testHintsMetrics() throws Exception { - DatabaseDescriptor.getHintsDirectory().mkdirs(); + DatabaseDescriptor.getHintsDirectory().tryCreateDirectories(); for (int i = 0; i < 99; i++) HintsService.instance.metrics.incrPastWindow(InetAddressAndPort.getLocalHost()); diff --git a/test/unit/org/apache/cassandra/metrics/LatencyMetricsTest.java b/test/unit/org/apache/cassandra/metrics/LatencyMetricsTest.java index d61c5501c9c0..6206ba64f1f2 100644 --- a/test/unit/org/apache/cassandra/metrics/LatencyMetricsTest.java +++ b/test/unit/org/apache/cassandra/metrics/LatencyMetricsTest.java @@ -105,4 +105,4 @@ public void testRelease() parent.release(); } -} \ No newline at end of file +} diff --git a/test/unit/org/apache/cassandra/net/AsyncStreamingOutputPlusTest.java b/test/unit/org/apache/cassandra/net/AsyncStreamingOutputPlusTest.java index 305dc55388ce..dd9a98f6ca7b 100644 --- a/test/unit/org/apache/cassandra/net/AsyncStreamingOutputPlusTest.java +++ b/test/unit/org/apache/cassandra/net/AsyncStreamingOutputPlusTest.java @@ -18,7 +18,6 @@ package org.apache.cassandra.net; -import java.io.File; import java.io.IOException; import java.io.RandomAccessFile; import java.nio.ByteBuffer; @@ -26,6 +25,7 @@ import java.nio.file.Files; import java.util.Random; +import org.apache.cassandra.io.util.File; import org.junit.Test; import io.netty.buffer.ByteBuf; @@ -138,7 +138,7 @@ private void 
testWriteFileToChannel(boolean zeroCopy) throws IOException EmbeddedChannel channel = new TestChannel(4); StreamManager.StreamRateLimiter limiter = new StreamManager.StreamRateLimiter(FBUtilities.getBroadcastAddressAndPort()); - try (RandomAccessFile raf = new RandomAccessFile(file.getPath(), "r"); + try (RandomAccessFile raf = new RandomAccessFile(file.path(), "r"); FileChannel fileChannel = raf.getChannel(); AsyncStreamingOutputPlus out = new AsyncStreamingOutputPlus(channel)) { @@ -159,7 +159,7 @@ private void testWriteFileToChannel(boolean zeroCopy) throws IOException private File populateTempData(String name) throws IOException { - File file = Files.createTempFile(name, ".txt").toFile(); + File file = new File(Files.createTempFile(name, ".txt")); file.deleteOnExit(); Random r = new Random(); diff --git a/test/unit/org/apache/cassandra/net/ChunkedInputPlusTest.java b/test/unit/org/apache/cassandra/net/ChunkedInputPlusTest.java index f90fcd17ca6d..3209759a49b9 100644 --- a/test/unit/org/apache/cassandra/net/ChunkedInputPlusTest.java +++ b/test/unit/org/apache/cassandra/net/ChunkedInputPlusTest.java @@ -156,4 +156,4 @@ private ShareableBytes chunk(int size, int fill) Arrays.fill(buffer.array(), (byte) fill); return ShareableBytes.wrap(buffer); } -} \ No newline at end of file +} diff --git a/test/unit/org/apache/cassandra/net/ManyToOneConcurrentLinkedQueueTest.java b/test/unit/org/apache/cassandra/net/ManyToOneConcurrentLinkedQueueTest.java index 2c92a392e09f..4f60d01f240d 100644 --- a/test/unit/org/apache/cassandra/net/ManyToOneConcurrentLinkedQueueTest.java +++ b/test/unit/org/apache/cassandra/net/ManyToOneConcurrentLinkedQueueTest.java @@ -298,4 +298,4 @@ public void run() assertEquals(numItems, itemsPolled.cardinality()); assertTrue(queue.relaxedIsEmpty()); } -} \ No newline at end of file +} diff --git a/test/unit/org/apache/cassandra/net/MockMessagingService.java b/test/unit/org/apache/cassandra/net/MockMessagingService.java index 3749bafba70e..54f9071ed31b 100644 --- a/test/unit/org/apache/cassandra/net/MockMessagingService.java +++ b/test/unit/org/apache/cassandra/net/MockMessagingService.java @@ -151,4 +151,4 @@ public static Matcher any(Matcher... matchers) return false; }; } -} \ No newline at end of file +} diff --git a/test/unit/org/apache/cassandra/net/MockMessagingSpy.java b/test/unit/org/apache/cassandra/net/MockMessagingSpy.java index 7c4b850e524b..f10d1ee74da2 100644 --- a/test/unit/org/apache/cassandra/net/MockMessagingSpy.java +++ b/test/unit/org/apache/cassandra/net/MockMessagingSpy.java @@ -243,4 +243,4 @@ public void run() } } } -} \ No newline at end of file +} diff --git a/test/unit/org/apache/cassandra/net/PrunableArrayQueueTest.java b/test/unit/org/apache/cassandra/net/PrunableArrayQueueTest.java index 34f61a611498..50e09281236c 100644 --- a/test/unit/org/apache/cassandra/net/PrunableArrayQueueTest.java +++ b/test/unit/org/apache/cassandra/net/PrunableArrayQueueTest.java @@ -200,4 +200,4 @@ public void onKept(Integer value) assertEquals("Queue size should be zero after draining. Seed: " + seed + ". 
Iteration: " + i, 0, testQueue.size()); } } -} \ No newline at end of file +} diff --git a/test/unit/org/apache/cassandra/net/SocketUtils.java b/test/unit/org/apache/cassandra/net/SocketUtils.java index a0a149029b24..78a49bddbf6d 100644 --- a/test/unit/org/apache/cassandra/net/SocketUtils.java +++ b/test/unit/org/apache/cassandra/net/SocketUtils.java @@ -54,4 +54,4 @@ public static synchronized int findAvailablePort() throws RuntimeException } } } -} \ No newline at end of file +} diff --git a/test/unit/org/apache/cassandra/schema/MigrationManagerTest.java b/test/unit/org/apache/cassandra/schema/MigrationManagerTest.java index ff58151f998f..1bb7d4709766 100644 --- a/test/unit/org/apache/cassandra/schema/MigrationManagerTest.java +++ b/test/unit/org/apache/cassandra/schema/MigrationManagerTest.java @@ -18,7 +18,6 @@ package org.apache.cassandra.schema; -import java.io.File; import java.nio.ByteBuffer; import java.util.HashMap; import java.util.Map; @@ -26,6 +25,7 @@ import java.util.function.Supplier; import com.google.common.collect.ImmutableMap; +import org.apache.cassandra.io.util.File; import org.junit.BeforeClass; import org.junit.Rule; import org.junit.Test; @@ -230,7 +230,7 @@ public void dropCf() throws ConfigurationException Supplier lambda = () -> { for (File file : store.getDirectories().sstableLister(Directories.OnTxnErr.THROW).listFiles()) { - if (file.getPath().endsWith("Data.db") && !new File(file.getPath().replace("Data.db", "Compacted")).exists()) + if (file.path().endsWith("Data.db") && !new File(file.path().replace("Data.db", "Compacted")).exists()) return false; } return true; diff --git a/test/unit/org/apache/cassandra/schema/MockSchema.java b/test/unit/org/apache/cassandra/schema/MockSchema.java index 901acbfcf2f2..432e4071bcbf 100644 --- a/test/unit/org/apache/cassandra/schema/MockSchema.java +++ b/test/unit/org/apache/cassandra/schema/MockSchema.java @@ -18,7 +18,7 @@ */ package org.apache.cassandra.schema; -import java.io.File; +import org.apache.cassandra.io.util.File; import java.io.IOException; import java.io.RandomAccessFile; import java.util.*; @@ -117,13 +117,7 @@ public static SSTableReader sstable(int generation, int size, boolean keepRef, l for (Component component : components) { File file = new File(descriptor.filenameFor(component)); - try - { - file.createNewFile(); - } - catch (IOException e) - { - } + file.createFileIfNotExists(); } // .complete() with size to make sstable.onDiskLength work try (FileHandle.Builder builder = new FileHandle.Builder(new ChannelProxy(tempFile)).bufferSize(size); @@ -134,7 +128,7 @@ public static SSTableReader sstable(int generation, int size, boolean keepRef, l try { File file = new File(descriptor.filenameFor(Component.DATA)); - try (RandomAccessFile raf = new RandomAccessFile(file, "rw")) + try (RandomAccessFile raf = new RandomAccessFile(file.toJavaIOFile(), "rw")) { raf.setLength(size); } @@ -231,7 +225,7 @@ public static void cleanup() File dir = new File(dirName); if (!dir.exists()) continue; - String[] children = dir.list(); + String[] children = dir.tryListNames(); for (String child : children) FileUtils.deleteRecursive(new File(dir, child)); } diff --git a/test/unit/org/apache/cassandra/security/EncryptionUtilsTest.java b/test/unit/org/apache/cassandra/security/EncryptionUtilsTest.java index 0fd46b81787d..cb8b1381f759 100644 --- a/test/unit/org/apache/cassandra/security/EncryptionUtilsTest.java +++ b/test/unit/org/apache/cassandra/security/EncryptionUtilsTest.java @@ -17,7 +17,6 @@ */ package 
org.apache.cassandra.security; -import java.io.File; import java.io.IOException; import java.io.RandomAccessFile; import java.nio.ByteBuffer; @@ -29,6 +28,7 @@ import javax.crypto.IllegalBlockSizeException; import javax.crypto.ShortBufferException; +import org.apache.cassandra.io.util.File; import org.junit.Assert; import org.junit.Before; import org.junit.BeforeClass; @@ -82,7 +82,7 @@ public void encrypt() throws BadPaddingException, ShortBufferException, IllegalB File f = FileUtils.createTempFile("commitlog-enc-utils-", ".tmp"); f.deleteOnExit(); - FileChannel channel = new RandomAccessFile(f, "rw").getChannel(); + FileChannel channel = new RandomAccessFile(f.toJavaIOFile(), "rw").getChannel(); EncryptionUtils.encryptAndWrite(ByteBuffer.wrap(buf), channel, true, encryptor); channel.close(); @@ -111,7 +111,7 @@ public void fullRoundTrip() throws IOException, BadPaddingException, ShortBuffer Cipher encryptor = cipherFactory.getEncryptor(tdeOptions.cipher, tdeOptions.key_alias); File f = FileUtils.createTempFile("commitlog-enc-utils-", ".tmp"); f.deleteOnExit(); - FileChannel channel = new RandomAccessFile(f, "rw").getChannel(); + FileChannel channel = new RandomAccessFile(f.toJavaIOFile(), "rw").getChannel(); EncryptionUtils.encryptAndWrite(compressedBuffer, channel, true, encryptor); // decrypt diff --git a/test/unit/org/apache/cassandra/security/SSLFactoryTest.java b/test/unit/org/apache/cassandra/security/SSLFactoryTest.java index e14dfa8a812f..0cf97cc9abd4 100644 --- a/test/unit/org/apache/cassandra/security/SSLFactoryTest.java +++ b/test/unit/org/apache/cassandra/security/SSLFactoryTest.java @@ -18,7 +18,7 @@ */ package org.apache.cassandra.security; -import java.io.File; +import org.apache.cassandra.io.util.File; import java.io.IOException; import java.security.cert.CertificateException; import javax.net.ssl.TrustManagerFactory; @@ -163,7 +163,7 @@ public void testSslContextReload_HappyPath() throws IOException, InterruptedExce SSLFactory.checkCertFilesForHotReloading(options, options); - keystoreFile.setLastModified(System.currentTimeMillis() + 15000); + keystoreFile.trySetLastModified(System.currentTimeMillis() + 15000); SSLFactory.checkCertFilesForHotReloading(options, options); SslContext newCtx = SSLFactory.getOrCreateSslContext(options, true, SSLFactory.SocketType.CLIENT, OpenSsl @@ -204,7 +204,7 @@ public void testSslFactoryHotReload_BadPassword_DoesNotClearExistingSslContext() File keystoreFile = new File(options.keystore); SSLFactory.checkCertFilesForHotReloading(options, options); - keystoreFile.setLastModified(System.currentTimeMillis() + 5000); + keystoreFile.trySetLastModified(System.currentTimeMillis() + 5000); ServerEncryptionOptions modOptions = new ServerEncryptionOptions(options) .withKeyStorePassword("bad password"); @@ -228,8 +228,8 @@ public void testSslFactoryHotReload_CorruptOrNonExistentFile_DoesNotClearExistin ServerEncryptionOptions options = addKeystoreOptions(encryptionOptions); File testKeystoreFile = new File(options.keystore + ".test"); - FileUtils.copyFile(new File(options.keystore),testKeystoreFile); - options = options.withKeyStore(testKeystoreFile.getPath()); + FileUtils.copyFile(new File(options.keystore).toJavaIOFile(), testKeystoreFile.toJavaIOFile()); + options = options.withKeyStore(testKeystoreFile.path()); SSLFactory.initHotReloading(options, options, true); @@ -237,8 +237,8 @@ public void testSslFactoryHotReload_CorruptOrNonExistentFile_DoesNotClearExistin .isAvailable()); SSLFactory.checkCertFilesForHotReloading(options, options); - 
testKeystoreFile.setLastModified(System.currentTimeMillis() + 15000); - FileUtils.forceDelete(testKeystoreFile); + testKeystoreFile.trySetLastModified(System.currentTimeMillis() + 15000); + FileUtils.forceDelete(testKeystoreFile.toJavaIOFile()); SSLFactory.checkCertFilesForHotReloading(options, options); SslContext newCtx = SSLFactory.getOrCreateSslContext(options, true, SSLFactory.SocketType.CLIENT, OpenSsl @@ -253,7 +253,7 @@ public void testSslFactoryHotReload_CorruptOrNonExistentFile_DoesNotClearExistin finally { DatabaseDescriptor.loadConfig(); - FileUtils.deleteQuietly(new File(encryptionOptions.keystore + ".test")); + FileUtils.deleteQuietly(new File(encryptionOptions.keystore + ".test").toJavaIOFile()); } } diff --git a/test/unit/org/apache/cassandra/service/SSTablesGlobalTrackerTest.java b/test/unit/org/apache/cassandra/service/SSTablesGlobalTrackerTest.java index e4a5947b4db7..37c0fb54b64a 100644 --- a/test/unit/org/apache/cassandra/service/SSTablesGlobalTrackerTest.java +++ b/test/unit/org/apache/cassandra/service/SSTablesGlobalTrackerTest.java @@ -30,6 +30,7 @@ import org.apache.cassandra.io.sstable.Descriptor; import org.apache.cassandra.io.sstable.format.SSTableFormat; import org.apache.cassandra.io.sstable.format.VersionAndType; +import org.apache.cassandra.io.util.File; import org.assertj.core.util.Files; import org.quicktheories.core.Gen; import org.quicktheories.generators.Generate; @@ -99,7 +100,7 @@ private Gen descriptors() tables(), generations(), sstableVersionString(), - (f, k, t, g, v) -> new Descriptor(v, Files.currentFolder(), k, t, g, f)); + (f, k, t, g, v) -> new Descriptor(v, new File(Files.currentFolder()), k, t, g, f)); } private Gen> descriptorLists(int minSize) diff --git a/test/unit/org/apache/cassandra/service/SerializationsTest.java b/test/unit/org/apache/cassandra/service/SerializationsTest.java index 07a9baba99c2..ce813e840fd2 100644 --- a/test/unit/org/apache/cassandra/service/SerializationsTest.java +++ b/test/unit/org/apache/cassandra/service/SerializationsTest.java @@ -26,6 +26,7 @@ import java.util.UUID; import com.google.common.collect.Lists; +import org.apache.cassandra.io.util.FileInputStreamPlus; import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.Test; @@ -39,7 +40,6 @@ import org.apache.cassandra.dht.Range; import org.apache.cassandra.dht.Token; import org.apache.cassandra.io.IVersionedSerializer; -import org.apache.cassandra.io.util.DataInputPlus.DataInputStreamPlus; import org.apache.cassandra.io.util.DataOutputStreamPlus; import org.apache.cassandra.locator.InetAddressAndPort; import org.apache.cassandra.repair.SyncNodePair; @@ -103,7 +103,7 @@ public void testValidationRequestRead() throws IOException if (EXECUTE_WRITES) testValidationRequestWrite(); - try (DataInputStreamPlus in = getInput("service.ValidationRequest.bin")) + try (FileInputStreamPlus in = getInput("service.ValidationRequest.bin")) { ValidationRequest message = ValidationRequest.serializer.deserialize(in, getVersion()); assert DESC.equals(message.desc); @@ -142,7 +142,7 @@ public void testValidationCompleteRead() throws IOException if (EXECUTE_WRITES) testValidationCompleteWrite(); - try (DataInputStreamPlus in = getInput("service.ValidationComplete.bin")) + try (FileInputStreamPlus in = getInput("service.ValidationComplete.bin")) { // empty validation ValidationResponse message = ValidationResponse.serializer.deserialize(in, getVersion()); @@ -187,7 +187,7 @@ public void testSyncRequestRead() throws IOException InetAddressAndPort src = 
InetAddressAndPort.getByNameOverrideDefaults("127.0.0.2", PORT); InetAddressAndPort dest = InetAddressAndPort.getByNameOverrideDefaults("127.0.0.3", PORT); - try (DataInputStreamPlus in = getInput("service.SyncRequest.bin")) + try (FileInputStreamPlus in = getInput("service.SyncRequest.bin")) { SyncRequest message = SyncRequest.serializer.deserialize(in, getVersion()); assert DESC.equals(message.desc); @@ -226,7 +226,7 @@ public void testSyncCompleteRead() throws IOException InetAddressAndPort dest = InetAddressAndPort.getByNameOverrideDefaults("127.0.0.3", PORT); SyncNodePair nodes = new SyncNodePair(src, dest); - try (DataInputStreamPlus in = getInput("service.SyncComplete.bin")) + try (FileInputStreamPlus in = getInput("service.SyncComplete.bin")) { // success SyncResponse message = SyncResponse.serializer.deserialize(in, getVersion()); diff --git a/test/unit/org/apache/cassandra/service/StartupChecksTest.java b/test/unit/org/apache/cassandra/service/StartupChecksTest.java index 67217b3e2b2c..56cd089e7b88 100644 --- a/test/unit/org/apache/cassandra/service/StartupChecksTest.java +++ b/test/unit/org/apache/cassandra/service/StartupChecksTest.java @@ -17,12 +17,12 @@ */ package org.apache.cassandra.service; -import java.io.File; import java.io.IOException; import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.Paths; +import org.apache.cassandra.io.util.File; import org.junit.*; import org.apache.cassandra.SchemaLoader; @@ -57,7 +57,7 @@ public void setup() throws IOException FileUtils.deleteRecursive(dataDir); File dataDir = new File(DatabaseDescriptor.getAllDataFileLocations()[0]); - sstableDir = Paths.get(dataDir.getAbsolutePath(), "Keyspace1", "Standard1"); + sstableDir = Paths.get(dataDir.absolutePath(), "Keyspace1", "Standard1"); Files.createDirectories(sstableDir); startupChecks = new StartupChecks(); @@ -66,7 +66,7 @@ public void setup() throws IOException @After public void tearDown() throws IOException { - FileUtils.deleteRecursive(sstableDir.toFile()); + FileUtils.deleteRecursive(new File(sstableDir)); } @Test @@ -79,13 +79,13 @@ public void failStartupIfInvalidSSTablesFound() throws Exception verifyFailure(startupChecks, "Detected unreadable sstables"); // we should ignore invalid sstables in a snapshots directory - FileUtils.deleteRecursive(sstableDir.toFile()); + FileUtils.deleteRecursive(new File(sstableDir)); Path snapshotDir = sstableDir.resolve("snapshots"); Files.createDirectories(snapshotDir); copyInvalidLegacySSTables(snapshotDir); startupChecks.verify(); // and in a backups directory - FileUtils.deleteRecursive(sstableDir.toFile()); + FileUtils.deleteRecursive(new File(sstableDir)); Path backupDir = sstableDir.resolve("backups"); Files.createDirectories(backupDir); copyInvalidLegacySSTables(backupDir); @@ -98,7 +98,7 @@ public void compatibilityCheckIgnoresNonDbFiles() throws Exception startupChecks = startupChecks.withTest(StartupChecks.checkSSTablesFormat); copyLegacyNonSSTableFiles(sstableDir); - assertFalse(sstableDir.toFile().listFiles().length == 0); + assertFalse(new File(sstableDir).tryList().length == 0); startupChecks.verify(); } @@ -124,11 +124,11 @@ private void copyLegacyNonSSTableFiles(Path targetDir) throws IOException private void copyInvalidLegacySSTables(Path targetDir) throws IOException { - File legacySSTableRoot = Paths.get(System.getProperty(INVALID_LEGACY_SSTABLE_ROOT_PROP), + File legacySSTableRoot = new File(Paths.get(System.getProperty(INVALID_LEGACY_SSTABLE_ROOT_PROP), "Keyspace1", - "Standard1").toFile(); - for (File f 
: legacySSTableRoot.listFiles()) - Files.copy(f.toPath(), targetDir.resolve(f.getName())); + "Standard1")); + for (File f : legacySSTableRoot.tryList()) + Files.copy(f.toPath(), targetDir.resolve(f.name())); } diff --git a/test/unit/org/apache/cassandra/service/StorageServiceServerTest.java b/test/unit/org/apache/cassandra/service/StorageServiceServerTest.java index 9abe6b5ae0b2..88a464850d8f 100644 --- a/test/unit/org/apache/cassandra/service/StorageServiceServerTest.java +++ b/test/unit/org/apache/cassandra/service/StorageServiceServerTest.java @@ -19,7 +19,7 @@ package org.apache.cassandra.service; -import java.io.File; +import org.apache.cassandra.io.util.File; import java.io.FileWriter; import java.io.IOException; import java.io.PrintWriter; @@ -116,7 +116,7 @@ private void checkTempFilePresence(File f, boolean exist) for (int i = 0; i < 5; i++) { File subdir = new File(f, Integer.toString(i)); - subdir.mkdir(); + subdir.tryCreateDirectory(); for (int j = 0; j < 5; j++) { File subF = new File(subdir, Integer.toString(j)); @@ -133,15 +133,15 @@ public void testSnapshotFailureHandler() throws IOException // Initial "run" of Cassandra, nothing in failed snapshot file WindowsFailedSnapshotTracker.deleteOldSnapshots(); - File f = new File(System.getenv("TEMP") + File.separator + Integer.toString(new Random().nextInt())); - f.mkdir(); + File f = new File(System.getenv("TEMP") + File.pathSeparator() + Integer.toString(new Random().nextInt())); + f.tryCreateDirectory(); f.deleteOnExit(); for (int i = 0; i < 5; i++) { File subdir = new File(f, Integer.toString(i)); - subdir.mkdir(); + subdir.tryCreateDirectory(); for (int j = 0; j < 5; j++) - new File(subdir, Integer.toString(j)).createNewFile(); + new File(subdir, Integer.toString(j)).createFileIfNotExists(); } checkTempFilePresence(f, true); @@ -166,9 +166,9 @@ public void testSnapshotFailureHandler() throws IOException tempPrinter.close(); File protectedDir = new File(".safeDir"); - protectedDir.mkdir(); + protectedDir.tryCreateDirectory(); File protectedFile = new File(protectedDir, ".safeFile"); - protectedFile.createNewFile(); + protectedFile.createFileIfNotExists(); WindowsFailedSnapshotTracker.handleFailedSnapshot(protectedDir); WindowsFailedSnapshotTracker.deleteOldSnapshots(); @@ -176,8 +176,8 @@ public void testSnapshotFailureHandler() throws IOException assertTrue(protectedDir.exists()); assertTrue(protectedFile.exists()); - protectedFile.delete(); - protectedDir.delete(); + protectedFile.tryDelete(); + protectedDir.tryDelete(); } @Test diff --git a/test/unit/org/apache/cassandra/service/reads/SpeculativeRetryParseTest.java b/test/unit/org/apache/cassandra/service/reads/SpeculativeRetryParseTest.java index 86b307e60fbb..dca62f65491b 100644 --- a/test/unit/org/apache/cassandra/service/reads/SpeculativeRetryParseTest.java +++ b/test/unit/org/apache/cassandra/service/reads/SpeculativeRetryParseTest.java @@ -150,4 +150,4 @@ public void testParameterParse() SpeculativeRetryPolicy.fromString(string); } } -} \ No newline at end of file +} diff --git a/test/unit/org/apache/cassandra/service/reads/repair/TestableReadRepair.java b/test/unit/org/apache/cassandra/service/reads/repair/TestableReadRepair.java index 5d7996d41a06..2529a71ad00d 100644 --- a/test/unit/org/apache/cassandra/service/reads/repair/TestableReadRepair.java +++ b/test/unit/org/apache/cassandra/service/reads/repair/TestableReadRepair.java @@ -126,4 +126,4 @@ public boolean dataWasConsumed() { return partitionListenerClosed && rowListenerClosed; } -} \ No newline at end of 
file +} diff --git a/test/unit/org/apache/cassandra/service/snapshot/SnapshotManagerTest.java b/test/unit/org/apache/cassandra/service/snapshot/SnapshotManagerTest.java index d38f58947ba0..20cc8e2bf061 100644 --- a/test/unit/org/apache/cassandra/service/snapshot/SnapshotManagerTest.java +++ b/test/unit/org/apache/cassandra/service/snapshot/SnapshotManagerTest.java @@ -18,7 +18,6 @@ package org.apache.cassandra.service.snapshot; -import java.io.File; import java.io.IOException; import java.time.Instant; import java.util.Arrays; @@ -33,9 +32,11 @@ import org.junit.rules.TemporaryFolder; import org.apache.cassandra.config.DatabaseDescriptor; +import org.apache.cassandra.io.util.File; import org.apache.cassandra.io.util.FileUtils; import org.apache.cassandra.service.DefaultFSErrorHandler; +import static org.apache.cassandra.service.snapshot.TableSnapshotTest.createFolders; import static org.assertj.core.api.Assertions.assertThat; public class SnapshotManagerTest @@ -52,20 +53,6 @@ public static void beforeClass() @ClassRule public static TemporaryFolder temporaryFolder = new TemporaryFolder(); - public Set createFolders() throws IOException { - File folder = temporaryFolder.newFolder(); - Set folders = new HashSet<>(); - for (String folderName : Arrays.asList("foo", "bar", "buzz")) { - File subfolder = new File(folder, folderName); - subfolder.mkdir(); - assertThat(subfolder).exists(); - folders.add(subfolder); - }; - - return folders; - } - - private TableSnapshot generateSnapshotDetails(String tag, Instant expiration) throws Exception { return new TableSnapshot( "ks", @@ -73,7 +60,7 @@ private TableSnapshot generateSnapshotDetails(String tag, Instant expiration) th tag, Instant.EPOCH, expiration, - createFolders(), + createFolders(temporaryFolder), (file) -> 0L ); } diff --git a/test/unit/org/apache/cassandra/service/snapshot/SnapshotManifestTest.java b/test/unit/org/apache/cassandra/service/snapshot/SnapshotManifestTest.java index 2dc73bff6602..5afae1c8ace9 100644 --- a/test/unit/org/apache/cassandra/service/snapshot/SnapshotManifestTest.java +++ b/test/unit/org/apache/cassandra/service/snapshot/SnapshotManifestTest.java @@ -18,9 +18,9 @@ package org.apache.cassandra.service.snapshot; -import java.io.File; import java.io.FileOutputStream; import java.io.IOException; +import java.io.OutputStream; import java.time.Instant; import java.util.HashMap; import java.util.Map; @@ -30,11 +30,14 @@ import org.junit.Test; import org.junit.rules.TemporaryFolder; +import static org.apache.cassandra.utils.Clock.Global.currentTimeMillis; import static org.assertj.core.api.Assertions.assertThatIOException; import static org.assertj.core.api.Assertions.assertThat; import org.apache.cassandra.config.Duration; import com.fasterxml.jackson.databind.ObjectMapper; +import org.apache.cassandra.io.util.File; +import org.apache.cassandra.io.util.FileOutputStreamPlus; public class SnapshotManifestTest { @@ -43,13 +46,13 @@ public class SnapshotManifestTest @Test public void testDeserializeFromInvalidFile() throws IOException { - File manifestFile = tempFolder.newFile("invalid"); + File manifestFile = new File(tempFolder.newFile("invalid")); assertThatIOException().isThrownBy( () -> { SnapshotManifest.deserializeFromJsonFile(manifestFile); }); - FileOutputStream out = new FileOutputStream(manifestFile); + FileOutputStreamPlus out = new FileOutputStreamPlus(manifestFile); out.write(1); out.write(2); out.write(3); @@ -69,8 +72,8 @@ public void testDeserializeManifest() throws IOException map.put("files", 
Arrays.asList("db1", "db2", "db3")); ObjectMapper mapper = new ObjectMapper(); - File manifestFile = tempFolder.newFile("manifest.json"); - mapper.writeValue(manifestFile, map); + File manifestFile = new File(tempFolder.newFile("manifest.json")); + mapper.writeValue((OutputStream) new FileOutputStreamPlus(manifestFile), map); SnapshotManifest manifest = SnapshotManifest.deserializeFromJsonFile(manifestFile); assertThat(manifest.getExpiresAt()).isEqualTo(Instant.parse(expiresAt)); @@ -83,8 +86,8 @@ public void testOptionalFields() throws IOException { Map map = new HashMap<>(); map.put("files", Arrays.asList("db1", "db2", "db3")); ObjectMapper mapper = new ObjectMapper(); - File manifestFile = tempFolder.newFile("manifest.json"); - mapper.writeValue(manifestFile, map); + File manifestFile = new File(tempFolder.newFile("manifest.json")); + mapper.writeValue((OutputStream) new FileOutputStreamPlus(manifestFile), map); SnapshotManifest manifest = SnapshotManifest.deserializeFromJsonFile(manifestFile); assertThat(manifest.getExpiresAt()).isNull(); @@ -98,16 +101,17 @@ public void testIngoredFields() throws IOException { map.put("files", Arrays.asList("db1", "db2", "db3")); map.put("dummy", "dummy"); ObjectMapper mapper = new ObjectMapper(); - File manifestFile = tempFolder.newFile("manifest.json"); - mapper.writeValue(manifestFile, map); + File manifestFile = new File(tempFolder.newFile("manifest.json")); + mapper.writeValue((OutputStream) new FileOutputStreamPlus(manifestFile), map); SnapshotManifest manifest = SnapshotManifest.deserializeFromJsonFile(manifestFile); assertThat(manifest.getFiles()).contains("db1").contains("db2").contains("db3").hasSize(3); } @Test public void testSerializeAndDeserialize() throws Exception { - SnapshotManifest manifest = new SnapshotManifest(Arrays.asList("db1", "db2", "db3"), new Duration("2m"), Instant.now()); - File manifestFile = tempFolder.newFile("manifest.json"); + SnapshotManifest manifest = new SnapshotManifest(Arrays.asList("db1", "db2", "db3"), new Duration("2m"), Instant.ofEpochMilli(currentTimeMillis())); + File manifestFile = new File(tempFolder.newFile("manifest.json")); + manifest.serializeToJsonFile(manifestFile); manifest = SnapshotManifest.deserializeFromJsonFile(manifestFile); assertThat(manifest.getExpiresAt()).isNotNull(); diff --git a/test/unit/org/apache/cassandra/service/snapshot/TableSnapshotTest.java b/test/unit/org/apache/cassandra/service/snapshot/TableSnapshotTest.java index 460bc0bb2032..c49b10e21975 100644 --- a/test/unit/org/apache/cassandra/service/snapshot/TableSnapshotTest.java +++ b/test/unit/org/apache/cassandra/service/snapshot/TableSnapshotTest.java @@ -18,7 +18,6 @@ package org.apache.cassandra.service.snapshot; -import java.io.File; import java.io.FileOutputStream; import java.io.IOException; import java.time.Instant; @@ -32,6 +31,8 @@ import org.junit.rules.TemporaryFolder; import org.apache.cassandra.config.DatabaseDescriptor; +import org.apache.cassandra.io.util.File; +import org.apache.cassandra.io.util.FileOutputStreamPlus; import org.apache.cassandra.io.util.FileUtils; import static org.assertj.core.api.Assertions.assertThat; @@ -47,25 +48,22 @@ public void setup() @ClassRule public static TemporaryFolder tempFolder = new TemporaryFolder(); - public Set createFolders() throws IOException - { - File folder = tempFolder.newFolder(); + public static Set createFolders(TemporaryFolder temp) throws IOException { + File folder = new File(temp.newFolder()); Set folders = new HashSet<>(); - for (String folderName : 
Arrays.asList("foo", "bar", "buzz")) - { + for (String folderName : Arrays.asList("foo", "bar", "buzz")) { File subfolder = new File(folder, folderName); - subfolder.mkdir(); - assertThat(subfolder).exists(); + subfolder.tryCreateDirectories(); + assertThat(subfolder.exists()); folders.add(subfolder); - } - + }; return folders; } @Test public void testSnapshotExists() throws IOException { - Set folders = createFolders(); + Set folders = createFolders(tempFolder); TableSnapshot snapshot = new TableSnapshot( "ks", @@ -87,7 +85,7 @@ public void testSnapshotExists() throws IOException @Test public void testSnapshotExpiring() throws IOException { - Set folders = createFolders(); + Set folders = createFolders(tempFolder); TableSnapshot snapshot = new TableSnapshot( "ks", @@ -144,7 +142,7 @@ public void testSnapshotExpiring() throws IOException private Long writeBatchToFile(File file) throws IOException { - FileOutputStream out = new FileOutputStream(file); + FileOutputStreamPlus out = new FileOutputStreamPlus(file); out.write(1); out.write(2); out.write(3); @@ -155,7 +153,7 @@ private Long writeBatchToFile(File file) throws IOException @Test public void testComputeSizeOnDisk() throws IOException { - Set folders = createFolders(); + Set folders = createFolders(tempFolder); TableSnapshot tableDetails = new TableSnapshot( "ks", @@ -184,7 +182,7 @@ public void testComputeSizeOnDisk() throws IOException @Test public void testComputeTrueSize() throws IOException { - Set folders = createFolders(); + Set folders = createFolders(tempFolder); TableSnapshot tableDetails = new TableSnapshot( "ks", @@ -211,7 +209,7 @@ public void testComputeTrueSize() throws IOException @Test public void testGetCreatedAt() throws IOException { - Set folders = createFolders(); + Set folders = createFolders(tempFolder); // When createdAt is not null, getCreatedAt() should return it Instant createdAt = Instant.EPOCH; diff --git a/test/unit/org/apache/cassandra/streaming/compression/CompressedInputStreamTest.java b/test/unit/org/apache/cassandra/streaming/compression/CompressedInputStreamTest.java index 7c3ce20d8aeb..a7a55cf6022b 100644 --- a/test/unit/org/apache/cassandra/streaming/compression/CompressedInputStreamTest.java +++ b/test/unit/org/apache/cassandra/streaming/compression/CompressedInputStreamTest.java @@ -20,6 +20,7 @@ import java.io.*; import java.util.*; +import org.apache.cassandra.io.util.File; import org.junit.BeforeClass; import org.junit.Rule; import org.junit.Test; @@ -113,7 +114,7 @@ private void testCompressedReadWith(long[] valuesToCheck, boolean testTruncate, assert valuesToCheck != null && valuesToCheck.length > 0; // write compressed data file of longs - File parentDir = tempFolder.newFolder(); + File parentDir = new File(tempFolder.newFolder()); Descriptor desc = new Descriptor(parentDir, "ks", "cf", 1); File tmp = new File(desc.filenameFor(Component.DATA)); MetadataCollector collector = new MetadataCollector(new ClusteringComparator(BytesType.instance)); @@ -133,7 +134,7 @@ private void testCompressedReadWith(long[] valuesToCheck, boolean testTruncate, writer.finish(); } - CompressionMetadata comp = CompressionMetadata.create(tmp.getAbsolutePath()); + CompressionMetadata comp = CompressionMetadata.create(tmp.absolutePath()); List sections = new ArrayList<>(); for (long l : valuesToCheck) { @@ -153,7 +154,7 @@ private void testCompressedReadWith(long[] valuesToCheck, boolean testTruncate, size += (c.length + 4); // 4bytes CRC byte[] toRead = new byte[size]; - try (RandomAccessFile f = new 
RandomAccessFile(tmp, "r")) + try (RandomAccessFile f = new RandomAccessFile(tmp.toJavaIOFile(), "r")) { int pos = 0; for (CompressionMetadata.Chunk c : chunks) diff --git a/test/unit/org/apache/cassandra/tools/AuditLogViewerTest.java b/test/unit/org/apache/cassandra/tools/AuditLogViewerTest.java index 5c0cb9daefd1..514857efc2f6 100644 --- a/test/unit/org/apache/cassandra/tools/AuditLogViewerTest.java +++ b/test/unit/org/apache/cassandra/tools/AuditLogViewerTest.java @@ -28,6 +28,7 @@ import com.google.common.collect.ImmutableList; import com.google.common.collect.Lists; +import org.apache.cassandra.io.util.File; import org.apache.commons.io.FileUtils; import org.junit.After; @@ -65,10 +66,10 @@ public void setUp() throws IOException @After public void tearDown() throws IOException { - if (path.toFile().exists() && path.toFile().isDirectory()) + if (new File(path).exists() && new File(path).isDirectory()) { //Deletes directory and all of it's contents - FileUtils.deleteDirectory(path.toFile()); + FileUtils.deleteDirectory(new File(path).toJavaIOFile()); } } diff --git a/test/unit/org/apache/cassandra/tools/CompactionStressTest.java b/test/unit/org/apache/cassandra/tools/CompactionStressTest.java index 09b82fede53d..d65f6a2ae030 100644 --- a/test/unit/org/apache/cassandra/tools/CompactionStressTest.java +++ b/test/unit/org/apache/cassandra/tools/CompactionStressTest.java @@ -18,8 +18,8 @@ package org.apache.cassandra.tools; -import java.io.File; +import org.apache.cassandra.io.util.File; import org.junit.Test; import org.junit.runner.RunWith; @@ -41,7 +41,7 @@ public void testWriteAndCompact() { ClassLoader classLoader = getClass().getClassLoader(); File file = new File(classLoader.getResource("blogpost.yaml").getFile()); - String profileFile = file.getAbsolutePath(); + String profileFile = file.absolutePath(); ToolResult tool = ToolRunner.invokeClass("org.apache.cassandra.stress.CompactionStress", "write", diff --git a/test/unit/org/apache/cassandra/tools/JMXToolTest.java b/test/unit/org/apache/cassandra/tools/JMXToolTest.java index e6f46155ed2a..dbd18f3b9d6e 100644 --- a/test/unit/org/apache/cassandra/tools/JMXToolTest.java +++ b/test/unit/org/apache/cassandra/tools/JMXToolTest.java @@ -187,4 +187,4 @@ private static Gen> gen() { return SourceDSL.maps().of(Generators.IDENTIFIER_GEN, infoGen).ofSizeBetween(0, 10); } -} \ No newline at end of file +} diff --git a/test/unit/org/apache/cassandra/tools/LoaderOptionsTest.java b/test/unit/org/apache/cassandra/tools/LoaderOptionsTest.java index fa8c2e81cb8b..0d2d67a93fda 100644 --- a/test/unit/org/apache/cassandra/tools/LoaderOptionsTest.java +++ b/test/unit/org/apache/cassandra/tools/LoaderOptionsTest.java @@ -17,8 +17,8 @@ */ package org.apache.cassandra.tools; -import java.io.File; import java.nio.file.Paths; +import org.apache.cassandra.io.util.File; import org.junit.Test; import static org.apache.cassandra.tools.OfflineToolUtils.sstableDirName; @@ -30,15 +30,15 @@ public class LoaderOptionsTest @Test public void testNativePort() throws Exception { //Default Cassandra config - File config = Paths.get(".", "test", "conf", "cassandra.yaml").normalize().toFile(); - String[] args = {"-d", "127.9.9.1", "-f", config.getAbsolutePath(), sstableDirName("legacy_sstables", "legacy_ma_simple")}; + File config = new File(Paths.get(".", "test", "conf", "cassandra.yaml").normalize()); + String[] args = { "-d", "127.9.9.1", "-f", config.absolutePath(), sstableDirName("legacy_sstables", "legacy_ma_simple")}; LoaderOptions options = 
LoaderOptions.builder().parseArgs(args).build(); assertEquals(9042, options.nativePort); // SSL Enabled Cassandra config - config = Paths.get(".", "test", "conf", "unit-test-conf/test-native-port.yaml").normalize().toFile(); - String[] args2 = {"-d", "127.9.9.1", "-f", config.getAbsolutePath(), sstableDirName("legacy_sstables", "legacy_ma_simple")}; + config = new File(Paths.get(".", "test", "conf", "unit-test-conf/test-native-port.yaml").normalize()); + String[] args2 = { "-d", "127.9.9.1", "-f", config.absolutePath(), sstableDirName("legacy_sstables", "legacy_ma_simple")}; options = LoaderOptions.builder().parseArgs(args2).build(); assertEquals(9142, options.nativePort); } diff --git a/test/unit/org/apache/cassandra/tools/OfflineToolUtils.java b/test/unit/org/apache/cassandra/tools/OfflineToolUtils.java index ae7ef2d94d7d..6e4df83c0f3f 100644 --- a/test/unit/org/apache/cassandra/tools/OfflineToolUtils.java +++ b/test/unit/org/apache/cassandra/tools/OfflineToolUtils.java @@ -18,7 +18,6 @@ package org.apache.cassandra.tools; -import java.io.File; import java.io.IOException; import java.lang.management.ManagementFactory; import java.lang.management.ThreadInfo; @@ -32,6 +31,7 @@ import java.util.regex.Pattern; import java.util.stream.Collectors; +import org.apache.cassandra.io.util.File; import org.apache.commons.io.FileUtils; import org.junit.BeforeClass; @@ -192,20 +192,20 @@ public static void setupTester() public static String findOneSSTable(String ks, String cf) throws IOException { File cfDir = sstableDir(ks, cf); - File[] sstableFiles = cfDir.listFiles((file) -> file.isFile() && file.getName().endsWith("-Data.db")); - return sstableFiles[0].getAbsolutePath(); + File[] sstableFiles = cfDir.tryList((file) -> file.isFile() && file.name().endsWith("-Data.db")); + return sstableFiles[0].absolutePath(); } public static String sstableDirName(String ks, String cf) throws IOException { - return sstableDir(ks, cf).getAbsolutePath(); + return sstableDir(ks, cf).absolutePath(); } public static File sstableDir(String ks, String cf) throws IOException { File dataDir = copySSTables(); File ksDir = new File(dataDir, ks); - File[] cfDirs = ksDir.listFiles((dir, name) -> cf.equals(name) || name.startsWith(cf + '-')); + File[] cfDirs = ksDir.tryList((dir, name) -> cf.equals(name) || name.startsWith(cf + '-')); return cfDirs[0]; } @@ -213,7 +213,7 @@ public static File copySSTables() throws IOException { File dataDir = new File("build/test/cassandra/data"); File srcDir = new File("test/data/legacy-sstables/ma"); - FileUtils.copyDirectory(new File(srcDir, "legacy_tables"), new File(dataDir, "legacy_sstables")); + FileUtils.copyDirectory(new File(srcDir, "legacy_tables").toJavaIOFile(), new File(dataDir, "legacy_sstables").toJavaIOFile()); return dataDir; } diff --git a/test/unit/org/apache/cassandra/tools/SSTableRepairedAtSetterTest.java b/test/unit/org/apache/cassandra/tools/SSTableRepairedAtSetterTest.java index e6f7e8035c3c..c0974da4916b 100644 --- a/test/unit/org/apache/cassandra/tools/SSTableRepairedAtSetterTest.java +++ b/test/unit/org/apache/cassandra/tools/SSTableRepairedAtSetterTest.java @@ -18,10 +18,10 @@ package org.apache.cassandra.tools; -import java.io.File; import java.io.IOException; import java.nio.file.Files; +import org.apache.cassandra.io.util.File; import org.junit.Test; import org.junit.runner.RunWith; @@ -113,7 +113,7 @@ public void testFilesArg() throws Exception tmpFile.deleteOnExit(); Files.write(tmpFile.toPath(), findOneSSTable("legacy_sstables", 
"legacy_ma_simple").getBytes()); - String file = tmpFile.getAbsolutePath(); + String file = tmpFile.absolutePath(); ToolResult tool = ToolRunner.invokeClass(SSTableRepairedAtSetter.class, "--really-set", "--is-repaired", "-f", file); tool.assertOnCleanExit(); assertNoUnexpectedThreadsStarted(OPTIONAL_THREADS_WITH_SCHEMA); diff --git a/test/unit/org/apache/cassandra/tools/StandaloneSplitterWithCQLTesterTest.java b/test/unit/org/apache/cassandra/tools/StandaloneSplitterWithCQLTesterTest.java index 9785d840f807..ece4a02ffe9c 100644 --- a/test/unit/org/apache/cassandra/tools/StandaloneSplitterWithCQLTesterTest.java +++ b/test/unit/org/apache/cassandra/tools/StandaloneSplitterWithCQLTesterTest.java @@ -18,7 +18,6 @@ package org.apache.cassandra.tools; -import java.io.File; import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; @@ -28,6 +27,7 @@ import com.google.common.io.Files; +import org.apache.cassandra.io.util.File; import org.junit.Test; import org.junit.runner.RunWith; @@ -79,10 +79,10 @@ public void testSplittingSSTable() throws Throwable restoreOrigSstables(); ToolResult tool = ToolRunner.invokeClass(StandaloneSplitter.class, "-s", "1", sstableFileName); - List splitFiles = Arrays.asList(sstablesDir.listFiles()); + List splitFiles = Arrays.asList(sstablesDir.tryList()); splitFiles.stream().forEach(f -> { - if (f.getName().endsWith("Data.db") && !origSstables.contains(f)) - assertTrue(f.getName() + " is way bigger than 1MB: [" + f.length() + "] bytes", + if (f.name().endsWith("Data.db") && !origSstables.contains(f)) + assertTrue(f.name() + " is way bigger than 1MB: [" + f.length() + "] bytes", f.length() <= 1024 * 1024 * 1.2); //give a 20% margin on size check }); assertTrue(origSstables.size() < splitFiles.size()); @@ -97,16 +97,16 @@ public void testSplittingMultipleSSTables() throws Throwable restoreOrigSstables(); ArrayList args = new ArrayList<>(Arrays.asList("-s", "1")); - args.addAll(Arrays.asList(sstablesDir.listFiles()) + args.addAll(Arrays.asList(sstablesDir.tryList()) .stream() - .map(f -> f.getAbsolutePath()) + .map(f -> f.absolutePath()) .collect(Collectors.toList())); ToolResult tool = ToolRunner.invokeClass(StandaloneSplitter.class, args.toArray(new String[args.size()])); - List splitFiles = Arrays.asList(sstablesDir.listFiles()); + List splitFiles = Arrays.asList(sstablesDir.tryList()); splitFiles.stream().forEach(f -> { - if (f.getName().endsWith("Data.db") && !origSstables.contains(f)) - assertTrue(f.getName() + " is way bigger than 1MB: [" + f.length() + "] bytes", + if (f.name().endsWith("Data.db") && !origSstables.contains(f)) + assertTrue(f.name() + " is way bigger than 1MB: [" + f.length() + "] bytes", f.length() <= 1024 * 1024 * 1.2); //give a 20% margin on size check }); assertTrue(origSstables.size() < splitFiles.size()); @@ -119,7 +119,7 @@ public void testNoSnapshotOption() throws Throwable { restoreOrigSstables(); ToolResult tool = ToolRunner.invokeClass(StandaloneSplitter.class, "-s", "1", "--no-snapshot", sstableFileName); - assertTrue(origSstables.size() < Arrays.asList(sstablesDir.listFiles()).size()); + assertTrue(origSstables.size() < Arrays.asList(sstablesDir.tryList()).size()); assertTrue(tool.getStdout(), tool.getStdout().isEmpty()); assertTrue(tool.getCleanedStderr(), tool.getCleanedStderr().isEmpty()); assertEquals(0, tool.getExitCode()); @@ -144,17 +144,17 @@ private void setupTestSstables() throws Throwable Set sstables = cfs.getLiveSSTables(); sstableFileName = sstables.iterator().next().getFilename(); 
assertTrue("Generated sstable must be at least 1MB", (new File(sstableFileName)).length() > 1024*1024); - sstablesDir = new File(sstableFileName).getParentFile(); - sstablesBackupDir = new File(sstablesDir.getAbsolutePath() + "/testbackup"); - sstablesBackupDir.mkdir(); - origSstables = Arrays.asList(sstablesDir.listFiles()); + sstablesDir = new File(sstableFileName).parent(); + sstablesBackupDir = new File(sstablesDir.absolutePath() + "/testbackup"); + sstablesBackupDir.tryCreateDirectory(); + origSstables = Arrays.asList(sstablesDir.tryList()); // Back up orig sstables origSstables.stream().forEach(f -> { if (f.isFile()) try { - Files.copy(f, new File(sstablesBackupDir.getAbsolutePath() + "/" + f.getName())); + Files.copy(f.toJavaIOFile(), new File(sstablesBackupDir.absolutePath() + "/" + f.name()).toJavaIOFile()); } catch(IOException e) { @@ -167,15 +167,15 @@ private void setupTestSstables() throws Throwable private void restoreOrigSstables() { - Arrays.asList(sstablesDir.listFiles()).stream().forEach(f -> { + Arrays.asList(sstablesDir.tryList()).stream().forEach(f -> { if (f.isFile()) - f.delete(); + f.tryDelete(); }); - Arrays.asList(sstablesBackupDir.listFiles()).stream().forEach(f -> { + Arrays.asList(sstablesBackupDir.tryList()).stream().forEach(f -> { if (f.isFile()) try { - Files.copy(f, new File(sstablesDir.getAbsolutePath() + "/" + f.getName())); + Files.copy(f.toJavaIOFile(), new File(sstablesDir.absolutePath() + "/" + f.name()).toJavaIOFile()); } catch(IOException e) { diff --git a/test/unit/org/apache/cassandra/tools/StandaloneUpgraderOnSStablesTest.java b/test/unit/org/apache/cassandra/tools/StandaloneUpgraderOnSStablesTest.java index 570610b964d5..e33df265c232 100644 --- a/test/unit/org/apache/cassandra/tools/StandaloneUpgraderOnSStablesTest.java +++ b/test/unit/org/apache/cassandra/tools/StandaloneUpgraderOnSStablesTest.java @@ -18,13 +18,13 @@ package org.apache.cassandra.tools; -import java.io.File; import java.util.Arrays; import java.util.Collections; import java.util.List; import java.util.Set; import java.util.stream.Collectors; +import org.apache.cassandra.io.util.File; import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.Test; @@ -147,8 +147,8 @@ private List getSStableFiles(String ks, String table) throws StartupExce return Lists.emptyList(); String sstableFileName = sstables.iterator().next().getFilename(); - File sstablesDir = new File(sstableFileName).getParentFile(); - return Arrays.asList(sstablesDir.listFiles()) + File sstablesDir = new File(sstableFileName).parent(); + return Arrays.asList(sstablesDir.tryList()) .stream() .filter(f -> f.isFile()) .map(file -> file.toString()) diff --git a/test/unit/org/apache/cassandra/tools/nodetool/ClearSnapshotTest.java b/test/unit/org/apache/cassandra/tools/nodetool/ClearSnapshotTest.java index 07a89d7e9f0b..06307aa9f5a4 100644 --- a/test/unit/org/apache/cassandra/tools/nodetool/ClearSnapshotTest.java +++ b/test/unit/org/apache/cassandra/tools/nodetool/ClearSnapshotTest.java @@ -111,4 +111,4 @@ public void testClearSnapshot_RemoveMultiple() assertThat(snapshots_after).isEmpty(); } -} \ No newline at end of file +} diff --git a/test/unit/org/apache/cassandra/tools/nodetool/InvalidateCredentialsCacheTest.java b/test/unit/org/apache/cassandra/tools/nodetool/InvalidateCredentialsCacheTest.java index 7ed553064018..be19e5a9c964 100644 --- a/test/unit/org/apache/cassandra/tools/nodetool/InvalidateCredentialsCacheTest.java +++ 
b/test/unit/org/apache/cassandra/tools/nodetool/InvalidateCredentialsCacheTest.java @@ -55,8 +55,8 @@ public static void setup() throws Exception new AuthTestUtils.LocalCassandraAuthorizer(), new AuthTestUtils.LocalCassandraNetworkAuthorizer()); - roleManager.createRole(AuthenticatedUser.SYSTEM_USER, ROLE_A, AuthTestUtils.getLoginRoleOprions()); - roleManager.createRole(AuthenticatedUser.SYSTEM_USER, ROLE_B, AuthTestUtils.getLoginRoleOprions()); + roleManager.createRole(AuthenticatedUser.SYSTEM_USER, ROLE_A, AuthTestUtils.getLoginRoleOptions()); + roleManager.createRole(AuthenticatedUser.SYSTEM_USER, ROLE_B, AuthTestUtils.getLoginRoleOptions()); roleANegotiator = authenticator.newSaslNegotiator(null); roleANegotiator.evaluateResponse(new PlainTextAuthProvider(ROLE_A.getRoleName(), "ignored") diff --git a/test/unit/org/apache/cassandra/tools/nodetool/InvalidateJmxPermissionsCacheTest.java b/test/unit/org/apache/cassandra/tools/nodetool/InvalidateJmxPermissionsCacheTest.java index 81fa2c52935a..9a4c231f5a4c 100644 --- a/test/unit/org/apache/cassandra/tools/nodetool/InvalidateJmxPermissionsCacheTest.java +++ b/test/unit/org/apache/cassandra/tools/nodetool/InvalidateJmxPermissionsCacheTest.java @@ -60,10 +60,10 @@ public static void setup() throws Exception JMXResource rootJmxResource = JMXResource.root(); Set jmxPermissions = rootJmxResource.applicablePermissions(); - roleManager.createRole(AuthenticatedUser.SYSTEM_USER, ROLE_A, AuthTestUtils.getLoginRoleOprions()); + roleManager.createRole(AuthenticatedUser.SYSTEM_USER, ROLE_A, AuthTestUtils.getLoginRoleOptions()); authorizer.grant(AuthenticatedUser.SYSTEM_USER, jmxPermissions, rootJmxResource, ROLE_A); - roleManager.createRole(AuthenticatedUser.SYSTEM_USER, ROLE_B, AuthTestUtils.getLoginRoleOprions()); + roleManager.createRole(AuthenticatedUser.SYSTEM_USER, ROLE_B, AuthTestUtils.getLoginRoleOptions()); authorizer.grant(AuthenticatedUser.SYSTEM_USER, jmxPermissions, rootJmxResource, ROLE_B); startJMXServer(); diff --git a/test/unit/org/apache/cassandra/tools/nodetool/InvalidateNetworkPermissionsCacheTest.java b/test/unit/org/apache/cassandra/tools/nodetool/InvalidateNetworkPermissionsCacheTest.java index cef29b312577..32681b00171a 100644 --- a/test/unit/org/apache/cassandra/tools/nodetool/InvalidateNetworkPermissionsCacheTest.java +++ b/test/unit/org/apache/cassandra/tools/nodetool/InvalidateNetworkPermissionsCacheTest.java @@ -47,8 +47,8 @@ public static void setup() throws Exception new AuthTestUtils.LocalCassandraAuthorizer(), new AuthTestUtils.LocalCassandraNetworkAuthorizer()); - roleManager.createRole(AuthenticatedUser.SYSTEM_USER, ROLE_A, AuthTestUtils.getLoginRoleOprions()); - roleManager.createRole(AuthenticatedUser.SYSTEM_USER, ROLE_B, AuthTestUtils.getLoginRoleOprions()); + roleManager.createRole(AuthenticatedUser.SYSTEM_USER, ROLE_A, AuthTestUtils.getLoginRoleOptions()); + roleManager.createRole(AuthenticatedUser.SYSTEM_USER, ROLE_B, AuthTestUtils.getLoginRoleOptions()); startJMXServer(); } diff --git a/test/unit/org/apache/cassandra/tools/nodetool/InvalidatePermissionsCacheTest.java b/test/unit/org/apache/cassandra/tools/nodetool/InvalidatePermissionsCacheTest.java index caaabf21cdbf..a7da1a2582cc 100644 --- a/test/unit/org/apache/cassandra/tools/nodetool/InvalidatePermissionsCacheTest.java +++ b/test/unit/org/apache/cassandra/tools/nodetool/InvalidatePermissionsCacheTest.java @@ -63,8 +63,8 @@ public static void setup() throws Exception authorizer, new AuthTestUtils.LocalCassandraNetworkAuthorizer()); - 
roleManager.createRole(AuthenticatedUser.SYSTEM_USER, ROLE_A, AuthTestUtils.getLoginRoleOprions()); - roleManager.createRole(AuthenticatedUser.SYSTEM_USER, ROLE_B, AuthTestUtils.getLoginRoleOprions()); + roleManager.createRole(AuthenticatedUser.SYSTEM_USER, ROLE_A, AuthTestUtils.getLoginRoleOptions()); + roleManager.createRole(AuthenticatedUser.SYSTEM_USER, ROLE_B, AuthTestUtils.getLoginRoleOptions()); List resources = Arrays.asList( DataResource.root(), diff --git a/test/unit/org/apache/cassandra/tools/nodetool/InvalidateRolesCacheTest.java b/test/unit/org/apache/cassandra/tools/nodetool/InvalidateRolesCacheTest.java index 80596b353c6c..e63a7cba306e 100644 --- a/test/unit/org/apache/cassandra/tools/nodetool/InvalidateRolesCacheTest.java +++ b/test/unit/org/apache/cassandra/tools/nodetool/InvalidateRolesCacheTest.java @@ -47,8 +47,8 @@ public static void setup() throws Exception new AuthTestUtils.LocalCassandraAuthorizer(), new AuthTestUtils.LocalCassandraNetworkAuthorizer()); - roleManager.createRole(AuthenticatedUser.SYSTEM_USER, ROLE_A, AuthTestUtils.getLoginRoleOprions()); - roleManager.createRole(AuthenticatedUser.SYSTEM_USER, ROLE_B, AuthTestUtils.getLoginRoleOprions()); + roleManager.createRole(AuthenticatedUser.SYSTEM_USER, ROLE_A, AuthTestUtils.getLoginRoleOptions()); + roleManager.createRole(AuthenticatedUser.SYSTEM_USER, ROLE_B, AuthTestUtils.getLoginRoleOptions()); startJMXServer(); } diff --git a/test/unit/org/apache/cassandra/tools/nodetool/formatter/TableBuilderTest.java b/test/unit/org/apache/cassandra/tools/nodetool/formatter/TableBuilderTest.java index 9782b5bb1a2e..99a692949069 100644 --- a/test/unit/org/apache/cassandra/tools/nodetool/formatter/TableBuilderTest.java +++ b/test/unit/org/apache/cassandra/tools/nodetool/formatter/TableBuilderTest.java @@ -111,4 +111,4 @@ public void testDelimiter() } assertEquals(String.format("a\tbb\tccc%n"), baos.toString()); } -} \ No newline at end of file +} diff --git a/test/unit/org/apache/cassandra/transport/CQLUserAuditTest.java b/test/unit/org/apache/cassandra/transport/CQLUserAuditTest.java index 019f06c5cf9d..ddcfb7f25bbc 100644 --- a/test/unit/org/apache/cassandra/transport/CQLUserAuditTest.java +++ b/test/unit/org/apache/cassandra/transport/CQLUserAuditTest.java @@ -253,4 +253,4 @@ private static ArrayList executeAs(List queries, String user auditEvents.drainTo(ret); return ret; } -} \ No newline at end of file +} diff --git a/test/unit/org/apache/cassandra/utils/AbstractIteratorTest.java b/test/unit/org/apache/cassandra/utils/AbstractIteratorTest.java index 8119dcbabf29..c6b49d96c7dd 100644 --- a/test/unit/org/apache/cassandra/utils/AbstractIteratorTest.java +++ b/test/unit/org/apache/cassandra/utils/AbstractIteratorTest.java @@ -389,4 +389,4 @@ private static class SomeCheckedException extends Exception private static class SomeUncheckedException extends RuntimeException { } -} \ No newline at end of file +} diff --git a/test/unit/org/apache/cassandra/utils/BloomFilterTest.java b/test/unit/org/apache/cassandra/utils/BloomFilterTest.java index 1c3afff2efab..78f2dd09e491 100644 --- a/test/unit/org/apache/cassandra/utils/BloomFilterTest.java +++ b/test/unit/org/apache/cassandra/utils/BloomFilterTest.java @@ -18,21 +18,22 @@ */ package org.apache.cassandra.utils; -import java.io.*; +import org.apache.cassandra.io.util.*; + +import java.io.ByteArrayInputStream; +import java.io.DataInputStream; +import java.io.IOException; import java.nio.ByteBuffer; import java.util.HashSet; import java.util.Iterator; import java.util.Random; 
 import java.util.Set;
+import org.apache.cassandra.io.util.File;
 import org.junit.*;
 import org.apache.cassandra.dht.IPartitioner;
 import org.apache.cassandra.dht.Murmur3Partitioner;
-import org.apache.cassandra.io.util.BufferedDataOutputStreamPlus;
-import org.apache.cassandra.io.util.DataOutputBuffer;
-import org.apache.cassandra.io.util.DataOutputStreamPlus;
-import org.apache.cassandra.io.util.FileUtils;
 import org.apache.cassandra.utils.IFilter.FilterKey;
 import org.apache.cassandra.utils.KeyGenerator.RandomStringGenerator;
 import org.apache.cassandra.utils.obs.IBitSet;
@@ -210,12 +211,12 @@ public void testHugeBFSerialization() throws IOException
         File file = FileUtils.createDeletableTempFile("bloomFilterTest-", ".dat");
         BloomFilter filter = (BloomFilter) FilterFactory.getFilter(((long) Integer.MAX_VALUE / 8) + 1, 0.01d);
         filter.add(FilterTestHelper.wrap(test));
-        DataOutputStreamPlus out = new BufferedDataOutputStreamPlus(new FileOutputStream(file));
+        DataOutputStreamPlus out = new FileOutputStreamPlus(file);
         BloomFilterSerializer.serialize(filter, out);
         out.close();
         filter.close();
-        DataInputStream in = new DataInputStream(new FileInputStream(file));
+        DataInputStream in = new DataInputStream(new FileInputStreamPlus(file));
         BloomFilter filter2 = BloomFilterSerializer.deserialize(in, false);
         Assert.assertTrue(filter2.isPresent(FilterTestHelper.wrap(test)));
         FileUtils.closeQuietly(in);
diff --git a/test/unit/org/apache/cassandra/utils/GeneratorsTest.java b/test/unit/org/apache/cassandra/utils/GeneratorsTest.java
index 7fa056120315..b9358cce8e51 100644
--- a/test/unit/org/apache/cassandra/utils/GeneratorsTest.java
+++ b/test/unit/org/apache/cassandra/utils/GeneratorsTest.java
@@ -45,4 +45,4 @@ public void dnsDomainName()
     {
         qt().forAll(Generators.DNS_DOMAIN_NAME).checkAssert(InternetDomainName::from);
     }
-}
\ No newline at end of file
+}
diff --git a/test/unit/org/apache/cassandra/utils/KeyGenerator.java b/test/unit/org/apache/cassandra/utils/KeyGenerator.java
index df958678685c..d84d7aae2374 100644
--- a/test/unit/org/apache/cassandra/utils/KeyGenerator.java
+++ b/test/unit/org/apache/cassandra/utils/KeyGenerator.java
@@ -18,7 +18,13 @@
  */
 package org.apache.cassandra.utils;
-import java.io.*;
+import org.apache.cassandra.io.util.FileInputStreamPlus;
+
+import java.io.BufferedReader;
+import java.io.FileInputStream;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.io.InputStreamReader;
 import java.nio.ByteBuffer;
 import java.util.Random;
@@ -119,7 +125,7 @@ static class WordGenerator implements ResetableIterator
         static
         {
-            try (BufferedReader br = new BufferedReader(new InputStreamReader(new FileInputStream("/usr/share/dict/words"))))
+            try (BufferedReader br = new BufferedReader(new InputStreamReader(new FileInputStreamPlus("/usr/share/dict/words"))))
             {
                 while (br.ready())
                 {
@@ -156,7 +162,7 @@ public void reset()
             {
                 reader = new BufferedReader(new InputStreamReader(new FileInputStream("/usr/share/dict/words")));
             }
-            catch (FileNotFoundException e) 
+            catch (FileNotFoundException e)
             {
                 throw new RuntimeException(e);
             }
diff --git a/test/unit/org/apache/cassandra/utils/NativeLibraryTest.java b/test/unit/org/apache/cassandra/utils/NativeLibraryTest.java
index 1a26351a9b3f..52a7f84f8af6 100644
--- a/test/unit/org/apache/cassandra/utils/NativeLibraryTest.java
+++ b/test/unit/org/apache/cassandra/utils/NativeLibraryTest.java
@@ -18,8 +18,8 @@
  */
 package org.apache.cassandra.utils;
-import java.io.File;
+import org.apache.cassandra.io.util.File;
 import org.junit.Assert;
 import org.junit.Test;
@@ -32,7 +32,7 @@ public void testSkipCache()
     {
         File file = FileUtils.createDeletableTempFile("testSkipCache", "1");
-        NativeLibrary.trySkipCache(file.getPath(), 0, 0);
+        NativeLibrary.trySkipCache(file.path(), 0, 0);
     }
     @Test
diff --git a/test/unit/org/apache/cassandra/utils/SerializationsTest.java b/test/unit/org/apache/cassandra/utils/SerializationsTest.java
index 6597f3bb562d..6fc3872337ff 100644
--- a/test/unit/org/apache/cassandra/utils/SerializationsTest.java
+++ b/test/unit/org/apache/cassandra/utils/SerializationsTest.java
@@ -21,6 +21,7 @@
 import java.io.DataInputStream;
 import java.io.IOException;
+import org.apache.cassandra.io.util.FileInputStreamPlus;
 import org.junit.Assert;
 import org.junit.BeforeClass;
 import org.junit.Test;
@@ -36,9 +37,10 @@
 import org.apache.cassandra.dht.Murmur3Partitioner;
 import org.apache.cassandra.utils.obs.OffHeapBitSet;
-import java.io.File;
 import java.io.FileInputStream;
+import org.apache.cassandra.io.util.File;
+
 public class SerializationsTest extends AbstractSerializationsTester
 {
     // Helper function to serialize old Bloomfilter format, should be removed once the old format is not supported
@@ -80,7 +82,7 @@ public void testBloomFilterRead1000() throws IOException
             testBloomFilterWrite1000(true);
         }
-        try (DataInputStream in = getInput("4.0", "utils.BloomFilter1000.bin");
+        try (FileInputStreamPlus in = getInput("4.0", "utils.BloomFilter1000.bin");
             IFilter filter = BloomFilterSerializer.deserialize(in, false))
         {
            boolean present;
@@ -96,7 +98,7 @@ public void testBloomFilterRead1000() throws IOException
             }
         }
-        try (DataInputStream in = getInput("3.0", "utils.BloomFilter1000.bin");
+        try (FileInputStreamPlus in = getInput("3.0", "utils.BloomFilter1000.bin");
             IFilter filter = BloomFilterSerializer.deserialize(in, true))
         {
            boolean present;
@@ -123,7 +125,7 @@ private static void testBloomFilterTable(String file, boolean oldBfFormat) throw
     {
         Murmur3Partitioner partitioner = new Murmur3Partitioner();
-        try (DataInputStream in = new DataInputStream(new FileInputStream(new File(file)));
+        try (DataInputStream in = new DataInputStream(new FileInputStreamPlus(new File(file)));
             IFilter filter = BloomFilterSerializer.deserialize(in, oldBfFormat))
         {
             for (int i = 1; i <= 10; i++)
@@ -176,7 +178,7 @@ public void testEstimatedHistogramRead() throws IOException
         if (EXECUTE_WRITES)
             testEstimatedHistogramWrite();
-        try (DataInputStreamPlus in = getInput("utils.EstimatedHistogram.bin"))
+        try (FileInputStreamPlus in = getInput("utils.EstimatedHistogram.bin"))
         {
             Assert.assertNotNull(EstimatedHistogram.serializer.deserialize(in));
             Assert.assertNotNull(EstimatedHistogram.serializer.deserialize(in));
diff --git a/test/unit/org/apache/cassandra/utils/binlog/BinLogTest.java b/test/unit/org/apache/cassandra/utils/binlog/BinLogTest.java
index 3a21ed167a38..86b63a2d425f 100644
--- a/test/unit/org/apache/cassandra/utils/binlog/BinLogTest.java
+++ b/test/unit/org/apache/cassandra/utils/binlog/BinLogTest.java
@@ -18,7 +18,6 @@
 package org.apache.cassandra.utils.binlog;
-import java.io.File;
 import java.nio.file.Path;
 import java.nio.file.Files;
 import java.util.ArrayList;
@@ -29,6 +28,7 @@
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.function.Supplier;
+import org.apache.cassandra.io.util.File;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
@@ -79,9 +79,9 @@ public void tearDown() throws Exception
         {
             binLog.stop();
         }
-        for (File f : path.toFile().listFiles())
+        for (File f : new File(path).tryList())
         {
-            f.delete();
+            f.tryDelete();
         }
     }
diff --git a/test/unit/org/apache/cassandra/utils/binlog/DeletingArchiverTest.java b/test/unit/org/apache/cassandra/utils/binlog/DeletingArchiverTest.java
index cd6b7a3a545a..940d121a751e 100644
--- a/test/unit/org/apache/cassandra/utils/binlog/DeletingArchiverTest.java
+++ b/test/unit/org/apache/cassandra/utils/binlog/DeletingArchiverTest.java
@@ -18,7 +18,6 @@
 package org.apache.cassandra.utils.binlog;
-import java.io.File;
 import java.io.IOException;
 import java.nio.file.Files;
 import java.nio.file.Path;
@@ -26,6 +25,7 @@
 import java.util.List;
 import java.util.Random;
+import org.apache.cassandra.io.util.File;
 import org.junit.Test;
 import static org.junit.Assert.assertEquals;
@@ -40,7 +40,7 @@ public void testDelete() throws IOException
         DeletingArchiver da = new DeletingArchiver(45);
         List files = generateFiles(10, 5);
         for (File f : files)
-            da.onReleased(1, f);
+            da.onReleased(1, f.toJavaIOFile());
         // adding 5 files, each with size 10, this means the first one should have been deleted:
         assertFalse(files.get(0).exists());
         for (int i = 1; i < files.size(); i++)
@@ -53,7 +53,7 @@ public void testArchiverBigFile() throws IOException
     {
         DeletingArchiver da = new DeletingArchiver(45);
         List largeFiles = generateFiles(50, 1);
-        da.onReleased(1, largeFiles.get(0));
+        da.onReleased(1, largeFiles.get(0).toJavaIOFile());
         assertFalse(largeFiles.get(0).exists());
         assertEquals(0, da.getBytesInStoreFiles());
     }
@@ -67,11 +67,11 @@ public void testArchiverSizeTracking() throws IOException
         for (File f : smallFiles)
         {
-            da.onReleased(1, f);
+            da.onReleased(1, f.toJavaIOFile());
         }
         assertEquals(40, da.getBytesInStoreFiles());
         // we now have 40 bytes in deleting archiver, adding the large 40 byte file should delete all the small ones
-        da.onReleased(1, largeFiles.get(0));
+        da.onReleased(1, largeFiles.get(0).toJavaIOFile());
         for (File f : smallFiles)
             assertFalse(f.exists());
@@ -79,7 +79,7 @@ public void testArchiverSizeTracking() throws IOException
         // make sure that size tracking is ok - all 4 new small files should still be there and the large one should be gone
         for (File f : smallFiles)
-            da.onReleased(1, f);
+            da.onReleased(1, f.toJavaIOFile());
         assertFalse(largeFiles.get(0).exists());
         for (File f : smallFiles)
@@ -99,7 +99,7 @@ private List generateFiles(int size, int count) throws IOException
         {
             Path p = Files.createTempFile("logfile", ".cq4");
             Files.write(p, content);
-            files.add(p.toFile());
+            files.add(new File(p));
         }
         files.forEach(File::deleteOnExit);
         return files;
diff --git a/test/unit/org/apache/cassandra/utils/binlog/ExternalArchiverTest.java b/test/unit/org/apache/cassandra/utils/binlog/ExternalArchiverTest.java
index 284ff5a747be..8e3c2db2f74e 100644
--- a/test/unit/org/apache/cassandra/utils/binlog/ExternalArchiverTest.java
+++ b/test/unit/org/apache/cassandra/utils/binlog/ExternalArchiverTest.java
@@ -18,7 +18,6 @@
 package org.apache.cassandra.utils.binlog;
-import java.io.File;
 import java.io.IOException;
 import java.nio.file.Files;
 import java.nio.file.Path;
@@ -30,6 +29,7 @@
 import java.util.concurrent.atomic.AtomicInteger;
 import com.google.common.collect.Sets;
+import org.apache.cassandra.io.util.File;
 import org.junit.Test;
 import net.openhft.chronicle.queue.impl.single.SingleChronicleQueue;
@@ -48,21 +48,21 @@ public void testArchiver() throws IOException, InterruptedException
         String script = s.left;
         String dir = s.right;
         Path logdirectory = Files.createTempDirectory("logdirectory");
-        File logfileToArchive = Files.createTempFile(logdirectory, "logfile", "xyz").toFile();
+        File logfileToArchive = new File(Files.createTempFile(logdirectory, "logfile", "xyz"));
         Files.write(logfileToArchive.toPath(), "content".getBytes());
         ExternalArchiver ea = new ExternalArchiver(script+" %path", null, 10);
-        ea.onReleased(1, logfileToArchive);
+        ea.onReleased(1, logfileToArchive.toJavaIOFile());
         while (logfileToArchive.exists())
         {
             Thread.sleep(100);
         }
-        File movedFile = new File(dir, logfileToArchive.getName());
+        File movedFile = new File(dir, logfileToArchive.name());
         assertTrue(movedFile.exists());
         movedFile.deleteOnExit();
         ea.stop();
-        assertEquals(0, logdirectory.toFile().listFiles().length);
+        assertEquals(0, new File(logdirectory).tryList().length);
     }
     @Test
@@ -75,7 +75,7 @@ public void testArchiveExisting() throws IOException, InterruptedException
         Path dir = Files.createTempDirectory("archive");
         for (int i = 0; i < 10; i++)
         {
-            File logfileToArchive = Files.createTempFile(dir, "logfile", SingleChronicleQueue.SUFFIX).toFile();
+            File logfileToArchive = new File(Files.createTempFile(dir, "logfile", SingleChronicleQueue.SUFFIX));
             logfileToArchive.deleteOnExit();
             Files.write(logfileToArchive.toPath(), ("content"+i).getBytes());
             existingFiles.add(logfileToArchive);
@@ -94,13 +94,13 @@ public void testArchiveExisting() throws IOException, InterruptedException
                 Thread.sleep(100);
                 break;
            }
-            File movedFile = new File(moveDir, f.getName());
+            File movedFile = new File(moveDir, f.name());
             assertTrue(movedFile.exists());
             movedFile.deleteOnExit();
         }
         }
         ea.stop();
-        assertEquals(0, dir.toFile().listFiles().length);
+        assertEquals(0, new File(dir).tryList().length);
     }
     @Test
@@ -114,7 +114,7 @@ public void testArchiveOnShutdown() throws IOException, InterruptedException
         List existingFiles = new ArrayList<>();
         for (int i = 0; i < 10; i++)
         {
-            File logfileToArchive = Files.createTempFile(dir, "logfile", SingleChronicleQueue.SUFFIX).toFile();
+            File logfileToArchive = new File(Files.createTempFile(dir, "logfile", SingleChronicleQueue.SUFFIX));
             logfileToArchive.deleteOnExit();
             Files.write(logfileToArchive.toPath(), ("content"+i).getBytes());
             existingFiles.add(logfileToArchive);
@@ -124,7 +124,7 @@ public void testArchiveOnShutdown() throws IOException, InterruptedException
         for (File f : existingFiles)
         {
             assertFalse(f.exists());
-            File movedFile = new File(moveDir, f.getName());
+            File movedFile = new File(moveDir, f.name());
             assertTrue(movedFile.exists());
             movedFile.deleteOnExit();
         }
@@ -144,7 +144,7 @@ public void testRetries() throws IOException, InterruptedException
         String script = s.left;
         String moveDir = s.right;
         Path logdirectory = Files.createTempDirectory("logdirectory");
-        File logfileToArchive = Files.createTempFile(logdirectory, "logfile", "xyz").toFile();
+        File logfileToArchive = new File(Files.createTempFile(logdirectory, "logfile", "xyz"));
         Files.write(logfileToArchive.toPath(), "content".getBytes());
         AtomicInteger tryCounter = new AtomicInteger();
         AtomicBoolean success = new AtomicBoolean();
@@ -154,7 +154,7 @@ public void testRetries() throws IOException, InterruptedException
             ExternalArchiver.exec(cmd);
             success.set(true);
         });
-        ea.onReleased(0, logfileToArchive);
+        ea.onReleased(0, logfileToArchive.toJavaIOFile());
         while (tryCounter.get() < 2) // while we have only executed this 0 or 1 times, the file should still be on disk
         {
             Thread.sleep(100);
         }
@@ -167,7 +167,7 @@ public void testRetries() throws IOException, InterruptedException
         // there will be 3 attempts in total, 2 failing ones, then the successful one:
         assertEquals(3, tryCounter.get());
         assertFalse(logfileToArchive.exists());
-        File movedFile = new File(moveDir, logfileToArchive.getName());
+        File movedFile = new File(moveDir, logfileToArchive.name());
         assertTrue(movedFile.exists());
         ea.stop();
     }
@@ -188,7 +188,7 @@ public void testMaxRetries() throws IOException, InterruptedException
         String script = s.left;
         String moveDir = s.right;
         Path logdirectory = Files.createTempDirectory("logdirectory");
-        File logfileToArchive = Files.createTempFile(logdirectory, "logfile", "xyz").toFile();
+        File logfileToArchive = new File(Files.createTempFile(logdirectory, "logfile", "xyz"));
         Files.write(logfileToArchive.toPath(), "content".getBytes());
         AtomicInteger tryCounter = new AtomicInteger();
@@ -206,7 +206,7 @@ public void testMaxRetries() throws IOException, InterruptedException
                 throw t;
             }
         });
-        ea.onReleased(0, logfileToArchive);
+        ea.onReleased(0, logfileToArchive.toJavaIOFile());
         while (tryCounter.get() < 3)
             Thread.sleep(500);
         assertTrue(logfileToArchive.exists());
@@ -214,9 +214,9 @@ public void testMaxRetries() throws IOException, InterruptedException
         Thread.sleep(5000);
         assertTrue(logfileToArchive.exists());
         assertFalse(success.get());
-        File [] fs = new File(moveDir).listFiles(f ->
+        File [] fs = new File(moveDir).tryList(f ->
        {
-            if (f.getName().startsWith("file."))
+            if (f.name().startsWith("file."))
            {
                 f.deleteOnExit();
                 return true;
@@ -230,24 +230,24 @@ public void testMaxRetries() throws IOException, InterruptedException
     private Pair createScript() throws IOException
     {
-        File f = Files.createTempFile("script", "", PosixFilePermissions.asFileAttribute(Sets.newHashSet(PosixFilePermission.OWNER_WRITE,
+        File f = new File(Files.createTempFile("script", "", PosixFilePermissions.asFileAttribute(Sets.newHashSet(PosixFilePermission.OWNER_WRITE,
                                                                                                  PosixFilePermission.OWNER_READ,
-                                                                                                 PosixFilePermission.OWNER_EXECUTE))).toFile();
+                                                                                                 PosixFilePermission.OWNER_EXECUTE))));
         f.deleteOnExit();
-        File dir = Files.createTempDirectory("archive").toFile();
+        File dir = new File(Files.createTempDirectory("archive"));
         dir.deleteOnExit();
-        String script = "#!/bin/sh\nmv $1 "+dir.getAbsolutePath();
+        String script = "#!/bin/sh\nmv $1 "+dir.absolutePath();
         Files.write(f.toPath(), script.getBytes());
-        return Pair.create(f.getAbsolutePath(), dir.getAbsolutePath());
+        return Pair.create(f.absolutePath(), dir.absolutePath());
     }
     private Pair createFailingScript(int failures) throws IOException
     {
-        File f = Files.createTempFile("script", "", PosixFilePermissions.asFileAttribute(Sets.newHashSet(PosixFilePermission.OWNER_WRITE,
+        File f = new File(Files.createTempFile("script", "", PosixFilePermissions.asFileAttribute(Sets.newHashSet(PosixFilePermission.OWNER_WRITE,
                                                                                                  PosixFilePermission.OWNER_READ,
-                                                                                                 PosixFilePermission.OWNER_EXECUTE))).toFile();
+                                                                                                 PosixFilePermission.OWNER_EXECUTE))));
         f.deleteOnExit();
-        File dir = Files.createTempDirectory("archive").toFile();
+        File dir = new File(Files.createTempDirectory("archive"));
         dir.deleteOnExit();
         // this script counts files in dir.getAbsolutePath, then if there are more than failures files in there, it moves the actual file
         String script = "#!/bin/bash%n" +
@@ -262,7 +262,7 @@ private Pair createFailingScript(int failures) throws IOExceptio
                         " mv $1 $DIR%n"+
                         "fi%n";
-        Files.write(f.toPath(), String.format(script, dir.getAbsolutePath(), failures).getBytes());
-        return Pair.create(f.getAbsolutePath(), dir.getAbsolutePath());
+        Files.write(f.toPath(), String.format(script, dir.absolutePath(), failures).getBytes());
+        return Pair.create(f.absolutePath(), dir.absolutePath());
     }
 }
diff --git a/test/unit/org/apache/cassandra/utils/concurrent/RefCountedTest.java b/test/unit/org/apache/cassandra/utils/concurrent/RefCountedTest.java
index 0d1f9f61c7e3..e99d3683d922 100644
--- a/test/unit/org/apache/cassandra/utils/concurrent/RefCountedTest.java
+++ b/test/unit/org/apache/cassandra/utils/concurrent/RefCountedTest.java
@@ -22,7 +22,7 @@
 import org.junit.Assert;
-import java.io.File;
+import org.apache.cassandra.io.util.File;
 import java.lang.ref.WeakReference;
 import java.util.HashMap;
 import java.util.HashSet;
diff --git a/test/unit/org/apache/cassandra/utils/streamhist/StreamingTombstoneHistogramBuilderTest.java b/test/unit/org/apache/cassandra/utils/streamhist/StreamingTombstoneHistogramBuilderTest.java
old mode 100755
new mode 100644
diff --git a/tools/fqltool/src/org/apache/cassandra/fqltool/ResultComparator.java b/tools/fqltool/src/org/apache/cassandra/fqltool/ResultComparator.java
index eeebe209cdce..3935e940a4b0 100644
--- a/tools/fqltool/src/org/apache/cassandra/fqltool/ResultComparator.java
+++ b/tools/fqltool/src/org/apache/cassandra/fqltool/ResultComparator.java
@@ -151,4 +151,4 @@ else if (cd.wasFailed())
         }
         return sb.toString();
     }
-}
\ No newline at end of file
+}
diff --git a/tools/stress/src/org/apache/cassandra/io/sstable/StressCQLSSTableWriter.java b/tools/stress/src/org/apache/cassandra/io/sstable/StressCQLSSTableWriter.java
index c087cab252e6..7028b84c3bf8 100644
--- a/tools/stress/src/org/apache/cassandra/io/sstable/StressCQLSSTableWriter.java
+++ b/tools/stress/src/org/apache/cassandra/io/sstable/StressCQLSSTableWriter.java
@@ -331,7 +331,7 @@ private ByteBuffer serialize(Object value, TypeCodec codec)
      */
     public File getInnermostDirectory()
     {
-        return cfs.getDirectories().getDirectoryForNewSSTables();
+        return cfs.getDirectories().getDirectoryForNewSSTables().toJavaIOFile();
     }
     /**
@@ -613,7 +613,7 @@ public static ColumnFamilyStore createOfflineTable(CreateTableStatement.Raw sche
                 .build();
         Keyspace.setInitialized();
-        Directories directories = new Directories(tableMetadata, directoryList.stream().map(Directories.DataDirectory::new).collect(Collectors.toList()));
+        Directories directories = new Directories(tableMetadata, directoryList.stream().map(f -> new Directories.DataDirectory(new org.apache.cassandra.io.util.File(f.toPath()))).collect(Collectors.toList()));
         Keyspace ks = Keyspace.openWithoutSSTables(keyspace);
         ColumnFamilyStore cfs = ColumnFamilyStore.createColumnFamilyStore(ks, tableMetadata.name, TableMetadataRef.forOfflineTools(tableMetadata), directories, false, false, true);
diff --git a/tools/stress/src/org/apache/cassandra/stress/generate/values/Bytes.java b/tools/stress/src/org/apache/cassandra/stress/generate/values/Bytes.java
index 3c15c8790d12..f95061fde7a5 100644
--- a/tools/stress/src/org/apache/cassandra/stress/generate/values/Bytes.java
+++ b/tools/stress/src/org/apache/cassandra/stress/generate/values/Bytes.java
@@ -51,4 +51,4 @@ public ByteBuffer generate()
             bytes[i++] = (byte)v;
         return ByteBuffer.wrap(Arrays.copyOf(bytes, size));
     }
-}
\ No newline at end of file
+}
diff --git a/tools/stress/src/org/apache/cassandra/stress/operations/userdefined/CASQuery.java b/tools/stress/src/org/apache/cassandra/stress/operations/userdefined/CASQuery.java
index ff3c2a71fa50..1487a0d3afd1 100644
--- a/tools/stress/src/org/apache/cassandra/stress/operations/userdefined/CASQuery.java
+++ b/tools/stress/src/org/apache/cassandra/stress/operations/userdefined/CASQuery.java
@@ -224,4 +224,4 @@ private BoundStatement prepare(final Row row, final Object[] casDbValues)
         }
         return statement.bind(bindBuffer);
     }
-}
\ No newline at end of file
+}
diff --git a/tools/stress/src/org/apache/cassandra/stress/report/Timer.java b/tools/stress/src/org/apache/cassandra/stress/report/Timer.java
index 09377210aed7..3938b0c4c65e 100644
--- a/tools/stress/src/org/apache/cassandra/stress/report/Timer.java
+++ b/tools/stress/src/org/apache/cassandra/stress/report/Timer.java
@@ -62,4 +62,4 @@ public void start()
     {
         startTimeNs = nanoTime();
     }
-}
\ No newline at end of file
+}
diff --git a/tools/stress/src/org/apache/cassandra/stress/settings/Command.java b/tools/stress/src/org/apache/cassandra/stress/settings/Command.java
index d8ac5d172fec..884020959f7b 100644
--- a/tools/stress/src/org/apache/cassandra/stress/settings/Command.java
+++ b/tools/stress/src/org/apache/cassandra/stress/settings/Command.java
@@ -138,4 +138,4 @@ public final Runnable helpPrinter()
         throw new AssertionError();
     }
-}
\ No newline at end of file
+}
diff --git a/tools/stress/src/org/apache/cassandra/stress/settings/SettingsGraph.java b/tools/stress/src/org/apache/cassandra/stress/settings/SettingsGraph.java
index 5abe5896d4cb..e953c15d9153 100644
--- a/tools/stress/src/org/apache/cassandra/stress/settings/SettingsGraph.java
+++ b/tools/stress/src/org/apache/cassandra/stress/settings/SettingsGraph.java
@@ -54,7 +54,7 @@ public SettingsGraph(GraphOptions options, SettingsCommand stressCommand)
         if (inGraphMode())
         {
-            temporaryLogFile = FileUtils.createTempFile("cassandra-stress", ".log");
+            temporaryLogFile = FileUtils.createTempFile("cassandra-stress", ".log").toJavaIOFile();
         }
         else
         {
diff --git a/tools/stress/test/unit/org/apache/cassandra/stress/util/MultiResultLoggerTest.java b/tools/stress/test/unit/org/apache/cassandra/stress/util/MultiResultLoggerTest.java
index f0c99b8631fc..417251f9b465 100644
--- a/tools/stress/test/unit/org/apache/cassandra/stress/util/MultiResultLoggerTest.java
+++ b/tools/stress/test/unit/org/apache/cassandra/stress/util/MultiResultLoggerTest.java
@@ -102,4 +102,4 @@ public void delegatesPrintlnToAdditionalPrintStreams() throws Exception
         assertEquals("\n", output.toString());
     }
-}
\ No newline at end of file
+}