Fix shard ordering bug on some filesystems. #138

Open · wants to merge 3 commits into master
2 changes: 1 addition & 1 deletion pom.xml
@@ -4,7 +4,7 @@

<groupId>org.disq-bio</groupId>
<artifactId>disq</artifactId>
- <version>0.3.7-SNAPSHOT</version>
<version>0.3.7-bugfix-SNAPSHOT</version>
Contributor:
This change isn't necessary for this pull request -- versioning will be handled by the Maven release plugin on the next release cycle.

Contributor Author:
Woops! That's funny, I literally told @tedsharpe the same thing on his last PR and then I did it myself...

<name>Disq</name>
<description>A library for manipulating bioinformatics sequencing formats in Apache Spark.</description>
<url>https://github.com/disq-bio/disq</url>
@@ -26,7 +26,11 @@
package org.disq_bio.disq.impl.file;

import java.io.IOException;
import java.util.List;
import java.util.stream.Collectors;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
@@ -84,4 +88,14 @@ public RecordReader<Void, FileSplit> createRecordReader(
InputSplit split, TaskAttemptContext context) {
return new FileSplitRecordReader();
}

/**
* We override this method because super.listStatus returns files in an effectively random order
* on some filesystems, which breaks downstream ordering assumptions and results in data
* corruption. Sorting guarantees that part files are returned in numeric order, since their
* zero-padded names sort lexicographically into numeric order.
*/
@Override
protected List<FileStatus> listStatus(final JobContext job) throws IOException {
return super.listStatus(job).stream().sorted().collect(Collectors.toList());
}
}
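
To make the bug concrete, here is a minimal standalone sketch (illustrative only, not part of this PR): with zero-padded part-file names, a plain lexicographic sort restores numeric shard order no matter what order the filesystem lists the files in.

import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;

public class ShardOrderSketch {
  public static void main(String[] args) {
    // A listing order some filesystems might return (hypothetical example).
    List<String> listed = Arrays.asList("part-00002", "part-00000", "part-00010", "part-00001");
    // Zero-padded names sort lexicographically into numeric order.
    List<String> sorted = listed.stream().sorted().collect(Collectors.toList());
    System.out.println(sorted); // prints [part-00000, part-00001, part-00002, part-00010]
  }
}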
17 changes: 17 additions & 0 deletions src/test/java/org/disq_bio/disq/AnySamTestUtil.java
@@ -50,6 +50,8 @@
import java.nio.file.Path;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
import java.util.stream.Collectors;
import org.disq_bio.disq.impl.file.NioFileSystemWrapper;
import org.disq_bio.disq.impl.formats.BoundedTraversalUtil;
import org.disq_bio.disq.impl.formats.sam.SamFormat;
@@ -255,4 +257,19 @@ private static int size(Iterator<SAMRecord> iterator) {
}
return count;
}

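  /**
   * Reads every record from the given SAM/BAM/CRAM file into an in-memory list so that tests
   * can compare full file contents. The reference is only needed for CRAM and may be null.
   */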
public static List<SAMRecord> loadEntireReadsFile(String samPath, String referencePath)
throws IOException {
ReferenceSource referenceSource =
referencePath == null
? null
: new ReferenceSource(NioFileSystemWrapper.asPath(referencePath));
try (SamReader reader =
SamReaderFactory.makeDefault()
.validationStringency(ValidationStringency.DEFAULT_STRINGENCY)
.referenceSource(referenceSource)
.open(SamInputResource.of(NioFileSystemWrapper.asPath(samPath)))) {
return reader.iterator().stream().collect(Collectors.toList());
}
}
}
14 changes: 13 additions & 1 deletion src/test/java/org/disq_bio/disq/HtsjdkReadsRddTest.java
@@ -44,6 +44,7 @@
import java.nio.file.Paths;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import junitparams.JUnitParamsRunner;
import junitparams.Parameters;
import org.apache.spark.api.java.JavaRDD;
@@ -152,6 +153,11 @@ public void testReadAndWrite(
Assert.assertEquals(expectedCount, SamtoolsTestUtil.countReads(outputPath, refPath));
}

// check the actual reads match
List<SAMRecord> expectedSamRecords = AnySamTestUtil.loadEntireReadsFile(inputPath, refPath);
List<SAMRecord> actualSamRecords = AnySamTestUtil.loadEntireReadsFile(outputPath, refPath);
Assert.assertEquals(expectedSamRecords, actualSamRecords);

// check we can read back what we've just written
Assert.assertEquals(expectedCount, htsjdkReadsRddStorage.read(outputPath).getReads().count());
}
@@ -257,7 +263,13 @@ public void testReadAndWriteMultiple(
}

// check we can read back what we've just written
- Assert.assertEquals(expectedCount, htsjdkReadsRddStorage.read(outputPath).getReads().count());
final JavaRDD<SAMRecord> readsRdd = htsjdkReadsRddStorage.read(outputPath).getReads();
Assert.assertEquals(expectedCount, readsRdd.count());

// check the actual reads match
List<SAMRecord> expectedSamRecords = AnySamTestUtil.loadEntireReadsFile(inputPath, refPath);
final List<SAMRecord> actualSamRecords = readsRdd.collect();
Assert.assertEquals(expectedSamRecords, actualSamRecords);
}

protected Object[] parametersForTestReadAndWriteSubset() {