Commit 00efa3c

[SPARK-11424] Guard against double-close() of RecordReaders (branch-1.4 backport)
This is a branch-1.4 backport of apache#9382, a fix for SPARK-11424.

Author: Josh Rosen <joshrosen@databricks.com>

Closes apache#9388 from JoshRosen/hadoop-decompressor-pooling-fix-branch-1.4.
1 parent ef42ce6 commit 00efa3c
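The change is easiest to see in miniature. The sketch below is an illustration of the guard that the diffs below introduce, not the Spark code itself: close() releases the RecordReader at most once by nulling out the reference in a finally block, so a repeated close() becomes a no-op instead of returning a pooled decompressor twice (the MAPREDUCE-5918 failure mode). The names SimpleReader, GuardedCloser, and DoubleCloseGuardSketch are made up for this example.

object DoubleCloseGuardSketch {
  // SimpleReader is an illustrative stand-in for Hadoop's RecordReader.
  class SimpleReader {
    def close(): Unit = println("releasing decompressor back to the pool")
  }

  class GuardedCloser {
    private var reader: SimpleReader = new SimpleReader

    def close(): Unit = {
      if (reader != null) {
        try {
          reader.close()
        } finally {
          // Null out the reference even if close() threw, so a later call is a no-op.
          // Closing twice is what exposes MAPREDUCE-5918 on older Hadoop releases.
          reader = null
        }
      }
    }
  }

  def main(args: Array[String]): Unit = {
    val c = new GuardedCloser
    c.close() // prints once
    c.close() // no-op: the reader has already been released
  }
}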

File tree

4 files changed: +50 −27 lines

core/src/main/scala/org/apache/spark/rdd/HadoopRDD.scala

Lines changed: 15 additions & 8 deletions
@@ -257,8 +257,21 @@ class HadoopRDD[K, V](
       }

       override def close() {
-        try {
-          reader.close()
+        if (reader != null) {
+          // Close the reader and release it. Note: it's very important that we don't close the
+          // reader more than once, since that exposes us to MAPREDUCE-5918 when running against
+          // Hadoop 1.x and older Hadoop 2.x releases. That bug can lead to non-deterministic
+          // corruption issues when reading compressed input.
+          try {
+            reader.close()
+          } catch {
+            case e: Exception =>
+              if (!ShutdownHookManager.inShutdown()) {
+                logWarning("Exception in RecordReader.close()", e)
+              }
+          } finally {
+            reader = null
+          }
           if (bytesReadCallback.isDefined) {
             inputMetrics.updateBytesRead()
           } else if (split.inputSplit.value.isInstanceOf[FileSplit] ||
@@ -272,12 +285,6 @@ class HadoopRDD[K, V](
                 logWarning("Unable to get input size to set InputMetrics for task", e)
             }
           }
-        } catch {
-          case e: Exception => {
-            if (!ShutdownHookManager.inShutdown()) {
-              logWarning("Exception in RecordReader.close()", e)
-            }
-          }
         }
       }
     }

core/src/main/scala/org/apache/spark/rdd/NewHadoopRDD.scala

Lines changed: 16 additions & 9 deletions
@@ -128,7 +128,7 @@ class NewHadoopRDD[K, V](
           configurable.setConf(conf)
         case _ =>
       }
-      val reader = format.createRecordReader(
+      var reader = format.createRecordReader(
         split.serializableHadoopSplit.value, hadoopAttemptContext)
       reader.initialize(split.serializableHadoopSplit.value, hadoopAttemptContext)

@@ -158,8 +158,21 @@ class NewHadoopRDD[K, V](
       }

       private def close() {
-        try {
-          reader.close()
+        if (reader != null) {
+          // Close the reader and release it. Note: it's very important that we don't close the
+          // reader more than once, since that exposes us to MAPREDUCE-5918 when running against
+          // Hadoop 1.x and older Hadoop 2.x releases. That bug can lead to non-deterministic
+          // corruption issues when reading compressed input.
+          try {
+            reader.close()
+          } catch {
+            case e: Exception =>
+              if (!ShutdownHookManager.inShutdown()) {
+                logWarning("Exception in RecordReader.close()", e)
+              }
+          } finally {
+            reader = null
+          }
           if (bytesReadCallback.isDefined) {
             inputMetrics.updateBytesRead()
           } else if (split.serializableHadoopSplit.value.isInstanceOf[FileSplit] ||
@@ -173,12 +186,6 @@ class NewHadoopRDD[K, V](
                 logWarning("Unable to get input size to set InputMetrics for task", e)
             }
           }
-        } catch {
-          case e: Exception => {
-            if (!ShutdownHookManager.inShutdown()) {
-              logWarning("Exception in RecordReader.close()", e)
-            }
-          }
         }
       }
     }
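The first hunk above (and the matching hunk in SqlNewHadoopRDD.scala below) turns `val reader` into `var reader` because close() now clears the reference, and reassignment is only legal on a var. A tiny illustrative sketch of that point, using a made-up Reader class rather than Hadoop's RecordReader:

object ValVsVarSketch {
  class Reader { def close(): Unit = () }

  def main(args: Array[String]): Unit = {
    var reader: Reader = new Reader // must be a var so the reference can be cleared
    reader.close()
    reader = null                   // with `val reader`, this reassignment would not compile
    println(reader == null)         // true: a guarded close() can now detect the released state
  }
}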

core/src/main/scala/org/apache/spark/util/NextIterator.scala

Lines changed: 3 additions & 1 deletion
@@ -60,8 +60,10 @@ private[spark] abstract class NextIterator[U] extends Iterator[U] {
    */
   def closeIfNeeded() {
     if (!closed) {
-      close()
+      // Note: it's important that we set closed = true before calling close(), since setting it
+      // afterwards would permit us to call close() multiple times if close() threw an exception.
       closed = true
+      close()
     }
   }

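The comment added above captures the subtle ordering requirement: the flag must flip before close() runs, otherwise a close() that throws leaves closed == false and a later closeIfNeeded() would close again. A small illustrative sketch of that reasoning (hypothetical names, not Spark code):

object CloseOrderingSketch {
  private var closed = false
  private var closeCalls = 0

  // A close() that always fails, to exercise the worst case.
  private def close(): Unit = {
    closeCalls += 1
    throw new RuntimeException("close() failed")
  }

  // Mirrors the patched ordering: set the flag first, then call close().
  def closeIfNeeded(): Unit = {
    if (!closed) {
      closed = true
      close()
    }
  }

  def main(args: Array[String]): Unit = {
    try closeIfNeeded() catch { case _: RuntimeException => () }
    try closeIfNeeded() catch { case _: RuntimeException => () }
    println(s"close() was called $closeCalls time(s)") // prints 1; with the old ordering it would be 2
  }
}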

sql/core/src/main/scala/org/apache/spark/sql/sources/SqlNewHadoopRDD.scala

Lines changed: 16 additions & 9 deletions
@@ -148,7 +148,7 @@ private[sql] class SqlNewHadoopRDD[K, V](
           configurable.setConf(conf)
         case _ =>
       }
-      val reader = format.createRecordReader(
+      var reader = format.createRecordReader(
         split.serializableHadoopSplit.value, hadoopAttemptContext)
       reader.initialize(split.serializableHadoopSplit.value, hadoopAttemptContext)

@@ -178,8 +178,21 @@ private[sql] class SqlNewHadoopRDD[K, V](
       }

       private def close() {
-        try {
-          reader.close()
+        if (reader != null) {
+          // Close the reader and release it. Note: it's very important that we don't close the
+          // reader more than once, since that exposes us to MAPREDUCE-5918 when running against
+          // Hadoop 1.x and older Hadoop 2.x releases. That bug can lead to non-deterministic
+          // corruption issues when reading compressed input.
+          try {
+            reader.close()
+          } catch {
+            case e: Exception =>
+              if (!ShutdownHookManager.inShutdown()) {
+                logWarning("Exception in RecordReader.close()", e)
+              }
+          } finally {
+            reader = null;
+          }
           if (bytesReadCallback.isDefined) {
             inputMetrics.updateBytesRead()
           } else if (split.serializableHadoopSplit.value.isInstanceOf[FileSplit] ||
@@ -193,12 +206,6 @@ private[sql] class SqlNewHadoopRDD[K, V](
                 logWarning("Unable to get input size to set InputMetrics for task", e)
             }
           }
-        } catch {
-          case e: Exception => {
-            if (!ShutdownHookManager.inShutdown()) {
-              logWarning("Exception in RecordReader.close()", e)
-            }
-          }
         }
       }
     }
