@@ -25,6 +25,7 @@ import java.nio.file.Files
 import scala.collection.mutable.ArrayBuffer

 import org.apache.spark.{SparkConf, SparkEnv, SparkException}
+import org.apache.spark.errors.SparkCoreErrors
 import org.apache.spark.internal.{config, Logging}
 import org.apache.spark.io.NioBufferedFileInputStream
 import org.apache.spark.network.buffer.{FileSegmentManagedBuffer, ManagedBuffer}
@@ -227,7 +228,8 @@ private[spark] class IndexShuffleBlockResolver(
     remoteShuffleMaxDisk.foreach { maxBytes =>
       val bytesUsed = getShuffleBytesStored()
       if (maxBytes < bytesUsed) {
-        throw new SparkException(s"Not storing remote shuffles $bytesUsed exceeds $maxBytes")
+        throw SparkException.internalError(
+          s"Not storing remote shuffles $bytesUsed exceeds $maxBytes", category = "SHUFFLE")
       }
     }
     val file = blockId match {
@@ -236,8 +238,8 @@ private[spark] class IndexShuffleBlockResolver(
       case ShuffleDataBlockId(shuffleId, mapId, _) =>
         getDataFile(shuffleId, mapId)
       case _ =>
-        throw new IllegalStateException(s"Unexpected shuffle block transfer ${blockId} as " +
-          s"${blockId.getClass().getSimpleName()}")
+        throw SparkException.internalError(s"Unexpected shuffle block transfer $blockId as " +
+          s"${blockId.getClass().getSimpleName()}", category = "SHUFFLE")
     }
     val fileTmp = createTempFile(file)
     val channel = Channels.newChannel(
@@ -263,7 +265,7 @@ private[spark] class IndexShuffleBlockResolver(
             file.delete()
           }
           if (!fileTmp.renameTo(file)) {
-            throw new IOException(s"fail to rename file ${fileTmp} to ${file}")
+            throw SparkCoreErrors.failedRenameTempFileError(fileTmp, file)
           }
         }
         blockManager.reportBlockStatus(blockId, BlockStatus(StorageLevel.DISK_ONLY, 0, diskSize))
@@ -300,7 +302,7 @@ private[spark] class IndexShuffleBlockResolver(
300
302
301
303
// Make sure the index exist.
302
304
if (! indexFile.exists()) {
303
- throw new FileNotFoundException (" Index file is deleted already." )
305
+ throw SparkException .internalError (" Index file is deleted already." , category = " SHUFFLE " )
304
306
}
305
307
if (dataFile.exists()) {
306
308
List ((dataBlockId, dataBlockData), (indexBlockId, indexBlockData))
@@ -389,7 +391,7 @@ private[spark] class IndexShuffleBlockResolver(
         dataFile.delete()
       }
       if (dataTmp != null && dataTmp.exists() && !dataTmp.renameTo(dataFile)) {
-        throw new IOException("fail to rename file " + dataTmp + " to " + dataFile)
+        throw SparkCoreErrors.failedRenameTempFileError(dataTmp, dataFile)
       }

       // write the checksum file
@@ -462,11 +464,10 @@ private[spark] class IndexShuffleBlockResolver(
     }

     if (!tmpFile.renameTo(targetFile)) {
-      val errorMsg = s"fail to rename file $tmpFile to $targetFile"
       if (propagateError) {
-        throw new IOException(errorMsg)
+        throw SparkCoreErrors.failedRenameTempFileError(tmpFile, targetFile)
       } else {
-        logWarning(errorMsg)
+        logWarning(s"fail to rename file $tmpFile to $targetFile")
       }
     }
   }
@@ -567,7 +568,8 @@ private[spark] class IndexShuffleBlockResolver(
       case batchId: ShuffleBlockBatchId =>
         (batchId.shuffleId, batchId.mapId, batchId.startReduceId, batchId.endReduceId)
       case _ =>
-        throw new IllegalArgumentException("unexpected shuffle block id format: " + blockId)
+        throw SparkException.internalError(
+          s"unexpected shuffle block id format: $blockId", category = "SHUFFLE")
     }
     // The block is actually going to be a range of a single map output file for this map, so
     // find out the consolidated file, then the offset within that from our index
@@ -589,8 +591,9 @@ private[spark] class IndexShuffleBlockResolver(
       val actualPosition = channel.position()
       val expectedPosition = endReduceId * 8L + 8
       if (actualPosition != expectedPosition) {
-        throw new Exception(s"SPARK-22982: Incorrect channel position after index file reads: " +
-          s"expected $expectedPosition but actual position was $actualPosition.")
+        throw SparkException.internalError(s"SPARK-22982: Incorrect channel position after index " +
+          s"file reads: expected $expectedPosition but actual position was $actualPosition.",
+          category = "SHUFFLE")
       }
       new FileSegmentManagedBuffer(
         transportConf,
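For reference, a minimal sketch of the error-helper pattern this diff migrates to. The bodies below are assumptions inferred purely from the call sites above, not the actual Spark implementations:

import java.io.{File, IOException}

// Hypothetical stand-ins for the helpers called throughout this diff,
// inferred from how they are invoked above.
object ErrorHelpersSketch {
  // SparkException.internalError(msg, category = "SHUFFLE") is assumed to tag
  // an internal-error message with a category so errors can be classified
  // consistently across the shuffle code path.
  def internalError(msg: String, category: String): Throwable =
    new RuntimeException(s"[INTERNAL_ERROR_$category] $msg")

  // SparkCoreErrors.failedRenameTempFileError centralizes the rename-failure
  // IOException that three call sites above previously constructed by hand.
  def failedRenameTempFileError(srcFile: File, dstFile: File): Throwable =
    new IOException(s"fail to rename file $srcFile to $dstFile")
}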