Skip to content
This repository has been archived by the owner on Dec 20, 2018. It is now read-only.

Commit

Permalink
LayerToGeotiff / MultibandLayerToGeotiff use foreachPartition with serial hadoopConfiguration
Browse files Browse the repository at this point in the history
  • Loading branch information
aklink committed Mar 8, 2018
1 parent 9bd5f16 commit b50a234
Show file tree
Hide file tree
Showing 2 changed files with 54 additions and 25 deletions.
40 changes: 27 additions & 13 deletions src/main/scala/biggis/landuse/spark/examples/LayerToGeotiff.scala
Original file line number Diff line number Diff line change
Expand Up @@ -97,20 +97,34 @@ object LayerToGeotiff extends LazyLogging {
//.tileToLayout(metadata.cellType, metadata.layout, Utils.RESAMPLING_METHOD)
//.repartition(Utils.RDD_PARTITIONS)

/*
outputRdd.foreachPartition{ partition =>
partition.map(_.write(new Path("hdfs://..."), serConf.value))
} // */
outputRdd.foreach(mbtile => {
val (key, tile) = mbtile
val (col, row) = (key.col, key.row)
val tileextent: Extent = metadata.layout.mapTransform(key)
val filename = new Path(outputPath + "_" + col + "_" + row + ".tif")
logger info s" writing: '${filename.toString}'"
GeoTiff(tile, tileextent, crs)
.write(filename, serConf.value)
val useSerializedHadoopConfig = true
if(useSerializedHadoopConfig){
// ToDo: test Spark Cluster version
outputRdd.foreachPartition { partition =>
partition.foreach { tuple =>
val (key, tile) = tuple
val (col, row) = (key.col, key.row)
val tileextent: Extent = metadata.layout.mapTransform(key)
val filename = new Path(outputPath + "_" + col + "_" + row + ".tif")
logger info s" writing: '${filename.toString}'"
GeoTiff(tile, tileextent, crs)
.write(filename, serConf.value)
}
}
} else {
// only for local debugging - do not use in cloud // ToDo: delete after testing
outputRdd.foreach(mbtile => {
val (key, tile) = mbtile
val (col, row) = (key.col, key.row)
val tileextent: Extent = metadata.layout.mapTransform(key)
//val filename = new Path(outputPath + "_" + col + "_" + row + ".tif")
//logger info s" writing: '${filename.toString}'"
GeoTiff(tile, tileextent, crs)
//.write(filename.toString) //.write(filename, serConf.value)
.write(outputPath + "_" + col + "_" + row + ".tif")
}
)
}
)
}

//sc.stop()
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -153,19 +153,34 @@ object MultibandLayerToGeotiff extends LazyLogging{
//.repartition(myRDD_PARTITIONS)
//.tileToLayout(myMetadata.cellType, myMetadata.layout, myRESAMPLING_METHOD)

/*
outputRdd.foreachPartition{ partition =>
partition.map(_.write(new Path("hdfs://..."), serConf.value))
} // */
outputRdd.foreach(mbtile => {
val (key, tile) = mbtile
val (col, row) = (key.col, key.row)
val tileextent: Extent = metadata.layout.mapTransform(key)
val filename = new Path(outputPath + "_" + col + "_" + row + ".tif")
MultibandGeoTiff(tile, tileextent, crs)
.write(filename, serConf.value)
val useSerializedHadoopConfig = true
if (useSerializedHadoopConfig) {
// ToDo: test Spark Cluster version
outputRdd.foreachPartition { partition =>
partition.foreach { tuple =>
val (key, tile) = tuple
val (col, row) = (key.col, key.row)
val tileextent: Extent = metadata.layout.mapTransform(key)
val filename = new Path(outputPath + "_" + col + "_" + row + ".tif")
logger info s" writing: '${filename.toString}'"
MultibandGeoTiff(tile, tileextent, crs)
.write(filename, serConf.value)
}
}
} else {
// only for local debugging - do not use in cloud // ToDo: delete after testing
outputRdd.foreach(mbtile => {
val (key, tile) = mbtile
val (col, row) = (key.col, key.row)
val tileextent: Extent = metadata.layout.mapTransform(key)
//val filename = new Path(outputPath + "_" + col + "_" + row + ".tif")
//logger info s" writing: '${filename.toString}'"
MultibandGeoTiff(tile, tileextent, crs)
//.write(filename.toString) //.write(filename, serConf.value)
.write(outputPath + "_" + col + "_" + row + ".tif")
}
)
}
)
}

////val raster: Raster[MultibandTile] = tile.reproject(metadata.extent, metadata.crs, metadata.crs)
Expand Down

0 comments on commit b50a234

Please sign in to comment.