HadoopLayerProvider.scala
/*
* Copyright 2017 Azavea
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package geotrellis.spark.io.hadoop

import geotrellis.spark._
import geotrellis.spark.io._
import geotrellis.util.UriUtils
import com.github.blemale.scaffeine.{Scaffeine, Cache}
import org.apache.hadoop.fs.Path
import org.apache.hadoop.conf.Configuration
import org.apache.spark.SparkContext
import java.net.URI

object HadoopLayerProvider {
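  // One AttributeStore per catalog Path; softValues() lets the JVM reclaim
  // cached stores under memory pressure rather than holding them forever.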
  private val cache: Cache[Path, AttributeStore] = Scaffeine().softValues().build()
}

/**
 * Provides a [[HadoopAttributeStore]] instance for URIs with the `hdfs`, `hdfs+file`, `s3n`, and `s3a` schemes.
 * The URI represents the Hadoop [[Path]] of the catalog root.
 * This provider intentionally does not handle the `s3` scheme because the Hadoop implementation of it is poor.
 * That support is provided by [[S3AttributeStore]] instead.
 */
class HadoopLayerProvider extends AttributeStoreProvider
    with LayerReaderProvider with LayerWriterProvider {

  val schemes: Array[String] = Array("hdfs", "hdfs+file", "s3n", "s3a")
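
  // Strip the "hdfs+" prefix so, e.g., "hdfs+file:///data/catalog" becomes
  // "file:///data/catalog" before it is handed to Hadoop as a Path.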
  private def trim(uri: URI): URI =
    if (uri.getScheme.startsWith("hdfs+"))
      new URI(uri.toString.stripPrefix("hdfs+"))
    else uri

  def canProcess(uri: URI): Boolean =
    Option(uri.getScheme).exists(s => schemes contains s.toLowerCase)

  def attributeStore(uri: URI): AttributeStore = {
    val path = new Path(trim(uri))
    val conf = new Configuration()
    HadoopLayerProvider.cache.get(path, new HadoopAttributeStore(_, conf))
  }

  def layerReader(uri: URI, store: AttributeStore, sc: SparkContext): FilteringLayerReader[LayerId] = {
    // uri is not needed here: HadoopLayerHeader already records the full path of each layer
    new HadoopLayerReader(store)(sc)
  }

  def layerWriter(uri: URI, store: AttributeStore): LayerWriter[LayerId] = {
    val _uri = trim(uri)
    val path = new Path(_uri)
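    // The optional "interval" query parameter (default "4") is passed through
    // to HadoopLayerWriter as its index interval,
    // e.g. hdfs://namenode/catalog?interval=8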
    val params = UriUtils.getParams(_uri)
    val interval = params.getOrElse("interval", "4").toInt
    new HadoopLayerWriter(path, store, interval)
  }
}
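
// Usage sketch (not part of the original file), showing how the provider is
// driven directly. Assumptions: an in-scope SparkContext `sc`, and a catalog
// URI that is purely an illustrative placeholder:
//
//   val provider = new HadoopLayerProvider
//   val uri      = new java.net.URI("hdfs://namenode:8020/geotrellis/catalog")
//
//   if (provider.canProcess(uri)) {
//     val store  = provider.attributeStore(uri)
//     val reader = provider.layerReader(uri, store, sc)
//     val writer = provider.layerWriter(uri, store)
//   }
//
// In practice this class is normally discovered via SPI rather than
// instantiated by hand, which is why each method takes the URI explicitly.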