diff --git a/warehouse/query-core/src/main/java/datawave/mr/bulk/MultiRfileInputformat.java b/warehouse/query-core/src/main/java/datawave/mr/bulk/MultiRfileInputformat.java
index 4b7f1011730..1614fe2991f 100644
--- a/warehouse/query-core/src/main/java/datawave/mr/bulk/MultiRfileInputformat.java
+++ b/warehouse/query-core/src/main/java/datawave/mr/bulk/MultiRfileInputformat.java
@@ -66,6 +66,7 @@ public class MultiRfileInputformat extends RFileInputFormat {
     private static LoadingCache>>> locationMap = null;
 
     protected static final Map dfsUriMap = new ConcurrentHashMap<>();
+    protected static final Map dfsDirMap = new ConcurrentHashMap<>();
 
     @Override
     public RecordReader createRecordReader(InputSplit split, TaskAttemptContext context) throws IOException, InterruptedException {
@@ -147,11 +148,12 @@ public static List computeSplitPoints(AccumuloClient client, Configu
         /**
          * Attempt the following 1) try to get the default namespace from accumulo 2) Use the custom config option 3) use default name in the hdfs configuration
          */
-        if (dfsUriMap.get(tableId) == null) {
+        if (dfsUriMap.get(tableId) == null || dfsDirMap.get(tableId) == null) {
             synchronized (MultiRfileInputformat.class) {
                 final InstanceOperations instOps = client.instanceOperations();
 
-                dfsUriMap.put(tableId, instOps.getSystemConfiguration().get(Property.INSTANCE_VOLUMES.getKey()));
+                dfsUriMap.put(tableId, instOps.getSystemConfiguration().get(Property.INSTANCE_DFS_URI.getKey()));
+                dfsDirMap.put(tableId, instOps.getSystemConfiguration().get(Property.INSTANCE_DFS_DIR.getKey()));
             }
         }
@@ -165,7 +167,7 @@ public static List computeSplitPoints(AccumuloClient client, Configu
             }
         }
 
-        basePath = dfsUriMap.get(tableId);
+        basePath = dfsDirMap.get(tableId);
 
         if (StringUtils.isEmpty(basePath)) {
             basePath = ACCUMULO_BASE_PATH;