diff --git a/Makefile b/Makefile index 0116e52..aed40f7 100644 --- a/Makefile +++ b/Makefile @@ -22,11 +22,3 @@ DATA = hdfs_fdw--2.0.2.sql hdfs_fdw--2.0.3.sql hdfs_fdw--2.0.1--2.0.2.sql hdfs_f PG_CONFIG = pg_config PGXS := $(shell $(PG_CONFIG) --pgxs) include $(PGXS) - -ifndef MAJORVERSION - MAJORVERSION := $(basename $(VERSION)) -endif - -ifeq (,$(findstring $(MAJORVERSION), 9.3 9.4 9.5 9.6 10.0)) - $(error PostgreSQL 9.3+ is required to compile this extension) -endif diff --git a/hdfs_deparse.c b/hdfs_deparse.c index 51c6695..23e0575 100644 --- a/hdfs_deparse.c +++ b/hdfs_deparse.c @@ -525,7 +525,7 @@ deparseTargetList(StringInfo buf, for (i = 1; i <= tupdesc->natts; i++) { - Form_pg_attribute attr = tupdesc->attrs[i - 1]; + Form_pg_attribute attr = TupleDescAttr(tupdesc, i - 1); /* Ignore dropped attributes. */ if (attr->attisdropped) @@ -622,7 +622,7 @@ deparseAnalyzeSql(hdfs_opt *opt, StringInfo buf, Relation rel, List **retrieved_ for (i = 0; i < tupdesc->natts; i++) { /* Ignore dropped columns. */ - if (tupdesc->attrs[i]->attisdropped) + if (TupleDescAttr(tupdesc, i)->attisdropped) continue; if (!first) @@ -630,7 +630,7 @@ deparseAnalyzeSql(hdfs_opt *opt, StringInfo buf, Relation rel, List **retrieved_ first = false; /* Use attribute name or column_name option. */ - colname = NameStr(tupdesc->attrs[i]->attname); + colname = NameStr(TupleDescAttr(tupdesc, i)->attname); options = GetForeignColumnOptions(relid, i + 1); foreach(lc, options) @@ -699,7 +699,11 @@ deparseColumnRef(StringInfo buf, int varno, int varattno, PlannerInfo *root) * option, use attribute name. 
*/ if (colname == NULL) +#if PG_VERSION_NUM >= 110000 + colname = get_attname(rte->relid, varattno, false); +#else colname = get_relid_attribute_name(rte->relid, varattno); +#endif appendStringInfoString(buf, quote_identifier(colname)); } diff --git a/hdfs_fdw.c b/hdfs_fdw.c index 7cecb44..5d3d5fa 100644 --- a/hdfs_fdw.c +++ b/hdfs_fdw.c @@ -96,7 +96,7 @@ static void process_query_params(int index, #ifdef EDB_NATIVE_LANG - #if PG_VERSION_NUM >= 90300 + #if PG_VERSION_NUM >= 90300 && PG_VERSION_NUM < 110000 #define XACT_CB_SIGNATURE XactEvent event, void *arg, bool spl_commit #else #define XACT_CB_SIGNATURE XactEvent event, void *arg @@ -513,9 +513,13 @@ hdfsBeginForeignScan(ForeignScanState *node, int eflags) node->fdw_state = (void *) festate; festate->batch_cxt = AllocSetContextCreate(estate->es_query_cxt, "hdfs_fdw tuple data", +#if PG_VERSION_NUM >= 110000 + ALLOCSET_DEFAULT_SIZES); +#else ALLOCSET_DEFAULT_MINSIZE, ALLOCSET_DEFAULT_INITSIZE, ALLOCSET_DEFAULT_MAXSIZE); +#endif festate->query_executed = false; festate->query = strVal(list_nth(fsplan->fdw_private, 0)); @@ -608,8 +612,8 @@ hdfsIterateForeignScan(ForeignScanState *node) { bool isnull = true; int attnum = lfirst_int(lc) - 1; - Oid pgtype = tupdesc->attrs[attnum]->atttypid; - int32 pgtypmod = tupdesc->attrs[attnum]->atttypmod; + Oid pgtype = TupleDescAttr(tupdesc, attnum)->atttypid; + int32 pgtypmod = TupleDescAttr(tupdesc, attnum)->atttypmod; Datum v; v = hdfs_get_value(festate->con_index, options, pgtype, @@ -761,7 +765,11 @@ prepare_query_params(PlanState *node, * benefit, and it'd require fdw to know more than is desirable * about Param evaluation.) 
*/ +#if PG_VERSION_NUM >= 100000 + *param_exprs = ExecInitExprList(fdw_exprs, node); +#else *param_exprs = (List *) ExecInitExpr((Expr *) fdw_exprs, node); +#endif } diff --git a/hdfs_fdw.h b/hdfs_fdw.h index 2edbbad..c6cf2d9 100644 --- a/hdfs_fdw.h +++ b/hdfs_fdw.h @@ -19,6 +19,7 @@ #include "libhive/jdbc/hiveclient.h" +#include "access/tupdesc.h" #include "foreign/foreign.h" #include "lib/stringinfo.h" #include "nodes/relation.h" @@ -175,6 +176,10 @@ void hdfs_analyze(int con_index, hdfs_opt *opt); bool hdfs_bind_var(int con_index, int param_index, Oid type, Datum value, bool *isnull); +#ifndef TupleDescAttr +#define TupleDescAttr(tupdesc, i) ((tupdesc)->attrs[(i)]) +#endif + extern void _PG_init(void); extern void _PG_fini(void);