Description: Add pglogical_dump, a copy of pg_dump 9.4 with --snapshot support
 This patch adds the pglogical_dump subdirectory, containing the sources
 of pglogical_dump, a copy of pg_dump 9.4 with --snapshot support.
 .
 pglogical (2.2.2-1) UNRELEASED; urgency=medium
 .
   [ Debian PostgreSQL Maintainers ]
   * New upstream minor release.
 .
   [ Michael Banck ]
   * debian/patches/adapt_tap_tests.patch: Updated.
   * debian/patches/v94_tap_support.patch: Likewise.
   * debian/patches/test_increase_timeouts.patch: Likewise.
Author: Debian PostgreSQL Maintainers <team+postgresql@tracker.debian.org>

---
The information above should follow the Patch Tagging Guidelines; please
check out http://dep.debian.net/deps/dep3/ to learn about the format. Here
are templates for supplementary fields that you might want to add:

Origin: <vendor|upstream|other>, <url of original patch>
Bug: <url in upstream bugtracker>
Bug-Debian: https://bugs.debian.org/<bugnumber>
Bug-Ubuntu: https://launchpad.net/bugs/<bugnumber>
Forwarded: <no|not-needed|url proving that it has been forwarded>
Reviewed-By: <name and email of someone who approved the patch>
Last-Update: 2019-08-05

--- /dev/null
+++ pglogical-2.2.2/pglogical_dump/Makefile
@@ -0,0 +1,16 @@
+PGFILEDESC = "pglogical_dump - pg_dump 9.4 with --snapshot support"
+PGAPPICON = win32
+
+PROGRAM = pglogical_dump
+OBJS = pg_dump.o common.o pg_dump_sort.o \
+	pg_backup_archiver.o pg_backup_db.o pg_backup_custom.o \
+	pg_backup_null.o pg_backup_tar.o pg_backup_directory.o \
+	pg_backup_utils.o parallel.o compress_io.o dumputils.o \
+	keywords.o kwlookup.o tar.o $(WIN32RES)
+
+PG_CPPFLAGS = -I$(libpq_srcdir)
+PG_LIBS = $(libpq_pgport)
+
+PG_CONFIG = pg_config
+PGXS := $(shell $(PG_CONFIG) --pgxs)
+include $(PGXS)
--- /dev/null
|
|
+++ pglogical-2.2.2/pglogical_dump/common.c
|
|
@@ -0,0 +1,913 @@
|
|
+/*-------------------------------------------------------------------------
|
|
+ *
|
|
+ * common.c
|
|
+ * Catalog routines used by pg_dump; long ago these were shared
|
|
+ * by another dump tool, but not anymore.
|
|
+ *
|
|
+ * Portions Copyright (c) 1996-2014, PostgreSQL Global Development Group
|
|
+ * Portions Copyright (c) 1994, Regents of the University of California
|
|
+ *
|
|
+ *
|
|
+ * IDENTIFICATION
|
|
+ * src/bin/pg_dump/common.c
|
|
+ *
|
|
+ *-------------------------------------------------------------------------
|
|
+ */
|
|
+#include "pg_backup_archiver.h"
|
|
+#include "pg_backup_utils.h"
|
|
+
|
|
+#include <ctype.h>
|
|
+
|
|
+#include "catalog/pg_class.h"
|
|
+
|
|
+
|
|
+/*
|
|
+ * Variables for mapping DumpId to DumpableObject
|
|
+ */
|
|
+static DumpableObject **dumpIdMap = NULL;
|
|
+static int allocedDumpIds = 0;
|
|
+static DumpId lastDumpId = 0;
|
|
+
|
|
+/*
|
|
+ * Variables for mapping CatalogId to DumpableObject
|
|
+ */
|
|
+static bool catalogIdMapValid = false;
|
|
+static DumpableObject **catalogIdMap = NULL;
|
|
+static int numCatalogIds = 0;
|
|
+
|
|
+/*
|
|
+ * These variables are static to avoid the notational cruft of having to pass
|
|
+ * them into findTableByOid() and friends. For each of these arrays, we
|
|
+ * build a sorted-by-OID index array immediately after it's built, and then
|
|
+ * we use binary search in findTableByOid() and friends. (qsort'ing the base
|
|
+ * arrays themselves would be simpler, but it doesn't work because pg_dump.c
|
|
+ * may have already established pointers between items.)
|
|
+ */
|
|
+static TableInfo *tblinfo;
|
|
+static TypeInfo *typinfo;
|
|
+static FuncInfo *funinfo;
|
|
+static OprInfo *oprinfo;
|
|
+static NamespaceInfo *nspinfo;
|
|
+static int numTables;
|
|
+static int numTypes;
|
|
+static int numFuncs;
|
|
+static int numOperators;
|
|
+static int numCollations;
|
|
+static int numNamespaces;
|
|
+static DumpableObject **tblinfoindex;
|
|
+static DumpableObject **typinfoindex;
|
|
+static DumpableObject **funinfoindex;
|
|
+static DumpableObject **oprinfoindex;
|
|
+static DumpableObject **collinfoindex;
|
|
+static DumpableObject **nspinfoindex;
|
|
+
|
|
+
|
|
+static void flagInhTables(TableInfo *tbinfo, int numTables,
|
|
+ InhInfo *inhinfo, int numInherits);
|
|
+static void flagInhAttrs(TableInfo *tblinfo, int numTables);
|
|
+static DumpableObject **buildIndexArray(void *objArray, int numObjs,
|
|
+ Size objSize);
|
|
+static int DOCatalogIdCompare(const void *p1, const void *p2);
|
|
+static void findParentsByOid(TableInfo *self,
|
|
+ InhInfo *inhinfo, int numInherits);
|
|
+static int strInArray(const char *pattern, char **arr, int arr_size);
|
|
+
|
|
+
|
|
+/*
|
|
+ * getSchemaData
|
|
+ * Collect information about all potentially dumpable objects
|
|
+ */
|
|
+TableInfo *
|
|
+getSchemaData(Archive *fout, int *numTablesPtr)
|
|
+{
|
|
+ ExtensionInfo *extinfo;
|
|
+ InhInfo *inhinfo;
|
|
+ CollInfo *collinfo;
|
|
+ int numExtensions;
|
|
+ int numAggregates;
|
|
+ int numInherits;
|
|
+ int numRules;
|
|
+ int numProcLangs;
|
|
+ int numCasts;
|
|
+ int numOpclasses;
|
|
+ int numOpfamilies;
|
|
+ int numConversions;
|
|
+ int numTSParsers;
|
|
+ int numTSTemplates;
|
|
+ int numTSDicts;
|
|
+ int numTSConfigs;
|
|
+ int numForeignDataWrappers;
|
|
+ int numForeignServers;
|
|
+ int numDefaultACLs;
|
|
+ int numEventTriggers;
|
|
+
|
|
+ if (g_verbose)
|
|
+ write_msg(NULL, "reading schemas\n");
|
|
+ nspinfo = getNamespaces(fout, &numNamespaces);
|
|
+ nspinfoindex = buildIndexArray(nspinfo, numNamespaces, sizeof(NamespaceInfo));
|
|
+
|
|
+ /*
|
|
+ * getTables should be done as soon as possible, so as to minimize the
|
|
+ * window between starting our transaction and acquiring per-table locks.
|
|
+ * However, we have to do getNamespaces first because the tables get
|
|
+ * linked to their containing namespaces during getTables.
|
|
+ */
|
|
+ if (g_verbose)
|
|
+ write_msg(NULL, "reading user-defined tables\n");
|
|
+ tblinfo = getTables(fout, &numTables);
|
|
+ tblinfoindex = buildIndexArray(tblinfo, numTables, sizeof(TableInfo));
|
|
+
|
|
+ /* Do this after we've built tblinfoindex */
|
|
+ getOwnedSeqs(fout, tblinfo, numTables);
|
|
+
|
|
+ if (g_verbose)
|
|
+ write_msg(NULL, "reading extensions\n");
|
|
+ extinfo = getExtensions(fout, &numExtensions);
|
|
+
|
|
+ if (g_verbose)
|
|
+ write_msg(NULL, "reading user-defined functions\n");
|
|
+ funinfo = getFuncs(fout, &numFuncs);
|
|
+ funinfoindex = buildIndexArray(funinfo, numFuncs, sizeof(FuncInfo));
|
|
+
|
|
+ /* this must be after getTables and getFuncs */
|
|
+ if (g_verbose)
|
|
+ write_msg(NULL, "reading user-defined types\n");
|
|
+ typinfo = getTypes(fout, &numTypes);
|
|
+ typinfoindex = buildIndexArray(typinfo, numTypes, sizeof(TypeInfo));
|
|
+
|
|
+ /* this must be after getFuncs, too */
|
|
+ if (g_verbose)
|
|
+ write_msg(NULL, "reading procedural languages\n");
|
|
+ getProcLangs(fout, &numProcLangs);
|
|
+
|
|
+ if (g_verbose)
|
|
+ write_msg(NULL, "reading user-defined aggregate functions\n");
|
|
+ getAggregates(fout, &numAggregates);
|
|
+
|
|
+ if (g_verbose)
|
|
+ write_msg(NULL, "reading user-defined operators\n");
|
|
+ oprinfo = getOperators(fout, &numOperators);
|
|
+ oprinfoindex = buildIndexArray(oprinfo, numOperators, sizeof(OprInfo));
|
|
+
|
|
+ if (g_verbose)
|
|
+ write_msg(NULL, "reading user-defined operator classes\n");
|
|
+ getOpclasses(fout, &numOpclasses);
|
|
+
|
|
+ if (g_verbose)
|
|
+ write_msg(NULL, "reading user-defined operator families\n");
|
|
+ getOpfamilies(fout, &numOpfamilies);
|
|
+
|
|
+ if (g_verbose)
|
|
+ write_msg(NULL, "reading user-defined text search parsers\n");
|
|
+ getTSParsers(fout, &numTSParsers);
|
|
+
|
|
+ if (g_verbose)
|
|
+ write_msg(NULL, "reading user-defined text search templates\n");
|
|
+ getTSTemplates(fout, &numTSTemplates);
|
|
+
|
|
+ if (g_verbose)
|
|
+ write_msg(NULL, "reading user-defined text search dictionaries\n");
|
|
+ getTSDictionaries(fout, &numTSDicts);
|
|
+
|
|
+ if (g_verbose)
|
|
+ write_msg(NULL, "reading user-defined text search configurations\n");
|
|
+ getTSConfigurations(fout, &numTSConfigs);
|
|
+
|
|
+ if (g_verbose)
|
|
+ write_msg(NULL, "reading user-defined foreign-data wrappers\n");
|
|
+ getForeignDataWrappers(fout, &numForeignDataWrappers);
|
|
+
|
|
+ if (g_verbose)
|
|
+ write_msg(NULL, "reading user-defined foreign servers\n");
|
|
+ getForeignServers(fout, &numForeignServers);
|
|
+
|
|
+ if (g_verbose)
|
|
+ write_msg(NULL, "reading default privileges\n");
|
|
+ getDefaultACLs(fout, &numDefaultACLs);
|
|
+
|
|
+ if (g_verbose)
|
|
+ write_msg(NULL, "reading user-defined collations\n");
|
|
+ collinfo = getCollations(fout, &numCollations);
|
|
+ collinfoindex = buildIndexArray(collinfo, numCollations, sizeof(CollInfo));
|
|
+
|
|
+ if (g_verbose)
|
|
+ write_msg(NULL, "reading user-defined conversions\n");
|
|
+ getConversions(fout, &numConversions);
|
|
+
|
|
+ if (g_verbose)
|
|
+ write_msg(NULL, "reading type casts\n");
|
|
+ getCasts(fout, &numCasts);
|
|
+
|
|
+ if (g_verbose)
|
|
+ write_msg(NULL, "reading table inheritance information\n");
|
|
+ inhinfo = getInherits(fout, &numInherits);
|
|
+
|
|
+ if (g_verbose)
|
|
+ write_msg(NULL, "reading event triggers\n");
|
|
+ getEventTriggers(fout, &numEventTriggers);
|
|
+
|
|
+ /*
|
|
+ * Identify extension member objects and mark them as not to be dumped.
|
|
+ * This must happen after reading all objects that can be direct members
|
|
+ * of extensions, but before we begin to process table subsidiary objects.
|
|
+ */
|
|
+ if (g_verbose)
|
|
+ write_msg(NULL, "finding extension members\n");
|
|
+ getExtensionMembership(fout, extinfo, numExtensions);
|
|
+
|
|
+ /* Link tables to parents, mark parents of target tables interesting */
|
|
+ if (g_verbose)
|
|
+ write_msg(NULL, "finding inheritance relationships\n");
|
|
+ flagInhTables(tblinfo, numTables, inhinfo, numInherits);
|
|
+
|
|
+ if (g_verbose)
|
|
+ write_msg(NULL, "reading column info for interesting tables\n");
|
|
+ getTableAttrs(fout, tblinfo, numTables);
|
|
+
|
|
+ if (g_verbose)
|
|
+ write_msg(NULL, "flagging inherited columns in subtables\n");
|
|
+ flagInhAttrs(tblinfo, numTables);
|
|
+
|
|
+ if (g_verbose)
|
|
+ write_msg(NULL, "reading indexes\n");
|
|
+ getIndexes(fout, tblinfo, numTables);
|
|
+
|
|
+ if (g_verbose)
|
|
+ write_msg(NULL, "reading constraints\n");
|
|
+ getConstraints(fout, tblinfo, numTables);
|
|
+
|
|
+ if (g_verbose)
|
|
+ write_msg(NULL, "reading triggers\n");
|
|
+ getTriggers(fout, tblinfo, numTables);
|
|
+
|
|
+ if (g_verbose)
|
|
+ write_msg(NULL, "reading rewrite rules\n");
|
|
+ getRules(fout, &numRules);
|
|
+
|
|
+ *numTablesPtr = numTables;
|
|
+ return tblinfo;
|
|
+}
|
|
+
|
|
+/* flagInhTables -
|
|
+ * Fill in parent link fields of every target table, and mark
|
|
+ * parents of target tables as interesting
|
|
+ *
|
|
+ * Note that only direct ancestors of targets are marked interesting.
|
|
+ * This is sufficient; we don't much care whether they inherited their
|
|
+ * attributes or not.
|
|
+ *
|
|
+ * modifies tblinfo
|
|
+ */
|
|
+static void
|
|
+flagInhTables(TableInfo *tblinfo, int numTables,
|
|
+ InhInfo *inhinfo, int numInherits)
|
|
+{
|
|
+ int i,
|
|
+ j;
|
|
+ int numParents;
|
|
+ TableInfo **parents;
|
|
+
|
|
+ for (i = 0; i < numTables; i++)
|
|
+ {
|
|
+ /* Some kinds never have parents */
|
|
+ if (tblinfo[i].relkind == RELKIND_SEQUENCE ||
|
|
+ tblinfo[i].relkind == RELKIND_VIEW ||
|
|
+ tblinfo[i].relkind == RELKIND_MATVIEW)
|
|
+ continue;
|
|
+
|
|
+ /* Don't bother computing anything for non-target tables, either */
|
|
+ if (!tblinfo[i].dobj.dump)
|
|
+ continue;
|
|
+
|
|
+ /* Find all the immediate parent tables */
|
|
+ findParentsByOid(&tblinfo[i], inhinfo, numInherits);
|
|
+
|
|
+ /* Mark the parents as interesting for getTableAttrs */
|
|
+ numParents = tblinfo[i].numParents;
|
|
+ parents = tblinfo[i].parents;
|
|
+ for (j = 0; j < numParents; j++)
|
|
+ parents[j]->interesting = true;
|
|
+ }
|
|
+}
|
|
+
|
|
+/* flagInhAttrs -
|
|
+ * for each dumpable table in tblinfo, flag its inherited attributes
|
|
+ *
|
|
+ * What we need to do here is detect child columns that inherit NOT NULL
|
|
+ * bits from their parents (so that we needn't specify that again for the
|
|
+ * child) and child columns that have DEFAULT NULL when their parents had
|
|
+ * some non-null default. In the latter case, we make up a dummy AttrDefInfo
|
|
+ * object so that we'll correctly emit the necessary DEFAULT NULL clause;
|
|
+ * otherwise the backend will apply an inherited default to the column.
|
|
+ *
|
|
+ * modifies tblinfo
|
|
+ */
|
|
+static void
|
|
+flagInhAttrs(TableInfo *tblinfo, int numTables)
|
|
+{
|
|
+ int i,
|
|
+ j,
|
|
+ k;
|
|
+
|
|
+ for (i = 0; i < numTables; i++)
|
|
+ {
|
|
+ TableInfo *tbinfo = &(tblinfo[i]);
|
|
+ int numParents;
|
|
+ TableInfo **parents;
|
|
+
|
|
+ /* Some kinds never have parents */
|
|
+ if (tbinfo->relkind == RELKIND_SEQUENCE ||
|
|
+ tbinfo->relkind == RELKIND_VIEW ||
|
|
+ tbinfo->relkind == RELKIND_MATVIEW)
|
|
+ continue;
|
|
+
|
|
+ /* Don't bother computing anything for non-target tables, either */
|
|
+ if (!tbinfo->dobj.dump)
|
|
+ continue;
|
|
+
|
|
+ numParents = tbinfo->numParents;
|
|
+ parents = tbinfo->parents;
|
|
+
|
|
+ if (numParents == 0)
|
|
+ continue; /* nothing to see here, move along */
|
|
+
|
|
+ /* For each column, search for matching column names in parent(s) */
|
|
+ for (j = 0; j < tbinfo->numatts; j++)
|
|
+ {
|
|
+ bool foundNotNull; /* Attr was NOT NULL in a parent */
|
|
+ bool foundDefault; /* Found a default in a parent */
|
|
+
|
|
+ /* no point in examining dropped columns */
|
|
+ if (tbinfo->attisdropped[j])
|
|
+ continue;
|
|
+
|
|
+ foundNotNull = false;
|
|
+ foundDefault = false;
|
|
+ for (k = 0; k < numParents; k++)
|
|
+ {
|
|
+ TableInfo *parent = parents[k];
|
|
+ int inhAttrInd;
|
|
+
|
|
+ inhAttrInd = strInArray(tbinfo->attnames[j],
|
|
+ parent->attnames,
|
|
+ parent->numatts);
|
|
+ if (inhAttrInd >= 0)
|
|
+ {
|
|
+ foundNotNull |= parent->notnull[inhAttrInd];
|
|
+ foundDefault |= (parent->attrdefs[inhAttrInd] != NULL);
|
|
+ }
|
|
+ }
|
|
+
|
|
+ /* Remember if we found inherited NOT NULL */
|
|
+ tbinfo->inhNotNull[j] = foundNotNull;
|
|
+
|
|
+ /* Manufacture a DEFAULT NULL clause if necessary */
|
|
+ if (foundDefault && tbinfo->attrdefs[j] == NULL)
|
|
+ {
|
|
+ AttrDefInfo *attrDef;
|
|
+
|
|
+ attrDef = (AttrDefInfo *) pg_malloc(sizeof(AttrDefInfo));
|
|
+ attrDef->dobj.objType = DO_ATTRDEF;
|
|
+ attrDef->dobj.catId.tableoid = 0;
|
|
+ attrDef->dobj.catId.oid = 0;
|
|
+ AssignDumpId(&attrDef->dobj);
|
|
+ attrDef->dobj.name = pg_strdup(tbinfo->dobj.name);
|
|
+ attrDef->dobj.namespace = tbinfo->dobj.namespace;
|
|
+ attrDef->dobj.dump = tbinfo->dobj.dump;
|
|
+
|
|
+ attrDef->adtable = tbinfo;
|
|
+ attrDef->adnum = j + 1;
|
|
+ attrDef->adef_expr = pg_strdup("NULL");
|
|
+
|
|
+ /* Will column be dumped explicitly? */
|
|
+ if (shouldPrintColumn(tbinfo, j))
|
|
+ {
|
|
+ attrDef->separate = false;
|
|
+ /* No dependency needed: NULL cannot have dependencies */
|
|
+ }
|
|
+ else
|
|
+ {
|
|
+ /* column will be suppressed, print default separately */
|
|
+ attrDef->separate = true;
|
|
+ /* ensure it comes out after the table */
|
|
+ addObjectDependency(&attrDef->dobj,
|
|
+ tbinfo->dobj.dumpId);
|
|
+ }
|
|
+
|
|
+ tbinfo->attrdefs[j] = attrDef;
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+}
|
|
+
|
|
+/*
|
|
+ * AssignDumpId
|
|
+ * Given a newly-created dumpable object, assign a dump ID,
|
|
+ * and enter the object into the lookup table.
|
|
+ *
|
|
+ * The caller is expected to have filled in objType and catId,
|
|
+ * but not any of the other standard fields of a DumpableObject.
|
|
+ */
|
|
+void
|
|
+AssignDumpId(DumpableObject *dobj)
|
|
+{
|
|
+ dobj->dumpId = ++lastDumpId;
|
|
+ dobj->name = NULL; /* must be set later */
|
|
+ dobj->namespace = NULL; /* may be set later */
|
|
+ dobj->dump = true; /* default assumption */
|
|
+ dobj->ext_member = false; /* default assumption */
|
|
+ dobj->dependencies = NULL;
|
|
+ dobj->nDeps = 0;
|
|
+ dobj->allocDeps = 0;
|
|
+
|
|
+ while (dobj->dumpId >= allocedDumpIds)
|
|
+ {
|
|
+ int newAlloc;
|
|
+
|
|
+ if (allocedDumpIds <= 0)
|
|
+ {
|
|
+ newAlloc = 256;
|
|
+ dumpIdMap = (DumpableObject **)
|
|
+ pg_malloc(newAlloc * sizeof(DumpableObject *));
|
|
+ }
|
|
+ else
|
|
+ {
|
|
+ newAlloc = allocedDumpIds * 2;
|
|
+ dumpIdMap = (DumpableObject **)
|
|
+ pg_realloc(dumpIdMap, newAlloc * sizeof(DumpableObject *));
|
|
+ }
|
|
+ memset(dumpIdMap + allocedDumpIds, 0,
|
|
+ (newAlloc - allocedDumpIds) * sizeof(DumpableObject *));
|
|
+ allocedDumpIds = newAlloc;
|
|
+ }
|
|
+ dumpIdMap[dobj->dumpId] = dobj;
|
|
+
|
|
+ /* mark catalogIdMap invalid, but don't rebuild it yet */
|
|
+ catalogIdMapValid = false;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Assign a DumpId that's not tied to a DumpableObject.
|
|
+ *
|
|
+ * This is used when creating a "fixed" ArchiveEntry that doesn't need to
|
|
+ * participate in the sorting logic.
|
|
+ */
|
|
+DumpId
|
|
+createDumpId(void)
|
|
+{
|
|
+ return ++lastDumpId;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Return the largest DumpId so far assigned
|
|
+ */
|
|
+DumpId
|
|
+getMaxDumpId(void)
|
|
+{
|
|
+ return lastDumpId;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Find a DumpableObject by dump ID
|
|
+ *
|
|
+ * Returns NULL for invalid ID
|
|
+ */
|
|
+DumpableObject *
|
|
+findObjectByDumpId(DumpId dumpId)
|
|
+{
|
|
+ if (dumpId <= 0 || dumpId >= allocedDumpIds)
|
|
+ return NULL; /* out of range? */
|
|
+ return dumpIdMap[dumpId];
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Find a DumpableObject by catalog ID
|
|
+ *
|
|
+ * Returns NULL for unknown ID
|
|
+ *
|
|
+ * We use binary search in a sorted list that is built on first call.
|
|
+ * If AssignDumpId() and findObjectByCatalogId() calls were freely intermixed,
|
|
+ * the code would work, but possibly be very slow. In the current usage
|
|
+ * pattern that does not happen; indeed we build the list at most twice.
|
|
+ */
|
|
+DumpableObject *
|
|
+findObjectByCatalogId(CatalogId catalogId)
|
|
+{
|
|
+ DumpableObject **low;
|
|
+ DumpableObject **high;
|
|
+
|
|
+ if (!catalogIdMapValid)
|
|
+ {
|
|
+ if (catalogIdMap)
|
|
+ free(catalogIdMap);
|
|
+ getDumpableObjects(&catalogIdMap, &numCatalogIds);
|
|
+ if (numCatalogIds > 1)
|
|
+ qsort((void *) catalogIdMap, numCatalogIds,
|
|
+ sizeof(DumpableObject *), DOCatalogIdCompare);
|
|
+ catalogIdMapValid = true;
|
|
+ }
|
|
+
|
|
+ /*
|
|
+ * We could use bsearch() here, but the notational cruft of calling
|
|
+ * bsearch is nearly as bad as doing it ourselves; and the generalized
|
|
+ * bsearch function is noticeably slower as well.
|
|
+ */
|
|
+ if (numCatalogIds <= 0)
|
|
+ return NULL;
|
|
+ low = catalogIdMap;
|
|
+ high = catalogIdMap + (numCatalogIds - 1);
|
|
+ while (low <= high)
|
|
+ {
|
|
+ DumpableObject **middle;
|
|
+ int difference;
|
|
+
|
|
+ middle = low + (high - low) / 2;
|
|
+ /* comparison must match DOCatalogIdCompare, below */
|
|
+ difference = oidcmp((*middle)->catId.oid, catalogId.oid);
|
|
+ if (difference == 0)
|
|
+ difference = oidcmp((*middle)->catId.tableoid, catalogId.tableoid);
|
|
+ if (difference == 0)
|
|
+ return *middle;
|
|
+ else if (difference < 0)
|
|
+ low = middle + 1;
|
|
+ else
|
|
+ high = middle - 1;
|
|
+ }
|
|
+ return NULL;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Find a DumpableObject by OID, in a pre-sorted array of one type of object
|
|
+ *
|
|
+ * Returns NULL for unknown OID
|
|
+ */
|
|
+static DumpableObject *
|
|
+findObjectByOid(Oid oid, DumpableObject **indexArray, int numObjs)
|
|
+{
|
|
+ DumpableObject **low;
|
|
+ DumpableObject **high;
|
|
+
|
|
+ /*
|
|
+ * This is the same as findObjectByCatalogId except we assume we need not
|
|
+ * look at table OID because the objects are all the same type.
|
|
+ *
|
|
+ * We could use bsearch() here, but the notational cruft of calling
|
|
+ * bsearch is nearly as bad as doing it ourselves; and the generalized
|
|
+ * bsearch function is noticeably slower as well.
|
|
+ */
|
|
+ if (numObjs <= 0)
|
|
+ return NULL;
|
|
+ low = indexArray;
|
|
+ high = indexArray + (numObjs - 1);
|
|
+ while (low <= high)
|
|
+ {
|
|
+ DumpableObject **middle;
|
|
+ int difference;
|
|
+
|
|
+ middle = low + (high - low) / 2;
|
|
+ difference = oidcmp((*middle)->catId.oid, oid);
|
|
+ if (difference == 0)
|
|
+ return *middle;
|
|
+ else if (difference < 0)
|
|
+ low = middle + 1;
|
|
+ else
|
|
+ high = middle - 1;
|
|
+ }
|
|
+ return NULL;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Build an index array of DumpableObject pointers, sorted by OID
|
|
+ */
|
|
+static DumpableObject **
|
|
+buildIndexArray(void *objArray, int numObjs, Size objSize)
|
|
+{
|
|
+ DumpableObject **ptrs;
|
|
+ int i;
|
|
+
|
|
+ ptrs = (DumpableObject **) pg_malloc(numObjs * sizeof(DumpableObject *));
|
|
+ for (i = 0; i < numObjs; i++)
|
|
+ ptrs[i] = (DumpableObject *) ((char *) objArray + i * objSize);
|
|
+
|
|
+ /* We can use DOCatalogIdCompare to sort since its first key is OID */
|
|
+ if (numObjs > 1)
|
|
+ qsort((void *) ptrs, numObjs, sizeof(DumpableObject *),
|
|
+ DOCatalogIdCompare);
|
|
+
|
|
+ return ptrs;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * qsort comparator for pointers to DumpableObjects
|
|
+ */
|
|
+static int
|
|
+DOCatalogIdCompare(const void *p1, const void *p2)
|
|
+{
|
|
+ const DumpableObject *obj1 = *(DumpableObject *const *) p1;
|
|
+ const DumpableObject *obj2 = *(DumpableObject *const *) p2;
|
|
+ int cmpval;
|
|
+
|
|
+ /*
|
|
+ * Compare OID first since it's usually unique, whereas there will only be
|
|
+ * a few distinct values of tableoid.
|
|
+ */
|
|
+ cmpval = oidcmp(obj1->catId.oid, obj2->catId.oid);
|
|
+ if (cmpval == 0)
|
|
+ cmpval = oidcmp(obj1->catId.tableoid, obj2->catId.tableoid);
|
|
+ return cmpval;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Build an array of pointers to all known dumpable objects
|
|
+ *
|
|
+ * This simply creates a modifiable copy of the internal map.
|
|
+ */
|
|
+void
|
|
+getDumpableObjects(DumpableObject ***objs, int *numObjs)
|
|
+{
|
|
+ int i,
|
|
+ j;
|
|
+
|
|
+ *objs = (DumpableObject **)
|
|
+ pg_malloc(allocedDumpIds * sizeof(DumpableObject *));
|
|
+ j = 0;
|
|
+ for (i = 1; i < allocedDumpIds; i++)
|
|
+ {
|
|
+ if (dumpIdMap[i])
|
|
+ (*objs)[j++] = dumpIdMap[i];
|
|
+ }
|
|
+ *numObjs = j;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Add a dependency link to a DumpableObject
|
|
+ *
|
|
+ * Note: duplicate dependencies are currently not eliminated
|
|
+ */
|
|
+void
|
|
+addObjectDependency(DumpableObject *dobj, DumpId refId)
|
|
+{
|
|
+ if (dobj->nDeps >= dobj->allocDeps)
|
|
+ {
|
|
+ if (dobj->allocDeps <= 0)
|
|
+ {
|
|
+ dobj->allocDeps = 16;
|
|
+ dobj->dependencies = (DumpId *)
|
|
+ pg_malloc(dobj->allocDeps * sizeof(DumpId));
|
|
+ }
|
|
+ else
|
|
+ {
|
|
+ dobj->allocDeps *= 2;
|
|
+ dobj->dependencies = (DumpId *)
|
|
+ pg_realloc(dobj->dependencies,
|
|
+ dobj->allocDeps * sizeof(DumpId));
|
|
+ }
|
|
+ }
|
|
+ dobj->dependencies[dobj->nDeps++] = refId;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Remove a dependency link from a DumpableObject
|
|
+ *
|
|
+ * If there are multiple links, all are removed
|
|
+ */
|
|
+void
|
|
+removeObjectDependency(DumpableObject *dobj, DumpId refId)
|
|
+{
|
|
+ int i;
|
|
+ int j = 0;
|
|
+
|
|
+ for (i = 0; i < dobj->nDeps; i++)
|
|
+ {
|
|
+ if (dobj->dependencies[i] != refId)
|
|
+ dobj->dependencies[j++] = dobj->dependencies[i];
|
|
+ }
|
|
+ dobj->nDeps = j;
|
|
+}
|
|
+
|
|
+
|
|
+/*
|
|
+ * findTableByOid
|
|
+ * finds the entry (in tblinfo) of the table with the given oid
|
|
+ * returns NULL if not found
|
|
+ */
|
|
+TableInfo *
|
|
+findTableByOid(Oid oid)
|
|
+{
|
|
+ return (TableInfo *) findObjectByOid(oid, tblinfoindex, numTables);
|
|
+}
|
|
+
|
|
+/*
|
|
+ * findTypeByOid
|
|
+ * finds the entry (in typinfo) of the type with the given oid
|
|
+ * returns NULL if not found
|
|
+ */
|
|
+TypeInfo *
|
|
+findTypeByOid(Oid oid)
|
|
+{
|
|
+ return (TypeInfo *) findObjectByOid(oid, typinfoindex, numTypes);
|
|
+}
|
|
+
|
|
+/*
|
|
+ * findFuncByOid
|
|
+ * finds the entry (in funinfo) of the function with the given oid
|
|
+ * returns NULL if not found
|
|
+ */
|
|
+FuncInfo *
|
|
+findFuncByOid(Oid oid)
|
|
+{
|
|
+ return (FuncInfo *) findObjectByOid(oid, funinfoindex, numFuncs);
|
|
+}
|
|
+
|
|
+/*
|
|
+ * findOprByOid
|
|
+ * finds the entry (in oprinfo) of the operator with the given oid
|
|
+ * returns NULL if not found
|
|
+ */
|
|
+OprInfo *
|
|
+findOprByOid(Oid oid)
|
|
+{
|
|
+ return (OprInfo *) findObjectByOid(oid, oprinfoindex, numOperators);
|
|
+}
|
|
+
|
|
+/*
|
|
+ * findCollationByOid
|
|
+ * finds the entry (in collinfo) of the collation with the given oid
|
|
+ * returns NULL if not found
|
|
+ */
|
|
+CollInfo *
|
|
+findCollationByOid(Oid oid)
|
|
+{
|
|
+ return (CollInfo *) findObjectByOid(oid, collinfoindex, numCollations);
|
|
+}
|
|
+
|
|
+/*
|
|
+ * findNamespaceByOid
|
|
+ * finds the entry (in nspinfo) of the namespace with the given oid
|
|
+ * returns NULL if not found
|
|
+ */
|
|
+NamespaceInfo *
|
|
+findNamespaceByOid(Oid oid)
|
|
+{
|
|
+ return (NamespaceInfo *) findObjectByOid(oid, nspinfoindex, numNamespaces);
|
|
+}
|
|
+
|
|
+
|
|
+/*
|
|
+ * findParentsByOid
|
|
+ * find a table's parents in tblinfo[]
|
|
+ */
|
|
+static void
|
|
+findParentsByOid(TableInfo *self,
|
|
+ InhInfo *inhinfo, int numInherits)
|
|
+{
|
|
+ Oid oid = self->dobj.catId.oid;
|
|
+ int i,
|
|
+ j;
|
|
+ int numParents;
|
|
+
|
|
+ numParents = 0;
|
|
+ for (i = 0; i < numInherits; i++)
|
|
+ {
|
|
+ if (inhinfo[i].inhrelid == oid)
|
|
+ numParents++;
|
|
+ }
|
|
+
|
|
+ self->numParents = numParents;
|
|
+
|
|
+ if (numParents > 0)
|
|
+ {
|
|
+ self->parents = (TableInfo **)
|
|
+ pg_malloc(sizeof(TableInfo *) * numParents);
|
|
+ j = 0;
|
|
+ for (i = 0; i < numInherits; i++)
|
|
+ {
|
|
+ if (inhinfo[i].inhrelid == oid)
|
|
+ {
|
|
+ TableInfo *parent;
|
|
+
|
|
+ parent = findTableByOid(inhinfo[i].inhparent);
|
|
+ if (parent == NULL)
|
|
+ {
|
|
+ write_msg(NULL, "failed sanity check, parent OID %u of table \"%s\" (OID %u) not found\n",
|
|
+ inhinfo[i].inhparent,
|
|
+ self->dobj.name,
|
|
+ oid);
|
|
+ exit_nicely(1);
|
|
+ }
|
|
+ self->parents[j++] = parent;
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+ else
|
|
+ self->parents = NULL;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * parseOidArray
|
|
+ * parse a string of numbers delimited by spaces into a character array
|
|
+ *
|
|
+ * Note: actually this is used for both Oids and potentially-signed
|
|
+ * attribute numbers. This should cause no trouble, but we could split
|
|
+ * the function into two functions with different argument types if it does.
|
|
+ */
|
|
+
|
|
+void
|
|
+parseOidArray(const char *str, Oid *array, int arraysize)
|
|
+{
|
|
+ int j,
|
|
+ argNum;
|
|
+ char temp[100];
|
|
+ char s;
|
|
+
|
|
+ argNum = 0;
|
|
+ j = 0;
|
|
+ for (;;)
|
|
+ {
|
|
+ s = *str++;
|
|
+ if (s == ' ' || s == '\0')
|
|
+ {
|
|
+ if (j > 0)
|
|
+ {
|
|
+ if (argNum >= arraysize)
|
|
+ {
|
|
+ write_msg(NULL, "could not parse numeric array \"%s\": too many numbers\n", str);
|
|
+ exit_nicely(1);
|
|
+ }
|
|
+ temp[j] = '\0';
|
|
+ array[argNum++] = atooid(temp);
|
|
+ j = 0;
|
|
+ }
|
|
+ if (s == '\0')
|
|
+ break;
|
|
+ }
|
|
+ else
|
|
+ {
|
|
+ if (!(isdigit((unsigned char) s) || s == '-') ||
|
|
+ j >= sizeof(temp) - 1)
|
|
+ {
|
|
+ write_msg(NULL, "could not parse numeric array \"%s\": invalid character in number\n", str);
|
|
+ exit_nicely(1);
|
|
+ }
|
|
+ temp[j++] = s;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ while (argNum < arraysize)
|
|
+ array[argNum++] = InvalidOid;
|
|
+}
|
|
+
|
|
+
|
|
+/*
|
|
+ * strInArray:
|
|
+ * takes in a string and a string array and the number of elements in the
|
|
+ * string array.
|
|
+ * returns the index if the string is somewhere in the array, -1 otherwise
|
|
+ */
|
|
+
|
|
+static int
|
|
+strInArray(const char *pattern, char **arr, int arr_size)
|
|
+{
|
|
+ int i;
|
|
+
|
|
+ for (i = 0; i < arr_size; i++)
|
|
+ {
|
|
+ if (strcmp(pattern, arr[i]) == 0)
|
|
+ return i;
|
|
+ }
|
|
+ return -1;
|
|
+}
|
|
+
|
|
+
|
|
+/*
|
|
+ * Support for simple list operations
|
|
+ */
|
|
+
|
|
+void
|
|
+simple_oid_list_append(SimpleOidList *list, Oid val)
|
|
+{
|
|
+ SimpleOidListCell *cell;
|
|
+
|
|
+ cell = (SimpleOidListCell *) pg_malloc(sizeof(SimpleOidListCell));
|
|
+ cell->next = NULL;
|
|
+ cell->val = val;
|
|
+
|
|
+ if (list->tail)
|
|
+ list->tail->next = cell;
|
|
+ else
|
|
+ list->head = cell;
|
|
+ list->tail = cell;
|
|
+}
|
|
+
|
|
+bool
|
|
+simple_oid_list_member(SimpleOidList *list, Oid val)
|
|
+{
|
|
+ SimpleOidListCell *cell;
|
|
+
|
|
+ for (cell = list->head; cell; cell = cell->next)
|
|
+ {
|
|
+ if (cell->val == val)
|
|
+ return true;
|
|
+ }
|
|
+ return false;
|
|
+}
|
|
--- /dev/null
+++ pglogical-2.2.2/pglogical_dump/compat.h
@@ -0,0 +1,17 @@
+#ifndef COMPAT_H
+#define COMPAT_H
+
+#if !defined(pg_attribute_printf)
+
+/* GCC and XLC support format attributes */
+#if defined(__GNUC__) || defined(__IBMC__)
+#define pg_attribute_format_arg(a) __attribute__((format_arg(a)))
+#define pg_attribute_printf(f,a) __attribute__((format(PG_PRINTF_ATTRIBUTE, f, a)))
+#else
+#define pg_attribute_format_arg(a)
+#define pg_attribute_printf(f,a)
+#endif
+
+#endif
+
+#endif
--- /dev/null
|
|
+++ pglogical-2.2.2/pglogical_dump/compress_io.c
|
|
@@ -0,0 +1,722 @@
|
|
+/*-------------------------------------------------------------------------
|
|
+ *
|
|
+ * compress_io.c
|
|
+ * Routines for archivers to write an uncompressed or compressed data
|
|
+ * stream.
|
|
+ *
|
|
+ * Portions Copyright (c) 1996-2014, PostgreSQL Global Development Group
|
|
+ * Portions Copyright (c) 1994, Regents of the University of California
|
|
+ *
|
|
+ * This file includes two APIs for dealing with compressed data. The first
|
|
+ * provides more flexibility, using callbacks to read/write data from the
|
|
+ * underlying stream. The second API is a wrapper around fopen/gzopen and
|
|
+ * friends, providing an interface similar to those, but abstracts away
|
|
+ * the possible compression. Both APIs use libz for the compression, but
|
|
+ * the second API uses gzip headers, so the resulting files can be easily
|
|
+ * manipulated with the gzip utility.
|
|
+ *
|
|
+ * Compressor API
|
|
+ * --------------
|
|
+ *
|
|
+ * The interface for writing to an archive consists of three functions:
|
|
+ * AllocateCompressor, WriteDataToArchive and EndCompressor. First you call
|
|
+ * AllocateCompressor, then write all the data by calling WriteDataToArchive
|
|
+ * as many times as needed, and finally EndCompressor. WriteDataToArchive
|
|
+ * and EndCompressor will call the WriteFunc that was provided to
|
|
+ * AllocateCompressor for each chunk of compressed data.
|
|
+ *
|
|
+ * The interface for reading an archive consists of just one function:
|
|
+ * ReadDataFromArchive. ReadDataFromArchive reads the whole compressed input
|
|
+ * stream, by repeatedly calling the given ReadFunc. ReadFunc returns the
|
|
+ * compressed data chunk at a time, and ReadDataFromArchive decompresses it
|
|
+ * and passes the decompressed data to ahwrite(), until ReadFunc returns 0
|
|
+ * to signal EOF.
|
|
+ *
|
|
+ * The interface is the same for compressed and uncompressed streams.
|
|
+ *
|
|
+ * Compressed stream API
|
|
+ * ----------------------
|
|
+ *
|
|
+ * The compressed stream API is a wrapper around the C standard fopen() and
|
|
+ * libz's gzopen() APIs. It allows you to use the same functions for
|
|
+ * compressed and uncompressed streams. cfopen_read() first tries to open
|
|
+ * the file with given name, and if it fails, it tries to open the same
|
|
+ * file with the .gz suffix. cfopen_write() opens a file for writing, an
|
|
+ * extra argument specifies if the file should be compressed, and adds the
|
|
+ * .gz suffix to the filename if so. This allows you to easily handle both
|
|
+ * compressed and uncompressed files.
|
|
+ *
|
|
+ * IDENTIFICATION
|
|
+ * src/bin/pg_dump/compress_io.c
|
|
+ *
|
|
+ *-------------------------------------------------------------------------
|
|
+ */
|
|
+
|
|
+#include "compress_io.h"
|
|
+#include "pg_backup_utils.h"
|
|
+#include "parallel.h"
|
|
+
|
|
+/*----------------------
|
|
+ * Compressor API
|
|
+ *----------------------
|
|
+ */
|
|
+
|
|
+/* typedef appears in compress_io.h */
|
|
+struct CompressorState
|
|
+{
|
|
+ CompressionAlgorithm comprAlg;
|
|
+ WriteFunc writeF;
|
|
+
|
|
+#ifdef HAVE_LIBZ
|
|
+ z_streamp zp;
|
|
+ char *zlibOut;
|
|
+ size_t zlibOutSize;
|
|
+#endif
|
|
+};
|
|
+
|
|
+/* translator: this is a module name */
|
|
+static const char *modulename = gettext_noop("compress_io");
|
|
+
|
|
+static void ParseCompressionOption(int compression, CompressionAlgorithm *alg,
|
|
+ int *level);
|
|
+
|
|
+/* Routines that support zlib compressed data I/O */
|
|
+#ifdef HAVE_LIBZ
|
|
+static void InitCompressorZlib(CompressorState *cs, int level);
|
|
+static void DeflateCompressorZlib(ArchiveHandle *AH, CompressorState *cs,
|
|
+ bool flush);
|
|
+static void ReadDataFromArchiveZlib(ArchiveHandle *AH, ReadFunc readF);
|
|
+static void WriteDataToArchiveZlib(ArchiveHandle *AH, CompressorState *cs,
|
|
+ const char *data, size_t dLen);
|
|
+static void EndCompressorZlib(ArchiveHandle *AH, CompressorState *cs);
|
|
+#endif
|
|
+
|
|
+/* Routines that support uncompressed data I/O */
|
|
+static void ReadDataFromArchiveNone(ArchiveHandle *AH, ReadFunc readF);
|
|
+static void WriteDataToArchiveNone(ArchiveHandle *AH, CompressorState *cs,
|
|
+ const char *data, size_t dLen);
|
|
+
|
|
+/*
|
|
+ * Interprets a numeric 'compression' value. The algorithm implied by the
|
|
+ * value (zlib or none at the moment), is returned in *alg, and the
|
|
+ * zlib compression level in *level.
|
|
+ */
|
|
+static void
|
|
+ParseCompressionOption(int compression, CompressionAlgorithm *alg, int *level)
|
|
+{
|
|
+ if (compression == Z_DEFAULT_COMPRESSION ||
|
|
+ (compression > 0 && compression <= 9))
|
|
+ *alg = COMPR_ALG_LIBZ;
|
|
+ else if (compression == 0)
|
|
+ *alg = COMPR_ALG_NONE;
|
|
+ else
|
|
+ {
|
|
+ exit_horribly(modulename, "invalid compression code: %d\n",
|
|
+ compression);
|
|
+ *alg = COMPR_ALG_NONE; /* keep compiler quiet */
|
|
+ }
|
|
+
|
|
+ /* The level is just the passed-in value. */
|
|
+ if (level)
|
|
+ *level = compression;
|
|
+}
|
|
+
|
|
+/* Public interface routines */
|
|
+
|
|
+/* Allocate a new compressor */
|
|
+CompressorState *
|
|
+AllocateCompressor(int compression, WriteFunc writeF)
|
|
+{
|
|
+ CompressorState *cs;
|
|
+ CompressionAlgorithm alg;
|
|
+ int level;
|
|
+
|
|
+ ParseCompressionOption(compression, &alg, &level);
|
|
+
|
|
+#ifndef HAVE_LIBZ
|
|
+ if (alg == COMPR_ALG_LIBZ)
|
|
+ exit_horribly(modulename, "not built with zlib support\n");
|
|
+#endif
|
|
+
|
|
+ cs = (CompressorState *) pg_malloc0(sizeof(CompressorState));
|
|
+ cs->writeF = writeF;
|
|
+ cs->comprAlg = alg;
|
|
+
|
|
+ /*
|
|
+ * Perform compression algorithm specific initialization.
|
|
+ */
|
|
+#ifdef HAVE_LIBZ
|
|
+ if (alg == COMPR_ALG_LIBZ)
|
|
+ InitCompressorZlib(cs, level);
|
|
+#endif
|
|
+
|
|
+ return cs;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Read all compressed data from the input stream (via readF) and print it
|
|
+ * out with ahwrite().
|
|
+ */
|
|
+void
|
|
+ReadDataFromArchive(ArchiveHandle *AH, int compression, ReadFunc readF)
|
|
+{
|
|
+ CompressionAlgorithm alg;
|
|
+
|
|
+ ParseCompressionOption(compression, &alg, NULL);
|
|
+
|
|
+ if (alg == COMPR_ALG_NONE)
|
|
+ ReadDataFromArchiveNone(AH, readF);
|
|
+ if (alg == COMPR_ALG_LIBZ)
|
|
+ {
|
|
+#ifdef HAVE_LIBZ
|
|
+ ReadDataFromArchiveZlib(AH, readF);
|
|
+#else
|
|
+ exit_horribly(modulename, "not built with zlib support\n");
|
|
+#endif
|
|
+ }
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Compress and write data to the output stream (via writeF).
|
|
+ */
|
|
+void
|
|
+WriteDataToArchive(ArchiveHandle *AH, CompressorState *cs,
|
|
+ const void *data, size_t dLen)
|
|
+{
|
|
+ /* Are we aborting? */
|
|
+ checkAborting(AH);
|
|
+
|
|
+ switch (cs->comprAlg)
|
|
+ {
|
|
+ case COMPR_ALG_LIBZ:
|
|
+#ifdef HAVE_LIBZ
|
|
+ WriteDataToArchiveZlib(AH, cs, data, dLen);
|
|
+#else
|
|
+ exit_horribly(modulename, "not built with zlib support\n");
|
|
+#endif
|
|
+ break;
|
|
+ case COMPR_ALG_NONE:
|
|
+ WriteDataToArchiveNone(AH, cs, data, dLen);
|
|
+ break;
|
|
+ }
|
|
+ return;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Terminate compression library context and flush its buffers.
|
|
+ */
|
|
+void
|
|
+EndCompressor(ArchiveHandle *AH, CompressorState *cs)
|
|
+{
|
|
+#ifdef HAVE_LIBZ
|
|
+ if (cs->comprAlg == COMPR_ALG_LIBZ)
|
|
+ EndCompressorZlib(AH, cs);
|
|
+#endif
|
|
+ free(cs);
|
|
+}
|
|
+
|
|
+/* Private routines, specific to each compression method. */
|
|
+
|
|
+#ifdef HAVE_LIBZ
|
|
+/*
|
|
+ * Functions for zlib compressed output.
|
|
+ */
|
|
+
|
|
+static void
|
|
+InitCompressorZlib(CompressorState *cs, int level)
|
|
+{
|
|
+ z_streamp zp;
|
|
+
|
|
+ zp = cs->zp = (z_streamp) pg_malloc(sizeof(z_stream));
|
|
+ zp->zalloc = Z_NULL;
|
|
+ zp->zfree = Z_NULL;
|
|
+ zp->opaque = Z_NULL;
|
|
+
|
|
+ /*
|
|
+ * zlibOutSize is the buffer size we tell zlib it can output to. We
|
|
+ * actually allocate one extra byte because some routines want to append a
|
|
+ * trailing zero byte to the zlib output.
|
|
+ */
|
|
+ cs->zlibOut = (char *) pg_malloc(ZLIB_OUT_SIZE + 1);
|
|
+ cs->zlibOutSize = ZLIB_OUT_SIZE;
|
|
+
|
|
+ if (deflateInit(zp, level) != Z_OK)
|
|
+ exit_horribly(modulename,
|
|
+ "could not initialize compression library: %s\n",
|
|
+ zp->msg);
|
|
+
|
|
+ /* Just be paranoid - maybe End is called after Start, with no Write */
|
|
+ zp->next_out = (void *) cs->zlibOut;
|
|
+ zp->avail_out = cs->zlibOutSize;
|
|
+}
|
|
+
|
|
+static void
|
|
+EndCompressorZlib(ArchiveHandle *AH, CompressorState *cs)
|
|
+{
|
|
+ z_streamp zp = cs->zp;
|
|
+
|
|
+ zp->next_in = NULL;
|
|
+ zp->avail_in = 0;
|
|
+
|
|
+ /* Flush any remaining data from zlib buffer */
|
|
+ DeflateCompressorZlib(AH, cs, true);
|
|
+
|
|
+ if (deflateEnd(zp) != Z_OK)
|
|
+ exit_horribly(modulename,
|
|
+ "could not close compression stream: %s\n", zp->msg);
|
|
+
|
|
+ free(cs->zlibOut);
|
|
+ free(cs->zp);
|
|
+}
|
|
+
|
|
+static void
|
|
+DeflateCompressorZlib(ArchiveHandle *AH, CompressorState *cs, bool flush)
|
|
+{
|
|
+ z_streamp zp = cs->zp;
|
|
+ char *out = cs->zlibOut;
|
|
+ int res = Z_OK;
|
|
+
|
|
+ while (cs->zp->avail_in != 0 || flush)
|
|
+ {
|
|
+ res = deflate(zp, flush ? Z_FINISH : Z_NO_FLUSH);
|
|
+ if (res == Z_STREAM_ERROR)
|
|
+ exit_horribly(modulename,
|
|
+ "could not compress data: %s\n", zp->msg);
|
|
+ if ((flush && (zp->avail_out < cs->zlibOutSize))
|
|
+ || (zp->avail_out == 0)
|
|
+ || (zp->avail_in != 0)
|
|
+ )
|
|
+ {
|
|
+ /*
|
|
+ * Extra paranoia: avoid zero-length chunks, since a zero length
|
|
+ * chunk is the EOF marker in the custom format. This should never
|
|
+ * happen but...
|
|
+ */
|
|
+ if (zp->avail_out < cs->zlibOutSize)
|
|
+ {
|
|
+ /*
|
|
+ * Any write function should do its own error checking but to
|
|
+ * make sure we do a check here as well...
|
|
+ */
|
|
+ size_t len = cs->zlibOutSize - zp->avail_out;
|
|
+
|
|
+ cs->writeF(AH, out, len);
|
|
+ }
|
|
+ zp->next_out = (void *) out;
|
|
+ zp->avail_out = cs->zlibOutSize;
|
|
+ }
|
|
+
|
|
+ if (res == Z_STREAM_END)
|
|
+ break;
|
|
+ }
|
|
+}
|
|
+
|
|
+static void
|
|
+WriteDataToArchiveZlib(ArchiveHandle *AH, CompressorState *cs,
|
|
+ const char *data, size_t dLen)
|
|
+{
|
|
+ cs->zp->next_in = (void *) data;
|
|
+ cs->zp->avail_in = dLen;
|
|
+ DeflateCompressorZlib(AH, cs, false);
|
|
+
|
|
+ return;
|
|
+}
|
|
+
|
|
+static void
|
|
+ReadDataFromArchiveZlib(ArchiveHandle *AH, ReadFunc readF)
|
|
+{
|
|
+ z_streamp zp;
|
|
+ char *out;
|
|
+ int res = Z_OK;
|
|
+ size_t cnt;
|
|
+ char *buf;
|
|
+ size_t buflen;
|
|
+
|
|
+ zp = (z_streamp) pg_malloc(sizeof(z_stream));
|
|
+ zp->zalloc = Z_NULL;
|
|
+ zp->zfree = Z_NULL;
|
|
+ zp->opaque = Z_NULL;
|
|
+
|
|
+ buf = pg_malloc(ZLIB_IN_SIZE);
|
|
+ buflen = ZLIB_IN_SIZE;
|
|
+
|
|
+ out = pg_malloc(ZLIB_OUT_SIZE + 1);
|
|
+
|
|
+ if (inflateInit(zp) != Z_OK)
|
|
+ exit_horribly(modulename,
|
|
+ "could not initialize compression library: %s\n",
|
|
+ zp->msg);
|
|
+
|
|
+ /* no minimal chunk size for zlib */
|
|
+ while ((cnt = readF(AH, &buf, &buflen)))
|
|
+ {
|
|
+ /* Are we aborting? */
|
|
+ checkAborting(AH);
|
|
+
|
|
+ zp->next_in = (void *) buf;
|
|
+ zp->avail_in = cnt;
|
|
+
|
|
+ while (zp->avail_in > 0)
|
|
+ {
|
|
+ zp->next_out = (void *) out;
|
|
+ zp->avail_out = ZLIB_OUT_SIZE;
|
|
+
|
|
+ res = inflate(zp, 0);
|
|
+ if (res != Z_OK && res != Z_STREAM_END)
|
|
+ exit_horribly(modulename,
|
|
+ "could not uncompress data: %s\n", zp->msg);
|
|
+
|
|
+ out[ZLIB_OUT_SIZE - zp->avail_out] = '\0';
|
|
+ ahwrite(out, 1, ZLIB_OUT_SIZE - zp->avail_out, AH);
|
|
+ }
|
|
+ }
|
|
+
|
|
+ zp->next_in = NULL;
|
|
+ zp->avail_in = 0;
|
|
+ while (res != Z_STREAM_END)
|
|
+ {
|
|
+ zp->next_out = (void *) out;
|
|
+ zp->avail_out = ZLIB_OUT_SIZE;
|
|
+ res = inflate(zp, 0);
|
|
+ if (res != Z_OK && res != Z_STREAM_END)
|
|
+ exit_horribly(modulename,
|
|
+ "could not uncompress data: %s\n", zp->msg);
|
|
+
|
|
+ out[ZLIB_OUT_SIZE - zp->avail_out] = '\0';
|
|
+ ahwrite(out, 1, ZLIB_OUT_SIZE - zp->avail_out, AH);
|
|
+ }
|
|
+
|
|
+ if (inflateEnd(zp) != Z_OK)
|
|
+ exit_horribly(modulename,
|
|
+ "could not close compression library: %s\n", zp->msg);
|
|
+
|
|
+ free(buf);
|
|
+ free(out);
|
|
+ free(zp);
|
|
+}
|
|
+#endif /* HAVE_LIBZ */
|
|
+
|
|
+
|
|
+/*
|
|
+ * Functions for uncompressed output.
|
|
+ */
|
|
+
|
|
+static void
|
|
+ReadDataFromArchiveNone(ArchiveHandle *AH, ReadFunc readF)
|
|
+{
|
|
+ size_t cnt;
|
|
+ char *buf;
|
|
+ size_t buflen;
|
|
+
|
|
+ buf = pg_malloc(ZLIB_OUT_SIZE);
|
|
+ buflen = ZLIB_OUT_SIZE;
|
|
+
|
|
+ while ((cnt = readF(AH, &buf, &buflen)))
|
|
+ {
|
|
+ /* Are we aborting? */
|
|
+ checkAborting(AH);
|
|
+
|
|
+ ahwrite(buf, 1, cnt, AH);
|
|
+ }
|
|
+
|
|
+ free(buf);
|
|
+}
|
|
+
|
|
+static void
|
|
+WriteDataToArchiveNone(ArchiveHandle *AH, CompressorState *cs,
|
|
+ const char *data, size_t dLen)
|
|
+{
|
|
+ cs->writeF(AH, data, dLen);
|
|
+ return;
|
|
+}
|
|
+
|
|
+
|
|
+/*----------------------
|
|
+ * Compressed stream API
|
|
+ *----------------------
|
|
+ */
|
|
+
|
|
+/*
|
|
+ * cfp represents an open stream, wrapping the underlying FILE or gzFile
|
|
+ * pointer. This is opaque to the callers.
|
|
+ */
|
|
+struct cfp
|
|
+{
|
|
+ FILE *uncompressedfp;
|
|
+#ifdef HAVE_LIBZ
|
|
+ gzFile compressedfp;
|
|
+#endif
|
|
+};
|
|
+
|
|
+#ifdef HAVE_LIBZ
|
|
+static int hasSuffix(const char *filename, const char *suffix);
|
|
+#endif
|
|
+
|
|
+/* free() without changing errno; useful in several places below */
|
|
+static void
|
|
+free_keep_errno(void *p)
|
|
+{
|
|
+ int save_errno = errno;
|
|
+
|
|
+ free(p);
|
|
+ errno = save_errno;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Open a file for reading. 'path' is the file to open, and 'mode' should
|
|
+ * be either "r" or "rb".
|
|
+ *
|
|
+ * If the file at 'path' does not exist, we append the ".gz" suffix (if 'path'
|
|
+ * doesn't already have it) and try again. So if you pass "foo" as 'path',
|
|
+ * this will open either "foo" or "foo.gz".
|
|
+ *
|
|
+ * On failure, return NULL with an error code in errno.
|
|
+ */
|
|
+cfp *
|
|
+cfopen_read(const char *path, const char *mode)
|
|
+{
|
|
+ cfp *fp;
|
|
+
|
|
+#ifdef HAVE_LIBZ
|
|
+ if (hasSuffix(path, ".gz"))
|
|
+ fp = cfopen(path, mode, 1);
|
|
+ else
|
|
+#endif
|
|
+ {
|
|
+ fp = cfopen(path, mode, 0);
|
|
+#ifdef HAVE_LIBZ
|
|
+ if (fp == NULL)
|
|
+ {
|
|
+ char *fname;
|
|
+
|
|
+ fname = psprintf("%s.gz", path);
|
|
+ fp = cfopen(fname, mode, 1);
|
|
+ free_keep_errno(fname);
|
|
+ }
|
|
+#endif
|
|
+ }
|
|
+ return fp;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Open a file for writing. 'path' indicates the path name, and 'mode' must
|
|
+ * be a filemode as accepted by fopen() and gzopen() that indicates writing
|
|
+ * ("w", "wb", "a", or "ab").
|
|
+ *
|
|
+ * If 'compression' is non-zero, a gzip compressed stream is opened, and
|
|
+ * 'compression' indicates the compression level used. The ".gz" suffix
|
|
+ * is automatically added to 'path' in that case.
|
|
+ *
|
|
+ * On failure, return NULL with an error code in errno.
|
|
+ */
|
|
+cfp *
|
|
+cfopen_write(const char *path, const char *mode, int compression)
|
|
+{
|
|
+ cfp *fp;
|
|
+
|
|
+ if (compression == 0)
|
|
+ fp = cfopen(path, mode, 0);
|
|
+ else
|
|
+ {
|
|
+#ifdef HAVE_LIBZ
|
|
+ char *fname;
|
|
+
|
|
+ fname = psprintf("%s.gz", path);
|
|
+ fp = cfopen(fname, mode, compression);
|
|
+ free_keep_errno(fname);
|
|
+#else
|
|
+ exit_horribly(modulename, "not built with zlib support\n");
|
|
+ fp = NULL; /* keep compiler quiet */
|
|
+#endif
|
|
+ }
|
|
+ return fp;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Opens file 'path' in 'mode'. If 'compression' is non-zero, the file
|
|
+ * is opened with libz gzopen(), otherwise with plain fopen().
|
|
+ *
|
|
+ * On failure, return NULL with an error code in errno.
|
|
+ */
|
|
+cfp *
|
|
+cfopen(const char *path, const char *mode, int compression)
|
|
+{
|
|
+ cfp *fp = pg_malloc(sizeof(cfp));
|
|
+
|
|
+ if (compression != 0)
|
|
+ {
|
|
+#ifdef HAVE_LIBZ
|
|
+ if (compression != Z_DEFAULT_COMPRESSION)
|
|
+ {
|
|
+ /* user has specified a compression level, so tell zlib to use it */
|
|
+ char mode_compression[32];
|
|
+
|
|
+ snprintf(mode_compression, sizeof(mode_compression), "%s%d",
|
|
+ mode, compression);
|
|
+ fp->compressedfp = gzopen(path, mode_compression);
|
|
+ }
|
|
+ else
|
|
+ {
|
|
+ /* don't specify a level, just use the zlib default */
|
|
+ fp->compressedfp = gzopen(path, mode);
|
|
+ }
|
|
+
|
|
+ fp->uncompressedfp = NULL;
|
|
+ if (fp->compressedfp == NULL)
|
|
+ {
|
|
+ free_keep_errno(fp);
|
|
+ fp = NULL;
|
|
+ }
|
|
+#else
|
|
+ exit_horribly(modulename, "not built with zlib support\n");
|
|
+#endif
|
|
+ }
|
|
+ else
|
|
+ {
|
|
+#ifdef HAVE_LIBZ
|
|
+ fp->compressedfp = NULL;
|
|
+#endif
|
|
+ fp->uncompressedfp = fopen(path, mode);
|
|
+ if (fp->uncompressedfp == NULL)
|
|
+ {
|
|
+ free_keep_errno(fp);
|
|
+ fp = NULL;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ return fp;
|
|
+}
|
|
+
|
|
+
|
|
+int
|
|
+cfread(void *ptr, int size, cfp *fp)
|
|
+{
|
|
+ int ret;
|
|
+
|
|
+ if (size == 0)
|
|
+ return 0;
|
|
+
|
|
+#ifdef HAVE_LIBZ
|
|
+ if (fp->compressedfp)
|
|
+ {
|
|
+ ret = gzread(fp->compressedfp, ptr, size);
|
|
+ if (ret != size && !gzeof(fp->compressedfp))
|
|
+ exit_horribly(modulename,
|
|
+ "could not read from input file: %s\n", strerror(errno));
|
|
+ }
|
|
+ else
|
|
+#endif
|
|
+ {
|
|
+ ret = fread(ptr, 1, size, fp->uncompressedfp);
|
|
+ if (ret != size && !feof(fp->uncompressedfp))
|
|
+ READ_ERROR_EXIT(fp->uncompressedfp);
|
|
+ }
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+int
|
|
+cfwrite(const void *ptr, int size, cfp *fp)
|
|
+{
|
|
+#ifdef HAVE_LIBZ
|
|
+ if (fp->compressedfp)
|
|
+ return gzwrite(fp->compressedfp, ptr, size);
|
|
+ else
|
|
+#endif
|
|
+ return fwrite(ptr, 1, size, fp->uncompressedfp);
|
|
+}
|
|
+
|
|
+int
|
|
+cfgetc(cfp *fp)
|
|
+{
|
|
+ int ret;
|
|
+
|
|
+#ifdef HAVE_LIBZ
|
|
+ if (fp->compressedfp)
|
|
+ {
|
|
+ ret = gzgetc(fp->compressedfp);
|
|
+ if (ret == EOF)
|
|
+ {
|
|
+ if (!gzeof(fp->compressedfp))
|
|
+ exit_horribly(modulename,
|
|
+ "could not read from input file: %s\n", strerror(errno));
|
|
+ else
|
|
+ exit_horribly(modulename,
|
|
+ "could not read from input file: end of file\n");
|
|
+ }
|
|
+ }
|
|
+ else
|
|
+#endif
|
|
+ {
|
|
+ ret = fgetc(fp->uncompressedfp);
|
|
+ if (ret == EOF)
|
|
+ READ_ERROR_EXIT(fp->uncompressedfp);
|
|
+ }
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+char *
|
|
+cfgets(cfp *fp, char *buf, int len)
|
|
+{
|
|
+#ifdef HAVE_LIBZ
|
|
+ if (fp->compressedfp)
|
|
+ return gzgets(fp->compressedfp, buf, len);
|
|
+ else
|
|
+#endif
|
|
+ return fgets(buf, len, fp->uncompressedfp);
|
|
+}
|
|
+
|
|
+int
|
|
+cfclose(cfp *fp)
|
|
+{
|
|
+ int result;
|
|
+
|
|
+ if (fp == NULL)
|
|
+ {
|
|
+ errno = EBADF;
|
|
+ return EOF;
|
|
+ }
|
|
+#ifdef HAVE_LIBZ
|
|
+ if (fp->compressedfp)
|
|
+ {
|
|
+ result = gzclose(fp->compressedfp);
|
|
+ fp->compressedfp = NULL;
|
|
+ }
|
|
+ else
|
|
+#endif
|
|
+ {
|
|
+ result = fclose(fp->uncompressedfp);
|
|
+ fp->uncompressedfp = NULL;
|
|
+ }
|
|
+ free_keep_errno(fp);
|
|
+
|
|
+ return result;
|
|
+}
|
|
+
|
|
+int
|
|
+cfeof(cfp *fp)
|
|
+{
|
|
+#ifdef HAVE_LIBZ
|
|
+ if (fp->compressedfp)
|
|
+ return gzeof(fp->compressedfp);
|
|
+ else
|
|
+#endif
|
|
+ return feof(fp->uncompressedfp);
|
|
+}
|
|
+
|
|
+#ifdef HAVE_LIBZ
|
|
+static int
|
|
+hasSuffix(const char *filename, const char *suffix)
|
|
+{
|
|
+ int filenamelen = strlen(filename);
|
|
+ int suffixlen = strlen(suffix);
|
|
+
|
|
+ if (filenamelen < suffixlen)
|
|
+ return 0;
|
|
+
|
|
+ return memcmp(&filename[filenamelen - suffixlen],
|
|
+ suffix,
|
|
+ suffixlen) == 0;
|
|
+}
|
|
+
|
|
+#endif
|
|
--- /dev/null
+++ pglogical-2.2.2/pglogical_dump/compress_io.h
@@ -0,0 +1,70 @@
+/*-------------------------------------------------------------------------
+ *
+ * compress_io.h
+ *	 Interface to compress_io.c routines
+ *
+ * Portions Copyright (c) 1996-2014, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ * IDENTIFICATION
+ *	 src/bin/pg_dump/compress_io.h
+ *
+ *-------------------------------------------------------------------------
+ */
+
+#ifndef __COMPRESS_IO__
+#define __COMPRESS_IO__
+
+#include "postgres_fe.h"
+#include "pg_backup_archiver.h"
+
+/* Initial buffer sizes used in zlib compression. */
+#define ZLIB_OUT_SIZE 4096
+#define ZLIB_IN_SIZE 4096
+
+typedef enum
+{
+	COMPR_ALG_NONE,
+	COMPR_ALG_LIBZ
+} CompressionAlgorithm;
+
+/* Prototype for callback function to WriteDataToArchive() */
+typedef void (*WriteFunc) (ArchiveHandle *AH, const char *buf, size_t len);
+
+/*
+ * Prototype for callback function to ReadDataFromArchive()
+ *
+ * ReadDataFromArchive will call the read function repeatedly, until it
+ * returns 0 to signal EOF. ReadDataFromArchive passes a buffer to read the
+ * data into in *buf, of length *buflen. If that's not big enough for the
+ * callback function, it can free() it and malloc() a new one, returning the
+ * new buffer and its size in *buf and *buflen.
+ *
+ * Returns the number of bytes read into *buf, or 0 on EOF.
+ */
+typedef size_t (*ReadFunc) (ArchiveHandle *AH, char **buf, size_t *buflen);
+
+/* struct definition appears in compress_io.c */
+typedef struct CompressorState CompressorState;
+
+extern CompressorState *AllocateCompressor(int compression, WriteFunc writeF);
+extern void ReadDataFromArchive(ArchiveHandle *AH, int compression,
+					ReadFunc readF);
+extern void WriteDataToArchive(ArchiveHandle *AH, CompressorState *cs,
+				   const void *data, size_t dLen);
+extern void EndCompressor(ArchiveHandle *AH, CompressorState *cs);
+
+
+typedef struct cfp cfp;
+
+extern cfp *cfopen(const char *path, const char *mode, int compression);
+extern cfp *cfopen_read(const char *path, const char *mode);
+extern cfp *cfopen_write(const char *path, const char *mode, int compression);
+extern int	cfread(void *ptr, int size, cfp *fp);
+extern int	cfwrite(const void *ptr, int size, cfp *fp);
+extern int	cfgetc(cfp *fp);
+extern char *cfgets(cfp *fp, char *buf, int len);
+extern int	cfclose(cfp *fp);
+extern int	cfeof(cfp *fp);
+
+#endif
--- /dev/null
|
|
+++ pglogical-2.2.2/pglogical_dump/dumputils.c
|
|
@@ -0,0 +1,1244 @@
|
|
+/*-------------------------------------------------------------------------
|
|
+ *
|
|
+ * Utility routines for SQL dumping
|
|
+ * Basically this is stuff that is useful in both pg_dump and pg_dumpall.
|
|
+ * Lately it's also being used by psql and bin/scripts/ ...
|
|
+ *
|
|
+ *
|
|
+ * Portions Copyright (c) 1996-2014, PostgreSQL Global Development Group
|
|
+ * Portions Copyright (c) 1994, Regents of the University of California
|
|
+ *
|
|
+ * src/bin/pg_dump/dumputils.c
|
|
+ *
|
|
+ *-------------------------------------------------------------------------
|
|
+ */
|
|
+#include "postgres_fe.h"
|
|
+
|
|
+#include <ctype.h>
|
|
+
|
|
+#include "dumputils.h"
|
|
+
|
|
+#include "parser/keywords.h"
|
|
+
|
|
+
|
|
+/* Globals from keywords.c */
|
|
+extern const ScanKeyword FEScanKeywords[];
|
|
+extern const int NumFEScanKeywords;
|
|
+
|
|
+#define supports_grant_options(version) ((version) >= 70400)
|
|
+
|
|
+static bool parseAclItem(const char *item, const char *type,
|
|
+ const char *name, const char *subname, int remoteVersion,
|
|
+ PQExpBuffer grantee, PQExpBuffer grantor,
|
|
+ PQExpBuffer privs, PQExpBuffer privswgo);
|
|
+static char *copyAclUserName(PQExpBuffer output, char *input);
|
|
+static void AddAcl(PQExpBuffer aclbuf, const char *keyword,
|
|
+ const char *subname);
|
|
+static PQExpBuffer defaultGetLocalPQExpBuffer(void);
|
|
+
|
|
+/* Globals exported by this file */
|
|
+int quote_all_identifiers = 0;
|
|
+PQExpBuffer (*getLocalPQExpBuffer) (void) = defaultGetLocalPQExpBuffer;
|
|
+
|
|
+/*
|
|
+ * Returns a temporary PQExpBuffer, valid until the next call to the function.
|
|
+ * This is used by fmtId and fmtQualifiedId.
|
|
+ *
|
|
+ * Non-reentrant and non-thread-safe but reduces memory leakage. You can
|
|
+ * replace this with a custom version by setting the getLocalPQExpBuffer
|
|
+ * function pointer.
|
|
+ */
|
|
+static PQExpBuffer
|
|
+defaultGetLocalPQExpBuffer(void)
|
|
+{
|
|
+ static PQExpBuffer id_return = NULL;
|
|
+
|
|
+ if (id_return) /* first time through? */
|
|
+ {
|
|
+ /* same buffer, just wipe contents */
|
|
+ resetPQExpBuffer(id_return);
|
|
+ }
|
|
+ else
|
|
+ {
|
|
+ /* new buffer */
|
|
+ id_return = createPQExpBuffer();
|
|
+ }
|
|
+
|
|
+ return id_return;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Quotes input string if it's not a legitimate SQL identifier as-is.
|
|
+ *
|
|
+ * Note that the returned string must be used before calling fmtId again,
|
|
+ * since we re-use the same return buffer each time.
|
|
+ */
|
|
+const char *
|
|
+fmtId(const char *rawid)
|
|
+{
|
|
+ PQExpBuffer id_return = getLocalPQExpBuffer();
|
|
+
|
|
+ const char *cp;
|
|
+ bool need_quotes = false;
|
|
+
|
|
+ /*
|
|
+ * These checks need to match the identifier production in scan.l. Don't
|
|
+ * use islower() etc.
|
|
+ */
|
|
+ if (quote_all_identifiers)
|
|
+ need_quotes = true;
|
|
+ /* slightly different rules for first character */
|
|
+ else if (!((rawid[0] >= 'a' && rawid[0] <= 'z') || rawid[0] == '_'))
|
|
+ need_quotes = true;
|
|
+ else
|
|
+ {
|
|
+ /* otherwise check the entire string */
|
|
+ for (cp = rawid; *cp; cp++)
|
|
+ {
|
|
+ if (!((*cp >= 'a' && *cp <= 'z')
|
|
+ || (*cp >= '0' && *cp <= '9')
|
|
+ || (*cp == '_')))
|
|
+ {
|
|
+ need_quotes = true;
|
|
+ break;
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+
|
|
+ if (!need_quotes)
|
|
+ {
|
|
+ /*
|
|
+ * Check for keyword. We quote keywords except for unreserved ones.
|
|
+ * (In some cases we could avoid quoting a col_name or type_func_name
|
|
+ * keyword, but it seems much harder than it's worth to tell that.)
|
|
+ *
|
|
+ * Note: ScanKeywordLookup() does case-insensitive comparison, but
|
|
+ * that's fine, since we already know we have all-lower-case.
|
|
+ */
|
|
+ const ScanKeyword *keyword = ScanKeywordLookup(rawid,
|
|
+ FEScanKeywords,
|
|
+ NumFEScanKeywords);
|
|
+
|
|
+ if (keyword != NULL && keyword->category != UNRESERVED_KEYWORD)
|
|
+ need_quotes = true;
|
|
+ }
|
|
+
|
|
+ if (!need_quotes)
|
|
+ {
|
|
+ /* no quoting needed */
|
|
+ appendPQExpBufferStr(id_return, rawid);
|
|
+ }
|
|
+ else
|
|
+ {
|
|
+ appendPQExpBufferChar(id_return, '\"');
|
|
+ for (cp = rawid; *cp; cp++)
|
|
+ {
|
|
+ /*
|
|
+ * Did we find a double-quote in the string? Then make this a
|
|
+ * double double-quote per SQL99. Before, we put in a
|
|
+ * backslash/double-quote pair. - thomas 2000-08-05
|
|
+ */
|
|
+ if (*cp == '\"')
|
|
+ appendPQExpBufferChar(id_return, '\"');
|
|
+ appendPQExpBufferChar(id_return, *cp);
|
|
+ }
|
|
+ appendPQExpBufferChar(id_return, '\"');
|
|
+ }
|
|
+
|
|
+ return id_return->data;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * fmtQualifiedId - convert a qualified name to the proper format for
|
|
+ * the source database.
|
|
+ *
|
|
+ * Like fmtId, use the result before calling again.
|
|
+ *
|
|
+ * Since we call fmtId(), which uses the buffer from getLocalPQExpBuffer(),
+ * we cannot assemble our result in that buffer until we are finished
+ * calling fmtId(); hence the temporary lcl_pqexp buffer below.
|
|
+ */
|
|
+const char *
|
|
+fmtQualifiedId(int remoteVersion, const char *schema, const char *id)
|
|
+{
|
|
+ PQExpBuffer id_return;
|
|
+ PQExpBuffer lcl_pqexp = createPQExpBuffer();
|
|
+
|
|
+ /* Suppress schema name if fetching from pre-7.3 DB */
|
|
+ if (remoteVersion >= 70300 && schema && *schema)
|
|
+ {
|
|
+ appendPQExpBuffer(lcl_pqexp, "%s.", fmtId(schema));
|
|
+ }
|
|
+ appendPQExpBufferStr(lcl_pqexp, fmtId(id));
|
|
+
|
|
+ id_return = getLocalPQExpBuffer();
|
|
+
|
|
+ appendPQExpBufferStr(id_return, lcl_pqexp->data);
|
|
+ destroyPQExpBuffer(lcl_pqexp);
|
|
+
|
|
+ return id_return->data;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Convert a string value to an SQL string literal and append it to
|
|
+ * the given buffer. We assume the specified client_encoding and
|
|
+ * standard_conforming_strings settings.
|
|
+ *
|
|
+ * This is essentially equivalent to libpq's PQescapeStringInternal,
|
|
+ * except for the output buffer structure. We need it in situations
|
|
+ * where we do not have a PGconn available. Where we do,
|
|
+ * appendStringLiteralConn is a better choice.
|
|
+ */
|
|
+void
|
|
+appendStringLiteral(PQExpBuffer buf, const char *str,
|
|
+ int encoding, bool std_strings)
|
|
+{
|
|
+ size_t length = strlen(str);
|
|
+ const char *source = str;
|
|
+ char *target;
|
|
+
|
|
+ if (!enlargePQExpBuffer(buf, 2 * length + 2))
|
|
+ return;
|
|
+
|
|
+ target = buf->data + buf->len;
|
|
+ *target++ = '\'';
|
|
+
|
|
+ while (*source != '\0')
|
|
+ {
|
|
+ char c = *source;
|
|
+ int len;
|
|
+ int i;
|
|
+
|
|
+ /* Fast path for plain ASCII */
|
|
+ if (!IS_HIGHBIT_SET(c))
|
|
+ {
|
|
+ /* Apply quoting if needed */
|
|
+ if (SQL_STR_DOUBLE(c, !std_strings))
|
|
+ *target++ = c;
|
|
+ /* Copy the character */
|
|
+ *target++ = c;
|
|
+ source++;
|
|
+ continue;
|
|
+ }
|
|
+
|
|
+ /* Slow path for possible multibyte characters */
|
|
+ len = PQmblen(source, encoding);
|
|
+
|
|
+ /* Copy the character */
|
|
+ for (i = 0; i < len; i++)
|
|
+ {
|
|
+ if (*source == '\0')
|
|
+ break;
|
|
+ *target++ = *source++;
|
|
+ }
|
|
+
|
|
+ /*
|
|
+ * If we hit premature end of string (ie, incomplete multibyte
|
|
+ * character), try to pad out to the correct length with spaces. We
|
|
+ * may not be able to pad completely, but we will always be able to
|
|
+ * insert at least one pad space (since we'd not have quoted a
|
|
+ * multibyte character). This should be enough to make a string that
|
|
+ * the server will error out on.
|
|
+ */
|
|
+ if (i < len)
|
|
+ {
|
|
+ char *stop = buf->data + buf->maxlen - 2;
|
|
+
|
|
+ for (; i < len; i++)
|
|
+ {
|
|
+ if (target >= stop)
|
|
+ break;
|
|
+ *target++ = ' ';
|
|
+ }
|
|
+ break;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ /* Write the terminating quote and NUL character. */
|
|
+ *target++ = '\'';
|
|
+ *target = '\0';
|
|
+
|
|
+ buf->len = target - buf->data;
|
|
+}
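A minimal sketch (hypothetical, not part of the patch) of the connection-less escaping above, assuming linkage with this file; with std_strings = false both single quotes and backslashes are doubled.

#include <stdio.h>
#include "dumputils.h"

int
main(void)
{
	PQExpBuffer buf = createPQExpBuffer();

	/* PQenv2encoding() derives the client encoding from PGCLIENTENCODING. */
	appendStringLiteral(buf, "it's a c:\\path", PQenv2encoding(), false);
	printf("%s\n", buf->data);	/* prints 'it''s a c:\\path' */

	destroyPQExpBuffer(buf);
	return 0;
}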
|
|
+
|
|
+
|
|
+/*
|
|
+ * Convert a string value to an SQL string literal and append it to
|
|
+ * the given buffer. Encoding and string syntax rules are as indicated
|
|
+ * by current settings of the PGconn.
|
|
+ */
|
|
+void
|
|
+appendStringLiteralConn(PQExpBuffer buf, const char *str, PGconn *conn)
|
|
+{
|
|
+ size_t length = strlen(str);
|
|
+
|
|
+ /*
|
|
+ * XXX This is a kluge to silence escape_string_warning in our utility
|
|
+ * programs. It should go away someday.
|
|
+ */
|
|
+ if (strchr(str, '\\') != NULL && PQserverVersion(conn) >= 80100)
|
|
+ {
|
|
+ /* ensure we are not adjacent to an identifier */
|
|
+ if (buf->len > 0 && buf->data[buf->len - 1] != ' ')
|
|
+ appendPQExpBufferChar(buf, ' ');
|
|
+ appendPQExpBufferChar(buf, ESCAPE_STRING_SYNTAX);
|
|
+ appendStringLiteral(buf, str, PQclientEncoding(conn), false);
|
|
+ return;
|
|
+ }
|
|
+ /* XXX end kluge */
|
|
+
|
|
+ if (!enlargePQExpBuffer(buf, 2 * length + 2))
|
|
+ return;
|
|
+ appendPQExpBufferChar(buf, '\'');
|
|
+ buf->len += PQescapeStringConn(conn, buf->data + buf->len,
|
|
+ str, length, NULL);
|
|
+ appendPQExpBufferChar(buf, '\'');
|
|
+}
|
|
+
|
|
+
|
|
+/*
|
|
+ * Convert a string value to a dollar quoted literal and append it to
|
|
+ * the given buffer. If the dqprefix parameter is not NULL then the
|
|
+ * dollar quote delimiter will begin with that (after the opening $).
|
|
+ *
|
|
+ * No escaping is done at all on str, in compliance with the rules
|
|
+ * for parsing dollar quoted strings. Also, we need not worry about
|
|
+ * encoding issues.
|
|
+ */
|
|
+void
|
|
+appendStringLiteralDQ(PQExpBuffer buf, const char *str, const char *dqprefix)
|
|
+{
|
|
+ static const char suffixes[] = "_XXXXXXX";
|
|
+ int nextchar = 0;
|
|
+ PQExpBuffer delimBuf = createPQExpBuffer();
|
|
+
|
|
+ /* start with $ + dqprefix if not NULL */
|
|
+ appendPQExpBufferChar(delimBuf, '$');
|
|
+ if (dqprefix)
|
|
+ appendPQExpBufferStr(delimBuf, dqprefix);
|
|
+
|
|
+ /*
|
|
+ * Make sure we choose a delimiter which (without the trailing $) is not
|
|
+ * present in the string being quoted. We don't check with the trailing $
|
|
+ * because a string ending in $foo must not be quoted with $foo$.
|
|
+ */
|
|
+ while (strstr(str, delimBuf->data) != NULL)
|
|
+ {
|
|
+ appendPQExpBufferChar(delimBuf, suffixes[nextchar++]);
|
|
+ nextchar %= sizeof(suffixes) - 1;
|
|
+ }
|
|
+
|
|
+ /* add trailing $ */
|
|
+ appendPQExpBufferChar(delimBuf, '$');
|
|
+
|
|
+ /* quote it and we are all done */
|
|
+ appendPQExpBufferStr(buf, delimBuf->data);
|
|
+ appendPQExpBufferStr(buf, str);
|
|
+ appendPQExpBufferStr(buf, delimBuf->data);
|
|
+
|
|
+ destroyPQExpBuffer(delimBuf);
|
|
+}
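A hypothetical usage sketch of the dollar-quoting helper (assumes linkage with this file); when the requested delimiter already occurs in the value, suffix characters are appended until it no longer does.

#include <stdio.h>
#include "dumputils.h"

int
main(void)
{
	PQExpBuffer buf = createPQExpBuffer();

	/* The value contains "$func", so "$func$" cannot be used as delimiter. */
	appendStringLiteralDQ(buf, "SELECT '$func$ inside';", "func");
	printf("%s\n", buf->data);
	/* prints: $func_$SELECT '$func$ inside';$func_$ */

	destroyPQExpBuffer(buf);
	return 0;
}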
|
|
+
|
|
+
|
|
+/*
|
|
+ * Convert a bytea value (presented as raw bytes) to an SQL string literal
|
|
+ * and append it to the given buffer. We assume the specified
|
|
+ * standard_conforming_strings setting.
|
|
+ *
|
|
+ * This is needed in situations where we do not have a PGconn available.
|
|
+ * Where we do, PQescapeByteaConn is a better choice.
|
|
+ */
|
|
+void
|
|
+appendByteaLiteral(PQExpBuffer buf, const unsigned char *str, size_t length,
|
|
+ bool std_strings)
|
|
+{
|
|
+ const unsigned char *source = str;
|
|
+ char *target;
|
|
+
|
|
+ static const char hextbl[] = "0123456789abcdef";
|
|
+
|
|
+ /*
|
|
+ * This implementation is hard-wired to produce hex-format output. We do
|
|
+ * not know the server version the output will be loaded into, so making
|
|
+ * an intelligent format choice is impossible. It might be better to
|
|
+ * always use the old escaped format.
|
|
+ */
|
|
+ if (!enlargePQExpBuffer(buf, 2 * length + 5))
|
|
+ return;
|
|
+
|
|
+ target = buf->data + buf->len;
|
|
+ *target++ = '\'';
|
|
+ if (!std_strings)
|
|
+ *target++ = '\\';
|
|
+ *target++ = '\\';
|
|
+ *target++ = 'x';
|
|
+
|
|
+ while (length-- > 0)
|
|
+ {
|
|
+ unsigned char c = *source++;
|
|
+
|
|
+ *target++ = hextbl[(c >> 4) & 0xF];
|
|
+ *target++ = hextbl[c & 0xF];
|
|
+ }
|
|
+
|
|
+ /* Write the terminating quote and NUL character. */
|
|
+ *target++ = '\'';
|
|
+ *target = '\0';
|
|
+
|
|
+ buf->len = target - buf->data;
|
|
+}
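For illustration only (not part of the patch), assuming linkage with this file: the bytea helper always produces hex-format output.

#include <stdio.h>
#include "dumputils.h"

int
main(void)
{
	PQExpBuffer buf = createPQExpBuffer();
	const unsigned char bytes[] = {0xde, 0xad, 0x00, 0xff};

	appendByteaLiteral(buf, bytes, sizeof(bytes), true);
	printf("%s\n", buf->data);	/* prints '\xdead00ff' */

	destroyPQExpBuffer(buf);
	return 0;
}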
|
|
+
|
|
+
|
|
+/*
|
|
+ * Deconstruct the text representation of a 1-dimensional Postgres array
|
|
+ * into individual items.
|
|
+ *
|
|
+ * On success, returns true and sets *itemarray and *nitems to describe
|
|
+ * an array of individual strings. On parse failure, returns false;
|
|
+ * *itemarray may exist or be NULL.
|
|
+ *
|
|
+ * NOTE: free'ing itemarray is sufficient to deallocate the working storage.
|
|
+ */
|
|
+bool
|
|
+parsePGArray(const char *atext, char ***itemarray, int *nitems)
|
|
+{
|
|
+ int inputlen;
|
|
+ char **items;
|
|
+ char *strings;
|
|
+ int curitem;
|
|
+
|
|
+ /*
|
|
+ * We expect input in the form of "{item,item,item}" where any item is
|
|
+ * either raw data, or surrounded by double quotes (in which case embedded
|
|
+ * characters including backslashes and quotes are backslashed).
|
|
+ *
|
|
+ * We build the result as an array of pointers followed by the actual
|
|
+ * string data, all in one malloc block for convenience of deallocation.
|
|
+ * The worst-case storage need is not more than one pointer and one
|
|
+ * character for each input character (consider "{,,,,,,,,,,}").
|
|
+ */
|
|
+ *itemarray = NULL;
|
|
+ *nitems = 0;
|
|
+ inputlen = strlen(atext);
|
|
+ if (inputlen < 2 || atext[0] != '{' || atext[inputlen - 1] != '}')
|
|
+ return false; /* bad input */
|
|
+ items = (char **) malloc(inputlen * (sizeof(char *) + sizeof(char)));
|
|
+ if (items == NULL)
|
|
+ return false; /* out of memory */
|
|
+ *itemarray = items;
|
|
+ strings = (char *) (items + inputlen);
|
|
+
|
|
+ atext++; /* advance over initial '{' */
|
|
+ curitem = 0;
|
|
+ while (*atext != '}')
|
|
+ {
|
|
+ if (*atext == '\0')
|
|
+ return false; /* premature end of string */
|
|
+ items[curitem] = strings;
|
|
+ while (*atext != '}' && *atext != ',')
|
|
+ {
|
|
+ if (*atext == '\0')
|
|
+ return false; /* premature end of string */
|
|
+ if (*atext != '"')
|
|
+ *strings++ = *atext++; /* copy unquoted data */
|
|
+ else
|
|
+ {
|
|
+ /* process quoted substring */
|
|
+ atext++;
|
|
+ while (*atext != '"')
|
|
+ {
|
|
+ if (*atext == '\0')
|
|
+ return false; /* premature end of string */
|
|
+ if (*atext == '\\')
|
|
+ {
|
|
+ atext++;
|
|
+ if (*atext == '\0')
|
|
+ return false; /* premature end of string */
|
|
+ }
|
|
+ *strings++ = *atext++; /* copy quoted data */
|
|
+ }
|
|
+ atext++;
|
|
+ }
|
|
+ }
|
|
+ *strings++ = '\0';
|
|
+ if (*atext == ',')
|
|
+ atext++;
|
|
+ curitem++;
|
|
+ }
|
|
+ if (atext[1] != '\0')
|
|
+ return false; /* bogus syntax (embedded '}') */
|
|
+ *nitems = curitem;
|
|
+ return true;
|
|
+}
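A small sketch of the array parser (hypothetical test program, assumes linkage with this file); note that a single free() of the returned pointer releases both the pointer array and the copied string data.

#include <stdio.h>
#include <stdlib.h>
#include "dumputils.h"

int
main(void)
{
	char	  **items;
	int			nitems;
	int			i;

	if (parsePGArray("{alice,\"bob smith\",carol}", &items, &nitems))
	{
		for (i = 0; i < nitems; i++)
			printf("item %d: <%s>\n", i, items[i]);
	}

	if (items)
		free(items);			/* frees pointers and string data together */
	return 0;
}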
|
|
+
|
|
+
|
|
+/*
|
|
+ * Build GRANT/REVOKE command(s) for an object.
|
|
+ *
|
|
+ * name: the object name, in the form to use in the commands (already quoted)
|
|
+ * subname: the sub-object name, if any (already quoted); NULL if none
|
|
+ * type: the object type (as seen in GRANT command: must be one of
|
|
+ * TABLE, SEQUENCE, FUNCTION, LANGUAGE, SCHEMA, DATABASE, TABLESPACE,
|
|
+ * FOREIGN DATA WRAPPER, SERVER, or LARGE OBJECT)
|
|
+ * acls: the ACL string fetched from the database
|
|
+ * owner: username of object owner (will be passed through fmtId); can be
|
|
+ * NULL or empty string to indicate "no owner known"
|
|
+ * prefix: string to prefix to each generated command; typically empty
|
|
+ * remoteVersion: version of database
|
|
+ *
|
|
+ * Returns TRUE if okay, FALSE if could not parse the acl string.
|
|
+ * The resulting commands (if any) are appended to the contents of 'sql'.
|
|
+ *
|
|
+ * Note: when processing a default ACL, prefix is "ALTER DEFAULT PRIVILEGES "
|
|
+ * or something similar, and name is an empty string.
|
|
+ *
|
|
+ * Note: beware of passing a fmtId() result directly as 'name' or 'subname',
|
|
+ * since this routine uses fmtId() internally.
|
|
+ */
|
|
+bool
|
|
+buildACLCommands(const char *name, const char *subname,
|
|
+ const char *type, const char *acls, const char *owner,
|
|
+ const char *prefix, int remoteVersion,
|
|
+ PQExpBuffer sql)
|
|
+{
|
|
+ bool ok = true;
|
|
+ char **aclitems;
|
|
+ int naclitems;
|
|
+ int i;
|
|
+ PQExpBuffer grantee,
|
|
+ grantor,
|
|
+ privs,
|
|
+ privswgo;
|
|
+ PQExpBuffer firstsql,
|
|
+ secondsql;
|
|
+ bool found_owner_privs = false;
|
|
+
|
|
+ if (strlen(acls) == 0)
|
|
+ return true; /* object has default permissions */
|
|
+
|
|
+ /* treat empty-string owner same as NULL */
|
|
+ if (owner && *owner == '\0')
|
|
+ owner = NULL;
|
|
+
|
|
+ if (!parsePGArray(acls, &aclitems, &naclitems))
|
|
+ {
|
|
+ if (aclitems)
|
|
+ free(aclitems);
|
|
+ return false;
|
|
+ }
|
|
+
|
|
+ grantee = createPQExpBuffer();
|
|
+ grantor = createPQExpBuffer();
|
|
+ privs = createPQExpBuffer();
|
|
+ privswgo = createPQExpBuffer();
|
|
+
|
|
+ /*
|
|
+ * At the end, these two will be pasted together to form the result. But
|
|
+ * the owner privileges need to go before the other ones to keep the
|
|
+ * dependencies valid. In recent versions this is normally the case, but
|
|
+ * in old versions they come after the PUBLIC privileges and that results
|
|
+ * in problems if we need to run REVOKE on the owner privileges.
|
|
+ */
|
|
+ firstsql = createPQExpBuffer();
|
|
+ secondsql = createPQExpBuffer();
|
|
+
|
|
+ /*
|
|
+ * Always start with REVOKE ALL FROM PUBLIC, so that we don't have to
|
|
+ * wire-in knowledge about the default public privileges for different
|
|
+ * kinds of objects.
|
|
+ */
|
|
+ appendPQExpBuffer(firstsql, "%sREVOKE ALL", prefix);
|
|
+ if (subname)
|
|
+ appendPQExpBuffer(firstsql, "(%s)", subname);
|
|
+ appendPQExpBuffer(firstsql, " ON %s %s FROM PUBLIC;\n", type, name);
|
|
+
|
|
+ /*
|
|
+ * We still need some hacking though to cover the case where new default
|
|
+ * public privileges are added in new versions: the REVOKE ALL will revoke
|
|
+ * them, leading to behavior different from what the old version had,
|
|
+ * which is generally not what's wanted. So add back default privs if the
|
|
+ * source database is too old to have had that particular priv.
|
|
+ */
|
|
+ if (remoteVersion < 80200 && strcmp(type, "DATABASE") == 0)
|
|
+ {
|
|
+ /* database CONNECT priv didn't exist before 8.2 */
|
|
+ appendPQExpBuffer(firstsql, "%sGRANT CONNECT ON %s %s TO PUBLIC;\n",
|
|
+ prefix, type, name);
|
|
+ }
|
|
+
|
|
+ /* Scan individual ACL items */
|
|
+ for (i = 0; i < naclitems; i++)
|
|
+ {
|
|
+ if (!parseAclItem(aclitems[i], type, name, subname, remoteVersion,
|
|
+ grantee, grantor, privs, privswgo))
|
|
+ {
|
|
+ ok = false;
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ if (grantor->len == 0 && owner)
|
|
+ printfPQExpBuffer(grantor, "%s", owner);
|
|
+
|
|
+ if (privs->len > 0 || privswgo->len > 0)
|
|
+ {
|
|
+ if (owner
|
|
+ && strcmp(grantee->data, owner) == 0
|
|
+ && strcmp(grantor->data, owner) == 0)
|
|
+ {
|
|
+ found_owner_privs = true;
|
|
+
|
|
+ /*
|
|
+ * For the owner, the default privilege level is ALL WITH
|
|
+ * GRANT OPTION (only ALL prior to 7.4).
|
|
+ */
|
|
+ if (supports_grant_options(remoteVersion)
|
|
+ ? strcmp(privswgo->data, "ALL") != 0
|
|
+ : strcmp(privs->data, "ALL") != 0)
|
|
+ {
|
|
+ appendPQExpBuffer(firstsql, "%sREVOKE ALL", prefix);
|
|
+ if (subname)
|
|
+ appendPQExpBuffer(firstsql, "(%s)", subname);
|
|
+ appendPQExpBuffer(firstsql, " ON %s %s FROM %s;\n",
|
|
+ type, name, fmtId(grantee->data));
|
|
+ if (privs->len > 0)
|
|
+ appendPQExpBuffer(firstsql,
|
|
+ "%sGRANT %s ON %s %s TO %s;\n",
|
|
+ prefix, privs->data, type, name,
|
|
+ fmtId(grantee->data));
|
|
+ if (privswgo->len > 0)
|
|
+ appendPQExpBuffer(firstsql,
|
|
+ "%sGRANT %s ON %s %s TO %s WITH GRANT OPTION;\n",
|
|
+ prefix, privswgo->data, type, name,
|
|
+ fmtId(grantee->data));
|
|
+ }
|
|
+ }
|
|
+ else
|
|
+ {
|
|
+ /*
|
|
+ * Otherwise can assume we are starting from no privs.
|
|
+ */
|
|
+ if (grantor->len > 0
|
|
+ && (!owner || strcmp(owner, grantor->data) != 0))
|
|
+ appendPQExpBuffer(secondsql, "SET SESSION AUTHORIZATION %s;\n",
|
|
+ fmtId(grantor->data));
|
|
+
|
|
+ if (privs->len > 0)
|
|
+ {
|
|
+ appendPQExpBuffer(secondsql, "%sGRANT %s ON %s %s TO ",
|
|
+ prefix, privs->data, type, name);
|
|
+ if (grantee->len == 0)
|
|
+ appendPQExpBufferStr(secondsql, "PUBLIC;\n");
|
|
+ else if (strncmp(grantee->data, "group ",
|
|
+ strlen("group ")) == 0)
|
|
+ appendPQExpBuffer(secondsql, "GROUP %s;\n",
|
|
+ fmtId(grantee->data + strlen("group ")));
|
|
+ else
|
|
+ appendPQExpBuffer(secondsql, "%s;\n", fmtId(grantee->data));
|
|
+ }
|
|
+ if (privswgo->len > 0)
|
|
+ {
|
|
+ appendPQExpBuffer(secondsql, "%sGRANT %s ON %s %s TO ",
|
|
+ prefix, privswgo->data, type, name);
|
|
+ if (grantee->len == 0)
|
|
+ appendPQExpBufferStr(secondsql, "PUBLIC");
|
|
+ else if (strncmp(grantee->data, "group ",
|
|
+ strlen("group ")) == 0)
|
|
+ appendPQExpBuffer(secondsql, "GROUP %s",
|
|
+ fmtId(grantee->data + strlen("group ")));
|
|
+ else
|
|
+ appendPQExpBufferStr(secondsql, fmtId(grantee->data));
|
|
+ appendPQExpBufferStr(secondsql, " WITH GRANT OPTION;\n");
|
|
+ }
|
|
+
|
|
+ if (grantor->len > 0
|
|
+ && (!owner || strcmp(owner, grantor->data) != 0))
|
|
+ appendPQExpBufferStr(secondsql, "RESET SESSION AUTHORIZATION;\n");
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+
|
|
+ /*
|
|
+ * If we didn't find any owner privs, the owner must have revoked 'em all
|
|
+ */
|
|
+ if (!found_owner_privs && owner)
|
|
+ {
|
|
+ appendPQExpBuffer(firstsql, "%sREVOKE ALL", prefix);
|
|
+ if (subname)
|
|
+ appendPQExpBuffer(firstsql, "(%s)", subname);
|
|
+ appendPQExpBuffer(firstsql, " ON %s %s FROM %s;\n",
|
|
+ type, name, fmtId(owner));
|
|
+ }
|
|
+
|
|
+ destroyPQExpBuffer(grantee);
|
|
+ destroyPQExpBuffer(grantor);
|
|
+ destroyPQExpBuffer(privs);
|
|
+ destroyPQExpBuffer(privswgo);
|
|
+
|
|
+ appendPQExpBuffer(sql, "%s%s", firstsql->data, secondsql->data);
|
|
+ destroyPQExpBuffer(firstsql);
|
|
+ destroyPQExpBuffer(secondsql);
|
|
+
|
|
+ free(aclitems);
|
|
+
|
|
+ return ok;
|
|
+}
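An illustrative sketch of what this produces for a hand-written aclitem array (role names and table name are made up; assumes linkage with this file):

#include <stdio.h>
#include "dumputils.h"

int
main(void)
{
	PQExpBuffer sql = createPQExpBuffer();

	if (buildACLCommands("mytable", NULL, "TABLE",
						 "{alice=arwdDxt/alice,bob=r/alice}",
						 "alice", "", 90400, sql))
		fputs(sql->data, stdout);

	/*
	 * Expected output (owner commands first, then the other grantees):
	 *   REVOKE ALL ON TABLE mytable FROM PUBLIC;
	 *   REVOKE ALL ON TABLE mytable FROM alice;
	 *   GRANT ALL ON TABLE mytable TO alice;
	 *   GRANT SELECT ON TABLE mytable TO bob;
	 */
	destroyPQExpBuffer(sql);
	return 0;
}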
|
|
+
|
|
+/*
|
|
+ * Build ALTER DEFAULT PRIVILEGES command(s) for single pg_default_acl entry.
|
|
+ *
|
|
+ * type: the object type (TABLES, FUNCTIONS, etc)
|
|
+ * nspname: schema name, or NULL for global default privileges
|
|
+ * acls: the ACL string fetched from the database
|
|
+ * owner: username of privileges owner (will be passed through fmtId)
|
|
+ * remoteVersion: version of database
|
|
+ *
|
|
+ * Returns TRUE if okay, FALSE if could not parse the acl string.
|
|
+ * The resulting commands (if any) are appended to the contents of 'sql'.
|
|
+ */
|
|
+bool
|
|
+buildDefaultACLCommands(const char *type, const char *nspname,
|
|
+ const char *acls, const char *owner,
|
|
+ int remoteVersion,
|
|
+ PQExpBuffer sql)
|
|
+{
|
|
+ bool result;
|
|
+ PQExpBuffer prefix;
|
|
+
|
|
+ prefix = createPQExpBuffer();
|
|
+
|
|
+ /*
|
|
+ * We incorporate the target role directly into the command, rather than
|
|
+ * playing around with SET ROLE or anything like that. This is so that a
|
|
+ * permissions error leads to nothing happening, rather than changing
|
|
+ * default privileges for the wrong user.
|
|
+ */
|
|
+ appendPQExpBuffer(prefix, "ALTER DEFAULT PRIVILEGES FOR ROLE %s ",
|
|
+ fmtId(owner));
|
|
+ if (nspname)
|
|
+ appendPQExpBuffer(prefix, "IN SCHEMA %s ", fmtId(nspname));
|
|
+
|
|
+ result = buildACLCommands("", NULL,
|
|
+ type, acls, owner,
|
|
+ prefix->data, remoteVersion,
|
|
+ sql);
|
|
+
|
|
+ destroyPQExpBuffer(prefix);
|
|
+
|
|
+ return result;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * This will parse an aclitem string, having the general form
|
|
+ * username=privilegecodes/grantor
|
|
+ * or
|
|
+ * group groupname=privilegecodes/grantor
|
|
+ * (the /grantor part will not be present if pre-7.4 database).
|
|
+ *
|
|
+ * The returned grantee string will be the dequoted username or groupname
|
|
+ * (preceded with "group " in the latter case). The returned grantor is
|
|
+ * the dequoted grantor name or empty. Privilege characters are decoded
|
|
+ * and split between privileges with grant option (privswgo) and without
|
|
+ * (privs).
|
|
+ *
|
|
+ * Note: for cross-version compatibility, it's important to use ALL when
|
|
+ * appropriate.
|
|
+ */
|
|
+static bool
|
|
+parseAclItem(const char *item, const char *type,
|
|
+ const char *name, const char *subname, int remoteVersion,
|
|
+ PQExpBuffer grantee, PQExpBuffer grantor,
|
|
+ PQExpBuffer privs, PQExpBuffer privswgo)
|
|
+{
|
|
+ char *buf;
|
|
+ bool all_with_go = true;
|
|
+ bool all_without_go = true;
|
|
+ char *eqpos;
|
|
+ char *slpos;
|
|
+ char *pos;
|
|
+
|
|
+ buf = strdup(item);
|
|
+ if (!buf)
|
|
+ return false;
|
|
+
|
|
+ /* user or group name is string up to = */
|
|
+ eqpos = copyAclUserName(grantee, buf);
|
|
+ if (*eqpos != '=')
|
|
+ {
|
|
+ free(buf);
|
|
+ return false;
|
|
+ }
|
|
+
|
|
+ /* grantor may be listed after / */
|
|
+ slpos = strchr(eqpos + 1, '/');
|
|
+ if (slpos)
|
|
+ {
|
|
+ *slpos++ = '\0';
|
|
+ slpos = copyAclUserName(grantor, slpos);
|
|
+ if (*slpos != '\0')
|
|
+ {
|
|
+ free(buf);
|
|
+ return false;
|
|
+ }
|
|
+ }
|
|
+ else
|
|
+ resetPQExpBuffer(grantor);
|
|
+
|
|
+ /* privilege codes */
|
|
+#define CONVERT_PRIV(code, keywd) \
|
|
+do { \
|
|
+ if ((pos = strchr(eqpos + 1, code))) \
|
|
+ { \
|
|
+ if (*(pos + 1) == '*') \
|
|
+ { \
|
|
+ AddAcl(privswgo, keywd, subname); \
|
|
+ all_without_go = false; \
|
|
+ } \
|
|
+ else \
|
|
+ { \
|
|
+ AddAcl(privs, keywd, subname); \
|
|
+ all_with_go = false; \
|
|
+ } \
|
|
+ } \
|
|
+ else \
|
|
+ all_with_go = all_without_go = false; \
|
|
+} while (0)
|
|
+
|
|
+ resetPQExpBuffer(privs);
|
|
+ resetPQExpBuffer(privswgo);
|
|
+
|
|
+ if (strcmp(type, "TABLE") == 0 || strcmp(type, "SEQUENCE") == 0 ||
|
|
+ strcmp(type, "TABLES") == 0 || strcmp(type, "SEQUENCES") == 0)
|
|
+ {
|
|
+ CONVERT_PRIV('r', "SELECT");
|
|
+
|
|
+ if (strcmp(type, "SEQUENCE") == 0 ||
|
|
+ strcmp(type, "SEQUENCES") == 0)
|
|
+ /* sequence only */
|
|
+ CONVERT_PRIV('U', "USAGE");
|
|
+ else
|
|
+ {
|
|
+ /* table only */
|
|
+ CONVERT_PRIV('a', "INSERT");
|
|
+ if (remoteVersion >= 70200)
|
|
+ CONVERT_PRIV('x', "REFERENCES");
|
|
+ /* rest are not applicable to columns */
|
|
+ if (subname == NULL)
|
|
+ {
|
|
+ if (remoteVersion >= 70200)
|
|
+ {
|
|
+ CONVERT_PRIV('d', "DELETE");
|
|
+ CONVERT_PRIV('t', "TRIGGER");
|
|
+ }
|
|
+ if (remoteVersion >= 80400)
|
|
+ CONVERT_PRIV('D', "TRUNCATE");
|
|
+ }
|
|
+ }
|
|
+
|
|
+ /* UPDATE */
|
|
+ if (remoteVersion >= 70200 ||
|
|
+ strcmp(type, "SEQUENCE") == 0 ||
|
|
+ strcmp(type, "SEQUENCES") == 0)
|
|
+ CONVERT_PRIV('w', "UPDATE");
|
|
+ else
|
|
+ /* 7.0 and 7.1 have a simpler worldview */
|
|
+ CONVERT_PRIV('w', "UPDATE,DELETE");
|
|
+ }
|
|
+ else if (strcmp(type, "FUNCTION") == 0 ||
|
|
+ strcmp(type, "FUNCTIONS") == 0)
|
|
+ CONVERT_PRIV('X', "EXECUTE");
|
|
+ else if (strcmp(type, "LANGUAGE") == 0)
|
|
+ CONVERT_PRIV('U', "USAGE");
|
|
+ else if (strcmp(type, "SCHEMA") == 0)
|
|
+ {
|
|
+ CONVERT_PRIV('C', "CREATE");
|
|
+ CONVERT_PRIV('U', "USAGE");
|
|
+ }
|
|
+ else if (strcmp(type, "DATABASE") == 0)
|
|
+ {
|
|
+ CONVERT_PRIV('C', "CREATE");
|
|
+ CONVERT_PRIV('c', "CONNECT");
|
|
+ CONVERT_PRIV('T', "TEMPORARY");
|
|
+ }
|
|
+ else if (strcmp(type, "TABLESPACE") == 0)
|
|
+ CONVERT_PRIV('C', "CREATE");
|
|
+ else if (strcmp(type, "TYPE") == 0 ||
|
|
+ strcmp(type, "TYPES") == 0)
|
|
+ CONVERT_PRIV('U', "USAGE");
|
|
+ else if (strcmp(type, "FOREIGN DATA WRAPPER") == 0)
|
|
+ CONVERT_PRIV('U', "USAGE");
|
|
+ else if (strcmp(type, "FOREIGN SERVER") == 0)
|
|
+ CONVERT_PRIV('U', "USAGE");
|
|
+ else if (strcmp(type, "FOREIGN TABLE") == 0)
|
|
+ CONVERT_PRIV('r', "SELECT");
|
|
+ else if (strcmp(type, "LARGE OBJECT") == 0)
|
|
+ {
|
|
+ CONVERT_PRIV('r', "SELECT");
|
|
+ CONVERT_PRIV('w', "UPDATE");
|
|
+ }
|
|
+ else
|
|
+ abort();
|
|
+
|
|
+#undef CONVERT_PRIV
|
|
+
|
|
+ if (all_with_go)
|
|
+ {
|
|
+ resetPQExpBuffer(privs);
|
|
+ printfPQExpBuffer(privswgo, "ALL");
|
|
+ if (subname)
|
|
+ appendPQExpBuffer(privswgo, "(%s)", subname);
|
|
+ }
|
|
+ else if (all_without_go)
|
|
+ {
|
|
+ resetPQExpBuffer(privswgo);
|
|
+ printfPQExpBuffer(privs, "ALL");
|
|
+ if (subname)
|
|
+ appendPQExpBuffer(privs, "(%s)", subname);
|
|
+ }
|
|
+
|
|
+ free(buf);
|
|
+
|
|
+ return true;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Transfer a user or group name starting at *input into the output buffer,
|
|
+ * dequoting if needed. Returns a pointer to just past the input name.
|
|
+ * The name is taken to end at an unquoted '=' or end of string.
|
|
+ */
|
|
+static char *
|
|
+copyAclUserName(PQExpBuffer output, char *input)
|
|
+{
|
|
+ resetPQExpBuffer(output);
|
|
+
|
|
+ while (*input && *input != '=')
|
|
+ {
|
|
+ /*
|
|
+ * If user name isn't quoted, then just add it to the output buffer
|
|
+ */
|
|
+ if (*input != '"')
|
|
+ appendPQExpBufferChar(output, *input++);
|
|
+ else
|
|
+ {
|
|
+ /* Otherwise, it's a quoted username */
|
|
+ input++;
|
|
+ /* Loop until we come across an unescaped quote */
|
|
+ while (!(*input == '"' && *(input + 1) != '"'))
|
|
+ {
|
|
+ if (*input == '\0')
|
|
+ return input; /* really a syntax error... */
|
|
+
|
|
+ /*
|
|
+ * Quoting convention is to escape " as "". Keep this code in
|
|
+ * sync with putid() in backend's acl.c.
|
|
+ */
|
|
+ if (*input == '"' && *(input + 1) == '"')
|
|
+ input++;
|
|
+ appendPQExpBufferChar(output, *input++);
|
|
+ }
|
|
+ input++;
|
|
+ }
|
|
+ }
|
|
+ return input;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Append a privilege keyword to a keyword list, inserting comma if needed.
|
|
+ */
|
|
+static void
|
|
+AddAcl(PQExpBuffer aclbuf, const char *keyword, const char *subname)
|
|
+{
|
|
+ if (aclbuf->len > 0)
|
|
+ appendPQExpBufferChar(aclbuf, ',');
|
|
+ appendPQExpBufferStr(aclbuf, keyword);
|
|
+ if (subname)
|
|
+ appendPQExpBuffer(aclbuf, "(%s)", subname);
|
|
+}
|
|
+
|
|
+
|
|
+/*
|
|
+ * processSQLNamePattern
|
|
+ *
|
|
+ * Scan a wildcard-pattern string and generate appropriate WHERE clauses
|
|
+ * to limit the set of objects returned. The WHERE clauses are appended
|
|
+ * to the already-partially-constructed query in buf. Returns whether
|
|
+ * any clause was added.
|
|
+ *
|
|
+ * conn: connection query will be sent to (consulted for escaping rules).
|
|
+ * buf: output parameter.
|
|
+ * pattern: user-specified pattern option, or NULL if none ("*" is implied).
|
|
+ * have_where: true if caller already emitted "WHERE" (clauses will be ANDed
|
|
+ * onto the existing WHERE clause).
|
|
+ * force_escape: always quote regexp special characters, even outside
|
|
+ * double quotes (else they are quoted only between double quotes).
|
|
+ * schemavar: name of query variable to match against a schema-name pattern.
|
|
+ * Can be NULL if no schema.
|
|
+ * namevar: name of query variable to match against an object-name pattern.
|
|
+ * altnamevar: NULL, or name of an alternative variable to match against name.
|
|
+ * visibilityrule: clause to use if we want to restrict to visible objects
|
|
+ * (for example, "pg_catalog.pg_table_is_visible(p.oid)"). Can be NULL.
|
|
+ *
|
|
+ * Formatting note: the text already present in buf should end with a newline.
|
|
+ * The appended text, if any, will end with one too.
|
|
+ */
|
|
+bool
|
|
+processSQLNamePattern(PGconn *conn, PQExpBuffer buf, const char *pattern,
|
|
+ bool have_where, bool force_escape,
|
|
+ const char *schemavar, const char *namevar,
|
|
+ const char *altnamevar, const char *visibilityrule)
|
|
+{
|
|
+ PQExpBufferData schemabuf;
|
|
+ PQExpBufferData namebuf;
|
|
+ int encoding = PQclientEncoding(conn);
|
|
+ bool inquotes;
|
|
+ const char *cp;
|
|
+ int i;
|
|
+ bool added_clause = false;
|
|
+
|
|
+#define WHEREAND() \
|
|
+ (appendPQExpBufferStr(buf, have_where ? " AND " : "WHERE "), \
|
|
+ have_where = true, added_clause = true)
|
|
+
|
|
+ if (pattern == NULL)
|
|
+ {
|
|
+ /* Default: select all visible objects */
|
|
+ if (visibilityrule)
|
|
+ {
|
|
+ WHEREAND();
|
|
+ appendPQExpBuffer(buf, "%s\n", visibilityrule);
|
|
+ }
|
|
+ return added_clause;
|
|
+ }
|
|
+
|
|
+ initPQExpBuffer(&schemabuf);
|
|
+ initPQExpBuffer(&namebuf);
|
|
+
|
|
+ /*
|
|
+ * Parse the pattern, converting quotes and lower-casing unquoted letters.
|
|
+ * Also, adjust shell-style wildcard characters into regexp notation.
|
|
+ *
|
|
+ * We surround the pattern with "^(...)$" to force it to match the whole
|
|
+ * string, as per SQL practice. We have to have parens in case the string
|
|
+ * contains "|", else the "^" and "$" will be bound into the first and
|
|
+ * last alternatives which is not what we want.
|
|
+ *
|
|
+ * Note: the result of this pass is the actual regexp pattern(s) we want
|
|
+ * to execute. Quoting/escaping into SQL literal format will be done
|
|
+ * below using appendStringLiteralConn().
|
|
+ */
|
|
+ appendPQExpBufferStr(&namebuf, "^(");
|
|
+
|
|
+ inquotes = false;
|
|
+ cp = pattern;
|
|
+
|
|
+ while (*cp)
|
|
+ {
|
|
+ char ch = *cp;
|
|
+
|
|
+ if (ch == '"')
|
|
+ {
|
|
+ if (inquotes && cp[1] == '"')
|
|
+ {
|
|
+ /* emit one quote, stay in inquotes mode */
|
|
+ appendPQExpBufferChar(&namebuf, '"');
|
|
+ cp++;
|
|
+ }
|
|
+ else
|
|
+ inquotes = !inquotes;
|
|
+ cp++;
|
|
+ }
|
|
+ else if (!inquotes && isupper((unsigned char) ch))
|
|
+ {
|
|
+ appendPQExpBufferChar(&namebuf,
|
|
+ pg_tolower((unsigned char) ch));
|
|
+ cp++;
|
|
+ }
|
|
+ else if (!inquotes && ch == '*')
|
|
+ {
|
|
+ appendPQExpBufferStr(&namebuf, ".*");
|
|
+ cp++;
|
|
+ }
|
|
+ else if (!inquotes && ch == '?')
|
|
+ {
|
|
+ appendPQExpBufferChar(&namebuf, '.');
|
|
+ cp++;
|
|
+ }
|
|
+ else if (!inquotes && ch == '.')
|
|
+ {
|
|
+ /* Found schema/name separator, move current pattern to schema */
|
|
+ resetPQExpBuffer(&schemabuf);
|
|
+ appendPQExpBufferStr(&schemabuf, namebuf.data);
|
|
+ resetPQExpBuffer(&namebuf);
|
|
+ appendPQExpBufferStr(&namebuf, "^(");
|
|
+ cp++;
|
|
+ }
|
|
+ else if (ch == '$')
|
|
+ {
|
|
+ /*
|
|
+ * Dollar is always quoted, whether inside quotes or not. The
|
|
+ * reason is that it's allowed in SQL identifiers, so there's a
|
|
+ * significant use-case for treating it literally, while because
|
|
+ * we anchor the pattern automatically there is no use-case for
|
|
+ * having it possess its regexp meaning.
|
|
+ */
|
|
+ appendPQExpBufferStr(&namebuf, "\\$");
|
|
+ cp++;
|
|
+ }
|
|
+ else
|
|
+ {
|
|
+ /*
|
|
+ * Ordinary data character, transfer to pattern
|
|
+ *
|
|
+ * Inside double quotes, or at all times if force_escape is true,
|
|
+ * quote regexp special characters with a backslash to avoid
|
|
+ * regexp errors. Outside quotes, however, let them pass through
|
|
+ * as-is; this lets knowledgeable users build regexp expressions
|
|
+ * that are more powerful than shell-style patterns.
|
|
+ */
|
|
+ if ((inquotes || force_escape) &&
|
|
+ strchr("|*+?()[]{}.^$\\", ch))
|
|
+ appendPQExpBufferChar(&namebuf, '\\');
|
|
+ i = PQmblen(cp, encoding);
|
|
+ while (i-- && *cp)
|
|
+ {
|
|
+ appendPQExpBufferChar(&namebuf, *cp);
|
|
+ cp++;
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+
|
|
+ /*
|
|
+ * Now decide what we need to emit. Note there will be a leading "^(" in
|
|
+ * the patterns in any case.
|
|
+ */
|
|
+ if (namebuf.len > 2)
|
|
+ {
|
|
+ /* We have a name pattern, so constrain the namevar(s) */
|
|
+
|
|
+ appendPQExpBufferStr(&namebuf, ")$");
|
|
+ /* Optimize away a "*" pattern */
|
|
+ if (strcmp(namebuf.data, "^(.*)$") != 0)
|
|
+ {
|
|
+ WHEREAND();
|
|
+ if (altnamevar)
|
|
+ {
|
|
+ appendPQExpBuffer(buf, "(%s ~ ", namevar);
|
|
+ appendStringLiteralConn(buf, namebuf.data, conn);
|
|
+ appendPQExpBuffer(buf, "\n OR %s ~ ", altnamevar);
|
|
+ appendStringLiteralConn(buf, namebuf.data, conn);
|
|
+ appendPQExpBufferStr(buf, ")\n");
|
|
+ }
|
|
+ else
|
|
+ {
|
|
+ appendPQExpBuffer(buf, "%s ~ ", namevar);
|
|
+ appendStringLiteralConn(buf, namebuf.data, conn);
|
|
+ appendPQExpBufferChar(buf, '\n');
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+
|
|
+ if (schemabuf.len > 2)
|
|
+ {
|
|
+ /* We have a schema pattern, so constrain the schemavar */
|
|
+
|
|
+ appendPQExpBufferStr(&schemabuf, ")$");
|
|
+ /* Optimize away a "*" pattern */
|
|
+ if (strcmp(schemabuf.data, "^(.*)$") != 0 && schemavar)
|
|
+ {
|
|
+ WHEREAND();
|
|
+ appendPQExpBuffer(buf, "%s ~ ", schemavar);
|
|
+ appendStringLiteralConn(buf, schemabuf.data, conn);
|
|
+ appendPQExpBufferChar(buf, '\n');
|
|
+ }
|
|
+ }
|
|
+ else
|
|
+ {
|
|
+ /* No schema pattern given, so select only visible objects */
|
|
+ if (visibilityrule)
|
|
+ {
|
|
+ WHEREAND();
|
|
+ appendPQExpBuffer(buf, "%s\n", visibilityrule);
|
|
+ }
|
|
+ }
|
|
+
|
|
+ termPQExpBuffer(&schemabuf);
|
|
+ termPQExpBuffer(&namebuf);
|
|
+
|
|
+ return added_clause;
|
|
+#undef WHEREAND
|
|
+}
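A sketch under assumptions (a server reachable through the usual PG* environment variables, linkage with this file): it shows how a psql-style pattern such as "myschema.pg_*" is turned into regex WHERE clauses.

#include <stdio.h>
#include "dumputils.h"

int
main(void)
{
	PGconn	   *conn = PQconnectdb("");		/* uses PGHOST/PGDATABASE etc. */
	PQExpBuffer buf = createPQExpBuffer();

	if (PQstatus(conn) != CONNECTION_OK)
		return 1;

	appendPQExpBufferStr(buf,
						 "SELECT c.relname FROM pg_class c\n"
						 "     JOIN pg_namespace n ON n.oid = c.relnamespace\n");
	processSQLNamePattern(conn, buf, "myschema.pg_*", false, false,
						  "n.nspname", "c.relname", NULL,
						  "pg_catalog.pg_table_is_visible(c.oid)");

	/*
	 * buf now ends with something like:
	 *   WHERE c.relname ~ '^(pg_.*)$'
	 *     AND n.nspname ~ '^(myschema)$'
	 */
	fputs(buf->data, stdout);

	destroyPQExpBuffer(buf);
	PQfinish(conn);
	return 0;
}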
|
|
+
|
|
+/*
|
|
+ * buildShSecLabelQuery
|
|
+ *
|
|
+ * Build a query to retrieve security labels for a shared object.
|
|
+ */
|
|
+void
|
|
+buildShSecLabelQuery(PGconn *conn, const char *catalog_name, uint32 objectId,
|
|
+ PQExpBuffer sql)
|
|
+{
|
|
+ appendPQExpBuffer(sql,
|
|
+ "SELECT provider, label FROM pg_catalog.pg_shseclabel "
|
|
+ "WHERE classoid = '%s'::pg_catalog.regclass AND "
|
|
+ "objoid = %u", catalog_name, objectId);
|
|
+}
|
|
+
|
|
+/*
|
|
+ * emitShSecLabels
|
|
+ *
|
|
+ * Format security label data retrieved by the query generated in
|
|
+ * buildShSecLabelQuery.
|
|
+ */
|
|
+void
|
|
+emitShSecLabels(PGconn *conn, PGresult *res, PQExpBuffer buffer,
|
|
+ const char *target, const char *objname)
|
|
+{
|
|
+ int i;
|
|
+
|
|
+ for (i = 0; i < PQntuples(res); i++)
|
|
+ {
|
|
+ char *provider = PQgetvalue(res, i, 0);
|
|
+ char *label = PQgetvalue(res, i, 1);
|
|
+
|
|
+ /* must use fmtId result before calling it again */
|
|
+ appendPQExpBuffer(buffer,
|
|
+ "SECURITY LABEL FOR %s ON %s",
|
|
+ fmtId(provider), target);
|
|
+ appendPQExpBuffer(buffer,
|
|
+ " %s IS ",
|
|
+ fmtId(objname));
|
|
+ appendStringLiteralConn(buffer, label, conn);
|
|
+ appendPQExpBufferStr(buffer, ";\n");
|
|
+ }
|
|
+}
|
|
+
|
|
+
|
|
+void
|
|
+simple_string_list_append(SimpleStringList *list, const char *val)
|
|
+{
|
|
+ SimpleStringListCell *cell;
|
|
+
|
|
+ /* this calculation correctly accounts for the null trailing byte */
|
|
+ cell = (SimpleStringListCell *)
|
|
+ pg_malloc(sizeof(SimpleStringListCell) + strlen(val));
|
|
+
|
|
+ cell->next = NULL;
|
|
+ strcpy(cell->val, val);
|
|
+
|
|
+ if (list->tail)
|
|
+ list->tail->next = cell;
|
|
+ else
|
|
+ list->head = cell;
|
|
+ list->tail = cell;
|
|
+}
|
|
+
|
|
+bool
|
|
+simple_string_list_member(SimpleStringList *list, const char *val)
|
|
+{
|
|
+ SimpleStringListCell *cell;
|
|
+
|
|
+ for (cell = list->head; cell; cell = cell->next)
|
|
+ {
|
|
+ if (strcmp(cell->val, val) == 0)
|
|
+ return true;
|
|
+ }
|
|
+ return false;
|
|
+}
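A trivial usage sketch (hypothetical, assumes linkage with this file) of the string list that backs pg_dump's repeatable --table/--schema style options:

#include <stdio.h>
#include "dumputils.h"

int
main(void)
{
	SimpleStringList tables = {NULL, NULL};

	simple_string_list_append(&tables, "public.orders");
	simple_string_list_append(&tables, "public.customers");

	if (simple_string_list_member(&tables, "public.orders"))
		printf("public.orders is in the list\n");
	return 0;
}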
--- /dev/null
+++ pglogical-2.2.2/pglogical_dump/dumputils.h
@@ -0,0 +1,74 @@
+/*-------------------------------------------------------------------------
|
|
+ *
|
|
+ * Utility routines for SQL dumping
|
|
+ * Basically this is stuff that is useful in both pg_dump and pg_dumpall.
|
|
+ * Lately it's also being used by psql and bin/scripts/ ...
|
|
+ *
|
|
+ *
|
|
+ * Portions Copyright (c) 1996-2014, PostgreSQL Global Development Group
|
|
+ * Portions Copyright (c) 1994, Regents of the University of California
|
|
+ *
|
|
+ * src/bin/pg_dump/dumputils.h
|
|
+ *
|
|
+ *-------------------------------------------------------------------------
|
|
+ */
|
|
+
|
|
+#ifndef DUMPUTILS_H
|
|
+#define DUMPUTILS_H
|
|
+
|
|
+#include "compat.h"
|
|
+#include "libpq-fe.h"
|
|
+#include "pqexpbuffer.h"
|
|
+
|
|
+typedef struct SimpleStringListCell
|
|
+{
|
|
+ struct SimpleStringListCell *next;
|
|
+ char val[1]; /* VARIABLE LENGTH FIELD */
|
|
+} SimpleStringListCell;
|
|
+
|
|
+typedef struct SimpleStringList
|
|
+{
|
|
+ SimpleStringListCell *head;
|
|
+ SimpleStringListCell *tail;
|
|
+} SimpleStringList;
|
|
+
|
|
+
|
|
+extern int quote_all_identifiers;
|
|
+extern PQExpBuffer (*getLocalPQExpBuffer) (void);
|
|
+
|
|
+extern const char *fmtId(const char *identifier);
|
|
+extern const char *fmtQualifiedId(int remoteVersion,
|
|
+ const char *schema, const char *id);
|
|
+extern void appendStringLiteral(PQExpBuffer buf, const char *str,
|
|
+ int encoding, bool std_strings);
|
|
+extern void appendStringLiteralConn(PQExpBuffer buf, const char *str,
|
|
+ PGconn *conn);
|
|
+extern void appendStringLiteralDQ(PQExpBuffer buf, const char *str,
|
|
+ const char *dqprefix);
|
|
+extern void appendByteaLiteral(PQExpBuffer buf,
|
|
+ const unsigned char *str, size_t length,
|
|
+ bool std_strings);
|
|
+extern bool parsePGArray(const char *atext, char ***itemarray, int *nitems);
|
|
+extern bool buildACLCommands(const char *name, const char *subname,
|
|
+ const char *type, const char *acls, const char *owner,
|
|
+ const char *prefix, int remoteVersion,
|
|
+ PQExpBuffer sql);
|
|
+extern bool buildDefaultACLCommands(const char *type, const char *nspname,
|
|
+ const char *acls, const char *owner,
|
|
+ int remoteVersion,
|
|
+ PQExpBuffer sql);
|
|
+extern bool processSQLNamePattern(PGconn *conn, PQExpBuffer buf,
|
|
+ const char *pattern,
|
|
+ bool have_where, bool force_escape,
|
|
+ const char *schemavar, const char *namevar,
|
|
+ const char *altnamevar, const char *visibilityrule);
|
|
+extern void buildShSecLabelQuery(PGconn *conn, const char *catalog_name,
|
|
+ uint32 objectId, PQExpBuffer sql);
|
|
+extern void emitShSecLabels(PGconn *conn, PGresult *res,
|
|
+ PQExpBuffer buffer, const char *target, const char *objname);
|
|
+extern void set_dump_section(const char *arg, int *dumpSections);
|
|
+
|
|
+extern void simple_string_list_append(SimpleStringList *list, const char *val);
|
|
+extern bool simple_string_list_member(SimpleStringList *list, const char *val);
|
|
+
|
|
+#endif /* DUMPUTILS_H */
--- /dev/null
+++ pglogical-2.2.2/pglogical_dump/keywords.c
@@ -0,0 +1,30 @@
+/*-------------------------------------------------------------------------
+ *
+ * keywords.c
+ *	  lexical token lookup for key words in PostgreSQL
+ *
+ *
+ * Portions Copyright (c) 1996-2014, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ *
+ * IDENTIFICATION
+ *	  src/bin/pg_dump/keywords.c
+ *
+ *-------------------------------------------------------------------------
+ */
+#include "postgres_fe.h"
+
+#include "parser/keywords.h"
+
+/*
+ * We don't need the token number, so leave it out to avoid requiring other
+ * backend headers.
+ */
+#define PG_KEYWORD(a,b,c) {a,0,c},
+
+const ScanKeyword FEScanKeywords[] = {
+#include "parser/kwlist.h"
+};
+
+const int NumFEScanKeywords = lengthof(FEScanKeywords);
--- /dev/null
+++ pglogical-2.2.2/pglogical_dump/kwlookup.c
@@ -0,0 +1,89 @@
+/*-------------------------------------------------------------------------
|
|
+ *
|
|
+ * kwlookup.c
|
|
+ * lexical token lookup for key words in PostgreSQL
|
|
+ *
|
|
+ * NB - this file is also used by ECPG and several frontend programs in
|
|
+ * src/bin/ including pg_dump and psql
|
|
+ *
|
|
+ * Portions Copyright (c) 1996-2014, PostgreSQL Global Development Group
|
|
+ * Portions Copyright (c) 1994, Regents of the University of California
|
|
+ *
|
|
+ *
|
|
+ * IDENTIFICATION
|
|
+ * src/backend/parser/kwlookup.c
|
|
+ *
|
|
+ *-------------------------------------------------------------------------
|
|
+ */
|
|
+
|
|
+/* use c.h so this can be built as either frontend or backend */
|
|
+#include "c.h"
|
|
+
|
|
+#include <ctype.h>
|
|
+
|
|
+#include "parser/keywords.h"
|
|
+
|
|
+/*
|
|
+ * ScanKeywordLookup - see if a given word is a keyword
|
|
+ *
|
|
+ * Returns a pointer to the ScanKeyword table entry, or NULL if no match.
|
|
+ *
|
|
+ * The match is done case-insensitively. Note that we deliberately use a
|
|
+ * dumbed-down case conversion that will only translate 'A'-'Z' into 'a'-'z',
|
|
+ * even if we are in a locale where tolower() would produce more or different
|
|
+ * translations. This is to conform to the SQL99 spec, which says that
|
|
+ * keywords are to be matched in this way even though non-keyword identifiers
|
|
+ * receive a different case-normalization mapping.
|
|
+ */
|
|
+const ScanKeyword *
|
|
+ScanKeywordLookup(const char *text,
|
|
+ const ScanKeyword *keywords,
|
|
+ int num_keywords)
|
|
+{
|
|
+ int len,
|
|
+ i;
|
|
+ char word[NAMEDATALEN];
|
|
+ const ScanKeyword *low;
|
|
+ const ScanKeyword *high;
|
|
+
|
|
+ len = strlen(text);
|
|
+ /* We assume all keywords are shorter than NAMEDATALEN. */
|
|
+ if (len >= NAMEDATALEN)
|
|
+ return NULL;
|
|
+
|
|
+ /*
|
|
+ * Apply an ASCII-only downcasing. We must not use tolower() since it may
|
|
+ * produce the wrong translation in some locales (eg, Turkish).
|
|
+ */
|
|
+ for (i = 0; i < len; i++)
|
|
+ {
|
|
+ char ch = text[i];
|
|
+
|
|
+ if (ch >= 'A' && ch <= 'Z')
|
|
+ ch += 'a' - 'A';
|
|
+ word[i] = ch;
|
|
+ }
|
|
+ word[len] = '\0';
|
|
+
|
|
+ /*
|
|
+ * Now do a binary search using plain strcmp() comparison.
|
|
+ */
|
|
+ low = keywords;
|
|
+ high = keywords + (num_keywords - 1);
|
|
+ while (low <= high)
|
|
+ {
|
|
+ const ScanKeyword *middle;
|
|
+ int difference;
|
|
+
|
|
+ middle = low + (high - low) / 2;
|
|
+ difference = strcmp(middle->name, word);
|
|
+ if (difference == 0)
|
|
+ return middle;
|
|
+ else if (difference < 0)
|
|
+ low = middle + 1;
|
|
+ else
|
|
+ high = middle - 1;
|
|
+ }
|
|
+
|
|
+ return NULL;
|
|
+}
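A usage sketch (hypothetical; assumes the PostgreSQL headers on the include path and linkage with keywords.c and kwlookup.c from this directory):

#include "postgres_fe.h"

#include <stdio.h>

#include "parser/keywords.h"

/* defined in keywords.c */
extern const ScanKeyword FEScanKeywords[];
extern const int NumFEScanKeywords;

int
main(void)
{
	const ScanKeyword *kw = ScanKeywordLookup("Select",
											  FEScanKeywords,
											  NumFEScanKeywords);

	if (kw != NULL)
		printf("\"%s\" is a keyword (category %d)\n", kw->name, kw->category);

	if (ScanKeywordLookup("my_table", FEScanKeywords, NumFEScanKeywords) == NULL)
		printf("\"my_table\" is not a keyword\n");
	return 0;
}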
--- /dev/null
+++ pglogical-2.2.2/pglogical_dump/parallel.c
@@ -0,0 +1,1417 @@
+/*-------------------------------------------------------------------------
|
|
+ *
|
|
+ * parallel.c
|
|
+ *
|
|
+ * Parallel support for the pg_dump archiver
|
|
+ *
|
|
+ * Portions Copyright (c) 1996-2014, PostgreSQL Global Development Group
|
|
+ * Portions Copyright (c) 1994, Regents of the University of California
|
|
+ *
|
|
+ * The author is not responsible for loss or damages that may
|
|
+ * result from its use.
|
|
+ *
|
|
+ * IDENTIFICATION
|
|
+ * src/bin/pg_dump/parallel.c
|
|
+ *
|
|
+ *-------------------------------------------------------------------------
|
|
+ */
|
|
+
|
|
+#include "postgres_fe.h"
|
|
+
|
|
+#include "pg_backup_utils.h"
|
|
+#include "parallel.h"
|
|
+
|
|
+#ifndef WIN32
|
|
+#include <sys/types.h>
|
|
+#include <sys/wait.h>
|
|
+#include "signal.h"
|
|
+#include <unistd.h>
|
|
+#include <fcntl.h>
|
|
+#endif
|
|
+
|
|
+#define PIPE_READ 0
|
|
+#define PIPE_WRITE 1
|
|
+
|
|
+/* file-scope variables */
|
|
+#ifdef WIN32
|
|
+static unsigned int tMasterThreadId = 0;
|
|
+static HANDLE termEvent = INVALID_HANDLE_VALUE;
|
|
+static int pgpipe(int handles[2]);
|
|
+static int piperead(int s, char *buf, int len);
|
|
+
|
|
+/*
|
|
+ * Structure to hold info passed by _beginthreadex() to the function it calls
|
|
+ * via its single allowed argument.
|
|
+ */
|
|
+typedef struct
|
|
+{
|
|
+ ArchiveHandle *AH;
|
|
+ RestoreOptions *ropt;
|
|
+ int worker;
|
|
+ int pipeRead;
|
|
+ int pipeWrite;
|
|
+} WorkerInfo;
|
|
+
|
|
+#define pipewrite(a,b,c) send(a,b,c,0)
|
|
+#else
|
|
+/*
|
|
+ * aborting is only ever used in the master; the workers are fine with just
+ * wantAbort.
|
|
+ */
|
|
+static bool aborting = false;
|
|
+static volatile sig_atomic_t wantAbort = 0;
|
|
+
|
|
+#define pgpipe(a) pipe(a)
|
|
+#define piperead(a,b,c) read(a,b,c)
|
|
+#define pipewrite(a,b,c) write(a,b,c)
|
|
+#endif
|
|
+
|
|
+typedef struct ShutdownInformation
|
|
+{
|
|
+ ParallelState *pstate;
|
|
+ Archive *AHX;
|
|
+} ShutdownInformation;
|
|
+
|
|
+static ShutdownInformation shutdown_info;
|
|
+
|
|
+static const char *modulename = gettext_noop("parallel archiver");
|
|
+
|
|
+static ParallelSlot *GetMyPSlot(ParallelState *pstate);
|
|
+static void
|
|
+parallel_msg_master(ParallelSlot *slot, const char *modulename,
|
|
+ const char *fmt, va_list ap)
|
|
+__attribute__((format(PG_PRINTF_ATTRIBUTE, 3, 0)));
|
|
+static void archive_close_connection(int code, void *arg);
|
|
+static void ShutdownWorkersHard(ParallelState *pstate);
|
|
+static void WaitForTerminatingWorkers(ParallelState *pstate);
|
|
+
|
|
+#ifndef WIN32
|
|
+static void sigTermHandler(int signum);
|
|
+#endif
|
|
+static void SetupWorker(ArchiveHandle *AH, int pipefd[2], int worker,
|
|
+ RestoreOptions *ropt);
|
|
+static bool HasEveryWorkerTerminated(ParallelState *pstate);
|
|
+
|
|
+static void lockTableNoWait(ArchiveHandle *AH, TocEntry *te);
|
|
+static void WaitForCommands(ArchiveHandle *AH, int pipefd[2]);
|
|
+static char *getMessageFromMaster(int pipefd[2]);
|
|
+static void sendMessageToMaster(int pipefd[2], const char *str);
|
|
+static int select_loop(int maxFd, fd_set *workerset);
|
|
+static char *getMessageFromWorker(ParallelState *pstate,
|
|
+ bool do_wait, int *worker);
|
|
+static void sendMessageToWorker(ParallelState *pstate,
|
|
+ int worker, const char *str);
|
|
+static char *readMessageFromPipe(int fd);
|
|
+
|
|
+#define messageStartsWith(msg, prefix) \
|
|
+ (strncmp(msg, prefix, strlen(prefix)) == 0)
|
|
+#define messageEquals(msg, pattern) \
|
|
+ (strcmp(msg, pattern) == 0)
|
|
+
|
|
+#ifdef WIN32
|
|
+static void shutdown_parallel_dump_utils(int code, void *unused);
|
|
+bool parallel_init_done = false;
|
|
+static DWORD tls_index;
|
|
+DWORD mainThreadId;
|
|
+#endif
|
|
+
|
|
+
|
|
+#ifdef WIN32
|
|
+static void
|
|
+shutdown_parallel_dump_utils(int code, void *unused)
|
|
+{
|
|
+ /* Call the cleanup function only from the main thread */
|
|
+ if (mainThreadId == GetCurrentThreadId())
|
|
+ WSACleanup();
|
|
+}
|
|
+#endif
|
|
+
|
|
+void
|
|
+init_parallel_dump_utils(void)
|
|
+{
|
|
+#ifdef WIN32
|
|
+ if (!parallel_init_done)
|
|
+ {
|
|
+ WSADATA wsaData;
|
|
+ int err;
|
|
+
|
|
+ tls_index = TlsAlloc();
|
|
+ mainThreadId = GetCurrentThreadId();
|
|
+ err = WSAStartup(MAKEWORD(2, 2), &wsaData);
|
|
+ if (err != 0)
|
|
+ {
|
|
+ fprintf(stderr, _("%s: WSAStartup failed: %d\n"), progname, err);
|
|
+ exit_nicely(1);
|
|
+ }
|
|
+ on_exit_nicely(shutdown_parallel_dump_utils, NULL);
|
|
+ parallel_init_done = true;
|
|
+ }
|
|
+#endif
|
|
+}
|
|
+
|
|
+static ParallelSlot *
|
|
+GetMyPSlot(ParallelState *pstate)
|
|
+{
|
|
+ int i;
|
|
+
|
|
+ for (i = 0; i < pstate->numWorkers; i++)
|
|
+#ifdef WIN32
|
|
+ if (pstate->parallelSlot[i].threadId == GetCurrentThreadId())
|
|
+#else
|
|
+ if (pstate->parallelSlot[i].pid == getpid())
|
|
+#endif
|
|
+ return &(pstate->parallelSlot[i]);
|
|
+
|
|
+ return NULL;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Fail and die, with a message to stderr. Parameters as for write_msg.
|
|
+ *
|
|
+ * This is defined in parallel.c, because in parallel mode, things are more
|
|
+ * complicated. If the worker process does exit_horribly(), we forward its
|
|
+ * last words to the master process. The master process then does
|
|
+ * exit_horribly() with this error message itself and prints it normally.
|
|
+ * After printing the message, exit_horribly() on the master will shut down
|
|
+ * the remaining worker processes.
|
|
+ */
|
|
+void
|
|
+exit_horribly(const char *modulename, const char *fmt,...)
|
|
+{
|
|
+ va_list ap;
|
|
+ ParallelState *pstate = shutdown_info.pstate;
|
|
+ ParallelSlot *slot;
|
|
+
|
|
+ va_start(ap, fmt);
|
|
+
|
|
+ if (pstate == NULL)
|
|
+ {
|
|
+ /* Not in parallel mode, just write to stderr */
|
|
+ vwrite_msg(modulename, fmt, ap);
|
|
+ }
|
|
+ else
|
|
+ {
|
|
+ slot = GetMyPSlot(pstate);
|
|
+
|
|
+ if (!slot)
|
|
+ /* We're the parent, just write the message out */
|
|
+ vwrite_msg(modulename, fmt, ap);
|
|
+ else
|
|
+ /* If we're a worker process, send the msg to the master process */
|
|
+ parallel_msg_master(slot, modulename, fmt, ap);
|
|
+ }
|
|
+
|
|
+ va_end(ap);
|
|
+
|
|
+ exit_nicely(1);
|
|
+}
|
|
+
|
|
+/* Sends the error message from the worker to the master process */
|
|
+static void
|
|
+parallel_msg_master(ParallelSlot *slot, const char *modulename,
|
|
+ const char *fmt, va_list ap)
|
|
+{
|
|
+ char buf[512];
|
|
+ int pipefd[2];
|
|
+
|
|
+ pipefd[PIPE_READ] = slot->pipeRevRead;
|
|
+ pipefd[PIPE_WRITE] = slot->pipeRevWrite;
|
|
+
|
|
+ strcpy(buf, "ERROR ");
|
|
+ vsnprintf(buf + strlen("ERROR "),
|
|
+ sizeof(buf) - strlen("ERROR "), fmt, ap);
|
|
+
|
|
+ sendMessageToMaster(pipefd, buf);
|
|
+}
|
|
+
|
|
+/*
|
|
+ * A thread-local version of getLocalPQExpBuffer().
|
|
+ *
|
|
+ * Non-reentrant but reduces memory leakage. (On Windows the memory leakage
|
|
+ * will be one buffer per thread, which is at least better than one per call).
|
|
+ */
|
|
+static PQExpBuffer
|
|
+getThreadLocalPQExpBuffer(void)
|
|
+{
|
|
+ /*
|
|
+ * The Tls code goes awry if we use a static var, so we provide for both
|
|
+ * static and auto, and omit any use of the static var when using Tls.
|
|
+ */
|
|
+ static PQExpBuffer s_id_return = NULL;
|
|
+ PQExpBuffer id_return;
|
|
+
|
|
+#ifdef WIN32
|
|
+ if (parallel_init_done)
|
|
+ id_return = (PQExpBuffer) TlsGetValue(tls_index); /* 0 when not set */
|
|
+ else
|
|
+ id_return = s_id_return;
|
|
+#else
|
|
+ id_return = s_id_return;
|
|
+#endif
|
|
+
|
|
+ if (id_return) /* first time through? */
|
|
+ {
|
|
+ /* same buffer, just wipe contents */
|
|
+ resetPQExpBuffer(id_return);
|
|
+ }
|
|
+ else
|
|
+ {
|
|
+ /* new buffer */
|
|
+ id_return = createPQExpBuffer();
|
|
+#ifdef WIN32
|
|
+ if (parallel_init_done)
|
|
+ TlsSetValue(tls_index, id_return);
|
|
+ else
|
|
+ s_id_return = id_return;
|
|
+#else
|
|
+ s_id_return = id_return;
|
|
+#endif
|
|
+
|
|
+ }
|
|
+
|
|
+ return id_return;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * pg_dump and pg_restore register the Archive pointer for the exit handler
|
|
+ * (called from exit_horribly). This function mainly exists so that we can
|
|
+ * keep shutdown_info in file scope only.
|
|
+ */
|
|
+void
|
|
+on_exit_close_archive(Archive *AHX)
|
|
+{
|
|
+ shutdown_info.AHX = AHX;
|
|
+ on_exit_nicely(archive_close_connection, &shutdown_info);
|
|
+}
|
|
+
|
|
+/*
|
|
+ * This function can close archives in both the parallel and non-parallel
|
|
+ * case.
|
|
+ */
|
|
+static void
|
|
+archive_close_connection(int code, void *arg)
|
|
+{
|
|
+ ShutdownInformation *si = (ShutdownInformation *) arg;
|
|
+
|
|
+ if (si->pstate)
|
|
+ {
|
|
+ ParallelSlot *slot = GetMyPSlot(si->pstate);
|
|
+
|
|
+ if (!slot)
|
|
+ {
|
|
+ /*
|
|
+ * We're the master: We have already printed out the message
|
|
+ * passed to exit_horribly() either from the master itself or from
|
|
+ * a worker process. Now we need to close our own database
|
|
+ * connection (only open during parallel dump but not restore) and
|
|
+ * shut down the remaining workers.
|
|
+ */
|
|
+ DisconnectDatabase(si->AHX);
|
|
+#ifndef WIN32
|
|
+
|
|
+ /*
|
|
+ * Setting aborting to true switches to best-effort-mode
|
|
+ * (send/receive but ignore errors) in communicating with our
|
|
+ * workers.
|
|
+ */
|
|
+ aborting = true;
|
|
+#endif
|
|
+ ShutdownWorkersHard(si->pstate);
|
|
+ }
|
|
+ else if (slot->args->AH)
|
|
+ DisconnectDatabase(&(slot->args->AH->public));
|
|
+ }
|
|
+ else if (si->AHX)
|
|
+ DisconnectDatabase(si->AHX);
|
|
+}
|
|
+
|
|
+/*
|
|
+ * If we have one worker that terminates for some reason, we'd like the other
|
|
+ * threads to terminate as well (and not finish with their 70 GB table dump
|
|
+ * first...). Now in UNIX we can just kill these processes, and let the signal
|
|
+ * handler set wantAbort to 1. In Windows we set a termEvent and this serves
|
|
+ * as the signal for everyone to terminate.
|
|
+ */
|
|
+void
|
|
+checkAborting(ArchiveHandle *AH)
|
|
+{
|
|
+#ifdef WIN32
|
|
+ if (WaitForSingleObject(termEvent, 0) == WAIT_OBJECT_0)
|
|
+#else
|
|
+ if (wantAbort)
|
|
+#endif
|
|
+ exit_horribly(modulename, "worker is terminating\n");
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Shut down any remaining workers; this has an implicit do_wait == true.
|
|
+ *
|
|
+ * The fastest way we can make the workers terminate gracefully is when
|
|
+ * they are listening for new commands and we just tell them to terminate.
|
|
+ */
|
|
+static void
|
|
+ShutdownWorkersHard(ParallelState *pstate)
|
|
+{
|
|
+#ifndef WIN32
|
|
+ int i;
|
|
+
|
|
+ signal(SIGPIPE, SIG_IGN);
|
|
+
|
|
+ /*
|
|
+ * Close our write end of the sockets so that the workers know they can
|
|
+ * exit.
|
|
+ */
|
|
+ for (i = 0; i < pstate->numWorkers; i++)
|
|
+ closesocket(pstate->parallelSlot[i].pipeWrite);
|
|
+
|
|
+ for (i = 0; i < pstate->numWorkers; i++)
|
|
+ kill(pstate->parallelSlot[i].pid, SIGTERM);
|
|
+#else
|
|
+ /* The workers monitor this event via checkAborting(). */
|
|
+ SetEvent(termEvent);
|
|
+#endif
|
|
+
|
|
+ WaitForTerminatingWorkers(pstate);
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Wait for the termination of the processes using the OS-specific method.
|
|
+ */
|
|
+static void
|
|
+WaitForTerminatingWorkers(ParallelState *pstate)
|
|
+{
|
|
+ while (!HasEveryWorkerTerminated(pstate))
|
|
+ {
|
|
+ ParallelSlot *slot = NULL;
|
|
+ int j;
|
|
+
|
|
+#ifndef WIN32
|
|
+ int status;
|
|
+ pid_t pid = wait(&status);
|
|
+
|
|
+ for (j = 0; j < pstate->numWorkers; j++)
|
|
+ if (pstate->parallelSlot[j].pid == pid)
|
|
+ slot = &(pstate->parallelSlot[j]);
|
|
+#else
|
|
+ uintptr_t hThread;
|
|
+ DWORD ret;
|
|
+ uintptr_t *lpHandles = pg_malloc(sizeof(HANDLE) * pstate->numWorkers);
|
|
+ int nrun = 0;
|
|
+
|
|
+ for (j = 0; j < pstate->numWorkers; j++)
|
|
+ if (pstate->parallelSlot[j].workerStatus != WRKR_TERMINATED)
|
|
+ {
|
|
+ lpHandles[nrun] = pstate->parallelSlot[j].hThread;
|
|
+ nrun++;
|
|
+ }
|
|
+ ret = WaitForMultipleObjects(nrun, (HANDLE *) lpHandles, false, INFINITE);
|
|
+ Assert(ret != WAIT_FAILED);
|
|
+ hThread = lpHandles[ret - WAIT_OBJECT_0];
|
|
+
|
|
+ for (j = 0; j < pstate->numWorkers; j++)
|
|
+ if (pstate->parallelSlot[j].hThread == hThread)
|
|
+ slot = &(pstate->parallelSlot[j]);
|
|
+
|
|
+ free(lpHandles);
|
|
+#endif
|
|
+ Assert(slot);
|
|
+
|
|
+ slot->workerStatus = WRKR_TERMINATED;
|
|
+ }
|
|
+ Assert(HasEveryWorkerTerminated(pstate));
|
|
+}
|
|
+
|
|
+#ifndef WIN32
|
|
+/* Signal handling (UNIX only) */
|
|
+static void
|
|
+sigTermHandler(int signum)
|
|
+{
|
|
+ wantAbort = 1;
|
|
+}
|
|
+#endif
|
|
+
|
|
+/*
|
|
+ * This function is called by both UNIX and Windows variants to set up a
|
|
+ * worker process.
|
|
+ */
|
|
+static void
|
|
+SetupWorker(ArchiveHandle *AH, int pipefd[2], int worker,
|
|
+ RestoreOptions *ropt)
|
|
+{
|
|
+ /*
|
|
+ * Call the setup worker function that's defined in the ArchiveHandle.
|
|
+ *
|
|
+	 * We get the raw connection only so that we can close it properly when
+	 * we shut down; that is only necessary when the worker is brought down
+	 * because of an error.
|
|
+ */
|
|
+ (AH->SetupWorkerPtr) ((Archive *) AH, ropt);
|
|
+
|
|
+ Assert(AH->connection != NULL);
|
|
+
|
|
+ WaitForCommands(AH, pipefd);
|
|
+
|
|
+ closesocket(pipefd[PIPE_READ]);
|
|
+ closesocket(pipefd[PIPE_WRITE]);
|
|
+}
|
|
+
|
|
+#ifdef WIN32
|
|
+static unsigned __stdcall
|
|
+init_spawned_worker_win32(WorkerInfo *wi)
|
|
+{
|
|
+ ArchiveHandle *AH;
|
|
+ int pipefd[2] = {wi->pipeRead, wi->pipeWrite};
|
|
+ int worker = wi->worker;
|
|
+ RestoreOptions *ropt = wi->ropt;
|
|
+
|
|
+ AH = CloneArchive(wi->AH);
|
|
+
|
|
+ free(wi);
|
|
+ SetupWorker(AH, pipefd, worker, ropt);
|
|
+
|
|
+ DeCloneArchive(AH);
|
|
+ _endthreadex(0);
|
|
+ return 0;
|
|
+}
|
|
+#endif
|
|
+
|
|
+/*
|
|
+ * This function starts the parallel dump or restore by spawning off the
|
|
+ * worker processes in both Unix and Windows. For Windows, it creates a number
|
|
+ * of threads while it does a fork() on Unix.
|
|
+ */
|
|
+ParallelState *
|
|
+ParallelBackupStart(ArchiveHandle *AH, RestoreOptions *ropt)
|
|
+{
|
|
+ ParallelState *pstate;
|
|
+ int i;
|
|
+ const size_t slotSize = AH->public.numWorkers * sizeof(ParallelSlot);
|
|
+
|
|
+ Assert(AH->public.numWorkers > 0);
|
|
+
|
|
+ /* Ensure stdio state is quiesced before forking */
|
|
+ fflush(NULL);
|
|
+
|
|
+ pstate = (ParallelState *) pg_malloc(sizeof(ParallelState));
|
|
+
|
|
+ pstate->numWorkers = AH->public.numWorkers;
|
|
+ pstate->parallelSlot = NULL;
|
|
+
|
|
+ if (AH->public.numWorkers == 1)
|
|
+ return pstate;
|
|
+
|
|
+ pstate->parallelSlot = (ParallelSlot *) pg_malloc(slotSize);
|
|
+ memset((void *) pstate->parallelSlot, 0, slotSize);
|
|
+
|
|
+ /*
|
|
+ * Set the pstate in the shutdown_info. The exit handler uses pstate if
|
|
+ * set and falls back to AHX otherwise.
|
|
+ */
|
|
+ shutdown_info.pstate = pstate;
|
|
+ getLocalPQExpBuffer = getThreadLocalPQExpBuffer;
|
|
+
|
|
+#ifdef WIN32
|
|
+ tMasterThreadId = GetCurrentThreadId();
|
|
+ termEvent = CreateEvent(NULL, true, false, "Terminate");
|
|
+#else
|
|
+ signal(SIGTERM, sigTermHandler);
|
|
+ signal(SIGINT, sigTermHandler);
|
|
+ signal(SIGQUIT, sigTermHandler);
|
|
+#endif
|
|
+
|
|
+ for (i = 0; i < pstate->numWorkers; i++)
|
|
+ {
|
|
+#ifdef WIN32
|
|
+ WorkerInfo *wi;
|
|
+ uintptr_t handle;
|
|
+#else
|
|
+ pid_t pid;
|
|
+#endif
|
|
+ int pipeMW[2],
|
|
+ pipeWM[2];
|
|
+
|
|
+ if (pgpipe(pipeMW) < 0 || pgpipe(pipeWM) < 0)
|
|
+ exit_horribly(modulename,
|
|
+ "could not create communication channels: %s\n",
|
|
+ strerror(errno));
|
|
+
|
|
+ pstate->parallelSlot[i].workerStatus = WRKR_IDLE;
|
|
+ pstate->parallelSlot[i].args = (ParallelArgs *) pg_malloc(sizeof(ParallelArgs));
|
|
+ pstate->parallelSlot[i].args->AH = NULL;
|
|
+ pstate->parallelSlot[i].args->te = NULL;
|
|
+#ifdef WIN32
|
|
+ /* Allocate a new structure for every worker */
|
|
+ wi = (WorkerInfo *) pg_malloc(sizeof(WorkerInfo));
|
|
+
|
|
+ wi->ropt = ropt;
|
|
+ wi->worker = i;
|
|
+ wi->AH = AH;
|
|
+ wi->pipeRead = pstate->parallelSlot[i].pipeRevRead = pipeMW[PIPE_READ];
|
|
+ wi->pipeWrite = pstate->parallelSlot[i].pipeRevWrite = pipeWM[PIPE_WRITE];
|
|
+
|
|
+ handle = _beginthreadex(NULL, 0, (void *) &init_spawned_worker_win32,
|
|
+ wi, 0, &(pstate->parallelSlot[i].threadId));
|
|
+ pstate->parallelSlot[i].hThread = handle;
|
|
+#else
|
|
+ pid = fork();
|
|
+ if (pid == 0)
|
|
+ {
|
|
+ /* we are the worker */
|
|
+ int j;
|
|
+ int pipefd[2];
|
|
+
|
|
+ pipefd[0] = pipeMW[PIPE_READ];
|
|
+ pipefd[1] = pipeWM[PIPE_WRITE];
|
|
+
|
|
+ /*
|
|
+			 * Store the fds for the reverse communication in pstate. We only
+			 * use these in case of an error and don't otherwise use pstate in
+			 * the worker process. On Windows we write to the global pstate;
+			 * on Unix we write to our process-local copy, which is also where
+			 * we would retrieve this information from later.
|
|
+ */
|
|
+ pstate->parallelSlot[i].pipeRevRead = pipefd[PIPE_READ];
|
|
+ pstate->parallelSlot[i].pipeRevWrite = pipefd[PIPE_WRITE];
|
|
+ pstate->parallelSlot[i].pid = getpid();
|
|
+
|
|
+ /*
|
|
+ * Call CloneArchive on Unix as well even though technically we
|
|
+ * don't need to because fork() gives us a copy in our own address
|
|
+ * space already. But CloneArchive resets the state information
|
|
+ * and also clones the database connection (for parallel dump)
|
|
+ * which both seem kinda helpful.
|
|
+ */
|
|
+ pstate->parallelSlot[i].args->AH = CloneArchive(AH);
|
|
+
|
|
+ /* close read end of Worker -> Master */
|
|
+ closesocket(pipeWM[PIPE_READ]);
|
|
+ /* close write end of Master -> Worker */
|
|
+ closesocket(pipeMW[PIPE_WRITE]);
|
|
+
|
|
+ /*
|
|
+ * Close all inherited fds for communication of the master with
|
|
+ * the other workers.
|
|
+ */
|
|
+ for (j = 0; j < i; j++)
|
|
+ {
|
|
+ closesocket(pstate->parallelSlot[j].pipeRead);
|
|
+ closesocket(pstate->parallelSlot[j].pipeWrite);
|
|
+ }
|
|
+
|
|
+ SetupWorker(pstate->parallelSlot[i].args->AH, pipefd, i, ropt);
|
|
+
|
|
+ exit(0);
|
|
+ }
|
|
+ else if (pid < 0)
|
|
+ /* fork failed */
|
|
+ exit_horribly(modulename,
|
|
+ "could not create worker process: %s\n",
|
|
+ strerror(errno));
|
|
+
|
|
+ /* we are the Master, pid > 0 here */
|
|
+ Assert(pid > 0);
|
|
+
|
|
+ /* close read end of Master -> Worker */
|
|
+ closesocket(pipeMW[PIPE_READ]);
|
|
+ /* close write end of Worker -> Master */
|
|
+ closesocket(pipeWM[PIPE_WRITE]);
|
|
+
|
|
+ pstate->parallelSlot[i].pid = pid;
|
|
+#endif
|
|
+
|
|
+ pstate->parallelSlot[i].pipeRead = pipeWM[PIPE_READ];
|
|
+ pstate->parallelSlot[i].pipeWrite = pipeMW[PIPE_WRITE];
|
|
+ }
|
|
+
|
|
+ return pstate;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Tell all of our workers to terminate.
|
|
+ *
|
|
+ * Pretty straightforward routine: we close the command sockets, which tells
+ * every worker to terminate, and then wait for all of them to finish.
|
|
+ */
|
|
+void
|
|
+ParallelBackupEnd(ArchiveHandle *AH, ParallelState *pstate)
|
|
+{
|
|
+ int i;
|
|
+
|
|
+ if (pstate->numWorkers == 1)
|
|
+ return;
|
|
+
|
|
+ Assert(IsEveryWorkerIdle(pstate));
|
|
+
|
|
+ /* close the sockets so that the workers know they can exit */
|
|
+ for (i = 0; i < pstate->numWorkers; i++)
|
|
+ {
|
|
+ closesocket(pstate->parallelSlot[i].pipeRead);
|
|
+ closesocket(pstate->parallelSlot[i].pipeWrite);
|
|
+ }
|
|
+ WaitForTerminatingWorkers(pstate);
|
|
+
|
|
+ /*
|
|
+	 * Unset the pstate again, so that the exit handler in the parent falls
+	 * back to closing AH->connection (if connected).
|
|
+ */
|
|
+ shutdown_info.pstate = NULL;
|
|
+
|
|
+ free(pstate->parallelSlot);
|
|
+ free(pstate);
|
|
+}
|
|
+
|
|
+
|
|
+/*
+ * The sequence is the following (for dump, similar for restore):
+ *
+ * The master process starts the parallel backup in ParallelBackupStart();
+ * this forks the worker processes, which enter WaitForCommands().
+ *
+ * The master process dispatches an individual work item to one of the worker
+ * processes in DispatchJobForTocEntry(). It calls
+ * AH->MasterStartParallelItemPtr, a routine of the output format. This
+ * function's arguments are the parent's archive handle AH (containing the
+ * full catalog information), the TocEntry that the worker should work on and
+ * a T_Action act indicating whether this is a backup or a restore item. The
+ * function then converts the TocEntry assignment into a string that is then
+ * sent over to the worker process. In the simplest case that would be
+ * something like "DUMP 1234", with 1234 being the TocEntry id.
+ *
+ * The worker receives the message in the routine pointed to by
+ * WorkerJobDumpPtr or WorkerJobRestorePtr. These are also pointers to
+ * corresponding routines of the respective output format, e.g.
+ * _WorkerJobDumpDirectory().
+ *
+ * Remember that we have forked off the workers only after we have read in the
+ * catalog. That's why our worker processes can also access the catalog
+ * information. Now they re-translate the textual representation to a TocEntry
+ * on their side and do the required action (restore or dump).
+ *
+ * The result is again a textual string that is sent back to the master and is
+ * interpreted by AH->MasterEndParallelItemPtr. This function can update state
+ * or catalog information on the master's side, depending on the reply from
+ * the worker process. In the end it returns a status which is 0 for
+ * successful execution.
+ *
+ * ---------------------------------------------------------------------
+ * Master                                   Worker
+ *
+ *                                          enters WaitForCommands()
+ * DispatchJobForTocEntry(...te...)
+ *
+ * [ Worker is IDLE ]
+ *
+ * arg = (MasterStartParallelItemPtr)()
+ * send: DUMP arg
+ *                                          receive: DUMP arg
+ *                                          str = (WorkerJobDumpPtr)(arg)
+ * [ Worker is WORKING ]                    ... gets te from arg ...
+ *                                          ... dump te ...
+ *                                          send: OK DUMP info
+ *
+ * In ListenToWorkers():
+ *
+ * [ Worker is FINISHED ]
+ * receive: OK DUMP info
+ * status = (MasterEndParallelItemPtr)(info)
+ *
+ * In ReapWorkerStatus(&ptr):
+ *		*ptr = status;
+ * [ Worker is IDLE ]
+ * ---------------------------------------------------------------------
+ */
|
|
+void
|
|
+DispatchJobForTocEntry(ArchiveHandle *AH, ParallelState *pstate, TocEntry *te,
|
|
+ T_Action act)
|
|
+{
|
|
+ int worker;
|
|
+ char *arg;
|
|
+
|
|
+ /* our caller makes sure that at least one worker is idle */
|
|
+ Assert(GetIdleWorker(pstate) != NO_SLOT);
|
|
+ worker = GetIdleWorker(pstate);
|
|
+ Assert(worker != NO_SLOT);
|
|
+
|
|
+ arg = (AH->MasterStartParallelItemPtr) (AH, te, act);
|
|
+
|
|
+ sendMessageToWorker(pstate, worker, arg);
|
|
+
|
|
+ pstate->parallelSlot[worker].workerStatus = WRKR_WORKING;
|
|
+ pstate->parallelSlot[worker].args->te = te;
|
|
+}
+
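From the master's point of view, the protocol described above boils down to a dispatch loop over the functions declared in parallel.h. The following minimal sketch is illustrative only and is not part of the upstream sources; the dispatch_all() name and the jobs/njobs parameters are invented for the example.

	/* Illustrative master-side dispatch loop (sketch only) */
	static void
	dispatch_all(ArchiveHandle *AH, ParallelState *pstate,
				 TocEntry **jobs, int njobs, T_Action act)
	{
		int			i;

		for (i = 0; i < njobs; i++)
		{
			/* Block until at least one slot is WRKR_IDLE again. */
			EnsureIdleWorker(AH, pstate);

			/* Sends e.g. "DUMP 1234" and marks the slot WRKR_WORKING. */
			DispatchJobForTocEntry(AH, pstate, jobs[i], act);
		}

		/*
		 * Drain the replies: ListenToWorkers() moves slots from WRKR_WORKING
		 * to WRKR_FINISHED, ReapWorkerStatus() collects each status and
		 * returns the slot to WRKR_IDLE; EnsureWorkersFinished() wraps both
		 * and aborts on a non-zero status.
		 */
		EnsureWorkersFinished(AH, pstate);
	}

A caller would bracket such a loop with ParallelBackupStart() and ParallelBackupEnd(), exactly as RestoreArchive() does further down in this patch.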
|
|
+/*
|
|
+ * Find the first free parallel slot (if any).
|
|
+ */
|
|
+int
|
|
+GetIdleWorker(ParallelState *pstate)
|
|
+{
|
|
+ int i;
|
|
+
|
|
+ for (i = 0; i < pstate->numWorkers; i++)
|
|
+ if (pstate->parallelSlot[i].workerStatus == WRKR_IDLE)
|
|
+ return i;
|
|
+ return NO_SLOT;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Return true iff every worker process is in the WRKR_TERMINATED state.
|
|
+ */
|
|
+static bool
|
|
+HasEveryWorkerTerminated(ParallelState *pstate)
|
|
+{
|
|
+ int i;
|
|
+
|
|
+ for (i = 0; i < pstate->numWorkers; i++)
|
|
+ if (pstate->parallelSlot[i].workerStatus != WRKR_TERMINATED)
|
|
+ return false;
|
|
+ return true;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Return true iff every worker is in the WRKR_IDLE state.
|
|
+ */
|
|
+bool
|
|
+IsEveryWorkerIdle(ParallelState *pstate)
|
|
+{
|
|
+ int i;
|
|
+
|
|
+ for (i = 0; i < pstate->numWorkers; i++)
|
|
+ if (pstate->parallelSlot[i].workerStatus != WRKR_IDLE)
|
|
+ return false;
|
|
+ return true;
|
|
+}
|
|
+
|
|
+/*
+ * ---------------------------------------------------------------------
+ * One danger of the parallel backup is a possible deadlock:
+ *
+ * 1) Master dumps the schema and locks all tables in ACCESS SHARE mode.
+ * 2) Another process requests an ACCESS EXCLUSIVE lock (which is not granted
+ *    because the master holds a conflicting ACCESS SHARE lock).
+ * 3) The worker process also requests an ACCESS SHARE lock to read the table.
+ *    The worker is not granted that lock but is enqueued behind the ACCESS
+ *    EXCLUSIVE lock request.
+ * ---------------------------------------------------------------------
+ *
+ * What we do here is to request the ACCESS SHARE lock in the worker with
+ * NOWAIT prior to touching the table. If we don't get the lock, then we know
+ * that somebody else has requested an ACCESS EXCLUSIVE lock in the meantime,
+ * and we can just fail the whole backup because we have detected a deadlock.
+ */
|
|
+static void
|
|
+lockTableNoWait(ArchiveHandle *AH, TocEntry *te)
|
|
+{
|
|
+ Archive *AHX = (Archive *) AH;
|
|
+ const char *qualId;
|
|
+ PQExpBuffer query = createPQExpBuffer();
|
|
+ PGresult *res;
|
|
+
|
|
+ Assert(AH->format == archDirectory);
|
|
+ Assert(strcmp(te->desc, "BLOBS") != 0);
|
|
+
|
|
+ appendPQExpBuffer(query,
|
|
+ "SELECT pg_namespace.nspname,"
|
|
+ " pg_class.relname "
|
|
+ " FROM pg_class "
|
|
+ " JOIN pg_namespace on pg_namespace.oid = relnamespace "
|
|
+ " WHERE pg_class.oid = %u", te->catalogId.oid);
|
|
+
|
|
+ res = PQexec(AH->connection, query->data);
|
|
+
|
|
+ if (!res || PQresultStatus(res) != PGRES_TUPLES_OK)
|
|
+ exit_horribly(modulename,
|
|
+ "could not get relation name for OID %u: %s\n",
|
|
+ te->catalogId.oid, PQerrorMessage(AH->connection));
|
|
+
|
|
+ resetPQExpBuffer(query);
|
|
+
|
|
+ qualId = fmtQualifiedId(AHX->remoteVersion,
|
|
+ PQgetvalue(res, 0, 0),
|
|
+ PQgetvalue(res, 0, 1));
|
|
+
|
|
+ appendPQExpBuffer(query, "LOCK TABLE %s IN ACCESS SHARE MODE NOWAIT",
|
|
+ qualId);
|
|
+ PQclear(res);
|
|
+
|
|
+ res = PQexec(AH->connection, query->data);
|
|
+
|
|
+ if (!res || PQresultStatus(res) != PGRES_COMMAND_OK)
|
|
+ exit_horribly(modulename,
|
|
+ "could not obtain lock on relation \"%s\"\n"
|
|
+ "This usually means that someone requested an ACCESS EXCLUSIVE lock "
|
|
+ "on the table after the pg_dump parent process had gotten the "
|
|
+ "initial ACCESS SHARE lock on the table.\n", qualId);
|
|
+
|
|
+ PQclear(res);
|
|
+ destroyPQExpBuffer(query);
|
|
+}
|
|
+
|
|
+/*
+ * This is the main routine for the worker process.
+ *
+ * When it starts up it enters this routine and waits for commands from the
+ * master process. After having processed a command it comes back here to
+ * wait for the next one. Finally, the master closes the command channel,
+ * upon which the worker exits.
+ */
|
|
+static void
|
|
+WaitForCommands(ArchiveHandle *AH, int pipefd[2])
|
|
+{
|
|
+ char *command;
|
|
+ DumpId dumpId;
|
|
+ int nBytes;
|
|
+ char *str = NULL;
|
|
+ TocEntry *te;
|
|
+
|
|
+ for (;;)
|
|
+ {
|
|
+ if (!(command = getMessageFromMaster(pipefd)))
|
|
+ {
|
|
+ PQfinish(AH->connection);
|
|
+ AH->connection = NULL;
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ if (messageStartsWith(command, "DUMP "))
|
|
+ {
|
|
+ Assert(AH->format == archDirectory);
|
|
+ sscanf(command + strlen("DUMP "), "%d%n", &dumpId, &nBytes);
|
|
+ Assert(nBytes == strlen(command) - strlen("DUMP "));
|
|
+
|
|
+ te = getTocEntryByDumpId(AH, dumpId);
|
|
+ Assert(te != NULL);
|
|
+
|
|
+ /*
|
|
+ * Lock the table but with NOWAIT. Note that the parent is already
|
|
+ * holding a lock. If we cannot acquire another ACCESS SHARE MODE
|
|
+ * lock, then somebody else has requested an exclusive lock in the
|
|
+ * meantime. lockTableNoWait dies in this case to prevent a
|
|
+ * deadlock.
|
|
+ */
|
|
+ if (strcmp(te->desc, "BLOBS") != 0)
|
|
+ lockTableNoWait(AH, te);
|
|
+
|
|
+ /*
|
|
+ * The message we return here has been pg_malloc()ed and we are
|
|
+ * responsible for free()ing it.
|
|
+ */
|
|
+ str = (AH->WorkerJobDumpPtr) (AH, te);
|
|
+ Assert(AH->connection != NULL);
|
|
+ sendMessageToMaster(pipefd, str);
|
|
+ free(str);
|
|
+ }
|
|
+ else if (messageStartsWith(command, "RESTORE "))
|
|
+ {
|
|
+ Assert(AH->format == archDirectory || AH->format == archCustom);
|
|
+ Assert(AH->connection != NULL);
|
|
+
|
|
+ sscanf(command + strlen("RESTORE "), "%d%n", &dumpId, &nBytes);
|
|
+ Assert(nBytes == strlen(command) - strlen("RESTORE "));
|
|
+
|
|
+ te = getTocEntryByDumpId(AH, dumpId);
|
|
+ Assert(te != NULL);
|
|
+
|
|
+ /*
|
|
+ * The message we return here has been pg_malloc()ed and we are
|
|
+ * responsible for free()ing it.
|
|
+ */
|
|
+ str = (AH->WorkerJobRestorePtr) (AH, te);
|
|
+ Assert(AH->connection != NULL);
|
|
+ sendMessageToMaster(pipefd, str);
|
|
+ free(str);
|
|
+ }
|
|
+ else
|
|
+ exit_horribly(modulename,
|
|
+ "unrecognized command on communication channel: %s\n",
|
|
+ command);
|
|
+
|
|
+ /* command was pg_malloc'd and we are responsible for free()ing it. */
|
|
+ free(command);
|
|
+ }
|
|
+}
|
|
+
|
|
+/*
+ * ---------------------------------------------------------------------
+ * Note the status change:
+ *
+ * DispatchJobForTocEntry		WRKR_IDLE -> WRKR_WORKING
+ * ListenToWorkers			WRKR_WORKING -> WRKR_FINISHED / WRKR_TERMINATED
+ * ReapWorkerStatus			WRKR_FINISHED -> WRKR_IDLE
+ * ---------------------------------------------------------------------
+ *
+ * Just calling ReapWorkerStatus() when all workers are working might or
+ * might not give you an idle worker: you need to call ListenToWorkers() in
+ * between and only thereafter ReapWorkerStatus(). This is necessary in order
+ * to get and deal with the status (= result) of the worker's execution.
+ */
|
|
+void
|
|
+ListenToWorkers(ArchiveHandle *AH, ParallelState *pstate, bool do_wait)
|
|
+{
|
|
+ int worker;
|
|
+ char *msg;
|
|
+
|
|
+ msg = getMessageFromWorker(pstate, do_wait, &worker);
|
|
+
|
|
+ if (!msg)
|
|
+ {
|
|
+ if (do_wait)
|
|
+ exit_horribly(modulename, "a worker process died unexpectedly\n");
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ if (messageStartsWith(msg, "OK "))
|
|
+ {
|
|
+ char *statusString;
|
|
+ TocEntry *te;
|
|
+
|
|
+ pstate->parallelSlot[worker].workerStatus = WRKR_FINISHED;
|
|
+ te = pstate->parallelSlot[worker].args->te;
|
|
+ if (messageStartsWith(msg, "OK RESTORE "))
|
|
+ {
|
|
+ statusString = msg + strlen("OK RESTORE ");
|
|
+ pstate->parallelSlot[worker].status =
|
|
+ (AH->MasterEndParallelItemPtr)
|
|
+ (AH, te, statusString, ACT_RESTORE);
|
|
+ }
|
|
+ else if (messageStartsWith(msg, "OK DUMP "))
|
|
+ {
|
|
+ statusString = msg + strlen("OK DUMP ");
|
|
+ pstate->parallelSlot[worker].status =
|
|
+ (AH->MasterEndParallelItemPtr)
|
|
+ (AH, te, statusString, ACT_DUMP);
|
|
+ }
|
|
+ else
|
|
+ exit_horribly(modulename,
|
|
+ "invalid message received from worker: %s\n", msg);
|
|
+ }
|
|
+ else if (messageStartsWith(msg, "ERROR "))
|
|
+ {
|
|
+ Assert(AH->format == archDirectory || AH->format == archCustom);
|
|
+ pstate->parallelSlot[worker].workerStatus = WRKR_TERMINATED;
|
|
+ exit_horribly(modulename, "%s", msg + strlen("ERROR "));
|
|
+ }
|
|
+ else
|
|
+ exit_horribly(modulename, "invalid message received from worker: %s\n", msg);
|
|
+
|
|
+ /* both Unix and Win32 return pg_malloc()ed space, so we free it */
|
|
+ free(msg);
|
|
+}
|
|
+
|
|
+/*
|
|
+ * This function is executed in the master process.
|
|
+ *
|
|
+ * It is used to get the return value of a worker that has finished a work
+ * item. If such a worker is found, its status is stored in *status, the
+ * worker's slot is marked idle again, and the id of the worker is returned.
|
|
+ */
|
|
+int
|
|
+ReapWorkerStatus(ParallelState *pstate, int *status)
|
|
+{
|
|
+ int i;
|
|
+
|
|
+ for (i = 0; i < pstate->numWorkers; i++)
|
|
+ {
|
|
+ if (pstate->parallelSlot[i].workerStatus == WRKR_FINISHED)
|
|
+ {
|
|
+ *status = pstate->parallelSlot[i].status;
|
|
+ pstate->parallelSlot[i].status = 0;
|
|
+ pstate->parallelSlot[i].workerStatus = WRKR_IDLE;
|
|
+ return i;
|
|
+ }
|
|
+ }
|
|
+ return NO_SLOT;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * This function is executed in the master process.
|
|
+ *
|
|
+ * It waits for an idle worker process and returns only when there is one.
|
|
+ */
|
|
+void
|
|
+EnsureIdleWorker(ArchiveHandle *AH, ParallelState *pstate)
|
|
+{
|
|
+ int ret_worker;
|
|
+ int work_status;
|
|
+
|
|
+ for (;;)
|
|
+ {
|
|
+ int nTerm = 0;
|
|
+
|
|
+ while ((ret_worker = ReapWorkerStatus(pstate, &work_status)) != NO_SLOT)
|
|
+ {
|
|
+ if (work_status != 0)
|
|
+ exit_horribly(modulename, "error processing a parallel work item\n");
|
|
+
|
|
+ nTerm++;
|
|
+ }
|
|
+
|
|
+ /*
|
|
+ * We need to make sure that we have an idle worker before dispatching
|
|
+ * the next item. If nTerm > 0 we already have that (quick check).
|
|
+ */
|
|
+ if (nTerm > 0)
|
|
+ return;
|
|
+
|
|
+ /* explicit check for an idle worker */
|
|
+ if (GetIdleWorker(pstate) != NO_SLOT)
|
|
+ return;
|
|
+
|
|
+ /*
|
|
+ * If we have no idle worker, read the result of one or more workers
|
|
+ * and loop the loop to call ReapWorkerStatus() on them
|
|
+ */
|
|
+ ListenToWorkers(AH, pstate, true);
|
|
+ }
|
|
+}
|
|
+
|
|
+/*
|
|
+ * This function is executed in the master process.
|
|
+ *
|
|
+ * It waits for all workers to terminate.
|
|
+ */
|
|
+void
|
|
+EnsureWorkersFinished(ArchiveHandle *AH, ParallelState *pstate)
|
|
+{
|
|
+ int work_status;
|
|
+
|
|
+ if (!pstate || pstate->numWorkers == 1)
|
|
+ return;
|
|
+
|
|
+ /* Waiting for the remaining worker processes to finish */
|
|
+ while (!IsEveryWorkerIdle(pstate))
|
|
+ {
|
|
+ if (ReapWorkerStatus(pstate, &work_status) == NO_SLOT)
|
|
+ ListenToWorkers(AH, pstate, true);
|
|
+ else if (work_status != 0)
|
|
+ exit_horribly(modulename,
|
|
+ "error processing a parallel work item\n");
|
|
+ }
|
|
+}
|
|
+
|
|
+/*
|
|
+ * This function is executed in the worker process.
|
|
+ *
|
|
+ * It returns the next message on the communication channel, blocking until it
|
|
+ * becomes available.
|
|
+ */
|
|
+static char *
|
|
+getMessageFromMaster(int pipefd[2])
|
|
+{
|
|
+ return readMessageFromPipe(pipefd[PIPE_READ]);
|
|
+}
|
|
+
|
|
+/*
|
|
+ * This function is executed in the worker process.
|
|
+ *
|
|
+ * It sends a message to the master on the communication channel.
|
|
+ */
|
|
+static void
|
|
+sendMessageToMaster(int pipefd[2], const char *str)
|
|
+{
|
|
+ int len = strlen(str) + 1;
|
|
+
|
|
+ if (pipewrite(pipefd[PIPE_WRITE], str, len) != len)
|
|
+ exit_horribly(modulename,
|
|
+ "could not write to the communication channel: %s\n",
|
|
+ strerror(errno));
|
|
+}
|
|
+
|
|
+/*
|
|
+ * A select loop that repeats calling select until a descriptor in the read
|
|
+ * set becomes readable. On Windows we have to check for the termination event
|
|
+ * from time to time, on Unix we can just block forever.
|
|
+ */
|
|
+static int
|
|
+select_loop(int maxFd, fd_set *workerset)
|
|
+{
|
|
+ int i;
|
|
+ fd_set saveSet = *workerset;
|
|
+
|
|
+#ifdef WIN32
|
|
+ /* should always be the master */
|
|
+ Assert(tMasterThreadId == GetCurrentThreadId());
|
|
+
|
|
+ for (;;)
|
|
+ {
|
|
+ /*
|
|
+ * sleep a quarter of a second before checking if we should terminate.
|
|
+ */
|
|
+ struct timeval tv = {0, 250000};
|
|
+
|
|
+ *workerset = saveSet;
|
|
+ i = select(maxFd + 1, workerset, NULL, NULL, &tv);
|
|
+
|
|
+ if (i == SOCKET_ERROR && WSAGetLastError() == WSAEINTR)
|
|
+ continue;
|
|
+ if (i)
|
|
+ break;
|
|
+ }
|
|
+#else /* UNIX */
|
|
+
|
|
+ for (;;)
|
|
+ {
|
|
+ *workerset = saveSet;
|
|
+ i = select(maxFd + 1, workerset, NULL, NULL, NULL);
|
|
+
|
|
+ /*
|
|
+		 * If we Ctrl-C the master process, it's likely that we interrupt
|
|
+ * select() here. The signal handler will set wantAbort == true and
|
|
+ * the shutdown journey starts from here. Note that we'll come back
|
|
+ * here later when we tell all workers to terminate and read their
|
|
+ * responses. But then we have aborting set to true.
|
|
+ */
|
|
+ if (wantAbort && !aborting)
|
|
+ exit_horribly(modulename, "terminated by user\n");
|
|
+
|
|
+ if (i < 0 && errno == EINTR)
|
|
+ continue;
|
|
+ break;
|
|
+ }
|
|
+#endif
|
|
+
|
|
+ return i;
|
|
+}
|
|
+
|
|
+
|
|
+/*
|
|
+ * This function is executed in the master process.
|
|
+ *
|
|
+ * It returns the next message from the worker on the communication channel,
|
|
+ * optionally blocking (do_wait) until it becomes available.
|
|
+ *
|
|
+ * The id of the worker is returned in *worker.
|
|
+ */
|
|
+static char *
|
|
+getMessageFromWorker(ParallelState *pstate, bool do_wait, int *worker)
|
|
+{
|
|
+ int i;
|
|
+ fd_set workerset;
|
|
+ int maxFd = -1;
|
|
+ struct timeval nowait = {0, 0};
|
|
+
|
|
+ FD_ZERO(&workerset);
|
|
+
|
|
+ for (i = 0; i < pstate->numWorkers; i++)
|
|
+ {
|
|
+ if (pstate->parallelSlot[i].workerStatus == WRKR_TERMINATED)
|
|
+ continue;
|
|
+ FD_SET(pstate->parallelSlot[i].pipeRead, &workerset);
|
|
+ /* actually WIN32 ignores the first parameter to select()... */
|
|
+ if (pstate->parallelSlot[i].pipeRead > maxFd)
|
|
+ maxFd = pstate->parallelSlot[i].pipeRead;
|
|
+ }
|
|
+
|
|
+ if (do_wait)
|
|
+ {
|
|
+ i = select_loop(maxFd, &workerset);
|
|
+ Assert(i != 0);
|
|
+ }
|
|
+ else
|
|
+ {
|
|
+ if ((i = select(maxFd + 1, &workerset, NULL, NULL, &nowait)) == 0)
|
|
+ return NULL;
|
|
+ }
|
|
+
|
|
+ if (i < 0)
|
|
+ exit_horribly(modulename, "error in ListenToWorkers(): %s\n", strerror(errno));
|
|
+
|
|
+ for (i = 0; i < pstate->numWorkers; i++)
|
|
+ {
|
|
+ char *msg;
|
|
+
|
|
+ if (!FD_ISSET(pstate->parallelSlot[i].pipeRead, &workerset))
|
|
+ continue;
|
|
+
|
|
+ msg = readMessageFromPipe(pstate->parallelSlot[i].pipeRead);
|
|
+ *worker = i;
|
|
+ return msg;
|
|
+ }
|
|
+ Assert(false);
|
|
+ return NULL;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * This function is executed in the master process.
|
|
+ *
|
|
+ * It sends a message to a certain worker on the communication channel.
|
|
+ */
|
|
+static void
|
|
+sendMessageToWorker(ParallelState *pstate, int worker, const char *str)
|
|
+{
|
|
+ int len = strlen(str) + 1;
|
|
+
|
|
+ if (pipewrite(pstate->parallelSlot[worker].pipeWrite, str, len) != len)
|
|
+ {
|
|
+ /*
|
|
+ * If we're already aborting anyway, don't care if we succeed or not.
|
|
+ * The child might have gone already.
|
|
+ */
|
|
+#ifndef WIN32
|
|
+ if (!aborting)
|
|
+#endif
|
|
+ exit_horribly(modulename,
|
|
+ "could not write to the communication channel: %s\n",
|
|
+ strerror(errno));
|
|
+ }
|
|
+}
|
|
+
|
|
+/*
|
|
+ * The underlying function to read a message from the communication channel
|
|
+ * (fd) with optional blocking (do_wait).
|
|
+ */
|
|
+static char *
|
|
+readMessageFromPipe(int fd)
|
|
+{
|
|
+ char *msg;
|
|
+ int msgsize,
|
|
+ bufsize;
|
|
+ int ret;
|
|
+
|
|
+	/*
+	 * The problem here is that we need to deal with several possibilities: we
+	 * could receive only a partial message or several messages at once. The
+	 * caller expects us to return exactly one message, however.
+	 *
+	 * We could either read in as much as we can and keep track of what we
+	 * delivered back to the caller, or we just read byte by byte. Once we see
+	 * (char) 0, we know that it's the message's end. This would be quite
+	 * inefficient for more data, but since we are reading only on the command
+	 * channel, the performance loss does not seem worth the trouble of
+	 * keeping internal state for different file descriptors.
+	 */
|
|
+ bufsize = 64; /* could be any number */
|
|
+ msg = (char *) pg_malloc(bufsize);
|
|
+
|
|
+ msgsize = 0;
|
|
+ for (;;)
|
|
+ {
|
|
+ Assert(msgsize <= bufsize);
|
|
+ ret = piperead(fd, msg + msgsize, 1);
|
|
+
|
|
+ /* worker has closed the connection or another error happened */
|
|
+ if (ret <= 0)
|
|
+ break;
|
|
+
|
|
+ Assert(ret == 1);
|
|
+
|
|
+ if (msg[msgsize] == '\0')
|
|
+ return msg;
|
|
+
|
|
+ msgsize++;
|
|
+ if (msgsize == bufsize)
|
|
+ {
|
|
+ /* could be any number */
|
|
+ bufsize += 16;
|
|
+ msg = (char *) pg_realloc(msg, bufsize);
|
|
+ }
|
|
+ }
|
|
+
|
|
+ /*
|
|
+ * Worker has closed the connection, make sure to clean up before return
|
|
+ * since we are not returning msg (but did allocate it).
|
|
+ */
|
|
+ pg_free(msg);
|
|
+
|
|
+ return NULL;
|
|
+}
+
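The framing convention described above (every message is a NUL-terminated string, written in one piece and read back byte by byte) can be demonstrated with a few lines of standalone code. This is purely an illustration and not part of the sources; send_msg() and recv_msg() are invented names that mirror sendMessageToMaster() and readMessageFromPipe() in miniature, over a plain Unix pipe.

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>
	#include <unistd.h>

	/* Write one NUL-terminated message, as the sendMessage* functions do. */
	static void
	send_msg(int fd, const char *str)
	{
		size_t		len = strlen(str) + 1;	/* include the terminating '\0' */

		if (write(fd, str, len) != (ssize_t) len)
		{
			perror("write");
			exit(1);
		}
	}

	/* Read bytes until '\0', mirroring the loop in readMessageFromPipe(). */
	static char *
	recv_msg(int fd)
	{
		size_t		bufsize = 16;
		size_t		msgsize = 0;
		char	   *msg = malloc(bufsize);

		for (;;)
		{
			if (read(fd, msg + msgsize, 1) != 1)
			{
				free(msg);		/* peer closed the channel (or error) */
				return NULL;
			}
			if (msg[msgsize] == '\0')
				return msg;
			if (++msgsize == bufsize)
				msg = realloc(msg, bufsize += 16);
		}
	}

	int
	main(void)
	{
		int			fd[2];
		char	   *reply;

		if (pipe(fd) != 0)
			return 1;

		send_msg(fd[1], "DUMP 1234");	/* two messages written back to back */
		send_msg(fd[1], "OK DUMP done");

		reply = recv_msg(fd[0]);		/* ...still come out one at a time */
		printf("first message: %s\n", reply);
		free(reply);
		return 0;
	}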
|
|
+#ifdef WIN32
|
|
+/*
|
|
+ * This is a replacement version of pipe for Win32 which allows returned
|
|
+ * handles to be used in select(). Note that read/write calls must be replaced
|
|
+ * with recv/send. "handles" have to be integers so we check for errors then
|
|
+ * cast to integers.
|
|
+ */
|
|
+static int
|
|
+pgpipe(int handles[2])
|
|
+{
|
|
+ pgsocket s, tmp_sock;
|
|
+ struct sockaddr_in serv_addr;
|
|
+ int len = sizeof(serv_addr);
|
|
+
|
|
+ /* We have to use the Unix socket invalid file descriptor value here. */
|
|
+ handles[0] = handles[1] = -1;
|
|
+
|
|
+ /*
|
|
+ * setup listen socket
|
|
+ */
|
|
+ if ((s = socket(AF_INET, SOCK_STREAM, 0)) == PGINVALID_SOCKET)
|
|
+ {
|
|
+ write_msg(modulename, "pgpipe: could not create socket: error code %d\n",
|
|
+ WSAGetLastError());
|
|
+ return -1;
|
|
+ }
|
|
+
|
|
+ memset((void *) &serv_addr, 0, sizeof(serv_addr));
|
|
+ serv_addr.sin_family = AF_INET;
|
|
+ serv_addr.sin_port = htons(0);
|
|
+ serv_addr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
|
|
+ if (bind(s, (SOCKADDR *) &serv_addr, len) == SOCKET_ERROR)
|
|
+ {
|
|
+ write_msg(modulename, "pgpipe: could not bind: error code %d\n",
|
|
+ WSAGetLastError());
|
|
+ closesocket(s);
|
|
+ return -1;
|
|
+ }
|
|
+ if (listen(s, 1) == SOCKET_ERROR)
|
|
+ {
|
|
+ write_msg(modulename, "pgpipe: could not listen: error code %d\n",
|
|
+ WSAGetLastError());
|
|
+ closesocket(s);
|
|
+ return -1;
|
|
+ }
|
|
+ if (getsockname(s, (SOCKADDR *) &serv_addr, &len) == SOCKET_ERROR)
|
|
+ {
|
|
+ write_msg(modulename, "pgpipe: getsockname() failed: error code %d\n",
|
|
+ WSAGetLastError());
|
|
+ closesocket(s);
|
|
+ return -1;
|
|
+ }
|
|
+
|
|
+ /*
|
|
+ * setup pipe handles
|
|
+ */
|
|
+ if ((tmp_sock = socket(AF_INET, SOCK_STREAM, 0)) == PGINVALID_SOCKET)
|
|
+ {
|
|
+ write_msg(modulename, "pgpipe: could not create second socket: error code %d\n",
|
|
+ WSAGetLastError());
|
|
+ closesocket(s);
|
|
+ return -1;
|
|
+ }
|
|
+ handles[1] = (int) tmp_sock;
|
|
+
|
|
+ if (connect(handles[1], (SOCKADDR *) &serv_addr, len) == SOCKET_ERROR)
|
|
+ {
|
|
+ write_msg(modulename, "pgpipe: could not connect socket: error code %d\n",
|
|
+ WSAGetLastError());
|
|
+ closesocket(s);
|
|
+ return -1;
|
|
+ }
|
|
+ if ((tmp_sock = accept(s, (SOCKADDR *) &serv_addr, &len)) == PGINVALID_SOCKET)
|
|
+ {
|
|
+ write_msg(modulename, "pgpipe: could not accept connection: error code %d\n",
|
|
+ WSAGetLastError());
|
|
+ closesocket(handles[1]);
|
|
+ handles[1] = -1;
|
|
+ closesocket(s);
|
|
+ return -1;
|
|
+ }
|
|
+ handles[0] = (int) tmp_sock;
|
|
+
|
|
+ closesocket(s);
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int
|
|
+piperead(int s, char *buf, int len)
|
|
+{
|
|
+ int ret = recv(s, buf, len, 0);
|
|
+
|
|
+ if (ret < 0 && WSAGetLastError() == WSAECONNRESET)
|
|
+ /* EOF on the pipe! (win32 socket based implementation) */
|
|
+ ret = 0;
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+#endif
|
|
--- /dev/null
|
|
+++ pglogical-2.2.2/pglogical_dump/parallel.h
|
|
@@ -0,0 +1,95 @@
|
|
+/*-------------------------------------------------------------------------
|
|
+ *
|
|
+ * parallel.h
|
|
+ *
|
|
+ * Parallel support header file for the pg_dump archiver
|
|
+ *
|
|
+ * Portions Copyright (c) 1996-2014, PostgreSQL Global Development Group
|
|
+ * Portions Copyright (c) 1994, Regents of the University of California
|
|
+ *
|
|
+ * The author is not responsible for loss or damages that may
|
|
+ * result from its use.
|
|
+ *
|
|
+ * IDENTIFICATION
|
|
+ * src/bin/pg_dump/parallel.h
|
|
+ *
|
|
+ *-------------------------------------------------------------------------
|
|
+ */
|
|
+
|
|
+#ifndef PG_DUMP_PARALLEL_H
|
|
+#define PG_DUMP_PARALLEL_H
|
|
+
|
|
+#include "pg_backup_db.h"
|
|
+
|
|
+struct _archiveHandle;
|
|
+struct _tocEntry;
|
|
+
|
|
+typedef enum
|
|
+{
|
|
+ WRKR_TERMINATED = 0,
|
|
+ WRKR_IDLE,
|
|
+ WRKR_WORKING,
|
|
+ WRKR_FINISHED
|
|
+} T_WorkerStatus;
|
|
+
|
|
+/* Arguments needed for a worker process */
|
|
+typedef struct ParallelArgs
|
|
+{
|
|
+ struct _archiveHandle *AH;
|
|
+ struct _tocEntry *te;
|
|
+} ParallelArgs;
|
|
+
|
|
+/* State for each parallel activity slot */
|
|
+typedef struct ParallelSlot
|
|
+{
|
|
+ ParallelArgs *args;
|
|
+ T_WorkerStatus workerStatus;
|
|
+ int status;
|
|
+ int pipeRead;
|
|
+ int pipeWrite;
|
|
+ int pipeRevRead;
|
|
+ int pipeRevWrite;
|
|
+#ifdef WIN32
|
|
+ uintptr_t hThread;
|
|
+ unsigned int threadId;
|
|
+#else
|
|
+ pid_t pid;
|
|
+#endif
|
|
+} ParallelSlot;
|
|
+
|
|
+#define NO_SLOT (-1)
|
|
+
|
|
+typedef struct ParallelState
|
|
+{
|
|
+ int numWorkers;
|
|
+ ParallelSlot *parallelSlot;
|
|
+} ParallelState;
|
|
+
|
|
+#ifdef WIN32
|
|
+extern bool parallel_init_done;
|
|
+extern DWORD mainThreadId;
|
|
+#endif
|
|
+
|
|
+extern void init_parallel_dump_utils(void);
|
|
+
|
|
+extern int GetIdleWorker(ParallelState *pstate);
|
|
+extern bool IsEveryWorkerIdle(ParallelState *pstate);
|
|
+extern void ListenToWorkers(struct _archiveHandle * AH, ParallelState *pstate, bool do_wait);
|
|
+extern int ReapWorkerStatus(ParallelState *pstate, int *status);
|
|
+extern void EnsureIdleWorker(struct _archiveHandle * AH, ParallelState *pstate);
|
|
+extern void EnsureWorkersFinished(struct _archiveHandle * AH, ParallelState *pstate);
|
|
+
|
|
+extern ParallelState *ParallelBackupStart(struct _archiveHandle * AH,
|
|
+ RestoreOptions *ropt);
|
|
+extern void DispatchJobForTocEntry(struct _archiveHandle * AH,
|
|
+ ParallelState *pstate,
|
|
+ struct _tocEntry * te, T_Action act);
|
|
+extern void ParallelBackupEnd(struct _archiveHandle * AH, ParallelState *pstate);
|
|
+
|
|
+extern void checkAborting(struct _archiveHandle * AH);
|
|
+
|
|
+extern void
|
|
+exit_horribly(const char *modulename, const char *fmt,...)
|
|
+__attribute__((format(PG_PRINTF_ATTRIBUTE, 2, 3), noreturn));
|
|
+
|
|
+#endif /* PG_DUMP_PARALLEL_H */
|
|
--- /dev/null
|
|
+++ pglogical-2.2.2/pglogical_dump/pg_backup.h
|
|
@@ -0,0 +1,220 @@
|
|
+/*-------------------------------------------------------------------------
|
|
+ *
|
|
+ * pg_backup.h
|
|
+ *
|
|
+ * Public interface to the pg_dump archiver routines.
|
|
+ *
|
|
+ * See the headers to pg_restore for more details.
|
|
+ *
|
|
+ * Copyright (c) 2000, Philip Warner
|
|
+ * Rights are granted to use this software in any way so long
|
|
+ * as this notice is not removed.
|
|
+ *
|
|
+ * The author is not responsible for loss or damages that may
|
|
+ * result from its use.
|
|
+ *
|
|
+ *
|
|
+ * IDENTIFICATION
|
|
+ * src/bin/pg_dump/pg_backup.h
|
|
+ *
|
|
+ *-------------------------------------------------------------------------
|
|
+ */
|
|
+
|
|
+#ifndef PG_BACKUP_H
|
|
+#define PG_BACKUP_H
|
|
+
|
|
+#include "postgres_fe.h"
|
|
+
|
|
+#include "pg_dump.h"
|
|
+#include "dumputils.h"
|
|
+
|
|
+#include "libpq-fe.h"
|
|
+
|
|
+
|
|
+#define atooid(x) ((Oid) strtoul((x), NULL, 10))
|
|
+#define oidcmp(x,y) ( ((x) < (y) ? -1 : ((x) > (y)) ? 1 : 0) )
|
|
+#define oideq(x,y) ( (x) == (y) )
|
|
+#define oidle(x,y) ( (x) <= (y) )
|
|
+#define oidge(x,y) ( (x) >= (y) )
|
|
+#define oidzero(x) ( (x) == 0 )
|
|
+
|
|
+enum trivalue
|
|
+{
|
|
+ TRI_DEFAULT,
|
|
+ TRI_NO,
|
|
+ TRI_YES
|
|
+};
|
|
+
|
|
+typedef enum _archiveFormat
|
|
+{
|
|
+ archUnknown = 0,
|
|
+ archCustom = 1,
|
|
+ archTar = 3,
|
|
+ archNull = 4,
|
|
+ archDirectory = 5
|
|
+} ArchiveFormat;
|
|
+
|
|
+typedef enum _archiveMode
|
|
+{
|
|
+ archModeAppend,
|
|
+ archModeWrite,
|
|
+ archModeRead
|
|
+} ArchiveMode;
|
|
+
|
|
+typedef enum _teSection
|
|
+{
|
|
+ SECTION_NONE = 1, /* COMMENTs, ACLs, etc; can be anywhere */
|
|
+ SECTION_PRE_DATA, /* stuff to be processed before data */
|
|
+ SECTION_DATA, /* TABLE DATA, BLOBS, BLOB COMMENTS */
|
|
+ SECTION_POST_DATA /* stuff to be processed after data */
|
|
+} teSection;
|
|
+
|
|
+/*
|
|
+ * We may want to have some more user-readable data, but in the
+ * meantime this gives us some abstraction and type checking.
|
|
+ */
|
|
+struct Archive
|
|
+{
|
|
+ int verbose;
|
|
+ char *remoteVersionStr; /* server's version string */
|
|
+ int remoteVersion; /* same in numeric form */
|
|
+
|
|
+ int minRemoteVersion; /* allowable range */
|
|
+ int maxRemoteVersion;
|
|
+
|
|
+ int numWorkers; /* number of parallel processes */
|
|
+ char *sync_snapshot_id; /* sync snapshot id for parallel
|
|
+ * operation */
|
|
+
|
|
+ /* info needed for string escaping */
|
|
+ int encoding; /* libpq code for client_encoding */
|
|
+ bool std_strings; /* standard_conforming_strings */
|
|
+ char *use_role; /* Issue SET ROLE to this */
|
|
+
|
|
+ /* error handling */
|
|
+ bool exit_on_error; /* whether to exit on SQL errors... */
|
|
+ int n_errors; /* number of errors (if no die) */
|
|
+
|
|
+ /* The rest is private */
|
|
+};
|
|
+
|
|
+typedef int (*DataDumperPtr) (Archive *AH, void *userArg);
|
|
+
|
|
+typedef struct _restoreOptions
|
|
+{
|
|
+ int createDB; /* Issue commands to create the database */
|
|
+ int noOwner; /* Don't try to match original object owner */
|
|
+ int noTablespace; /* Don't issue tablespace-related commands */
|
|
+ int disable_triggers; /* disable triggers during data-only
|
|
+ * restore */
|
|
+ int use_setsessauth;/* Use SET SESSION AUTHORIZATION commands
|
|
+ * instead of OWNER TO */
|
|
+ int no_security_labels; /* Skip security label entries */
|
|
+ char *superuser; /* Username to use as superuser */
|
|
+ char *use_role; /* Issue SET ROLE to this */
|
|
+ int dropSchema;
|
|
+ int if_exists;
|
|
+ const char *filename;
|
|
+ int dataOnly;
|
|
+ int schemaOnly;
|
|
+ int dumpSections;
|
|
+ int verbose;
|
|
+ int aclsSkip;
|
|
+ int tocSummary;
|
|
+ char *tocFile;
|
|
+ int format;
|
|
+ char *formatName;
|
|
+
|
|
+ int selTypes;
|
|
+ int selIndex;
|
|
+ int selFunction;
|
|
+ int selTrigger;
|
|
+ int selTable;
|
|
+ SimpleStringList indexNames;
|
|
+ SimpleStringList functionNames;
|
|
+ SimpleStringList schemaNames;
|
|
+ SimpleStringList triggerNames;
|
|
+ SimpleStringList tableNames;
|
|
+
|
|
+ int useDB;
|
|
+ char *dbname;
|
|
+ char *pgport;
|
|
+ char *pghost;
|
|
+ char *username;
|
|
+ int noDataForFailedTables;
|
|
+ enum trivalue promptPassword;
|
|
+ int exit_on_error;
|
|
+ int compression;
|
|
+ int suppressDumpWarnings; /* Suppress output of WARNING entries
|
|
+ * to stderr */
|
|
+ bool single_txn;
|
|
+
|
|
+ bool *idWanted; /* array showing which dump IDs to emit */
|
|
+} RestoreOptions;
|
|
+
|
|
+typedef void (*SetupWorkerPtr) (Archive *AH, RestoreOptions *ropt);
|
|
+
|
|
+/*
|
|
+ * Main archiver interface.
|
|
+ */
|
|
+
|
|
+extern void ConnectDatabase(Archive *AH,
|
|
+ const char *dbname,
|
|
+ const char *pghost,
|
|
+ const char *pgport,
|
|
+ const char *username,
|
|
+ enum trivalue prompt_password);
|
|
+extern void DisconnectDatabase(Archive *AHX);
|
|
+extern PGconn *GetConnection(Archive *AHX);
|
|
+
|
|
+/* Called to add a TOC entry */
|
|
+extern void ArchiveEntry(Archive *AHX,
|
|
+ CatalogId catalogId, DumpId dumpId,
|
|
+ const char *tag,
|
|
+ const char *namespace, const char *tablespace,
|
|
+ const char *owner, bool withOids,
|
|
+ const char *desc, teSection section,
|
|
+ const char *defn,
|
|
+ const char *dropStmt, const char *copyStmt,
|
|
+ const DumpId *deps, int nDeps,
|
|
+ DataDumperPtr dumpFn, void *dumpArg);
|
|
+
|
|
+/* Called to write *data* to the archive */
|
|
+extern void WriteData(Archive *AH, const void *data, size_t dLen);
|
|
+
|
|
+extern int StartBlob(Archive *AH, Oid oid);
|
|
+extern int EndBlob(Archive *AH, Oid oid);
|
|
+
|
|
+extern void CloseArchive(Archive *AH);
|
|
+
|
|
+extern void SetArchiveRestoreOptions(Archive *AH, RestoreOptions *ropt);
|
|
+
|
|
+extern void RestoreArchive(Archive *AH);
|
|
+
|
|
+/* Open an existing archive */
|
|
+extern Archive *OpenArchive(const char *FileSpec, const ArchiveFormat fmt);
|
|
+
|
|
+/* Create a new archive */
|
|
+extern Archive *CreateArchive(const char *FileSpec, const ArchiveFormat fmt,
|
|
+ const int compression, ArchiveMode mode,
|
|
+ SetupWorkerPtr setupDumpWorker);
|
|
+
|
|
+/* The --list option */
|
|
+extern void PrintTOCSummary(Archive *AH, RestoreOptions *ropt);
|
|
+
|
|
+extern RestoreOptions *NewRestoreOptions(void);
|
|
+
|
|
+/* Rearrange and filter TOC entries */
|
|
+extern void SortTocFromFile(Archive *AHX, RestoreOptions *ropt);
|
|
+
|
|
+/* Convenience functions used only when writing DATA */
|
|
+extern void archputs(const char *s, Archive *AH);
|
|
+extern int
|
|
+archprintf(Archive *AH, const char *fmt,...)
|
|
+/* This extension allows gcc to check the format string */
|
|
+__attribute__((format(PG_PRINTF_ATTRIBUTE, 2, 3)));
|
|
+
|
|
+#define appendStringLiteralAH(buf,str,AH) \
|
|
+ appendStringLiteral(buf, str, (AH)->encoding, (AH)->std_strings)
|
|
+
|
|
+#endif /* PG_BACKUP_H */
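The declarations above are the whole public surface that pg_dump-style callers use when writing an archive: CreateArchive(), one ArchiveEntry() per object with an optional data-dumper callback that emits its payload through WriteData(), and a final CloseArchive(). The sketch below only illustrates that call order under stated assumptions; it is not part of the sources, and setup_worker_stub(), dump_one_line(), write_example_archive() and the output path are made-up placeholders.

	/* Hypothetical usage sketch of the archiver API declared above. */
	static void
	setup_worker_stub(Archive *AH, RestoreOptions *ropt)
	{
		/* a real parallel dump would (re)connect to the database here */
		(void) AH;
		(void) ropt;
	}

	static int
	dump_one_line(Archive *AH, void *userArg)
	{
		const char *line = (const char *) userArg;

		WriteData(AH, line, strlen(line));	/* emits the data for this entry */
		return 1;
	}

	static void
	write_example_archive(void)
	{
		Archive    *AH;
		CatalogId	catId = {0, 0};

		/* directory-format archive, no compression, opened for writing */
		AH = CreateArchive("/tmp/example_dump_dir", archDirectory, 0,
						   archModeWrite, setup_worker_stub);

		/* one TOC entry whose data is produced by the callback above */
		ArchiveEntry(AH, catId, 1, "example", NULL, NULL, "postgres", false,
					 "TABLE DATA", SECTION_DATA, "", "", NULL, NULL, 0,
					 dump_one_line, "hello world\n");

		CloseArchive(AH);	/* flushes the data and the TOC, closes output */
	}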
--- /dev/null
|
|
+++ pglogical-2.2.2/pglogical_dump/pg_backup_archiver.c
|
|
@@ -0,0 +1,4361 @@
|
|
+/*-------------------------------------------------------------------------
|
|
+ *
|
|
+ * pg_backup_archiver.c
|
|
+ *
|
|
+ * Private implementation of the archiver routines.
|
|
+ *
|
|
+ * See the headers to pg_restore for more details.
|
|
+ *
|
|
+ * Copyright (c) 2000, Philip Warner
|
|
+ * Rights are granted to use this software in any way so long
|
|
+ * as this notice is not removed.
|
|
+ *
|
|
+ * The author is not responsible for loss or damages that may
|
|
+ * result from its use.
|
|
+ *
|
|
+ *
|
|
+ * IDENTIFICATION
|
|
+ * src/bin/pg_dump/pg_backup_archiver.c
|
|
+ *
|
|
+ *-------------------------------------------------------------------------
|
|
+ */
|
|
+
|
|
+#include "pg_backup_db.h"
|
|
+#include "pg_backup_utils.h"
|
|
+#include "parallel.h"
|
|
+
|
|
+#include <ctype.h>
|
|
+#include <fcntl.h>
|
|
+#include <unistd.h>
|
|
+#include <sys/stat.h>
|
|
+#include <sys/types.h>
|
|
+#include <sys/wait.h>
|
|
+
|
|
+#ifdef WIN32
|
|
+#include <io.h>
|
|
+#endif
|
|
+
|
|
+#include "libpq/libpq-fs.h"
|
|
+
|
|
+#define TEXT_DUMP_HEADER "--\n-- PostgreSQL database dump\n--\n\n"
|
|
+#define TEXT_DUMPALL_HEADER "--\n-- PostgreSQL database cluster dump\n--\n\n"
|
|
+
|
|
+/* state needed to save/restore an archive's output target */
|
|
+typedef struct _outputContext
|
|
+{
|
|
+ void *OF;
|
|
+ int gzOut;
|
|
+} OutputContext;
|
|
+
|
|
+/* translator: this is a module name */
|
|
+static const char *modulename = gettext_noop("archiver");
|
|
+
|
|
+
|
|
+static ArchiveHandle *_allocAH(const char *FileSpec, const ArchiveFormat fmt,
|
|
+ const int compression, ArchiveMode mode, SetupWorkerPtr setupWorkerPtr);
|
|
+static void _getObjectDescription(PQExpBuffer buf, TocEntry *te,
|
|
+ ArchiveHandle *AH);
|
|
+static void _printTocEntry(ArchiveHandle *AH, TocEntry *te, RestoreOptions *ropt, bool isData, bool acl_pass);
|
|
+static char *replace_line_endings(const char *str);
|
|
+static void _doSetFixedOutputState(ArchiveHandle *AH);
|
|
+static void _doSetSessionAuth(ArchiveHandle *AH, const char *user);
|
|
+static void _doSetWithOids(ArchiveHandle *AH, const bool withOids);
|
|
+static void _reconnectToDB(ArchiveHandle *AH, const char *dbname);
|
|
+static void _becomeUser(ArchiveHandle *AH, const char *user);
|
|
+static void _becomeOwner(ArchiveHandle *AH, TocEntry *te);
|
|
+static void _selectOutputSchema(ArchiveHandle *AH, const char *schemaName);
|
|
+static void _selectTablespace(ArchiveHandle *AH, const char *tablespace);
|
|
+static void processEncodingEntry(ArchiveHandle *AH, TocEntry *te);
|
|
+static void processStdStringsEntry(ArchiveHandle *AH, TocEntry *te);
|
|
+static teReqs _tocEntryRequired(TocEntry *te, teSection curSection, RestoreOptions *ropt);
|
|
+static bool _tocEntryIsACL(TocEntry *te);
|
|
+static void _disableTriggersIfNecessary(ArchiveHandle *AH, TocEntry *te, RestoreOptions *ropt);
|
|
+static void _enableTriggersIfNecessary(ArchiveHandle *AH, TocEntry *te, RestoreOptions *ropt);
|
|
+static void buildTocEntryArrays(ArchiveHandle *AH);
|
|
+static void _moveBefore(ArchiveHandle *AH, TocEntry *pos, TocEntry *te);
|
|
+static int _discoverArchiveFormat(ArchiveHandle *AH);
|
|
+
|
|
+static int RestoringToDB(ArchiveHandle *AH);
|
|
+static void dump_lo_buf(ArchiveHandle *AH);
|
|
+static void dumpTimestamp(ArchiveHandle *AH, const char *msg, time_t tim);
|
|
+static void SetOutput(ArchiveHandle *AH, const char *filename, int compression);
|
|
+static OutputContext SaveOutput(ArchiveHandle *AH);
|
|
+static void RestoreOutput(ArchiveHandle *AH, OutputContext savedContext);
|
|
+
|
|
+static int restore_toc_entry(ArchiveHandle *AH, TocEntry *te,
|
|
+ RestoreOptions *ropt, bool is_parallel);
|
|
+static void restore_toc_entries_prefork(ArchiveHandle *AH);
|
|
+static void restore_toc_entries_parallel(ArchiveHandle *AH, ParallelState *pstate,
|
|
+ TocEntry *pending_list);
|
|
+static void restore_toc_entries_postfork(ArchiveHandle *AH, TocEntry *pending_list);
|
|
+static void par_list_header_init(TocEntry *l);
|
|
+static void par_list_append(TocEntry *l, TocEntry *te);
|
|
+static void par_list_remove(TocEntry *te);
|
|
+static TocEntry *get_next_work_item(ArchiveHandle *AH,
|
|
+ TocEntry *ready_list,
|
|
+ ParallelState *pstate);
|
|
+static void mark_work_done(ArchiveHandle *AH, TocEntry *ready_list,
|
|
+ int worker, int status,
|
|
+ ParallelState *pstate);
|
|
+static void fix_dependencies(ArchiveHandle *AH);
|
|
+static bool has_lock_conflicts(TocEntry *te1, TocEntry *te2);
|
|
+static void repoint_table_dependencies(ArchiveHandle *AH);
|
|
+static void identify_locking_dependencies(ArchiveHandle *AH, TocEntry *te);
|
|
+static void reduce_dependencies(ArchiveHandle *AH, TocEntry *te,
|
|
+ TocEntry *ready_list);
|
|
+static void mark_create_done(ArchiveHandle *AH, TocEntry *te);
|
|
+static void inhibit_data_for_failed_table(ArchiveHandle *AH, TocEntry *te);
|
|
+
|
|
+/*
|
|
+ * Wrapper functions.
|
|
+ *
|
|
+ * The objective is to make writing new formats and dumpers as simple
|
|
+ * as possible, if necessary at the expense of extra function calls etc.
|
|
+ *
|
|
+ */
|
|
+
|
|
+/*
|
|
+ * The dump worker setup needs lots of knowledge of the internals of pg_dump,
|
|
+ * so it's defined in pg_dump.c and passed into OpenArchive. The restore worker
|
|
+ * setup doesn't need to know anything much, so it's defined here.
|
|
+ */
|
|
+static void
|
|
+setupRestoreWorker(Archive *AHX, RestoreOptions *ropt)
|
|
+{
|
|
+ ArchiveHandle *AH = (ArchiveHandle *) AHX;
|
|
+
|
|
+ (AH->ReopenPtr) (AH);
|
|
+}
|
|
+
|
|
+
|
|
+/* Create a new archive */
|
|
+/* Public */
|
|
+Archive *
|
|
+CreateArchive(const char *FileSpec, const ArchiveFormat fmt,
|
|
+ const int compression, ArchiveMode mode, SetupWorkerPtr setupDumpWorker)
|
|
+
|
|
+{
|
|
+ ArchiveHandle *AH = _allocAH(FileSpec, fmt, compression, mode, setupDumpWorker);
|
|
+
|
|
+ return (Archive *) AH;
|
|
+}
|
|
+
|
|
+/* Open an existing archive */
|
|
+/* Public */
|
|
+Archive *
|
|
+OpenArchive(const char *FileSpec, const ArchiveFormat fmt)
|
|
+{
|
|
+ ArchiveHandle *AH = _allocAH(FileSpec, fmt, 0, archModeRead, setupRestoreWorker);
|
|
+
|
|
+ return (Archive *) AH;
|
|
+}
|
|
+
|
|
+/* Public */
|
|
+void
|
|
+CloseArchive(Archive *AHX)
|
|
+{
|
|
+ int res = 0;
|
|
+ ArchiveHandle *AH = (ArchiveHandle *) AHX;
|
|
+
|
|
+ (*AH->ClosePtr) (AH);
|
|
+
|
|
+ /* Close the output */
|
|
+ if (AH->gzOut)
|
|
+ res = GZCLOSE(AH->OF);
|
|
+ else if (AH->OF != stdout)
|
|
+ res = fclose(AH->OF);
|
|
+
|
|
+ if (res != 0)
|
|
+ exit_horribly(modulename, "could not close output file: %s\n",
|
|
+ strerror(errno));
|
|
+}
|
|
+
|
|
+/* Public */
|
|
+void
|
|
+SetArchiveRestoreOptions(Archive *AHX, RestoreOptions *ropt)
|
|
+{
|
|
+ ArchiveHandle *AH = (ArchiveHandle *) AHX;
|
|
+ TocEntry *te;
|
|
+ teSection curSection;
|
|
+
|
|
+ /* Save options for later access */
|
|
+ AH->ropt = ropt;
|
|
+
|
|
+ /* Decide which TOC entries will be dumped/restored, and mark them */
|
|
+ curSection = SECTION_PRE_DATA;
|
|
+ for (te = AH->toc->next; te != AH->toc; te = te->next)
|
|
+ {
|
|
+ /*
|
|
+ * When writing an archive, we also take this opportunity to check
|
|
+ * that we have generated the entries in a sane order that respects
|
|
+ * the section divisions. When reading, don't complain, since buggy
|
|
+ * old versions of pg_dump might generate out-of-order archives.
|
|
+ */
|
|
+ if (AH->mode != archModeRead)
|
|
+ {
|
|
+ switch (te->section)
|
|
+ {
|
|
+ case SECTION_NONE:
|
|
+ /* ok to be anywhere */
|
|
+ break;
|
|
+ case SECTION_PRE_DATA:
|
|
+ if (curSection != SECTION_PRE_DATA)
|
|
+ write_msg(modulename,
|
|
+ "WARNING: archive items not in correct section order\n");
|
|
+ break;
|
|
+ case SECTION_DATA:
|
|
+ if (curSection == SECTION_POST_DATA)
|
|
+ write_msg(modulename,
|
|
+ "WARNING: archive items not in correct section order\n");
|
|
+ break;
|
|
+ case SECTION_POST_DATA:
|
|
+ /* ok no matter which section we were in */
|
|
+ break;
|
|
+ default:
|
|
+ exit_horribly(modulename, "unexpected section code %d\n",
|
|
+ (int) te->section);
|
|
+ break;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ if (te->section != SECTION_NONE)
|
|
+ curSection = te->section;
|
|
+
|
|
+ te->reqs = _tocEntryRequired(te, curSection, ropt);
|
|
+ }
|
|
+}
|
|
+
|
|
+/* Public */
|
|
+void
|
|
+RestoreArchive(Archive *AHX)
|
|
+{
|
|
+ ArchiveHandle *AH = (ArchiveHandle *) AHX;
|
|
+ RestoreOptions *ropt = AH->ropt;
|
|
+ bool parallel_mode;
|
|
+ TocEntry *te;
|
|
+ OutputContext sav;
|
|
+
|
|
+ AH->stage = STAGE_INITIALIZING;
|
|
+
|
|
+ /*
|
|
+ * Check for nonsensical option combinations.
|
|
+ *
|
|
+ * -C is not compatible with -1, because we can't create a database inside
|
|
+ * a transaction block.
|
|
+ */
|
|
+ if (ropt->createDB && ropt->single_txn)
|
|
+ exit_horribly(modulename, "-C and -1 are incompatible options\n");
|
|
+
|
|
+ /*
|
|
+ * If we're going to do parallel restore, there are some restrictions.
|
|
+ */
|
|
+ parallel_mode = (AH->public.numWorkers > 1 && ropt->useDB);
|
|
+ if (parallel_mode)
|
|
+ {
|
|
+ /* We haven't got round to making this work for all archive formats */
|
|
+ if (AH->ClonePtr == NULL || AH->ReopenPtr == NULL)
|
|
+ exit_horribly(modulename, "parallel restore is not supported with this archive file format\n");
|
|
+
|
|
+ /* Doesn't work if the archive represents dependencies as OIDs */
|
|
+ if (AH->version < K_VERS_1_8)
|
|
+ exit_horribly(modulename, "parallel restore is not supported with archives made by pre-8.0 pg_dump\n");
|
|
+
|
|
+ /*
|
|
+ * It's also not gonna work if we can't reopen the input file, so
|
|
+ * let's try that immediately.
|
|
+ */
|
|
+ (AH->ReopenPtr) (AH);
|
|
+ }
|
|
+
|
|
+ /*
|
|
+ * Make sure we won't need (de)compression we haven't got
|
|
+ */
|
|
+#ifndef HAVE_LIBZ
|
|
+ if (AH->compression != 0 && AH->PrintTocDataPtr !=NULL)
|
|
+ {
|
|
+ for (te = AH->toc->next; te != AH->toc; te = te->next)
|
|
+ {
|
|
+ if (te->hadDumper && (te->reqs & REQ_DATA) != 0)
|
|
+ exit_horribly(modulename, "cannot restore from compressed archive (compression not supported in this installation)\n");
|
|
+ }
|
|
+ }
|
|
+#endif
|
|
+
|
|
+ /*
|
|
+ * Prepare index arrays, so we can assume we have them throughout restore.
|
|
+ * It's possible we already did this, though.
|
|
+ */
|
|
+ if (AH->tocsByDumpId == NULL)
|
|
+ buildTocEntryArrays(AH);
|
|
+
|
|
+ /*
|
|
+ * If we're using a DB connection, then connect it.
|
|
+ */
|
|
+ if (ropt->useDB)
|
|
+ {
|
|
+ ahlog(AH, 1, "connecting to database for restore\n");
|
|
+ if (AH->version < K_VERS_1_3)
|
|
+ exit_horribly(modulename, "direct database connections are not supported in pre-1.3 archives\n");
|
|
+
|
|
+ /*
|
|
+ * We don't want to guess at whether the dump will successfully
|
|
+ * restore; allow the attempt regardless of the version of the restore
|
|
+ * target.
|
|
+ */
|
|
+ AHX->minRemoteVersion = 0;
|
|
+ AHX->maxRemoteVersion = 999999;
|
|
+
|
|
+ ConnectDatabase(AHX, ropt->dbname,
|
|
+ ropt->pghost, ropt->pgport, ropt->username,
|
|
+ ropt->promptPassword);
|
|
+
|
|
+ /*
|
|
+ * If we're talking to the DB directly, don't send comments since they
|
|
+ * obscure SQL when displaying errors
|
|
+ */
|
|
+ AH->noTocComments = 1;
|
|
+ }
|
|
+
|
|
+ /*
|
|
+ * Work out if we have an implied data-only restore. This can happen if
|
|
+ * the dump was data only or if the user has used a toc list to exclude
|
|
+ * all of the schema data. All we do is look for schema entries - if none
|
|
+ * are found then we set the dataOnly flag.
|
|
+ *
|
|
+ * We could scan for wanted TABLE entries, but that is not the same as
|
|
+ * dataOnly. At this stage, it seems unnecessary (6-Mar-2001).
|
|
+ */
|
|
+ if (!ropt->dataOnly)
|
|
+ {
|
|
+ int impliedDataOnly = 1;
|
|
+
|
|
+ for (te = AH->toc->next; te != AH->toc; te = te->next)
|
|
+ {
|
|
+ if ((te->reqs & REQ_SCHEMA) != 0)
|
|
+ { /* It's schema, and it's wanted */
|
|
+ impliedDataOnly = 0;
|
|
+ break;
|
|
+ }
|
|
+ }
|
|
+ if (impliedDataOnly)
|
|
+ {
|
|
+ ropt->dataOnly = impliedDataOnly;
|
|
+ ahlog(AH, 1, "implied data-only restore\n");
|
|
+ }
|
|
+ }
|
|
+
|
|
+ /*
|
|
+ * Setup the output file if necessary.
|
|
+ */
|
|
+ sav = SaveOutput(AH);
|
|
+ if (ropt->filename || ropt->compression)
|
|
+ SetOutput(AH, ropt->filename, ropt->compression);
|
|
+
|
|
+ ahprintf(AH, "--\n-- PostgreSQL database dump\n--\n\n");
|
|
+
|
|
+ if (AH->public.verbose)
|
|
+ {
|
|
+ if (AH->archiveRemoteVersion)
|
|
+ ahprintf(AH, "-- Dumped from database version %s\n",
|
|
+ AH->archiveRemoteVersion);
|
|
+ if (AH->archiveDumpVersion)
|
|
+ ahprintf(AH, "-- Dumped by pg_dump version %s\n",
|
|
+ AH->archiveDumpVersion);
|
|
+ dumpTimestamp(AH, "Started on", AH->createDate);
|
|
+ }
|
|
+
|
|
+ if (ropt->single_txn)
|
|
+ {
|
|
+ if (AH->connection)
|
|
+ StartTransaction(AH);
|
|
+ else
|
|
+ ahprintf(AH, "BEGIN;\n\n");
|
|
+ }
|
|
+
|
|
+ /*
|
|
+ * Establish important parameter values right away.
|
|
+ */
|
|
+ _doSetFixedOutputState(AH);
|
|
+
|
|
+ AH->stage = STAGE_PROCESSING;
|
|
+
|
|
+ /*
|
|
+ * Drop the items at the start, in reverse order
|
|
+ */
|
|
+ if (ropt->dropSchema)
|
|
+ {
|
|
+ for (te = AH->toc->prev; te != AH->toc; te = te->prev)
|
|
+ {
|
|
+ AH->currentTE = te;
|
|
+
|
|
+ /*
|
|
+ * In createDB mode, issue a DROP *only* for the database as a
|
|
+ * whole. Issuing drops against anything else would be wrong,
|
|
+ * because at this point we're connected to the wrong database.
|
|
+ * Conversely, if we're not in createDB mode, we'd better not
|
|
+ * issue a DROP against the database at all.
|
|
+ */
|
|
+ if (ropt->createDB)
|
|
+ {
|
|
+ if (strcmp(te->desc, "DATABASE") != 0)
|
|
+ continue;
|
|
+ }
|
|
+ else
|
|
+ {
|
|
+ if (strcmp(te->desc, "DATABASE") == 0)
|
|
+ continue;
|
|
+ }
|
|
+
|
|
+ /* Otherwise, drop anything that's selected and has a dropStmt */
|
|
+ if (((te->reqs & (REQ_SCHEMA | REQ_DATA)) != 0) && te->dropStmt)
|
|
+ {
|
|
+ ahlog(AH, 1, "dropping %s %s\n", te->desc, te->tag);
|
|
+ /* Select owner and schema as necessary */
|
|
+ _becomeOwner(AH, te);
|
|
+ _selectOutputSchema(AH, te->namespace);
|
|
+
|
|
+ /*
|
|
+ * Now emit the DROP command, if the object has one. Note we
|
|
+ * don't necessarily emit it verbatim; at this point we add an
|
|
+ * appropriate IF EXISTS clause, if the user requested it.
|
|
+ */
|
|
+ if (*te->dropStmt != '\0')
|
|
+ {
|
|
+ if (!ropt->if_exists)
|
|
+ {
|
|
+ /* No --if-exists? Then just use the original */
|
|
+ ahprintf(AH, "%s", te->dropStmt);
|
|
+ }
|
|
+ else
|
|
+ {
|
|
+ /*
|
|
+ * Inject an appropriate spelling of "if exists". For
|
|
+ * large objects, we have a separate routine that
|
|
+ * knows how to do it, without depending on
|
|
+ * te->dropStmt; use that. For other objects we need
|
|
+ * to parse the command.
|
|
+ *
|
|
+ */
|
|
+ if (strncmp(te->desc, "BLOB", 4) == 0)
|
|
+ {
|
|
+ DropBlobIfExists(AH, te->catalogId.oid);
|
|
+ }
|
|
+ else
|
|
+ {
|
|
+ char buffer[40];
|
|
+ char *mark;
|
|
+ char *dropStmt = pg_strdup(te->dropStmt);
|
|
+ char *dropStmtPtr = dropStmt;
|
|
+ PQExpBuffer ftStmt = createPQExpBuffer();
|
|
+
|
|
+ /*
|
|
+ * Need to inject IF EXISTS clause after ALTER
|
|
+ * TABLE part in ALTER TABLE .. DROP statement
|
|
+ */
|
|
+ if (strncmp(dropStmt, "ALTER TABLE", 11) == 0)
|
|
+ {
|
|
+ appendPQExpBuffer(ftStmt,
|
|
+ "ALTER TABLE IF EXISTS");
|
|
+ dropStmt = dropStmt + 11;
|
|
+ }
|
|
+
|
|
+ /*
|
|
+ * ALTER TABLE..ALTER COLUMN..DROP DEFAULT does
|
|
+ * not support the IF EXISTS clause, and therefore
|
|
+ * we simply emit the original command for such
|
|
+ * objects. For other objects, we need to extract
|
|
+ * the first part of the DROP which includes the
|
|
+ * object type. Most of the time this matches
|
|
+ * te->desc, so search for that; however for the
|
|
+ * different kinds of CONSTRAINTs, we know to
|
|
+ * search for hardcoded "DROP CONSTRAINT" instead.
|
|
+ */
|
|
+ if (strcmp(te->desc, "DEFAULT") == 0)
|
|
+ appendPQExpBuffer(ftStmt, "%s", dropStmt);
|
|
+ else
|
|
+ {
|
|
+ if (strcmp(te->desc, "CONSTRAINT") == 0 ||
|
|
+ strcmp(te->desc, "CHECK CONSTRAINT") == 0 ||
|
|
+ strcmp(te->desc, "FK CONSTRAINT") == 0)
|
|
+ strcpy(buffer, "DROP CONSTRAINT");
|
|
+ else
|
|
+ snprintf(buffer, sizeof(buffer), "DROP %s",
|
|
+ te->desc);
|
|
+
|
|
+ mark = strstr(dropStmt, buffer);
|
|
+ Assert(mark != NULL);
|
|
+
|
|
+ *mark = '\0';
|
|
+ appendPQExpBuffer(ftStmt, "%s%s IF EXISTS%s",
|
|
+ dropStmt, buffer,
|
|
+ mark + strlen(buffer));
|
|
+ }
|
|
+
|
|
+ ahprintf(AH, "%s", ftStmt->data);
|
|
+
|
|
+ destroyPQExpBuffer(ftStmt);
|
|
+
|
|
+ pg_free(dropStmtPtr);
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+
|
|
+ /*
|
|
+ * _selectOutputSchema may have set currSchema to reflect the effect
|
|
+ * of a "SET search_path" command it emitted. However, by now we may
|
|
+ * have dropped that schema; or it might not have existed in the first
|
|
+ * place. In either case the effective value of search_path will not
|
|
+ * be what we think. Forcibly reset currSchema so that we will
|
|
+ * re-establish the search_path setting when needed (after creating
|
|
+ * the schema).
|
|
+ *
|
|
+ * If we treated users as pg_dump'able objects then we'd need to reset
|
|
+ * currUser here too.
|
|
+ */
|
|
+ if (AH->currSchema)
|
|
+ free(AH->currSchema);
|
|
+ AH->currSchema = NULL;
|
|
+ }
|
|
+
|
|
+ /*
|
|
+ * In serial mode, we now process each non-ACL TOC entry.
|
|
+ *
|
|
+ * In parallel mode, turn control over to the parallel-restore logic.
|
|
+ */
|
|
+ if (parallel_mode)
|
|
+ {
|
|
+ ParallelState *pstate;
|
|
+ TocEntry pending_list;
|
|
+
|
|
+ par_list_header_init(&pending_list);
|
|
+
|
|
+ /* This runs PRE_DATA items and then disconnects from the database */
|
|
+ restore_toc_entries_prefork(AH);
|
|
+ Assert(AH->connection == NULL);
|
|
+
|
|
+ /* ParallelBackupStart() will actually fork the processes */
|
|
+ pstate = ParallelBackupStart(AH, ropt);
|
|
+ restore_toc_entries_parallel(AH, pstate, &pending_list);
|
|
+ ParallelBackupEnd(AH, pstate);
|
|
+
|
|
+ /* reconnect the master and see if we missed something */
|
|
+ restore_toc_entries_postfork(AH, &pending_list);
|
|
+ Assert(AH->connection != NULL);
|
|
+ }
|
|
+ else
|
|
+ {
|
|
+ for (te = AH->toc->next; te != AH->toc; te = te->next)
|
|
+ (void) restore_toc_entry(AH, te, ropt, false);
|
|
+ }
|
|
+
|
|
+ /*
|
|
+ * Scan TOC again to output ownership commands and ACLs
|
|
+ */
|
|
+ for (te = AH->toc->next; te != AH->toc; te = te->next)
|
|
+ {
|
|
+ AH->currentTE = te;
|
|
+
|
|
+ /* Both schema and data objects might now have ownership/ACLs */
|
|
+ if ((te->reqs & (REQ_SCHEMA | REQ_DATA)) != 0)
|
|
+ {
|
|
+ ahlog(AH, 1, "setting owner and privileges for %s %s\n",
|
|
+ te->desc, te->tag);
|
|
+ _printTocEntry(AH, te, ropt, false, true);
|
|
+ }
|
|
+ }
|
|
+
|
|
+ if (ropt->single_txn)
|
|
+ {
|
|
+ if (AH->connection)
|
|
+ CommitTransaction(AH);
|
|
+ else
|
|
+ ahprintf(AH, "COMMIT;\n\n");
|
|
+ }
|
|
+
|
|
+ if (AH->public.verbose)
|
|
+ dumpTimestamp(AH, "Completed on", time(NULL));
|
|
+
|
|
+ ahprintf(AH, "--\n-- PostgreSQL database dump complete\n--\n\n");
|
|
+
|
|
+ /*
|
|
+ * Clean up & we're done.
|
|
+ */
|
|
+ AH->stage = STAGE_FINALIZING;
|
|
+
|
|
+ if (ropt->filename || ropt->compression)
|
|
+ RestoreOutput(AH, sav);
|
|
+
|
|
+ if (ropt->useDB)
|
|
+ DisconnectDatabase(&AH->public);
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Restore a single TOC item. Used in both parallel and non-parallel restore;
|
|
+ * is_parallel is true if we are in a worker child process.
|
|
+ *
|
|
+ * Returns 0 normally, but WORKER_CREATE_DONE or WORKER_INHIBIT_DATA if
|
|
+ * the parallel parent has to make the corresponding status update.
|
|
+ */
|
|
+static int
|
|
+restore_toc_entry(ArchiveHandle *AH, TocEntry *te,
|
|
+ RestoreOptions *ropt, bool is_parallel)
|
|
+{
|
|
+ int status = WORKER_OK;
|
|
+ teReqs reqs;
|
|
+ bool defnDumped;
|
|
+
|
|
+ AH->currentTE = te;
|
|
+
|
|
+ /* Work out what, if anything, we want from this entry */
|
|
+ if (_tocEntryIsACL(te))
|
|
+ reqs = 0; /* ACLs are never restored here */
|
|
+ else
|
|
+ reqs = te->reqs;
|
|
+
|
|
+ /*
|
|
+ * Ignore DATABASE entry unless we should create it. We must check this
|
|
+ * here, not in _tocEntryRequired, because the createDB option should not
|
|
+ * affect emitting a DATABASE entry to an archive file.
|
|
+ */
|
|
+ if (!ropt->createDB && strcmp(te->desc, "DATABASE") == 0)
|
|
+ reqs = 0;
|
|
+
|
|
+ /* Dump any relevant dump warnings to stderr */
|
|
+ if (!ropt->suppressDumpWarnings && strcmp(te->desc, "WARNING") == 0)
|
|
+ {
|
|
+ if (!ropt->dataOnly && te->defn != NULL && strlen(te->defn) != 0)
|
|
+ write_msg(modulename, "warning from original dump file: %s\n", te->defn);
|
|
+ else if (te->copyStmt != NULL && strlen(te->copyStmt) != 0)
|
|
+ write_msg(modulename, "warning from original dump file: %s\n", te->copyStmt);
|
|
+ }
|
|
+
|
|
+ defnDumped = false;
|
|
+
|
|
+ if ((reqs & REQ_SCHEMA) != 0) /* We want the schema */
|
|
+ {
|
|
+ ahlog(AH, 1, "creating %s %s\n", te->desc, te->tag);
|
|
+
|
|
+ _printTocEntry(AH, te, ropt, false, false);
|
|
+ defnDumped = true;
|
|
+
|
|
+ if (strcmp(te->desc, "TABLE") == 0)
|
|
+ {
|
|
+ if (AH->lastErrorTE == te)
|
|
+ {
|
|
+ /*
|
|
+ * We failed to create the table. If
|
|
+ * --no-data-for-failed-tables was given, mark the
|
|
+ * corresponding TABLE DATA to be ignored.
|
|
+ *
|
|
+ * In the parallel case this must be done in the parent, so we
|
|
+ * just set the return value.
|
|
+ */
|
|
+ if (ropt->noDataForFailedTables)
|
|
+ {
|
|
+ if (is_parallel)
|
|
+ status = WORKER_INHIBIT_DATA;
|
|
+ else
|
|
+ inhibit_data_for_failed_table(AH, te);
|
|
+ }
|
|
+ }
|
|
+ else
|
|
+ {
|
|
+ /*
|
|
+ * We created the table successfully. Mark the corresponding
|
|
+ * TABLE DATA for possible truncation.
|
|
+ *
|
|
+ * In the parallel case this must be done in the parent, so we
|
|
+ * just set the return value.
|
|
+ */
|
|
+ if (is_parallel)
|
|
+ status = WORKER_CREATE_DONE;
|
|
+ else
|
|
+ mark_create_done(AH, te);
|
|
+ }
|
|
+ }
|
|
+
|
|
+ /* If we created a DB, connect to it... */
|
|
+ if (strcmp(te->desc, "DATABASE") == 0)
|
|
+ {
|
|
+ ahlog(AH, 1, "connecting to new database \"%s\"\n", te->tag);
|
|
+ _reconnectToDB(AH, te->tag);
|
|
+ ropt->dbname = pg_strdup(te->tag);
|
|
+ }
|
|
+ }
|
|
+
|
|
+ /*
|
|
+ * If we have a data component, then process it
|
|
+ */
|
|
+ if ((reqs & REQ_DATA) != 0)
|
|
+ {
|
|
+ /*
|
|
+ * hadDumper will be set if there is genuine data component for this
|
|
+ * node. Otherwise, we need to check the defn field for statements
|
|
+ * that need to be executed in data-only restores.
|
|
+ */
|
|
+ if (te->hadDumper)
|
|
+ {
|
|
+ /*
|
|
+ * If we can output the data, then restore it.
|
|
+ */
|
|
+ if (AH->PrintTocDataPtr !=NULL)
|
|
+ {
|
|
+ _printTocEntry(AH, te, ropt, true, false);
|
|
+
|
|
+ if (strcmp(te->desc, "BLOBS") == 0 ||
|
|
+ strcmp(te->desc, "BLOB COMMENTS") == 0)
|
|
+ {
|
|
+ ahlog(AH, 1, "processing %s\n", te->desc);
|
|
+
|
|
+ _selectOutputSchema(AH, "pg_catalog");
|
|
+
|
|
+ /* Send BLOB COMMENTS data to ExecuteSimpleCommands() */
|
|
+ if (strcmp(te->desc, "BLOB COMMENTS") == 0)
|
|
+ AH->outputKind = OUTPUT_OTHERDATA;
|
|
+
|
|
+ (*AH->PrintTocDataPtr) (AH, te, ropt);
|
|
+
|
|
+ AH->outputKind = OUTPUT_SQLCMDS;
|
|
+ }
|
|
+ else
|
|
+ {
|
|
+ _disableTriggersIfNecessary(AH, te, ropt);
|
|
+
|
|
+ /* Select owner and schema as necessary */
|
|
+ _becomeOwner(AH, te);
|
|
+ _selectOutputSchema(AH, te->namespace);
|
|
+
|
|
+ ahlog(AH, 1, "processing data for table \"%s\"\n",
|
|
+ te->tag);
|
|
+
|
|
+ /*
|
|
+ * In parallel restore, if we created the table earlier in
|
|
+ * the run then we wrap the COPY in a transaction and
|
|
+ * precede it with a TRUNCATE. If archiving is not on
|
|
+ * this prevents WAL-logging the COPY. This obtains a
|
|
+ * speedup similar to that from using single_txn mode in
|
|
+ * non-parallel restores.
|
|
+ */
|
|
+ if (is_parallel && te->created)
|
|
+ {
|
|
+ /*
|
|
+ * Parallel restore is always talking directly to a
|
|
+ * server, so no need to see if we should issue BEGIN.
|
|
+ */
|
|
+ StartTransaction(AH);
|
|
+
|
|
+ /*
|
|
+ * If the server version is >= 8.4, make sure we issue
|
|
+ * TRUNCATE with ONLY so that child tables are not
|
|
+ * wiped.
|
|
+ */
|
|
+ ahprintf(AH, "TRUNCATE TABLE %s%s;\n\n",
|
|
+ (PQserverVersion(AH->connection) >= 80400 ?
|
|
+ "ONLY " : ""),
|
|
+ fmtId(te->tag));
|
|
+ }
|
|
+
|
|
+ /*
|
|
+ * If we have a copy statement, use it.
|
|
+ */
|
|
+ if (te->copyStmt && strlen(te->copyStmt) > 0)
|
|
+ {
|
|
+ ahprintf(AH, "%s", te->copyStmt);
|
|
+ AH->outputKind = OUTPUT_COPYDATA;
|
|
+ }
|
|
+ else
|
|
+ AH->outputKind = OUTPUT_OTHERDATA;
|
|
+
|
|
+ (*AH->PrintTocDataPtr) (AH, te, ropt);
|
|
+
|
|
+ /*
|
|
+ * Terminate COPY if needed.
|
|
+ */
|
|
+ if (AH->outputKind == OUTPUT_COPYDATA &&
|
|
+ RestoringToDB(AH))
|
|
+ EndDBCopyMode(AH, te);
|
|
+ AH->outputKind = OUTPUT_SQLCMDS;
|
|
+
|
|
+ /* close out the transaction started above */
|
|
+ if (is_parallel && te->created)
|
|
+ CommitTransaction(AH);
|
|
+
|
|
+ _enableTriggersIfNecessary(AH, te, ropt);
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+ else if (!defnDumped)
|
|
+ {
|
|
+ /* If we haven't already dumped the defn part, do so now */
|
|
+ ahlog(AH, 1, "executing %s %s\n", te->desc, te->tag);
|
|
+ _printTocEntry(AH, te, ropt, false, false);
|
|
+ }
|
|
+ }
|
|
+
|
|
+ if (AH->public.n_errors > 0 && status == WORKER_OK)
|
|
+ status = WORKER_IGNORED_ERRORS;
|
|
+
|
|
+ return status;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Allocate a new RestoreOptions block.
|
|
+ * This is mainly so we can initialize it, but also for future expansion.
|
|
+ */
|
|
+RestoreOptions *
|
|
+NewRestoreOptions(void)
|
|
+{
|
|
+ RestoreOptions *opts;
|
|
+
|
|
+ opts = (RestoreOptions *) pg_malloc0(sizeof(RestoreOptions));
|
|
+
|
|
+ /* set any fields that shouldn't default to zeroes */
|
|
+ opts->format = archUnknown;
|
|
+ opts->promptPassword = TRI_DEFAULT;
|
|
+ opts->dumpSections = DUMP_UNSECTIONED;
|
|
+
|
|
+ return opts;
|
|
+}
|
|
+
|
|
+static void
|
|
+_disableTriggersIfNecessary(ArchiveHandle *AH, TocEntry *te, RestoreOptions *ropt)
|
|
+{
|
|
+ /* This hack is only needed in a data-only restore */
|
|
+ if (!ropt->dataOnly || !ropt->disable_triggers)
|
|
+ return;
|
|
+
|
|
+ ahlog(AH, 1, "disabling triggers for %s\n", te->tag);
|
|
+
|
|
+ /*
|
|
+ * Become superuser if possible, since they are the only ones who can
|
|
+ * disable constraint triggers. If -S was not given, assume the initial
|
|
+ * user identity is a superuser. (XXX would it be better to become the
|
|
+ * table owner?)
|
|
+ */
|
|
+ _becomeUser(AH, ropt->superuser);
|
|
+
|
|
+ /*
|
|
+ * Disable them.
|
|
+ */
|
|
+ _selectOutputSchema(AH, te->namespace);
|
|
+
|
|
+ ahprintf(AH, "ALTER TABLE %s DISABLE TRIGGER ALL;\n\n",
|
|
+ fmtId(te->tag));
|
|
+}
|
|
+
|
|
+static void
|
|
+_enableTriggersIfNecessary(ArchiveHandle *AH, TocEntry *te, RestoreOptions *ropt)
|
|
+{
|
|
+ /* This hack is only needed in a data-only restore */
|
|
+ if (!ropt->dataOnly || !ropt->disable_triggers)
|
|
+ return;
|
|
+
|
|
+ ahlog(AH, 1, "enabling triggers for %s\n", te->tag);
|
|
+
|
|
+ /*
|
|
+ * Become superuser if possible, since they are the only ones who can
|
|
+ * disable constraint triggers. If -S was not given, assume the initial
|
|
+ * user identity is a superuser. (XXX would it be better to become the
|
|
+ * table owner?)
|
|
+ */
|
|
+ _becomeUser(AH, ropt->superuser);
|
|
+
|
|
+ /*
|
|
+ * Enable them.
|
|
+ */
|
|
+ _selectOutputSchema(AH, te->namespace);
|
|
+
|
|
+ ahprintf(AH, "ALTER TABLE %s ENABLE TRIGGER ALL;\n\n",
|
|
+ fmtId(te->tag));
|
|
+}
|
|
+
|
|
+/*
|
|
+ * This is a routine that is part of the dumper interface, hence the 'Archive*' parameter.
|
|
+ */
|
|
+
|
|
+/* Public */
|
|
+void
|
|
+WriteData(Archive *AHX, const void *data, size_t dLen)
|
|
+{
|
|
+ ArchiveHandle *AH = (ArchiveHandle *) AHX;
|
|
+
|
|
+ if (!AH->currToc)
|
|
+ exit_horribly(modulename, "internal error -- WriteData cannot be called outside the context of a DataDumper routine\n");
|
|
+
|
|
+ (*AH->WriteDataPtr) (AH, data, dLen);
|
|
+
|
|
+ return;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Create a new TOC entry. The TOC was designed as a TOC, but is now the
|
|
+ * repository for all metadata. But the name has stuck.
|
|
+ */
|
|
+
|
|
+/* Public */
|
|
+void
|
|
+ArchiveEntry(Archive *AHX,
|
|
+ CatalogId catalogId, DumpId dumpId,
|
|
+ const char *tag,
|
|
+ const char *namespace,
|
|
+ const char *tablespace,
|
|
+ const char *owner, bool withOids,
|
|
+ const char *desc, teSection section,
|
|
+ const char *defn,
|
|
+ const char *dropStmt, const char *copyStmt,
|
|
+ const DumpId *deps, int nDeps,
|
|
+ DataDumperPtr dumpFn, void *dumpArg)
|
|
+{
|
|
+ ArchiveHandle *AH = (ArchiveHandle *) AHX;
|
|
+ TocEntry *newToc;
|
|
+
|
|
+ newToc = (TocEntry *) pg_malloc0(sizeof(TocEntry));
|
|
+
|
|
+ AH->tocCount++;
|
|
+ if (dumpId > AH->maxDumpId)
|
|
+ AH->maxDumpId = dumpId;
|
|
+
|
|
+ newToc->prev = AH->toc->prev;
|
|
+ newToc->next = AH->toc;
|
|
+ AH->toc->prev->next = newToc;
|
|
+ AH->toc->prev = newToc;
|
|
+
|
|
+ newToc->catalogId = catalogId;
|
|
+ newToc->dumpId = dumpId;
|
|
+ newToc->section = section;
|
|
+
|
|
+ newToc->tag = pg_strdup(tag);
|
|
+ newToc->namespace = namespace ? pg_strdup(namespace) : NULL;
|
|
+ newToc->tablespace = tablespace ? pg_strdup(tablespace) : NULL;
|
|
+ newToc->owner = pg_strdup(owner);
|
|
+ newToc->withOids = withOids;
|
|
+ newToc->desc = pg_strdup(desc);
|
|
+ newToc->defn = pg_strdup(defn);
|
|
+ newToc->dropStmt = pg_strdup(dropStmt);
|
|
+ newToc->copyStmt = copyStmt ? pg_strdup(copyStmt) : NULL;
|
|
+
|
|
+ if (nDeps > 0)
|
|
+ {
|
|
+ newToc->dependencies = (DumpId *) pg_malloc(nDeps * sizeof(DumpId));
|
|
+ memcpy(newToc->dependencies, deps, nDeps * sizeof(DumpId));
|
|
+ newToc->nDeps = nDeps;
|
|
+ }
|
|
+ else
|
|
+ {
|
|
+ newToc->dependencies = NULL;
|
|
+ newToc->nDeps = 0;
|
|
+ }
|
|
+
|
|
+ newToc->dataDumper = dumpFn;
|
|
+ newToc->dataDumperArg = dumpArg;
|
|
+ newToc->hadDumper = dumpFn ? true : false;
|
|
+
|
|
+ newToc->formatData = NULL;
|
|
+
|
|
+ if (AH->ArchiveEntryPtr !=NULL)
|
|
+ (*AH->ArchiveEntryPtr) (AH, newToc);
|
|
+}
|
|
+
|
|
+/* Public */
|
|
+void
|
|
+PrintTOCSummary(Archive *AHX, RestoreOptions *ropt)
|
|
+{
|
|
+ ArchiveHandle *AH = (ArchiveHandle *) AHX;
|
|
+ TocEntry *te;
|
|
+ teSection curSection;
|
|
+ OutputContext sav;
|
|
+ const char *fmtName;
|
|
+
|
|
+ sav = SaveOutput(AH);
|
|
+ if (ropt->filename)
|
|
+ SetOutput(AH, ropt->filename, 0 /* no compression */ );
|
|
+
|
|
+ ahprintf(AH, ";\n; Archive created at %s", ctime(&AH->createDate));
|
|
+ ahprintf(AH, "; dbname: %s\n; TOC Entries: %d\n; Compression: %d\n",
|
|
+ AH->archdbname, AH->tocCount, AH->compression);
|
|
+
|
|
+ switch (AH->format)
|
|
+ {
|
|
+ case archCustom:
|
|
+ fmtName = "CUSTOM";
|
|
+ break;
|
|
+ case archDirectory:
|
|
+ fmtName = "DIRECTORY";
|
|
+ break;
|
|
+ case archTar:
|
|
+ fmtName = "TAR";
|
|
+ break;
|
|
+ default:
|
|
+ fmtName = "UNKNOWN";
|
|
+ }
|
|
+
|
|
+ ahprintf(AH, "; Dump Version: %d.%d-%d\n", AH->vmaj, AH->vmin, AH->vrev);
|
|
+ ahprintf(AH, "; Format: %s\n", fmtName);
|
|
+ ahprintf(AH, "; Integer: %d bytes\n", (int) AH->intSize);
|
|
+ ahprintf(AH, "; Offset: %d bytes\n", (int) AH->offSize);
|
|
+ if (AH->archiveRemoteVersion)
|
|
+ ahprintf(AH, "; Dumped from database version: %s\n",
|
|
+ AH->archiveRemoteVersion);
|
|
+ if (AH->archiveDumpVersion)
|
|
+ ahprintf(AH, "; Dumped by pg_dump version: %s\n",
|
|
+ AH->archiveDumpVersion);
|
|
+
|
|
+ ahprintf(AH, ";\n;\n; Selected TOC Entries:\n;\n");
|
|
+
|
|
+ curSection = SECTION_PRE_DATA;
|
|
+ for (te = AH->toc->next; te != AH->toc; te = te->next)
|
|
+ {
|
|
+ if (te->section != SECTION_NONE)
|
|
+ curSection = te->section;
|
|
+ if (ropt->verbose ||
|
|
+ (_tocEntryRequired(te, curSection, ropt) & (REQ_SCHEMA | REQ_DATA)) != 0)
|
|
+ ahprintf(AH, "%d; %u %u %s %s %s %s\n", te->dumpId,
|
|
+ te->catalogId.tableoid, te->catalogId.oid,
|
|
+ te->desc, te->namespace ? te->namespace : "-",
|
|
+ te->tag, te->owner);
|
|
+ if (ropt->verbose && te->nDeps > 0)
|
|
+ {
|
|
+ int i;
|
|
+
|
|
+ ahprintf(AH, ";\tdepends on:");
|
|
+ for (i = 0; i < te->nDeps; i++)
|
|
+ ahprintf(AH, " %d", te->dependencies[i]);
|
|
+ ahprintf(AH, "\n");
|
|
+ }
|
|
+ }
|
|
+
|
|
+ if (ropt->filename)
|
|
+ RestoreOutput(AH, sav);
|
|
+}
|
|
+
|
|
+/***********
|
|
+ * BLOB Archival
|
|
+ ***********/
|
|
+
|
|
+/* Called by a dumper to signal start of a BLOB */
|
|
+int
|
|
+StartBlob(Archive *AHX, Oid oid)
|
|
+{
|
|
+ ArchiveHandle *AH = (ArchiveHandle *) AHX;
|
|
+
|
|
+ if (!AH->StartBlobPtr)
|
|
+ exit_horribly(modulename, "large-object output not supported in chosen format\n");
|
|
+
|
|
+ (*AH->StartBlobPtr) (AH, AH->currToc, oid);
|
|
+
|
|
+ return 1;
|
|
+}
|
|
+
|
|
+/* Called by a dumper to signal end of a BLOB */
|
|
+int
|
|
+EndBlob(Archive *AHX, Oid oid)
|
|
+{
|
|
+ ArchiveHandle *AH = (ArchiveHandle *) AHX;
|
|
+
|
|
+ if (AH->EndBlobPtr)
|
|
+ (*AH->EndBlobPtr) (AH, AH->currToc, oid);
|
|
+
|
|
+ return 1;
|
|
+}
|
|
+
|
|
+/**********
|
|
+ * BLOB Restoration
|
|
+ **********/
|
|
+
|
|
+/*
|
|
+ * Called by a format handler before any blobs are restored
|
|
+ */
|
|
+void
|
|
+StartRestoreBlobs(ArchiveHandle *AH)
|
|
+{
|
|
+ if (!AH->ropt->single_txn)
|
|
+ {
|
|
+ if (AH->connection)
|
|
+ StartTransaction(AH);
|
|
+ else
|
|
+ ahprintf(AH, "BEGIN;\n\n");
|
|
+ }
|
|
+
|
|
+ AH->blobCount = 0;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Called by a format handler after all blobs are restored
|
|
+ */
|
|
+void
|
|
+EndRestoreBlobs(ArchiveHandle *AH)
|
|
+{
|
|
+ if (!AH->ropt->single_txn)
|
|
+ {
|
|
+ if (AH->connection)
|
|
+ CommitTransaction(AH);
|
|
+ else
|
|
+ ahprintf(AH, "COMMIT;\n\n");
|
|
+ }
|
|
+
|
|
+ ahlog(AH, 1, ngettext("restored %d large object\n",
|
|
+ "restored %d large objects\n",
|
|
+ AH->blobCount),
|
|
+ AH->blobCount);
|
|
+}
|
|
+
|
|
+
|
|
+/*
|
|
+ * Called by a format handler to initiate restoration of a blob
|
|
+ */
|
|
+void
|
|
+StartRestoreBlob(ArchiveHandle *AH, Oid oid, bool drop)
|
|
+{
|
|
+ bool old_blob_style = (AH->version < K_VERS_1_12);
|
|
+ Oid loOid;
|
|
+
|
|
+ AH->blobCount++;
|
|
+
|
|
+ /* Initialize the LO Buffer */
|
|
+ AH->lo_buf_used = 0;
|
|
+
|
|
+ ahlog(AH, 1, "restoring large object with OID %u\n", oid);
|
|
+
|
|
+ /* With an old archive we must do drop and create logic here */
|
|
+ if (old_blob_style && drop)
|
|
+ DropBlobIfExists(AH, oid);
|
|
+
|
|
+ if (AH->connection)
|
|
+ {
|
|
+ if (old_blob_style)
|
|
+ {
|
|
+ loOid = lo_create(AH->connection, oid);
|
|
+ if (loOid == 0 || loOid != oid)
|
|
+ exit_horribly(modulename, "could not create large object %u: %s",
|
|
+ oid, PQerrorMessage(AH->connection));
|
|
+ }
|
|
+ AH->loFd = lo_open(AH->connection, oid, INV_WRITE);
|
|
+ if (AH->loFd == -1)
|
|
+ exit_horribly(modulename, "could not open large object %u: %s",
|
|
+ oid, PQerrorMessage(AH->connection));
|
|
+ }
|
|
+ else
|
|
+ {
|
|
+ if (old_blob_style)
|
|
+ ahprintf(AH, "SELECT pg_catalog.lo_open(pg_catalog.lo_create('%u'), %d);\n",
|
|
+ oid, INV_WRITE);
|
|
+ else
|
|
+ ahprintf(AH, "SELECT pg_catalog.lo_open('%u', %d);\n",
|
|
+ oid, INV_WRITE);
|
|
+ }
|
|
+
|
|
+ AH->writingBlob = 1;
|
|
+}
|
|
+
|
|
+void
|
|
+EndRestoreBlob(ArchiveHandle *AH, Oid oid)
|
|
+{
|
|
+ if (AH->lo_buf_used > 0)
|
|
+ {
|
|
+ /* Write remaining bytes from the LO buffer */
|
|
+ dump_lo_buf(AH);
|
|
+ }
|
|
+
|
|
+ AH->writingBlob = 0;
|
|
+
|
|
+ if (AH->connection)
|
|
+ {
|
|
+ lo_close(AH->connection, AH->loFd);
|
|
+ AH->loFd = -1;
|
|
+ }
|
|
+ else
|
|
+ {
|
|
+ ahprintf(AH, "SELECT pg_catalog.lo_close(0);\n\n");
|
|
+ }
|
|
+}
|
|
+
|
|
+/***********
|
|
+ * Sorting and Reordering
|
|
+ ***********/
|
|
+
|
|
+void
|
|
+SortTocFromFile(Archive *AHX, RestoreOptions *ropt)
|
|
+{
|
|
+ ArchiveHandle *AH = (ArchiveHandle *) AHX;
|
|
+ FILE *fh;
|
|
+ char buf[100];
|
|
+ bool incomplete_line;
|
|
+
|
|
+ /* Allocate space for the 'wanted' array, and init it */
|
|
+ ropt->idWanted = (bool *) pg_malloc(sizeof(bool) * AH->maxDumpId);
|
|
+ memset(ropt->idWanted, 0, sizeof(bool) * AH->maxDumpId);
|
|
+
|
|
+ /* Setup the file */
|
|
+ fh = fopen(ropt->tocFile, PG_BINARY_R);
|
|
+ if (!fh)
|
|
+ exit_horribly(modulename, "could not open TOC file \"%s\": %s\n",
|
|
+ ropt->tocFile, strerror(errno));
|
|
+
|
|
+ incomplete_line = false;
|
|
+ while (fgets(buf, sizeof(buf), fh) != NULL)
|
|
+ {
|
|
+ bool prev_incomplete_line = incomplete_line;
|
|
+ int buflen;
|
|
+ char *cmnt;
|
|
+ char *endptr;
|
|
+ DumpId id;
|
|
+ TocEntry *te;
|
|
+
|
|
+ /*
|
|
+ * Some lines in the file might be longer than sizeof(buf). This is
|
|
+ * no problem, since we only care about the leading numeric ID which
|
|
+ * can be at most a few characters; but we have to skip continuation
|
|
+ * bufferloads when processing a long line.
|
|
+ */
|
|
+ buflen = strlen(buf);
|
|
+ if (buflen > 0 && buf[buflen - 1] == '\n')
|
|
+ incomplete_line = false;
|
|
+ else
|
|
+ incomplete_line = true;
|
|
+ if (prev_incomplete_line)
|
|
+ continue;
|
|
+
|
|
+ /* Truncate line at comment, if any */
|
|
+ cmnt = strchr(buf, ';');
|
|
+ if (cmnt != NULL)
|
|
+ cmnt[0] = '\0';
|
|
+
|
|
+ /* Ignore if all blank */
|
|
+ if (strspn(buf, " \t\r\n") == strlen(buf))
|
|
+ continue;
|
|
+
|
|
+ /* Get an ID, check it's valid and not already seen */
|
|
+ id = strtol(buf, &endptr, 10);
|
|
+ if (endptr == buf || id <= 0 || id > AH->maxDumpId ||
|
|
+ ropt->idWanted[id - 1])
|
|
+ {
|
|
+ write_msg(modulename, "WARNING: line ignored: %s\n", buf);
|
|
+ continue;
|
|
+ }
|
|
+
|
|
+ /* Find TOC entry */
|
|
+ te = getTocEntryByDumpId(AH, id);
|
|
+ if (!te)
|
|
+ exit_horribly(modulename, "could not find entry for ID %d\n",
|
|
+ id);
|
|
+
|
|
+ /* Mark it wanted */
|
|
+ ropt->idWanted[id - 1] = true;
|
|
+
|
|
+ /*
|
|
+ * Move each item to the end of the list as it is selected, so that
|
|
+ * they are placed in the desired order. Any unwanted items will end
|
|
+ * up at the front of the list, which may seem unintuitive but it's
|
|
+ * what we need. In an ordinary serial restore that makes no
|
|
+ * difference, but in a parallel restore we need to mark unrestored
|
|
+ * items' dependencies as satisfied before we start examining
|
|
+ * restorable items. Otherwise they could have surprising
|
|
+ * side-effects on the order in which restorable items actually get
|
|
+ * restored.
|
|
+ */
|
|
+ _moveBefore(AH, AH->toc, te);
|
|
+ }
|
|
+
|
|
+ if (fclose(fh) != 0)
|
|
+ exit_horribly(modulename, "could not close TOC file: %s\n",
|
|
+ strerror(errno));
|
|
+}
|
|
+
|
|
+/**********************
|
|
+ * Convenience functions that look like standard IO functions
|
|
+ * for writing data when in dump mode.
|
|
+ **********************/
|
|
+
|
|
+/* Public */
|
|
+void
|
|
+archputs(const char *s, Archive *AH)
|
|
+{
|
|
+ WriteData(AH, s, strlen(s));
|
|
+ return;
|
|
+}
|
|
+
|
|
+/* Public */
|
|
+int
|
|
+archprintf(Archive *AH, const char *fmt,...)
|
|
+{
|
|
+ char *p;
|
|
+ size_t len = 128; /* initial assumption about buffer size */
|
|
+ size_t cnt;
|
|
+
|
|
+ for (;;)
|
|
+ {
|
|
+ va_list args;
|
|
+
|
|
+ /* Allocate work buffer. */
|
|
+ p = (char *) pg_malloc(len);
|
|
+
|
|
+ /* Try to format the data. */
|
|
+ va_start(args, fmt);
|
|
+ cnt = pvsnprintf(p, len, fmt, args);
|
|
+ va_end(args);
|
|
+
|
|
+ if (cnt < len)
|
|
+ break; /* success */
|
|
+
|
|
+ /* Release buffer and loop around to try again with larger len. */
|
|
+ free(p);
|
|
+ len = cnt;
|
|
+ }
|
|
+
|
|
+ WriteData(AH, p, cnt);
|
|
+ free(p);
|
|
+ return (int) cnt;
|
|
+}
|
|
+
|
|
+
|
|
+/*******************************
|
|
+ * Stuff below here should be 'private' to the archiver routines
|
|
+ *******************************/
|
|
+
|
|
+static void
|
|
+SetOutput(ArchiveHandle *AH, const char *filename, int compression)
|
|
+{
|
|
+ int fn;
|
|
+
|
|
+ if (filename)
|
|
+ fn = -1;
|
|
+ else if (AH->FH)
|
|
+ fn = fileno(AH->FH);
|
|
+ else if (AH->fSpec)
|
|
+ {
|
|
+ fn = -1;
|
|
+ filename = AH->fSpec;
|
|
+ }
|
|
+ else
|
|
+ fn = fileno(stdout);
|
|
+
|
|
+ /* If compression explicitly requested, use gzopen */
|
|
+#ifdef HAVE_LIBZ
|
|
+ if (compression != 0)
|
|
+ {
|
|
+ char fmode[10];
|
|
+
|
|
+ /* Don't use PG_BINARY_x since this is zlib */
|
|
+ sprintf(fmode, "wb%d", compression);
|
|
+ if (fn >= 0)
|
|
+ AH->OF = gzdopen(dup(fn), fmode);
|
|
+ else
|
|
+ AH->OF = gzopen(filename, fmode);
|
|
+ AH->gzOut = 1;
|
|
+ }
|
|
+ else
|
|
+#endif
|
|
+ { /* Use fopen */
|
|
+ if (AH->mode == archModeAppend)
|
|
+ {
|
|
+ if (fn >= 0)
|
|
+ AH->OF = fdopen(dup(fn), PG_BINARY_A);
|
|
+ else
|
|
+ AH->OF = fopen(filename, PG_BINARY_A);
|
|
+ }
|
|
+ else
|
|
+ {
|
|
+ if (fn >= 0)
|
|
+ AH->OF = fdopen(dup(fn), PG_BINARY_W);
|
|
+ else
|
|
+ AH->OF = fopen(filename, PG_BINARY_W);
|
|
+ }
|
|
+ AH->gzOut = 0;
|
|
+ }
|
|
+
|
|
+ if (!AH->OF)
|
|
+ {
|
|
+ if (filename)
|
|
+ exit_horribly(modulename, "could not open output file \"%s\": %s\n",
|
|
+ filename, strerror(errno));
|
|
+ else
|
|
+ exit_horribly(modulename, "could not open output file: %s\n",
|
|
+ strerror(errno));
|
|
+ }
|
|
+}
|
|
+
|
|
+static OutputContext
|
|
+SaveOutput(ArchiveHandle *AH)
|
|
+{
|
|
+ OutputContext sav;
|
|
+
|
|
+ sav.OF = AH->OF;
|
|
+ sav.gzOut = AH->gzOut;
|
|
+
|
|
+ return sav;
|
|
+}
|
|
+
|
|
+static void
|
|
+RestoreOutput(ArchiveHandle *AH, OutputContext savedContext)
|
|
+{
|
|
+ int res;
|
|
+
|
|
+ if (AH->gzOut)
|
|
+ res = GZCLOSE(AH->OF);
|
|
+ else
|
|
+ res = fclose(AH->OF);
|
|
+
|
|
+ if (res != 0)
|
|
+ exit_horribly(modulename, "could not close output file: %s\n",
|
|
+ strerror(errno));
|
|
+
|
|
+ AH->gzOut = savedContext.gzOut;
|
|
+ AH->OF = savedContext.OF;
|
|
+}
|
|
+
|
|
+
|
|
+
|
|
+/*
|
|
+ * Print formatted text to the output file (usually stdout).
|
|
+ */
|
|
+int
|
|
+ahprintf(ArchiveHandle *AH, const char *fmt,...)
|
|
+{
|
|
+ char *p;
|
|
+ size_t len = 128; /* initial assumption about buffer size */
|
|
+ size_t cnt;
|
|
+
|
|
+ for (;;)
|
|
+ {
|
|
+ va_list args;
|
|
+
|
|
+ /* Allocate work buffer. */
|
|
+ p = (char *) pg_malloc(len);
|
|
+
|
|
+ /* Try to format the data. */
|
|
+ va_start(args, fmt);
|
|
+ cnt = pvsnprintf(p, len, fmt, args);
|
|
+ va_end(args);
|
|
+
|
|
+ if (cnt < len)
|
|
+ break; /* success */
|
|
+
|
|
+ /* Release buffer and loop around to try again with larger len. */
|
|
+ free(p);
|
|
+ len = cnt;
|
|
+ }
|
|
+
|
|
+ ahwrite(p, 1, cnt, AH);
|
|
+ free(p);
|
|
+ return (int) cnt;
|
|
+}
|
|
+
|
|
+void
|
|
+ahlog(ArchiveHandle *AH, int level, const char *fmt,...)
|
|
+{
|
|
+ va_list ap;
|
|
+
|
|
+ if (AH->debugLevel < level && (!AH->public.verbose || level > 1))
|
|
+ return;
|
|
+
|
|
+ va_start(ap, fmt);
|
|
+ vwrite_msg(NULL, fmt, ap);
|
|
+ va_end(ap);
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Single place for logic which says 'We are restoring to a direct DB connection'.
|
|
+ */
|
|
+static int
|
|
+RestoringToDB(ArchiveHandle *AH)
|
|
+{
|
|
+ return (AH->ropt && AH->ropt->useDB && AH->connection);
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Dump the current contents of the LO data buffer while writing a BLOB
|
|
+ */
|
|
+static void
|
|
+dump_lo_buf(ArchiveHandle *AH)
|
|
+{
|
|
+ if (AH->connection)
|
|
+ {
|
|
+ size_t res;
|
|
+
|
|
+ res = lo_write(AH->connection, AH->loFd, AH->lo_buf, AH->lo_buf_used);
|
|
+ ahlog(AH, 5, ngettext("wrote %lu byte of large object data (result = %lu)\n",
|
|
+ "wrote %lu bytes of large object data (result = %lu)\n",
|
|
+ AH->lo_buf_used),
|
|
+ (unsigned long) AH->lo_buf_used, (unsigned long) res);
|
|
+ if (res != AH->lo_buf_used)
|
|
+ exit_horribly(modulename,
|
|
+ "could not write to large object (result: %lu, expected: %lu)\n",
|
|
+ (unsigned long) res, (unsigned long) AH->lo_buf_used);
|
|
+ }
|
|
+ else
|
|
+ {
|
|
+ PQExpBuffer buf = createPQExpBuffer();
|
|
+
|
|
+ appendByteaLiteralAHX(buf,
|
|
+ (const unsigned char *) AH->lo_buf,
|
|
+ AH->lo_buf_used,
|
|
+ AH);
|
|
+
|
|
+ /* Hack: turn off writingBlob so ahwrite doesn't recurse to here */
|
|
+ AH->writingBlob = 0;
|
|
+ ahprintf(AH, "SELECT pg_catalog.lowrite(0, %s);\n", buf->data);
|
|
+ AH->writingBlob = 1;
|
|
+
|
|
+ destroyPQExpBuffer(buf);
|
|
+ }
|
|
+ AH->lo_buf_used = 0;
|
|
+}
|
|
+
|
|
+
|
|
+/*
|
|
+ * Write buffer to the output file (usually stdout). This is used for
|
|
+ * outputting 'restore' scripts etc. It is even possible for an archive
|
|
+ * format to create a custom output routine to 'fake' a restore if it
|
|
+ * wants to generate a script (see TAR output).
|
|
+ */
|
|
+void
|
|
+ahwrite(const void *ptr, size_t size, size_t nmemb, ArchiveHandle *AH)
|
|
+{
|
|
+ int bytes_written = 0;
|
|
+
|
|
+ if (AH->writingBlob)
|
|
+ {
|
|
+ size_t remaining = size * nmemb;
|
|
+
|
|
+ while (AH->lo_buf_used + remaining > AH->lo_buf_size)
|
|
+ {
|
|
+ size_t avail = AH->lo_buf_size - AH->lo_buf_used;
|
|
+
|
|
+ memcpy((char *) AH->lo_buf + AH->lo_buf_used, ptr, avail);
|
|
+ ptr = (const void *) ((const char *) ptr + avail);
|
|
+ remaining -= avail;
|
|
+ AH->lo_buf_used += avail;
|
|
+ dump_lo_buf(AH);
|
|
+ }
|
|
+
|
|
+ memcpy((char *) AH->lo_buf + AH->lo_buf_used, ptr, remaining);
|
|
+ AH->lo_buf_used += remaining;
|
|
+
|
|
+ bytes_written = size * nmemb;
|
|
+ }
|
|
+ else if (AH->gzOut)
|
|
+ bytes_written = GZWRITE(ptr, size, nmemb, AH->OF);
|
|
+ else if (AH->CustomOutPtr)
|
|
+ bytes_written = AH->CustomOutPtr (AH, ptr, size * nmemb);
|
|
+
|
|
+ else
|
|
+ {
|
|
+ /*
|
|
+ * If we're doing a restore, and it's direct to DB, and we're
|
|
+ * connected then send it to the DB.
|
|
+ */
|
|
+ if (RestoringToDB(AH))
|
|
+ bytes_written = ExecuteSqlCommandBuf(AH, (const char *) ptr, size * nmemb);
|
|
+ else
|
|
+ bytes_written = fwrite(ptr, size, nmemb, AH->OF) * size;
|
|
+ }
|
|
+
|
|
+ if (bytes_written != size * nmemb)
|
|
+ WRITE_ERROR_EXIT;
|
|
+
|
|
+ return;
|
|
+}
|
|
+
|
|
+/* on some error, we may decide to go on... */
|
|
+void
|
|
+warn_or_exit_horribly(ArchiveHandle *AH,
|
|
+ const char *modulename, const char *fmt,...)
|
|
+{
|
|
+ va_list ap;
|
|
+
|
|
+ switch (AH->stage)
|
|
+ {
|
|
+
|
|
+ case STAGE_NONE:
|
|
+ /* Do nothing special */
|
|
+ break;
|
|
+
|
|
+ case STAGE_INITIALIZING:
|
|
+ if (AH->stage != AH->lastErrorStage)
|
|
+ write_msg(modulename, "Error while INITIALIZING:\n");
|
|
+ break;
|
|
+
|
|
+ case STAGE_PROCESSING:
|
|
+ if (AH->stage != AH->lastErrorStage)
|
|
+ write_msg(modulename, "Error while PROCESSING TOC:\n");
|
|
+ break;
|
|
+
|
|
+ case STAGE_FINALIZING:
|
|
+ if (AH->stage != AH->lastErrorStage)
|
|
+ write_msg(modulename, "Error while FINALIZING:\n");
|
|
+ break;
|
|
+ }
|
|
+ if (AH->currentTE != NULL && AH->currentTE != AH->lastErrorTE)
|
|
+ {
|
|
+ write_msg(modulename, "Error from TOC entry %d; %u %u %s %s %s\n",
|
|
+ AH->currentTE->dumpId,
|
|
+ AH->currentTE->catalogId.tableoid, AH->currentTE->catalogId.oid,
|
|
+ AH->currentTE->desc, AH->currentTE->tag, AH->currentTE->owner);
|
|
+ }
|
|
+ AH->lastErrorStage = AH->stage;
|
|
+ AH->lastErrorTE = AH->currentTE;
|
|
+
|
|
+ va_start(ap, fmt);
|
|
+ vwrite_msg(modulename, fmt, ap);
|
|
+ va_end(ap);
|
|
+
|
|
+ if (AH->public.exit_on_error)
|
|
+ exit_nicely(1);
|
|
+ else
|
|
+ AH->public.n_errors++;
|
|
+}
|
|
+
|
|
+#ifdef NOT_USED
|
|
+
|
|
+static void
|
|
+_moveAfter(ArchiveHandle *AH, TocEntry *pos, TocEntry *te)
|
|
+{
|
|
+ /* Unlink te from list */
|
|
+ te->prev->next = te->next;
|
|
+ te->next->prev = te->prev;
|
|
+
|
|
+ /* and insert it after "pos" */
|
|
+ te->prev = pos;
|
|
+ te->next = pos->next;
|
|
+ pos->next->prev = te;
|
|
+ pos->next = te;
|
|
+}
|
|
+#endif
|
|
+
|
|
+static void
|
|
+_moveBefore(ArchiveHandle *AH, TocEntry *pos, TocEntry *te)
|
|
+{
|
|
+ /* Unlink te from list */
|
|
+ te->prev->next = te->next;
|
|
+ te->next->prev = te->prev;
|
|
+
|
|
+ /* and insert it before "pos" */
|
|
+ te->prev = pos->prev;
|
|
+ te->next = pos;
|
|
+ pos->prev->next = te;
|
|
+ pos->prev = te;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Build index arrays for the TOC list
|
|
+ *
|
|
+ * This should be invoked only after we have created or read in all the TOC
|
|
+ * items.
|
|
+ *
|
|
+ * The arrays are indexed by dump ID (so entry zero is unused). Note that the
|
|
+ * array entries run only up to maxDumpId. We might see dependency dump IDs
|
|
+ * beyond that (if the dump was partial); so always check the array bound
|
|
+ * before trying to touch an array entry.
|
|
+ */
|
|
+static void
|
|
+buildTocEntryArrays(ArchiveHandle *AH)
|
|
+{
|
|
+ DumpId maxDumpId = AH->maxDumpId;
|
|
+ TocEntry *te;
|
|
+
|
|
+ AH->tocsByDumpId = (TocEntry **) pg_malloc0((maxDumpId + 1) * sizeof(TocEntry *));
|
|
+ AH->tableDataId = (DumpId *) pg_malloc0((maxDumpId + 1) * sizeof(DumpId));
|
|
+
|
|
+ for (te = AH->toc->next; te != AH->toc; te = te->next)
|
|
+ {
|
|
+ /* this check is purely paranoia, maxDumpId should be correct */
|
|
+ if (te->dumpId <= 0 || te->dumpId > maxDumpId)
|
|
+ exit_horribly(modulename, "bad dumpId\n");
|
|
+
|
|
+ /* tocsByDumpId indexes all TOCs by their dump ID */
|
|
+ AH->tocsByDumpId[te->dumpId] = te;
|
|
+
|
|
+ /*
|
|
+ * tableDataId provides the TABLE DATA item's dump ID for each TABLE
|
|
+ * TOC entry that has a DATA item. We compute this by reversing the
|
|
+ * TABLE DATA item's dependency, knowing that a TABLE DATA item has
|
|
+ * just one dependency and it is the TABLE item.
|
|
+ */
|
|
+ if (strcmp(te->desc, "TABLE DATA") == 0 && te->nDeps > 0)
|
|
+ {
|
|
+ DumpId tableId = te->dependencies[0];
|
|
+
|
|
+ /*
|
|
+ * The TABLE item might not have been in the archive, if this was
|
|
+ * a data-only dump; but its dump ID should be less than its data
|
|
+ * item's dump ID, so there should be a place for it in the array.
|
|
+ */
|
|
+ if (tableId <= 0 || tableId > maxDumpId)
|
|
+ exit_horribly(modulename, "bad table dumpId for TABLE DATA item\n");
|
|
+
|
|
+ AH->tableDataId[tableId] = te->dumpId;
|
|
+ }
|
|
+ }
|
|
+}
|
|
+
|
|
+TocEntry *
|
|
+getTocEntryByDumpId(ArchiveHandle *AH, DumpId id)
|
|
+{
|
|
+ /* build index arrays if we didn't already */
|
|
+ if (AH->tocsByDumpId == NULL)
|
|
+ buildTocEntryArrays(AH);
|
|
+
|
|
+ if (id > 0 && id <= AH->maxDumpId)
|
|
+ return AH->tocsByDumpId[id];
|
|
+
|
|
+ return NULL;
|
|
+}
|
|
+
|
|
+teReqs
|
|
+TocIDRequired(ArchiveHandle *AH, DumpId id)
|
|
+{
|
|
+ TocEntry *te = getTocEntryByDumpId(AH, id);
|
|
+
|
|
+ if (!te)
|
|
+ return 0;
|
|
+
|
|
+ return te->reqs;
|
|
+}
|
|
+
|
|
+size_t
|
|
+WriteOffset(ArchiveHandle *AH, pgoff_t o, int wasSet)
|
|
+{
|
|
+ int off;
|
|
+
|
|
+ /* Save the flag */
|
|
+ (*AH->WriteBytePtr) (AH, wasSet);
|
|
+
|
|
+ /* Write out pgoff_t smallest byte first, prevents endian mismatch */
|
|
+ for (off = 0; off < sizeof(pgoff_t); off++)
|
|
+ {
|
|
+ (*AH->WriteBytePtr) (AH, o & 0xFF);
|
|
+ o >>= 8;
|
|
+ }
|
|
+ return sizeof(pgoff_t) + 1;
|
|
+}
|
|
+
|
|
+int
|
|
+ReadOffset(ArchiveHandle *AH, pgoff_t * o)
|
|
+{
|
|
+ int i;
|
|
+ int off;
|
|
+ int offsetFlg;
|
|
+
|
|
+ /* Initialize to zero */
|
|
+ *o = 0;
|
|
+
|
|
+ /* Check for old version */
|
|
+ if (AH->version < K_VERS_1_7)
|
|
+ {
|
|
+ /* Prior versions wrote offsets using WriteInt */
|
|
+ i = ReadInt(AH);
|
|
+ /* -1 means not set */
|
|
+ if (i < 0)
|
|
+ return K_OFFSET_POS_NOT_SET;
|
|
+ else if (i == 0)
|
|
+ return K_OFFSET_NO_DATA;
|
|
+
|
|
+ /* Cast to pgoff_t because it was written as an int. */
|
|
+ *o = (pgoff_t) i;
|
|
+ return K_OFFSET_POS_SET;
|
|
+ }
|
|
+
|
|
+ /*
|
|
+ * Read the flag indicating the state of the data pointer. Check if valid
|
|
+ * and die if not.
|
|
+ *
|
|
+ * This used to be handled by a negative or zero pointer, now we use an
|
|
+ * extra byte specifically for the state.
|
|
+ */
|
|
+ offsetFlg = (*AH->ReadBytePtr) (AH) & 0xFF;
|
|
+
|
|
+ switch (offsetFlg)
|
|
+ {
|
|
+ case K_OFFSET_POS_NOT_SET:
|
|
+ case K_OFFSET_NO_DATA:
|
|
+ case K_OFFSET_POS_SET:
|
|
+
|
|
+ break;
|
|
+
|
|
+ default:
|
|
+ exit_horribly(modulename, "unexpected data offset flag %d\n", offsetFlg);
|
|
+ }
|
|
+
|
|
+ /*
|
|
+ * Read the bytes
|
|
+ */
|
|
+ for (off = 0; off < AH->offSize; off++)
|
|
+ {
|
|
+ if (off < sizeof(pgoff_t))
|
|
+ *o |= ((pgoff_t) ((*AH->ReadBytePtr) (AH))) << (off * 8);
|
|
+ else
|
|
+ {
|
|
+ if ((*AH->ReadBytePtr) (AH) != 0)
|
|
+ exit_horribly(modulename, "file offset in dump file is too large\n");
|
|
+ }
|
|
+ }
|
|
+
|
|
+ return offsetFlg;
|
|
+}
|
|
+
|
|
+size_t
|
|
+WriteInt(ArchiveHandle *AH, int i)
|
|
+{
|
|
+ int b;
|
|
+
|
|
+ /*
|
|
+ * This is a bit yucky, but I don't want to make the binary format very
|
|
+ * dependent on representation, and not knowing much about it, I write out
|
|
+ * a sign byte. If you change this, don't forget to change the file
|
|
+ * version #, and modify readInt to read the new format AS WELL AS the old
|
|
+ * formats.
|
|
+ */
|
|
+
|
|
+ /* SIGN byte */
|
|
+ if (i < 0)
|
|
+ {
|
|
+ (*AH->WriteBytePtr) (AH, 1);
|
|
+ i = -i;
|
|
+ }
|
|
+ else
|
|
+ (*AH->WriteBytePtr) (AH, 0);
|
|
+
|
|
+ for (b = 0; b < AH->intSize; b++)
|
|
+ {
|
|
+ (*AH->WriteBytePtr) (AH, i & 0xFF);
|
|
+ i >>= 8;
|
|
+ }
|
|
+
|
|
+ return AH->intSize + 1;
|
|
+}
|
|
+
|
|
+int
|
|
+ReadInt(ArchiveHandle *AH)
|
|
+{
|
|
+ int res = 0;
|
|
+ int bv,
|
|
+ b;
|
|
+ int sign = 0; /* Default positive */
|
|
+ int bitShift = 0;
|
|
+
|
|
+ if (AH->version > K_VERS_1_0)
|
|
+ /* Read a sign byte */
|
|
+ sign = (*AH->ReadBytePtr) (AH);
|
|
+
|
|
+ for (b = 0; b < AH->intSize; b++)
|
|
+ {
|
|
+ bv = (*AH->ReadBytePtr) (AH) & 0xFF;
|
|
+ if (bv != 0)
|
|
+ res = res + (bv << bitShift);
|
|
+ bitShift += 8;
|
|
+ }
|
|
+
|
|
+ if (sign)
|
|
+ res = -res;
|
|
+
|
|
+ return res;
|
|
+}
|
|
+
|
|
+size_t
|
|
+WriteStr(ArchiveHandle *AH, const char *c)
|
|
+{
|
|
+ size_t res;
|
|
+
|
|
+ if (c)
|
|
+ {
|
|
+ int len = strlen(c);
|
|
+
|
|
+ res = WriteInt(AH, len);
|
|
+ (*AH->WriteBufPtr) (AH, c, len);
|
|
+ res += len;
|
|
+ }
|
|
+ else
|
|
+ res = WriteInt(AH, -1);
|
|
+
|
|
+ return res;
|
|
+}
|
|
+
|
|
+char *
|
|
+ReadStr(ArchiveHandle *AH)
|
|
+{
|
|
+ char *buf;
|
|
+ int l;
|
|
+
|
|
+ l = ReadInt(AH);
|
|
+ if (l < 0)
|
|
+ buf = NULL;
|
|
+ else
|
|
+ {
|
|
+ buf = (char *) pg_malloc(l + 1);
|
|
+ (*AH->ReadBufPtr) (AH, (void *) buf, l);
|
|
+
|
|
+ buf[l] = '\0';
|
|
+ }
|
|
+
|
|
+ return buf;
|
|
+}
|
|
+
|
|
+static int
|
|
+_discoverArchiveFormat(ArchiveHandle *AH)
|
|
+{
|
|
+ FILE *fh;
|
|
+ char sig[6]; /* More than enough */
|
|
+ size_t cnt;
|
|
+ int wantClose = 0;
|
|
+
|
|
+#if 0
|
|
+ write_msg(modulename, "attempting to ascertain archive format\n");
|
|
+#endif
|
|
+
|
|
+ if (AH->lookahead)
|
|
+ free(AH->lookahead);
|
|
+
|
|
+ AH->lookaheadSize = 512;
|
|
+ AH->lookahead = pg_malloc0(512);
|
|
+ AH->lookaheadLen = 0;
|
|
+ AH->lookaheadPos = 0;
|
|
+
|
|
+ if (AH->fSpec)
|
|
+ {
|
|
+ struct stat st;
|
|
+
|
|
+ wantClose = 1;
|
|
+
|
|
+ /*
|
|
+ * Check if the specified archive is a directory. If so, check if
|
|
+ * there's a "toc.dat" (or "toc.dat.gz") file in it.
|
|
+ */
|
|
+ if (stat(AH->fSpec, &st) == 0 && S_ISDIR(st.st_mode))
|
|
+ {
|
|
+ char buf[MAXPGPATH];
|
|
+
|
|
+ if (snprintf(buf, MAXPGPATH, "%s/toc.dat", AH->fSpec) >= MAXPGPATH)
|
|
+ exit_horribly(modulename, "directory name too long: \"%s\"\n",
|
|
+ AH->fSpec);
|
|
+ if (stat(buf, &st) == 0 && S_ISREG(st.st_mode))
|
|
+ {
|
|
+ AH->format = archDirectory;
|
|
+ return AH->format;
|
|
+ }
|
|
+
|
|
+#ifdef HAVE_LIBZ
|
|
+ if (snprintf(buf, MAXPGPATH, "%s/toc.dat.gz", AH->fSpec) >= MAXPGPATH)
|
|
+ exit_horribly(modulename, "directory name too long: \"%s\"\n",
|
|
+ AH->fSpec);
|
|
+ if (stat(buf, &st) == 0 && S_ISREG(st.st_mode))
|
|
+ {
|
|
+ AH->format = archDirectory;
|
|
+ return AH->format;
|
|
+ }
|
|
+#endif
|
|
+ exit_horribly(modulename, "directory \"%s\" does not appear to be a valid archive (\"toc.dat\" does not exist)\n",
|
|
+ AH->fSpec);
|
|
+ fh = NULL; /* keep compiler quiet */
|
|
+ }
|
|
+ else
|
|
+ {
|
|
+ fh = fopen(AH->fSpec, PG_BINARY_R);
|
|
+ if (!fh)
|
|
+ exit_horribly(modulename, "could not open input file \"%s\": %s\n",
|
|
+ AH->fSpec, strerror(errno));
|
|
+ }
|
|
+ }
|
|
+ else
|
|
+ {
|
|
+ fh = stdin;
|
|
+ if (!fh)
|
|
+ exit_horribly(modulename, "could not open input file: %s\n",
|
|
+ strerror(errno));
|
|
+ }
|
|
+
|
|
+ if ((cnt = fread(sig, 1, 5, fh)) != 5)
|
|
+ {
|
|
+ if (ferror(fh))
|
|
+ exit_horribly(modulename, "could not read input file: %s\n", strerror(errno));
|
|
+ else
|
|
+ exit_horribly(modulename, "input file is too short (read %lu, expected 5)\n",
|
|
+ (unsigned long) cnt);
|
|
+ }
|
|
+
|
|
+ /* Save it, just in case we need it later */
|
|
+ strncpy(&AH->lookahead[0], sig, 5);
|
|
+ AH->lookaheadLen = 5;
|
|
+
|
|
+ if (strncmp(sig, "PGDMP", 5) == 0)
|
|
+ {
|
|
+ int byteread;
|
|
+
|
|
+ /*
|
|
+ * Finish reading (most of) a custom-format header.
|
|
+ *
|
|
+ * NB: this code must agree with ReadHead().
|
|
+ */
|
|
+ if ((byteread = fgetc(fh)) == EOF)
|
|
+ READ_ERROR_EXIT(fh);
|
|
+
|
|
+ AH->vmaj = byteread;
|
|
+
|
|
+ if ((byteread = fgetc(fh)) == EOF)
|
|
+ READ_ERROR_EXIT(fh);
|
|
+
|
|
+ AH->vmin = byteread;
|
|
+
|
|
+ /* Save these too... */
|
|
+ AH->lookahead[AH->lookaheadLen++] = AH->vmaj;
|
|
+ AH->lookahead[AH->lookaheadLen++] = AH->vmin;
|
|
+
|
|
+ /* Check header version; varies from V1.0 */
|
|
+ if (AH->vmaj > 1 || ((AH->vmaj == 1) && (AH->vmin > 0))) /* Version > 1.0 */
|
|
+ {
|
|
+ if ((byteread = fgetc(fh)) == EOF)
|
|
+ READ_ERROR_EXIT(fh);
|
|
+
|
|
+ AH->vrev = byteread;
|
|
+ AH->lookahead[AH->lookaheadLen++] = AH->vrev;
|
|
+ }
|
|
+ else
|
|
+ AH->vrev = 0;
|
|
+
|
|
+ /* Make a convenient integer <maj><min><rev>00 */
|
|
+ AH->version = ((AH->vmaj * 256 + AH->vmin) * 256 + AH->vrev) * 256 + 0;
|
|
+
|
|
+ if ((AH->intSize = fgetc(fh)) == EOF)
|
|
+ READ_ERROR_EXIT(fh);
|
|
+ AH->lookahead[AH->lookaheadLen++] = AH->intSize;
|
|
+
|
|
+ if (AH->version >= K_VERS_1_7)
|
|
+ {
|
|
+ if ((AH->offSize = fgetc(fh)) == EOF)
|
|
+ READ_ERROR_EXIT(fh);
|
|
+ AH->lookahead[AH->lookaheadLen++] = AH->offSize;
|
|
+ }
|
|
+ else
|
|
+ AH->offSize = AH->intSize;
|
|
+
|
|
+ if ((byteread = fgetc(fh)) == EOF)
|
|
+ READ_ERROR_EXIT(fh);
|
|
+
|
|
+ AH->format = byteread;
|
|
+ AH->lookahead[AH->lookaheadLen++] = AH->format;
|
|
+ }
|
|
+ else
|
|
+ {
|
|
+ /*
|
|
+ * *Maybe* we have a tar archive format file or a text dump ... So,
|
|
+ * read first 512 byte header...
|
|
+ */
|
|
+ cnt = fread(&AH->lookahead[AH->lookaheadLen], 1, 512 - AH->lookaheadLen, fh);
|
|
+ /* read failure is checked below */
|
|
+ AH->lookaheadLen += cnt;
|
|
+
|
|
+ if (AH->lookaheadLen >= strlen(TEXT_DUMPALL_HEADER) &&
|
|
+ (strncmp(AH->lookahead, TEXT_DUMP_HEADER, strlen(TEXT_DUMP_HEADER)) == 0 ||
|
|
+ strncmp(AH->lookahead, TEXT_DUMPALL_HEADER, strlen(TEXT_DUMPALL_HEADER)) == 0))
|
|
+ {
|
|
+ /*
|
|
+ * looks like it's probably a text format dump. so suggest they
|
|
+ * try psql
|
|
+ */
|
|
+ exit_horribly(modulename, "input file appears to be a text format dump. Please use psql.\n");
|
|
+ }
|
|
+
|
|
+ if (AH->lookaheadLen != 512)
|
|
+ {
|
|
+ if (feof(fh))
|
|
+ exit_horribly(modulename, "input file does not appear to be a valid archive (too short?)\n");
|
|
+ else
|
|
+ READ_ERROR_EXIT(fh);
|
|
+ }
|
|
+
|
|
+ if (!isValidTarHeader(AH->lookahead))
|
|
+ exit_horribly(modulename, "input file does not appear to be a valid archive\n");
|
|
+
|
|
+ AH->format = archTar;
|
|
+ }
|
|
+
|
|
+ /* If we can't seek, then mark the header as read */
|
|
+ if (fseeko(fh, 0, SEEK_SET) != 0)
|
|
+ {
|
|
+ /*
|
|
+ * NOTE: Formats that use the lookahead buffer can unset this in their
|
|
+ * Init routine.
|
|
+ */
|
|
+ AH->readHeader = 1;
|
|
+ }
|
|
+ else
|
|
+ AH->lookaheadLen = 0; /* Don't bother since we've reset the file */
|
|
+
|
|
+ /* Close the file */
|
|
+ if (wantClose)
|
|
+ if (fclose(fh) != 0)
|
|
+ exit_horribly(modulename, "could not close input file: %s\n",
|
|
+ strerror(errno));
|
|
+
|
|
+ return AH->format;
|
|
+}
|
|
+
|
|
+
|
|
+/*
|
|
+ * Allocate an archive handle
|
|
+ */
|
|
+static ArchiveHandle *
|
|
+_allocAH(const char *FileSpec, const ArchiveFormat fmt,
|
|
+ const int compression, ArchiveMode mode, SetupWorkerPtr setupWorkerPtr)
|
|
+{
|
|
+ ArchiveHandle *AH;
|
|
+
|
|
+#if 0
|
|
+ write_msg(modulename, "allocating AH for %s, format %d\n", FileSpec, fmt);
|
|
+#endif
|
|
+
|
|
+ AH = (ArchiveHandle *) pg_malloc0(sizeof(ArchiveHandle));
|
|
+
|
|
+ /* AH->debugLevel = 100; */
|
|
+
|
|
+ AH->vmaj = K_VERS_MAJOR;
|
|
+ AH->vmin = K_VERS_MINOR;
|
|
+ AH->vrev = K_VERS_REV;
|
|
+
|
|
+ /* Make a convenient integer <maj><min><rev>00 */
|
|
+ AH->version = ((AH->vmaj * 256 + AH->vmin) * 256 + AH->vrev) * 256 + 0;
|
|
+
|
|
+ /* initialize for backwards compatible string processing */
|
|
+ AH->public.encoding = 0; /* PG_SQL_ASCII */
|
|
+ AH->public.std_strings = false;
|
|
+
|
|
+ /* sql error handling */
|
|
+ AH->public.exit_on_error = true;
|
|
+ AH->public.n_errors = 0;
|
|
+
|
|
+ AH->archiveDumpVersion = PG_VERSION;
|
|
+
|
|
+ AH->createDate = time(NULL);
|
|
+
|
|
+ AH->intSize = sizeof(int);
|
|
+ AH->offSize = sizeof(pgoff_t);
|
|
+ if (FileSpec)
|
|
+ {
|
|
+ AH->fSpec = pg_strdup(FileSpec);
|
|
+
|
|
+ /*
|
|
+ * Not used; maybe later....
|
|
+ *
|
|
+ * AH->workDir = pg_strdup(FileSpec); for(i=strlen(FileSpec) ; i > 0 ;
|
|
+ * i--) if (AH->workDir[i-1] == '/')
|
|
+ */
|
|
+ }
|
|
+ else
|
|
+ AH->fSpec = NULL;
|
|
+
|
|
+ AH->currUser = NULL; /* unknown */
|
|
+ AH->currSchema = NULL; /* ditto */
|
|
+ AH->currTablespace = NULL; /* ditto */
|
|
+ AH->currWithOids = -1; /* force SET */
|
|
+
|
|
+ AH->toc = (TocEntry *) pg_malloc0(sizeof(TocEntry));
|
|
+
|
|
+ AH->toc->next = AH->toc;
|
|
+ AH->toc->prev = AH->toc;
|
|
+
|
|
+ AH->mode = mode;
|
|
+ AH->compression = compression;
|
|
+
|
|
+ memset(&(AH->sqlparse), 0, sizeof(AH->sqlparse));
|
|
+
|
|
+ /* Open stdout with no compression for AH output handle */
|
|
+ AH->gzOut = 0;
|
|
+ AH->OF = stdout;
|
|
+
|
|
+ /*
|
|
+ * On Windows, we need to use binary mode to read/write non-text archive
|
|
+ * formats. Force stdin/stdout into binary mode if that is what we are
|
|
+ * using.
|
|
+ */
|
|
+#ifdef WIN32
|
|
+ if (fmt != archNull &&
|
|
+ (AH->fSpec == NULL || strcmp(AH->fSpec, "") == 0))
|
|
+ {
|
|
+ if (mode == archModeWrite)
|
|
+ setmode(fileno(stdout), O_BINARY);
|
|
+ else
|
|
+ setmode(fileno(stdin), O_BINARY);
|
|
+ }
|
|
+#endif
|
|
+
|
|
+ AH->SetupWorkerPtr = setupWorkerPtr;
|
|
+
|
|
+ if (fmt == archUnknown)
|
|
+ AH->format = _discoverArchiveFormat(AH);
|
|
+ else
|
|
+ AH->format = fmt;
|
|
+
|
|
+ AH->promptPassword = TRI_DEFAULT;
|
|
+
|
|
+ switch (AH->format)
|
|
+ {
|
|
+ case archCustom:
|
|
+ InitArchiveFmt_Custom(AH);
|
|
+ break;
|
|
+
|
|
+ case archNull:
|
|
+ InitArchiveFmt_Null(AH);
|
|
+ break;
|
|
+
|
|
+ case archDirectory:
|
|
+ InitArchiveFmt_Directory(AH);
|
|
+ break;
|
|
+
|
|
+ case archTar:
|
|
+ InitArchiveFmt_Tar(AH);
|
|
+ break;
|
|
+
|
|
+ default:
|
|
+ exit_horribly(modulename, "unrecognized file format \"%d\"\n", fmt);
|
|
+ }
|
|
+
|
|
+ return AH;
|
|
+}
|
|
+
|
|
+void
|
|
+WriteDataChunks(ArchiveHandle *AH, ParallelState *pstate)
|
|
+{
|
|
+ TocEntry *te;
|
|
+
|
|
+ for (te = AH->toc->next; te != AH->toc; te = te->next)
|
|
+ {
|
|
+ if (!te->dataDumper)
|
|
+ continue;
|
|
+
|
|
+ if ((te->reqs & REQ_DATA) == 0)
|
|
+ continue;
|
|
+
|
|
+ if (pstate && pstate->numWorkers > 1)
|
|
+ {
|
|
+ /*
|
|
+ * If we are in a parallel backup, then we are always the master
|
|
+ * process.
|
|
+ */
|
|
+ EnsureIdleWorker(AH, pstate);
|
|
+ Assert(GetIdleWorker(pstate) != NO_SLOT);
|
|
+ DispatchJobForTocEntry(AH, pstate, te, ACT_DUMP);
|
|
+ }
|
|
+ else
|
|
+ WriteDataChunksForTocEntry(AH, te);
|
|
+ }
|
|
+ EnsureWorkersFinished(AH, pstate);
|
|
+}
|
|
+
|
|
+void
|
|
+WriteDataChunksForTocEntry(ArchiveHandle *AH, TocEntry *te)
|
|
+{
|
|
+ StartDataPtr startPtr;
|
|
+ EndDataPtr endPtr;
|
|
+
|
|
+ AH->currToc = te;
|
|
+
|
|
+ if (strcmp(te->desc, "BLOBS") == 0)
|
|
+ {
|
|
+ startPtr = AH->StartBlobsPtr;
|
|
+ endPtr = AH->EndBlobsPtr;
|
|
+ }
|
|
+ else
|
|
+ {
|
|
+ startPtr = AH->StartDataPtr;
|
|
+ endPtr = AH->EndDataPtr;
|
|
+ }
|
|
+
|
|
+ if (startPtr != NULL)
|
|
+ (*startPtr) (AH, te);
|
|
+
|
|
+ /*
|
|
+ * The user-provided DataDumper routine needs to call AH->WriteData
|
|
+ */
|
|
+ (*te->dataDumper) ((Archive *) AH, te->dataDumperArg);
|
|
+
|
|
+ if (endPtr != NULL)
|
|
+ (*endPtr) (AH, te);
|
|
+
|
|
+ AH->currToc = NULL;
|
|
+}
|
|
+
|
|
+void
|
|
+WriteToc(ArchiveHandle *AH)
|
|
+{
|
|
+ TocEntry *te;
|
|
+ char workbuf[32];
|
|
+ int tocCount;
|
|
+ int i;
|
|
+
|
|
+ /* count entries that will actually be dumped */
|
|
+ tocCount = 0;
|
|
+ for (te = AH->toc->next; te != AH->toc; te = te->next)
|
|
+ {
|
|
+ if ((te->reqs & (REQ_SCHEMA | REQ_DATA | REQ_SPECIAL)) != 0)
|
|
+ tocCount++;
|
|
+ }
|
|
+
|
|
+ /* printf("%d TOC Entries to save\n", tocCount); */
|
|
+
|
|
+ WriteInt(AH, tocCount);
|
|
+
|
|
+ for (te = AH->toc->next; te != AH->toc; te = te->next)
|
|
+ {
|
|
+ if ((te->reqs & (REQ_SCHEMA | REQ_DATA | REQ_SPECIAL)) == 0)
|
|
+ continue;
|
|
+
|
|
+ WriteInt(AH, te->dumpId);
|
|
+ WriteInt(AH, te->dataDumper ? 1 : 0);
|
|
+
|
|
+ /* OID is recorded as a string for historical reasons */
|
|
+ sprintf(workbuf, "%u", te->catalogId.tableoid);
|
|
+ WriteStr(AH, workbuf);
|
|
+ sprintf(workbuf, "%u", te->catalogId.oid);
|
|
+ WriteStr(AH, workbuf);
|
|
+
|
|
+ WriteStr(AH, te->tag);
|
|
+ WriteStr(AH, te->desc);
|
|
+ WriteInt(AH, te->section);
|
|
+ WriteStr(AH, te->defn);
|
|
+ WriteStr(AH, te->dropStmt);
|
|
+ WriteStr(AH, te->copyStmt);
|
|
+ WriteStr(AH, te->namespace);
|
|
+ WriteStr(AH, te->tablespace);
|
|
+ WriteStr(AH, te->owner);
|
|
+ WriteStr(AH, te->withOids ? "true" : "false");
|
|
+
|
|
+ /* Dump list of dependencies */
|
|
+ for (i = 0; i < te->nDeps; i++)
|
|
+ {
|
|
+ sprintf(workbuf, "%d", te->dependencies[i]);
|
|
+ WriteStr(AH, workbuf);
|
|
+ }
|
|
+ WriteStr(AH, NULL); /* Terminate List */
|
|
+
|
|
+ if (AH->WriteExtraTocPtr)
|
|
+ (*AH->WriteExtraTocPtr) (AH, te);
|
|
+ }
|
|
+}
|
|
+
|
|
+void
|
|
+ReadToc(ArchiveHandle *AH)
|
|
+{
|
|
+ int i;
|
|
+ char *tmp;
|
|
+ DumpId *deps;
|
|
+ int depIdx;
|
|
+ int depSize;
|
|
+ TocEntry *te;
|
|
+
|
|
+ AH->tocCount = ReadInt(AH);
|
|
+ AH->maxDumpId = 0;
|
|
+
|
|
+ for (i = 0; i < AH->tocCount; i++)
|
|
+ {
|
|
+ te = (TocEntry *) pg_malloc0(sizeof(TocEntry));
|
|
+ te->dumpId = ReadInt(AH);
|
|
+
|
|
+ if (te->dumpId > AH->maxDumpId)
|
|
+ AH->maxDumpId = te->dumpId;
|
|
+
|
|
+ /* Sanity check */
|
|
+ if (te->dumpId <= 0)
|
|
+ exit_horribly(modulename,
|
|
+ "entry ID %d out of range -- perhaps a corrupt TOC\n",
|
|
+ te->dumpId);
|
|
+
|
|
+ te->hadDumper = ReadInt(AH);
|
|
+
|
|
+ if (AH->version >= K_VERS_1_8)
|
|
+ {
|
|
+ tmp = ReadStr(AH);
|
|
+ sscanf(tmp, "%u", &te->catalogId.tableoid);
|
|
+ free(tmp);
|
|
+ }
|
|
+ else
|
|
+ te->catalogId.tableoid = InvalidOid;
|
|
+ tmp = ReadStr(AH);
|
|
+ sscanf(tmp, "%u", &te->catalogId.oid);
|
|
+ free(tmp);
|
|
+
|
|
+ te->tag = ReadStr(AH);
|
|
+ te->desc = ReadStr(AH);
|
|
+
|
|
+ if (AH->version >= K_VERS_1_11)
|
|
+ {
|
|
+ te->section = ReadInt(AH);
|
|
+ }
|
|
+ else
|
|
+ {
|
|
+ /*
|
|
+ * Rules for pre-8.4 archives wherein pg_dump hasn't classified
|
|
+ * the entries into sections. This list need not cover entry
|
|
+ * types added later than 8.4.
|
|
+ */
|
|
+ if (strcmp(te->desc, "COMMENT") == 0 ||
|
|
+ strcmp(te->desc, "ACL") == 0 ||
|
|
+ strcmp(te->desc, "ACL LANGUAGE") == 0)
|
|
+ te->section = SECTION_NONE;
|
|
+ else if (strcmp(te->desc, "TABLE DATA") == 0 ||
|
|
+ strcmp(te->desc, "BLOBS") == 0 ||
|
|
+ strcmp(te->desc, "BLOB COMMENTS") == 0)
|
|
+ te->section = SECTION_DATA;
|
|
+ else if (strcmp(te->desc, "CONSTRAINT") == 0 ||
|
|
+ strcmp(te->desc, "CHECK CONSTRAINT") == 0 ||
|
|
+ strcmp(te->desc, "FK CONSTRAINT") == 0 ||
|
|
+ strcmp(te->desc, "INDEX") == 0 ||
|
|
+ strcmp(te->desc, "RULE") == 0 ||
|
|
+ strcmp(te->desc, "TRIGGER") == 0)
|
|
+ te->section = SECTION_POST_DATA;
|
|
+ else
|
|
+ te->section = SECTION_PRE_DATA;
|
|
+ }
|
|
+
|
|
+ te->defn = ReadStr(AH);
|
|
+ te->dropStmt = ReadStr(AH);
|
|
+
|
|
+ if (AH->version >= K_VERS_1_3)
|
|
+ te->copyStmt = ReadStr(AH);
|
|
+
|
|
+ if (AH->version >= K_VERS_1_6)
|
|
+ te->namespace = ReadStr(AH);
|
|
+
|
|
+ if (AH->version >= K_VERS_1_10)
|
|
+ te->tablespace = ReadStr(AH);
|
|
+
|
|
+ te->owner = ReadStr(AH);
|
|
+ if (AH->version >= K_VERS_1_9)
|
|
+ {
|
|
+ if (strcmp(ReadStr(AH), "true") == 0)
|
|
+ te->withOids = true;
|
|
+ else
|
|
+ te->withOids = false;
|
|
+ }
|
|
+ else
|
|
+ te->withOids = true;
|
|
+
|
|
+ /* Read TOC entry dependencies */
|
|
+ if (AH->version >= K_VERS_1_5)
|
|
+ {
|
|
+ depSize = 100;
|
|
+ deps = (DumpId *) pg_malloc(sizeof(DumpId) * depSize);
|
|
+ depIdx = 0;
|
|
+ for (;;)
|
|
+ {
|
|
+ tmp = ReadStr(AH);
|
|
+ if (!tmp)
|
|
+ break; /* end of list */
|
|
+ if (depIdx >= depSize)
|
|
+ {
|
|
+ depSize *= 2;
|
|
+ deps = (DumpId *) pg_realloc(deps, sizeof(DumpId) * depSize);
|
|
+ }
|
|
+ sscanf(tmp, "%d", &deps[depIdx]);
|
|
+ free(tmp);
|
|
+ depIdx++;
|
|
+ }
|
|
+
|
|
+ if (depIdx > 0) /* We have a non-null entry */
|
|
+ {
|
|
+ deps = (DumpId *) pg_realloc(deps, sizeof(DumpId) * depIdx);
|
|
+ te->dependencies = deps;
|
|
+ te->nDeps = depIdx;
|
|
+ }
|
|
+ else
|
|
+ {
|
|
+ free(deps);
|
|
+ te->dependencies = NULL;
|
|
+ te->nDeps = 0;
|
|
+ }
|
|
+ }
|
|
+ else
|
|
+ {
|
|
+ te->dependencies = NULL;
|
|
+ te->nDeps = 0;
|
|
+ }
|
|
+
|
|
+ if (AH->ReadExtraTocPtr)
|
|
+ (*AH->ReadExtraTocPtr) (AH, te);
|
|
+
|
|
+ ahlog(AH, 3, "read TOC entry %d (ID %d) for %s %s\n",
|
|
+ i, te->dumpId, te->desc, te->tag);
|
|
+
|
|
+ /* link completed entry into TOC circular list */
|
|
+ te->prev = AH->toc->prev;
|
|
+ AH->toc->prev->next = te;
|
|
+ AH->toc->prev = te;
|
|
+ te->next = AH->toc;
|
|
+
|
|
+ /* special processing immediately upon read for some items */
|
|
+ if (strcmp(te->desc, "ENCODING") == 0)
|
|
+ processEncodingEntry(AH, te);
|
|
+ else if (strcmp(te->desc, "STDSTRINGS") == 0)
|
|
+ processStdStringsEntry(AH, te);
|
|
+ }
|
|
+}
|
|
+
|
|
+static void
|
|
+processEncodingEntry(ArchiveHandle *AH, TocEntry *te)
|
|
+{
|
|
+ /* te->defn should have the form SET client_encoding = 'foo'; */
|
|
+ char *defn = pg_strdup(te->defn);
|
|
+ char *ptr1;
|
|
+ char *ptr2 = NULL;
|
|
+ int encoding;
|
|
+
|
|
+ ptr1 = strchr(defn, '\'');
|
|
+ if (ptr1)
|
|
+ ptr2 = strchr(++ptr1, '\'');
|
|
+ if (ptr2)
|
|
+ {
|
|
+ *ptr2 = '\0';
|
|
+ encoding = pg_char_to_encoding(ptr1);
|
|
+ if (encoding < 0)
|
|
+ exit_horribly(modulename, "unrecognized encoding \"%s\"\n",
|
|
+ ptr1);
|
|
+ AH->public.encoding = encoding;
|
|
+ }
|
|
+ else
|
|
+ exit_horribly(modulename, "invalid ENCODING item: %s\n",
|
|
+ te->defn);
|
|
+
|
|
+ free(defn);
|
|
+}
|
|
+
|
|
+static void
|
|
+processStdStringsEntry(ArchiveHandle *AH, TocEntry *te)
|
|
+{
|
|
+ /* te->defn should have the form SET standard_conforming_strings = 'x'; */
|
|
+ char *ptr1;
|
|
+
|
|
+ ptr1 = strchr(te->defn, '\'');
|
|
+ if (ptr1 && strncmp(ptr1, "'on'", 4) == 0)
|
|
+ AH->public.std_strings = true;
|
|
+ else if (ptr1 && strncmp(ptr1, "'off'", 5) == 0)
|
|
+ AH->public.std_strings = false;
|
|
+ else
|
|
+ exit_horribly(modulename, "invalid STDSTRINGS item: %s\n",
|
|
+ te->defn);
|
|
+}
|
|
+
|
|
+static teReqs
|
|
+_tocEntryRequired(TocEntry *te, teSection curSection, RestoreOptions *ropt)
|
|
+{
|
|
+ teReqs res = REQ_SCHEMA | REQ_DATA;
|
|
+
|
|
+ /* ENCODING and STDSTRINGS items are treated specially */
|
|
+ if (strcmp(te->desc, "ENCODING") == 0 ||
|
|
+ strcmp(te->desc, "STDSTRINGS") == 0)
|
|
+ return REQ_SPECIAL;
|
|
+
|
|
+ /* If it's an ACL, maybe ignore it */
|
|
+ if (ropt->aclsSkip && _tocEntryIsACL(te))
|
|
+ return 0;
|
|
+
|
|
+ /* If it's security labels, maybe ignore it */
|
|
+ if (ropt->no_security_labels && strcmp(te->desc, "SECURITY LABEL") == 0)
|
|
+ return 0;
|
|
+
|
|
+ /* Ignore it if section is not to be dumped/restored */
|
|
+ switch (curSection)
|
|
+ {
|
|
+ case SECTION_PRE_DATA:
|
|
+ if (!(ropt->dumpSections & DUMP_PRE_DATA))
|
|
+ return 0;
|
|
+ break;
|
|
+ case SECTION_DATA:
|
|
+ if (!(ropt->dumpSections & DUMP_DATA))
|
|
+ return 0;
|
|
+ break;
|
|
+ case SECTION_POST_DATA:
|
|
+ if (!(ropt->dumpSections & DUMP_POST_DATA))
|
|
+ return 0;
|
|
+ break;
|
|
+ default:
|
|
+ /* shouldn't get here, really, but ignore it */
|
|
+ return 0;
|
|
+ }
|
|
+
|
|
+ /* Check options for selective dump/restore */
|
|
+ if (ropt->schemaNames.head != NULL)
|
|
+ {
|
|
+ /* If no namespace is specified, it means all. */
|
|
+ if (!te->namespace)
|
|
+ return 0;
|
|
+ if (!(simple_string_list_member(&ropt->schemaNames, te->namespace)))
|
|
+ return 0;
|
|
+ }
|
|
+
|
|
+ if (ropt->selTypes)
|
|
+ {
|
|
+ if (strcmp(te->desc, "TABLE") == 0 ||
|
|
+ strcmp(te->desc, "TABLE DATA") == 0)
|
|
+ {
|
|
+ if (!ropt->selTable)
|
|
+ return 0;
|
|
+ if (ropt->tableNames.head != NULL && (!(simple_string_list_member(&ropt->tableNames, te->tag))))
|
|
+ return 0;
|
|
+ }
|
|
+ else if (strcmp(te->desc, "INDEX") == 0)
|
|
+ {
|
|
+ if (!ropt->selIndex)
|
|
+ return 0;
|
|
+ if (ropt->indexNames.head != NULL && (!(simple_string_list_member(&ropt->indexNames, te->tag))))
|
|
+ return 0;
|
|
+ }
|
|
+ else if (strcmp(te->desc, "FUNCTION") == 0)
|
|
+ {
|
|
+ if (!ropt->selFunction)
|
|
+ return 0;
|
|
+ if (ropt->functionNames.head != NULL && (!(simple_string_list_member(&ropt->functionNames, te->tag))))
|
|
+ return 0;
|
|
+ }
|
|
+ else if (strcmp(te->desc, "TRIGGER") == 0)
|
|
+ {
|
|
+ if (!ropt->selTrigger)
|
|
+ return 0;
|
|
+ if (ropt->triggerNames.head != NULL && (!(simple_string_list_member(&ropt->triggerNames, te->tag))))
|
|
+ return 0;
|
|
+ }
|
|
+ else
|
|
+ return 0;
|
|
+ }
|
|
+
|
|
+ /*
|
|
+ * Check if we had a dataDumper. Indicates if the entry is schema or data
|
|
+ */
|
|
+ if (!te->hadDumper)
|
|
+ {
|
|
+ /*
|
|
+ * Special Case: If 'SEQUENCE SET' or anything to do with BLOBs, then
|
|
+ * it is considered a data entry. We don't need to check for the
|
|
+ * BLOBS entry or old-style BLOB COMMENTS, because they will have
|
|
+ * hadDumper = true ... but we do need to check new-style BLOB
|
|
+ * comments.
|
|
+ */
|
|
+ if (strcmp(te->desc, "SEQUENCE SET") == 0 ||
|
|
+ strcmp(te->desc, "BLOB") == 0 ||
|
|
+ (strcmp(te->desc, "ACL") == 0 &&
|
|
+ strncmp(te->tag, "LARGE OBJECT ", 13) == 0) ||
|
|
+ (strcmp(te->desc, "COMMENT") == 0 &&
|
|
+ strncmp(te->tag, "LARGE OBJECT ", 13) == 0) ||
|
|
+ (strcmp(te->desc, "SECURITY LABEL") == 0 &&
|
|
+ strncmp(te->tag, "LARGE OBJECT ", 13) == 0))
|
|
+ res = res & REQ_DATA;
|
|
+ else
|
|
+ res = res & ~REQ_DATA;
|
|
+ }
|
|
+
|
|
+ /*
|
|
+ * Special case: <Init> type with <Max OID> tag; this is obsolete and we
|
|
+ * always ignore it.
|
|
+ */
|
|
+ if ((strcmp(te->desc, "<Init>") == 0) && (strcmp(te->tag, "Max OID") == 0))
|
|
+ return 0;
|
|
+
|
|
+ /* Mask it if we only want schema */
|
|
+ if (ropt->schemaOnly)
|
|
+ res = res & REQ_SCHEMA;
|
|
+
|
|
+ /* Mask it if we only want data */
|
|
+ if (ropt->dataOnly)
|
|
+ res = res & REQ_DATA;
|
|
+
|
|
+ /* Mask it if we don't have a schema contribution */
|
|
+ if (!te->defn || strlen(te->defn) == 0)
|
|
+ res = res & ~REQ_SCHEMA;
|
|
+
|
|
+ /* Finally, if there's a per-ID filter, limit based on that as well */
|
|
+ if (ropt->idWanted && !ropt->idWanted[te->dumpId - 1])
|
|
+ return 0;
|
|
+
|
|
+ return res;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Identify TOC entries that are ACLs.
|
|
+ */
|
|
+static bool
|
|
+_tocEntryIsACL(TocEntry *te)
|
|
+{
|
|
+ /* "ACL LANGUAGE" was a crock emitted only in PG 7.4 */
|
|
+ if (strcmp(te->desc, "ACL") == 0 ||
|
|
+ strcmp(te->desc, "ACL LANGUAGE") == 0 ||
|
|
+ strcmp(te->desc, "DEFAULT ACL") == 0)
|
|
+ return true;
|
|
+ return false;
|
|
+}
|
|
+
|
|
+/*
+ * Issue SET commands for parameters that we want to have set the same way
+ * at all times during execution of a restore script.
+ */
+static void
+_doSetFixedOutputState(ArchiveHandle *AH)
+{
+	/* Disable statement_timeout since restore is probably slow */
+	ahprintf(AH, "SET statement_timeout = 0;\n");
+
+	/* Likewise for lock_timeout */
+	ahprintf(AH, "SET lock_timeout = 0;\n");
+
+	/* Select the correct character set encoding */
+	ahprintf(AH, "SET client_encoding = '%s';\n",
+			 pg_encoding_to_char(AH->public.encoding));
+
+	/* Select the correct string literal syntax */
+	ahprintf(AH, "SET standard_conforming_strings = %s;\n",
+			 AH->public.std_strings ? "on" : "off");
+
+	/* Select the role to be used during restore */
+	if (AH->ropt && AH->ropt->use_role)
+		ahprintf(AH, "SET ROLE %s;\n", fmtId(AH->ropt->use_role));
+
+	/* Make sure function checking is disabled */
+	ahprintf(AH, "SET check_function_bodies = false;\n");
+
+	/* Avoid annoying notices etc */
+	ahprintf(AH, "SET client_min_messages = warning;\n");
+	if (!AH->public.std_strings)
+		ahprintf(AH, "SET escape_string_warning = off;\n");
+
+	ahprintf(AH, "\n");
+}
|
|
+
|
|
+/*
|
|
+ * Issue a SET SESSION AUTHORIZATION command. Caller is responsible
|
|
+ * for updating state if appropriate. If user is NULL or an empty string,
|
|
+ * the specification DEFAULT will be used.
|
|
+ */
|
|
+static void
|
|
+_doSetSessionAuth(ArchiveHandle *AH, const char *user)
|
|
+{
|
|
+ PQExpBuffer cmd = createPQExpBuffer();
|
|
+
|
|
+ appendPQExpBufferStr(cmd, "SET SESSION AUTHORIZATION ");
|
|
+
|
|
+ /*
|
|
+ * SQL requires a string literal here. Might as well be correct.
|
|
+ */
|
|
+ if (user && *user)
|
|
+ appendStringLiteralAHX(cmd, user, AH);
|
|
+ else
|
|
+ appendPQExpBufferStr(cmd, "DEFAULT");
|
|
+ appendPQExpBufferChar(cmd, ';');
|
|
+
|
|
+ if (RestoringToDB(AH))
|
|
+ {
|
|
+ PGresult *res;
|
|
+
|
|
+ res = PQexec(AH->connection, cmd->data);
|
|
+
|
|
+ if (!res || PQresultStatus(res) != PGRES_COMMAND_OK)
|
|
+ /* NOT warn_or_exit_horribly... use -O instead to skip this. */
|
|
+ exit_horribly(modulename, "could not set session user to \"%s\": %s",
|
|
+ user, PQerrorMessage(AH->connection));
|
|
+
|
|
+ PQclear(res);
|
|
+ }
|
|
+ else
|
|
+ ahprintf(AH, "%s\n\n", cmd->data);
|
|
+
|
|
+ destroyPQExpBuffer(cmd);
|
|
+}
|
|
+
|
|
+
|
|
+/*
|
|
+ * Issue a SET default_with_oids command. Caller is responsible
|
|
+ * for updating state if appropriate.
|
|
+ */
|
|
+static void
|
|
+_doSetWithOids(ArchiveHandle *AH, const bool withOids)
|
|
+{
|
|
+ PQExpBuffer cmd = createPQExpBuffer();
|
|
+
|
|
+ appendPQExpBuffer(cmd, "SET default_with_oids = %s;", withOids ?
|
|
+ "true" : "false");
|
|
+
|
|
+ if (RestoringToDB(AH))
|
|
+ {
|
|
+ PGresult *res;
|
|
+
|
|
+ res = PQexec(AH->connection, cmd->data);
|
|
+
|
|
+ if (!res || PQresultStatus(res) != PGRES_COMMAND_OK)
|
|
+ warn_or_exit_horribly(AH, modulename,
|
|
+ "could not set default_with_oids: %s",
|
|
+ PQerrorMessage(AH->connection));
|
|
+
|
|
+ PQclear(res);
|
|
+ }
|
|
+ else
|
|
+ ahprintf(AH, "%s\n\n", cmd->data);
|
|
+
|
|
+ destroyPQExpBuffer(cmd);
|
|
+}
|
|
+
|
|
+
|
|
+/*
|
|
+ * Issue the commands to connect to the specified database.
|
|
+ *
|
|
+ * If we're currently restoring right into a database, this will
|
|
+ * actually establish a connection. Otherwise it puts a \connect into
|
|
+ * the script output.
|
|
+ *
|
|
+ * NULL dbname implies reconnecting to the current DB (pretty useless).
|
|
+ */
|
|
+static void
|
|
+_reconnectToDB(ArchiveHandle *AH, const char *dbname)
|
|
+{
|
|
+ if (RestoringToDB(AH))
|
|
+ ReconnectToServer(AH, dbname, NULL);
|
|
+ else
|
|
+ {
|
|
+ PQExpBuffer qry = createPQExpBuffer();
|
|
+
|
|
+ appendPQExpBuffer(qry, "\\connect %s\n\n",
|
|
+ dbname ? fmtId(dbname) : "-");
|
|
+ ahprintf(AH, "%s", qry->data);
|
|
+ destroyPQExpBuffer(qry);
|
|
+ }
|
|
+
|
|
+ /*
|
|
+ * NOTE: currUser keeps track of what the imaginary session user in our
|
|
+ * script is. It's now effectively reset to the original userID.
|
|
+ */
|
|
+ if (AH->currUser)
|
|
+ free(AH->currUser);
|
|
+ AH->currUser = NULL;
|
|
+
|
|
+ /* don't assume we still know the output schema, tablespace, etc either */
|
|
+ if (AH->currSchema)
|
|
+ free(AH->currSchema);
|
|
+ AH->currSchema = NULL;
|
|
+ if (AH->currTablespace)
|
|
+ free(AH->currTablespace);
|
|
+ AH->currTablespace = NULL;
|
|
+ AH->currWithOids = -1;
|
|
+
|
|
+ /* re-establish fixed state */
|
|
+ _doSetFixedOutputState(AH);
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Become the specified user, and update state to avoid redundant commands
|
|
+ *
|
|
+ * NULL or empty argument is taken to mean restoring the session default
|
|
+ */
|
|
+static void
|
|
+_becomeUser(ArchiveHandle *AH, const char *user)
|
|
+{
|
|
+ if (!user)
|
|
+ user = ""; /* avoid null pointers */
|
|
+
|
|
+ if (AH->currUser && strcmp(AH->currUser, user) == 0)
|
|
+ return; /* no need to do anything */
|
|
+
|
|
+ _doSetSessionAuth(AH, user);
|
|
+
|
|
+ /*
|
|
+ * NOTE: currUser keeps track of what the imaginary session user in our
|
|
+ * script is
|
|
+ */
|
|
+ if (AH->currUser)
|
|
+ free(AH->currUser);
|
|
+ AH->currUser = pg_strdup(user);
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Become the owner of the given TOC entry object. If
|
|
+ * changes in ownership are not allowed, this doesn't do anything.
|
|
+ */
|
|
+static void
|
|
+_becomeOwner(ArchiveHandle *AH, TocEntry *te)
|
|
+{
|
|
+ if (AH->ropt && (AH->ropt->noOwner || !AH->ropt->use_setsessauth))
|
|
+ return;
|
|
+
|
|
+ _becomeUser(AH, te->owner);
|
|
+}
|
|
+
|
|
+
|
|
+/*
|
|
+ * Set the proper default_with_oids value for the table.
|
|
+ */
|
|
+static void
|
|
+_setWithOids(ArchiveHandle *AH, TocEntry *te)
|
|
+{
|
|
+ if (AH->currWithOids != te->withOids)
|
|
+ {
|
|
+ _doSetWithOids(AH, te->withOids);
|
|
+ AH->currWithOids = te->withOids;
|
|
+ }
|
|
+}
|
|
+
|
|
+
|
|
+/*
|
|
+ * Issue the commands to select the specified schema as the current schema
|
|
+ * in the target database.
|
|
+ */
|
|
+static void
|
|
+_selectOutputSchema(ArchiveHandle *AH, const char *schemaName)
|
|
+{
|
|
+ PQExpBuffer qry;
|
|
+
|
|
+ if (!schemaName || *schemaName == '\0' ||
|
|
+ (AH->currSchema && strcmp(AH->currSchema, schemaName) == 0))
|
|
+ return; /* no need to do anything */
|
|
+
|
|
+ qry = createPQExpBuffer();
|
|
+
|
|
+ appendPQExpBuffer(qry, "SET search_path = %s",
|
|
+ fmtId(schemaName));
|
|
+ if (strcmp(schemaName, "pg_catalog") != 0)
|
|
+ appendPQExpBufferStr(qry, ", pg_catalog");
|
|
+
|
|
+ if (RestoringToDB(AH))
|
|
+ {
|
|
+ PGresult *res;
|
|
+
|
|
+ res = PQexec(AH->connection, qry->data);
|
|
+
|
|
+ if (!res || PQresultStatus(res) != PGRES_COMMAND_OK)
|
|
+ warn_or_exit_horribly(AH, modulename,
|
|
+ "could not set search_path to \"%s\": %s",
|
|
+ schemaName, PQerrorMessage(AH->connection));
|
|
+
|
|
+ PQclear(res);
|
|
+ }
|
|
+ else
|
|
+ ahprintf(AH, "%s;\n\n", qry->data);
|
|
+
|
|
+ if (AH->currSchema)
|
|
+ free(AH->currSchema);
|
|
+ AH->currSchema = pg_strdup(schemaName);
|
|
+
|
|
+ destroyPQExpBuffer(qry);
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Issue the commands to select the specified tablespace as the current one
|
|
+ * in the target database.
|
|
+ */
|
|
+static void
|
|
+_selectTablespace(ArchiveHandle *AH, const char *tablespace)
|
|
+{
|
|
+ PQExpBuffer qry;
|
|
+ const char *want,
|
|
+ *have;
|
|
+
|
|
+ /* do nothing in --no-tablespaces mode */
|
|
+ if (AH->ropt->noTablespace)
|
|
+ return;
|
|
+
|
|
+ have = AH->currTablespace;
|
|
+ want = tablespace;
|
|
+
|
|
+ /* no need to do anything for non-tablespace object */
|
|
+ if (!want)
|
|
+ return;
|
|
+
|
|
+ if (have && strcmp(want, have) == 0)
|
|
+ return; /* no need to do anything */
|
|
+
|
|
+ qry = createPQExpBuffer();
|
|
+
|
|
+ if (strcmp(want, "") == 0)
|
|
+ {
|
|
+ /* We want the tablespace to be the database's default */
|
|
+ appendPQExpBufferStr(qry, "SET default_tablespace = ''");
|
|
+ }
|
|
+ else
|
|
+ {
|
|
+ /* We want an explicit tablespace */
|
|
+ appendPQExpBuffer(qry, "SET default_tablespace = %s", fmtId(want));
|
|
+ }
|
|
+
|
|
+ if (RestoringToDB(AH))
|
|
+ {
|
|
+ PGresult *res;
|
|
+
|
|
+ res = PQexec(AH->connection, qry->data);
|
|
+
|
|
+ if (!res || PQresultStatus(res) != PGRES_COMMAND_OK)
|
|
+ warn_or_exit_horribly(AH, modulename,
|
|
+ "could not set default_tablespace to %s: %s",
|
|
+ fmtId(want), PQerrorMessage(AH->connection));
|
|
+
|
|
+ PQclear(res);
|
|
+ }
|
|
+ else
|
|
+ ahprintf(AH, "%s;\n\n", qry->data);
|
|
+
|
|
+ if (AH->currTablespace)
|
|
+ free(AH->currTablespace);
|
|
+ AH->currTablespace = pg_strdup(want);
|
|
+
|
|
+ destroyPQExpBuffer(qry);
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Extract an object description for a TOC entry, and append it to buf.
|
|
+ *
|
|
+ * This is used for ALTER ... OWNER TO.
|
|
+ */
|
|
+static void
|
|
+_getObjectDescription(PQExpBuffer buf, TocEntry *te, ArchiveHandle *AH)
|
|
+{
|
|
+ const char *type = te->desc;
|
|
+
|
|
+ /* Use ALTER TABLE for views and sequences */
|
|
+ if (strcmp(type, "VIEW") == 0 || strcmp(type, "SEQUENCE") == 0 ||
|
|
+ strcmp(type, "MATERIALIZED VIEW") == 0)
|
|
+ type = "TABLE";
|
|
+
|
|
+ /* objects that don't require special decoration */
|
|
+ if (strcmp(type, "COLLATION") == 0 ||
|
|
+ strcmp(type, "CONVERSION") == 0 ||
|
|
+ strcmp(type, "DOMAIN") == 0 ||
|
|
+ strcmp(type, "TABLE") == 0 ||
|
|
+ strcmp(type, "TYPE") == 0 ||
|
|
+ strcmp(type, "FOREIGN TABLE") == 0 ||
|
|
+ strcmp(type, "TEXT SEARCH DICTIONARY") == 0 ||
|
|
+ strcmp(type, "TEXT SEARCH CONFIGURATION") == 0 ||
|
|
+ /* non-schema-specified objects */
|
|
+ strcmp(type, "DATABASE") == 0 ||
|
|
+ strcmp(type, "PROCEDURAL LANGUAGE") == 0 ||
|
|
+ strcmp(type, "SCHEMA") == 0 ||
|
|
+ strcmp(type, "FOREIGN DATA WRAPPER") == 0 ||
|
|
+ strcmp(type, "SERVER") == 0 ||
|
|
+ strcmp(type, "USER MAPPING") == 0)
|
|
+ {
|
|
+ /* We already know that search_path was set properly */
|
|
+ appendPQExpBuffer(buf, "%s %s", type, fmtId(te->tag));
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ /* BLOBs just have a name, but it's numeric so must not use fmtId */
|
|
+ if (strcmp(type, "BLOB") == 0)
|
|
+ {
|
|
+ appendPQExpBuffer(buf, "LARGE OBJECT %s", te->tag);
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ /*
|
|
+ * These object types require additional decoration. Fortunately, the
|
|
+ * information needed is exactly what's in the DROP command.
|
|
+ */
|
|
+ if (strcmp(type, "AGGREGATE") == 0 ||
|
|
+ strcmp(type, "FUNCTION") == 0 ||
|
|
+ strcmp(type, "OPERATOR") == 0 ||
|
|
+ strcmp(type, "OPERATOR CLASS") == 0 ||
|
|
+ strcmp(type, "OPERATOR FAMILY") == 0)
|
|
+ {
|
|
+ /* Chop "DROP " off the front and make a modifiable copy */
|
|
+ char *first = pg_strdup(te->dropStmt + 5);
|
|
+ char *last;
|
|
+
|
|
+ /* point to last character in string */
|
|
+ last = first + strlen(first) - 1;
|
|
+
|
|
+ /* Strip off any ';' or '\n' at the end */
|
|
+ while (last >= first && (*last == '\n' || *last == ';'))
|
|
+ last--;
|
|
+ *(last + 1) = '\0';
|
|
+
|
|
+ appendPQExpBufferStr(buf, first);
|
|
+
|
|
+ free(first);
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ write_msg(modulename, "WARNING: don't know how to set owner for object type %s\n",
|
|
+ type);
|
|
+}
|
|
+
|
|
+static void
|
|
+_printTocEntry(ArchiveHandle *AH, TocEntry *te, RestoreOptions *ropt, bool isData, bool acl_pass)
|
|
+{
|
|
+ /* ACLs are dumped only during acl pass */
|
|
+ if (acl_pass)
|
|
+ {
|
|
+ if (!_tocEntryIsACL(te))
|
|
+ return;
|
|
+ }
|
|
+ else
|
|
+ {
|
|
+ if (_tocEntryIsACL(te))
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ /*
|
|
+ * Avoid dumping the public schema, as it will already be created ...
|
|
+ * unless we are using --clean mode, in which case it's been deleted and
|
|
+ * we'd better recreate it. Likewise for its comment, if any.
|
|
+ */
|
|
+ if (!ropt->dropSchema)
|
|
+ {
|
|
+ if (strcmp(te->desc, "SCHEMA") == 0 &&
|
|
+ strcmp(te->tag, "public") == 0)
|
|
+ return;
|
|
+ /* The comment restore would require super-user privs, so avoid it. */
|
|
+ if (strcmp(te->desc, "COMMENT") == 0 &&
|
|
+ strcmp(te->tag, "SCHEMA public") == 0)
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ /* Select owner, schema, and tablespace as necessary */
|
|
+ _becomeOwner(AH, te);
|
|
+ _selectOutputSchema(AH, te->namespace);
|
|
+ _selectTablespace(AH, te->tablespace);
|
|
+
|
|
+ /* Set up OID mode too */
|
|
+ if (strcmp(te->desc, "TABLE") == 0)
|
|
+ _setWithOids(AH, te);
|
|
+
|
|
+ /* Emit header comment for item */
|
|
+ if (!AH->noTocComments)
|
|
+ {
|
|
+ const char *pfx;
|
|
+ char *sanitized_name;
|
|
+ char *sanitized_schema;
|
|
+ char *sanitized_owner;
|
|
+
|
|
+ if (isData)
|
|
+ pfx = "Data for ";
|
|
+ else
|
|
+ pfx = "";
|
|
+
|
|
+ ahprintf(AH, "--\n");
|
|
+ if (AH->public.verbose)
|
|
+ {
|
|
+ ahprintf(AH, "-- TOC entry %d (class %u OID %u)\n",
|
|
+ te->dumpId, te->catalogId.tableoid, te->catalogId.oid);
|
|
+ if (te->nDeps > 0)
|
|
+ {
|
|
+ int i;
|
|
+
|
|
+ ahprintf(AH, "-- Dependencies:");
|
|
+ for (i = 0; i < te->nDeps; i++)
|
|
+ ahprintf(AH, " %d", te->dependencies[i]);
|
|
+ ahprintf(AH, "\n");
|
|
+ }
|
|
+ }
|
|
+
|
|
+ /*
|
|
+ * Zap any line endings embedded in user-supplied fields, to prevent
|
|
+ * corruption of the dump (which could, in the worst case, present an
|
|
+ * SQL injection vulnerability if someone were to incautiously load a
|
|
+ * dump containing objects with maliciously crafted names).
|
|
+ */
|
|
+ sanitized_name = replace_line_endings(te->tag);
|
|
+ if (te->namespace)
|
|
+ sanitized_schema = replace_line_endings(te->namespace);
|
|
+ else
|
|
+ sanitized_schema = pg_strdup("-");
|
|
+ if (!ropt->noOwner)
|
|
+ sanitized_owner = replace_line_endings(te->owner);
|
|
+ else
|
|
+ sanitized_owner = pg_strdup("-");
|
|
+
|
|
+ ahprintf(AH, "-- %sName: %s; Type: %s; Schema: %s; Owner: %s",
|
|
+ pfx, sanitized_name, te->desc, sanitized_schema,
|
|
+ sanitized_owner);
|
|
+
|
|
+ free(sanitized_name);
|
|
+ free(sanitized_schema);
|
|
+ free(sanitized_owner);
|
|
+
|
|
+ if (te->tablespace && !ropt->noTablespace)
|
|
+ {
|
|
+ char *sanitized_tablespace;
|
|
+
|
|
+ sanitized_tablespace = replace_line_endings(te->tablespace);
|
|
+ ahprintf(AH, "; Tablespace: %s", sanitized_tablespace);
|
|
+ free(sanitized_tablespace);
|
|
+ }
|
|
+ ahprintf(AH, "\n");
|
|
+
|
|
+ if (AH->PrintExtraTocPtr !=NULL)
|
|
+ (*AH->PrintExtraTocPtr) (AH, te);
|
|
+ ahprintf(AH, "--\n\n");
|
|
+ }
|
|
+
|
|
+ /*
|
|
+ * Actually print the definition.
|
|
+ *
|
|
+ * Really crude hack for suppressing AUTHORIZATION clause that old pg_dump
|
|
+ * versions put into CREATE SCHEMA. We have to do this when --no-owner
|
|
+ * mode is selected. This is ugly, but I see no other good way ...
|
|
+ */
|
|
+ if (ropt->noOwner && strcmp(te->desc, "SCHEMA") == 0)
|
|
+ {
|
|
+ ahprintf(AH, "CREATE SCHEMA %s;\n\n\n", fmtId(te->tag));
|
|
+ }
|
|
+ else
|
|
+ {
|
|
+ if (strlen(te->defn) > 0)
|
|
+ ahprintf(AH, "%s\n\n", te->defn);
|
|
+ }
|
|
+
|
|
+ /*
|
|
+ * If we aren't using SET SESSION AUTH to determine ownership, we must
|
|
+ * instead issue an ALTER OWNER command. We assume that anything without
|
|
+ * a DROP command is not a separately ownable object. All the categories
|
|
+ * with DROP commands must appear in one list or the other.
|
|
+ */
|
|
+ if (!ropt->noOwner && !ropt->use_setsessauth &&
|
|
+ strlen(te->owner) > 0 && strlen(te->dropStmt) > 0)
|
|
+ {
|
|
+ if (strcmp(te->desc, "AGGREGATE") == 0 ||
|
|
+ strcmp(te->desc, "BLOB") == 0 ||
|
|
+ strcmp(te->desc, "COLLATION") == 0 ||
|
|
+ strcmp(te->desc, "CONVERSION") == 0 ||
|
|
+ strcmp(te->desc, "DATABASE") == 0 ||
|
|
+ strcmp(te->desc, "DOMAIN") == 0 ||
|
|
+ strcmp(te->desc, "FUNCTION") == 0 ||
|
|
+ strcmp(te->desc, "OPERATOR") == 0 ||
|
|
+ strcmp(te->desc, "OPERATOR CLASS") == 0 ||
|
|
+ strcmp(te->desc, "OPERATOR FAMILY") == 0 ||
|
|
+ strcmp(te->desc, "PROCEDURAL LANGUAGE") == 0 ||
|
|
+ strcmp(te->desc, "SCHEMA") == 0 ||
|
|
+ strcmp(te->desc, "TABLE") == 0 ||
|
|
+ strcmp(te->desc, "TYPE") == 0 ||
|
|
+ strcmp(te->desc, "VIEW") == 0 ||
|
|
+ strcmp(te->desc, "MATERIALIZED VIEW") == 0 ||
|
|
+ strcmp(te->desc, "SEQUENCE") == 0 ||
|
|
+ strcmp(te->desc, "FOREIGN TABLE") == 0 ||
|
|
+ strcmp(te->desc, "TEXT SEARCH DICTIONARY") == 0 ||
|
|
+ strcmp(te->desc, "TEXT SEARCH CONFIGURATION") == 0 ||
|
|
+ strcmp(te->desc, "FOREIGN DATA WRAPPER") == 0 ||
|
|
+ strcmp(te->desc, "SERVER") == 0)
|
|
+ {
|
|
+ PQExpBuffer temp = createPQExpBuffer();
|
|
+
|
|
+ appendPQExpBufferStr(temp, "ALTER ");
|
|
+ _getObjectDescription(temp, te, AH);
|
|
+ appendPQExpBuffer(temp, " OWNER TO %s;", fmtId(te->owner));
|
|
+ ahprintf(AH, "%s\n\n", temp->data);
|
|
+ destroyPQExpBuffer(temp);
|
|
+ }
|
|
+ else if (strcmp(te->desc, "CAST") == 0 ||
|
|
+ strcmp(te->desc, "CHECK CONSTRAINT") == 0 ||
|
|
+ strcmp(te->desc, "CONSTRAINT") == 0 ||
|
|
+ strcmp(te->desc, "DEFAULT") == 0 ||
|
|
+ strcmp(te->desc, "FK CONSTRAINT") == 0 ||
|
|
+ strcmp(te->desc, "INDEX") == 0 ||
|
|
+ strcmp(te->desc, "RULE") == 0 ||
|
|
+ strcmp(te->desc, "TRIGGER") == 0 ||
|
|
+ strcmp(te->desc, "USER MAPPING") == 0)
|
|
+ {
|
|
+ /* these object types don't have separate owners */
|
|
+ }
|
|
+ else
|
|
+ {
|
|
+ write_msg(modulename, "WARNING: don't know how to set owner for object type %s\n",
|
|
+ te->desc);
|
|
+ }
|
|
+ }
|
|
+
|
|
+ /*
|
|
+ * If it's an ACL entry, it might contain SET SESSION AUTHORIZATION
|
|
+ * commands, so we can no longer assume we know the current auth setting.
|
|
+ */
|
|
+ if (acl_pass)
|
|
+ {
|
|
+ if (AH->currUser)
|
|
+ free(AH->currUser);
|
|
+ AH->currUser = NULL;
|
|
+ }
|
|
+}
|
|
+
|
|
+/*
+ * Sanitize a string to be included in an SQL comment, by replacing any
+ * newlines with spaces.
+ */
+static char *
+replace_line_endings(const char *str)
+{
+	char *result;
+	char *s;
+
+	result = pg_strdup(str);
+
+	for (s = result; *s != '\0'; s++)
+	{
+		if (*s == '\n' || *s == '\r')
+			*s = ' ';
+	}
+
+	return result;
+}
|
|
+
|
|
+void
|
|
+WriteHead(ArchiveHandle *AH)
|
|
+{
|
|
+ struct tm crtm;
|
|
+
|
|
+ (*AH->WriteBufPtr) (AH, "PGDMP", 5); /* Magic code */
|
|
+ (*AH->WriteBytePtr) (AH, AH->vmaj);
|
|
+ (*AH->WriteBytePtr) (AH, AH->vmin);
|
|
+ (*AH->WriteBytePtr) (AH, AH->vrev);
|
|
+ (*AH->WriteBytePtr) (AH, AH->intSize);
|
|
+ (*AH->WriteBytePtr) (AH, AH->offSize);
|
|
+ (*AH->WriteBytePtr) (AH, AH->format);
|
|
+
|
|
+#ifndef HAVE_LIBZ
|
|
+ if (AH->compression != 0)
|
|
+ write_msg(modulename, "WARNING: requested compression not available in this "
|
|
+ "installation -- archive will be uncompressed\n");
|
|
+
|
|
+ AH->compression = 0;
|
|
+#endif
|
|
+
|
|
+ WriteInt(AH, AH->compression);
|
|
+
|
|
+ crtm = *localtime(&AH->createDate);
|
|
+ WriteInt(AH, crtm.tm_sec);
|
|
+ WriteInt(AH, crtm.tm_min);
|
|
+ WriteInt(AH, crtm.tm_hour);
|
|
+ WriteInt(AH, crtm.tm_mday);
|
|
+ WriteInt(AH, crtm.tm_mon);
|
|
+ WriteInt(AH, crtm.tm_year);
|
|
+ WriteInt(AH, crtm.tm_isdst);
|
|
+ WriteStr(AH, PQdb(AH->connection));
|
|
+ WriteStr(AH, AH->public.remoteVersionStr);
|
|
+ WriteStr(AH, PG_VERSION);
|
|
+}
|
|
+
|
|
+void
|
|
+ReadHead(ArchiveHandle *AH)
|
|
+{
|
|
+ char tmpMag[7];
|
|
+ int fmt;
|
|
+ struct tm crtm;
|
|
+
|
|
+ /*
|
|
+ * If we haven't already read the header, do so.
|
|
+ *
|
|
+ * NB: this code must agree with _discoverArchiveFormat(). Maybe find a
|
|
+ * way to unify the cases?
|
|
+ */
|
|
+ if (!AH->readHeader)
|
|
+ {
|
|
+ (*AH->ReadBufPtr) (AH, tmpMag, 5);
|
|
+
|
|
+ if (strncmp(tmpMag, "PGDMP", 5) != 0)
|
|
+ exit_horribly(modulename, "did not find magic string in file header\n");
|
|
+
|
|
+ AH->vmaj = (*AH->ReadBytePtr) (AH);
|
|
+ AH->vmin = (*AH->ReadBytePtr) (AH);
|
|
+
|
|
+ if (AH->vmaj > 1 || ((AH->vmaj == 1) && (AH->vmin > 0))) /* Version > 1.0 */
|
|
+ AH->vrev = (*AH->ReadBytePtr) (AH);
|
|
+ else
|
|
+ AH->vrev = 0;
|
|
+
|
|
+ AH->version = ((AH->vmaj * 256 + AH->vmin) * 256 + AH->vrev) * 256 + 0;
|
|
+
|
|
+ if (AH->version < K_VERS_1_0 || AH->version > K_VERS_MAX)
|
|
+ exit_horribly(modulename, "unsupported version (%d.%d) in file header\n",
|
|
+ AH->vmaj, AH->vmin);
|
|
+
|
|
+ AH->intSize = (*AH->ReadBytePtr) (AH);
|
|
+ if (AH->intSize > 32)
|
|
+ exit_horribly(modulename, "sanity check on integer size (%lu) failed\n",
|
|
+ (unsigned long) AH->intSize);
|
|
+
|
|
+ if (AH->intSize > sizeof(int))
|
|
+ write_msg(modulename, "WARNING: archive was made on a machine with larger integers, some operations might fail\n");
|
|
+
|
|
+ if (AH->version >= K_VERS_1_7)
|
|
+ AH->offSize = (*AH->ReadBytePtr) (AH);
|
|
+ else
|
|
+ AH->offSize = AH->intSize;
|
|
+
|
|
+ fmt = (*AH->ReadBytePtr) (AH);
|
|
+
|
|
+ if (AH->format != fmt)
|
|
+ exit_horribly(modulename, "expected format (%d) differs from format found in file (%d)\n",
|
|
+ AH->format, fmt);
|
|
+ }
|
|
+
|
|
+ if (AH->version >= K_VERS_1_2)
|
|
+ {
|
|
+ if (AH->version < K_VERS_1_4)
|
|
+ AH->compression = (*AH->ReadBytePtr) (AH);
|
|
+ else
|
|
+ AH->compression = ReadInt(AH);
|
|
+ }
|
|
+ else
|
|
+ AH->compression = Z_DEFAULT_COMPRESSION;
|
|
+
|
|
+#ifndef HAVE_LIBZ
|
|
+ if (AH->compression != 0)
|
|
+ write_msg(modulename, "WARNING: archive is compressed, but this installation does not support compression -- no data will be available\n");
|
|
+#endif
|
|
+
|
|
+ if (AH->version >= K_VERS_1_4)
|
|
+ {
|
|
+ crtm.tm_sec = ReadInt(AH);
|
|
+ crtm.tm_min = ReadInt(AH);
|
|
+ crtm.tm_hour = ReadInt(AH);
|
|
+ crtm.tm_mday = ReadInt(AH);
|
|
+ crtm.tm_mon = ReadInt(AH);
|
|
+ crtm.tm_year = ReadInt(AH);
|
|
+ crtm.tm_isdst = ReadInt(AH);
|
|
+
|
|
+ AH->archdbname = ReadStr(AH);
|
|
+
|
|
+ AH->createDate = mktime(&crtm);
|
|
+
|
|
+ if (AH->createDate == (time_t) -1)
|
|
+ write_msg(modulename, "WARNING: invalid creation date in header\n");
|
|
+ }
|
|
+
|
|
+ if (AH->version >= K_VERS_1_10)
|
|
+ {
|
|
+ AH->archiveRemoteVersion = ReadStr(AH);
|
|
+ AH->archiveDumpVersion = ReadStr(AH);
|
|
+ }
|
|
+}
|
|
+
|
|
+
|
|
+/*
|
|
+ * checkSeek
|
|
+ * check to see if ftell/fseek can be performed.
|
|
+ */
|
|
+bool
|
|
+checkSeek(FILE *fp)
|
|
+{
|
|
+ pgoff_t tpos;
|
|
+
|
|
+ /*
|
|
+ * If pgoff_t is wider than long, we must have "real" fseeko and not an
|
|
+ * emulation using fseek. Otherwise report no seek capability.
|
|
+ */
|
|
+#ifndef HAVE_FSEEKO
|
|
+ if (sizeof(pgoff_t) > sizeof(long))
|
|
+ return false;
|
|
+#endif
|
|
+
|
|
+ /* Check that ftello works on this file */
|
|
+ tpos = ftello(fp);
|
|
+ if (tpos < 0)
|
|
+ return false;
|
|
+
|
|
+ /*
|
|
+ * Check that fseeko(SEEK_SET) works, too. NB: we used to try to test
|
|
+ * this with fseeko(fp, 0, SEEK_CUR). But some platforms treat that as a
|
|
+ * successful no-op even on files that are otherwise unseekable.
|
|
+ */
|
|
+ if (fseeko(fp, tpos, SEEK_SET) != 0)
|
|
+ return false;
|
|
+
|
|
+ return true;
|
|
+}
|
|
+
|
|
+
|
|
+/*
|
|
+ * dumpTimestamp
|
|
+ */
|
|
+static void
|
|
+dumpTimestamp(ArchiveHandle *AH, const char *msg, time_t tim)
|
|
+{
|
|
+ char buf[256];
|
|
+
|
|
+ /*
|
|
+ * We don't print the timezone on Win32, because the names are long and
|
|
+ * localized, which means they may contain characters in various random
|
|
+ * encodings; this has been seen to cause encoding errors when reading the
|
|
+ * dump script.
|
|
+ */
|
|
+ if (strftime(buf, sizeof(buf),
|
|
+#ifndef WIN32
|
|
+ "%Y-%m-%d %H:%M:%S %Z",
|
|
+#else
|
|
+ "%Y-%m-%d %H:%M:%S",
|
|
+#endif
|
|
+ localtime(&tim)) != 0)
|
|
+ ahprintf(AH, "-- %s %s\n\n", msg, buf);
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Main engine for parallel restore.
|
|
+ *
|
|
+ * Work is done in three phases.
|
|
+ * First we process all SECTION_PRE_DATA tocEntries, in a single connection,
|
|
+ * just as for a standard restore. Second we process the remaining non-ACL
|
|
+ * steps in parallel worker children (threads on Windows, processes on Unix),
|
|
+ * each of which connects separately to the database. Finally we process all
|
|
+ * the ACL entries in a single connection (that happens back in
|
|
+ * RestoreArchive).
|
|
+ */
|
|
+static void
|
|
+restore_toc_entries_prefork(ArchiveHandle *AH)
|
|
+{
|
|
+ RestoreOptions *ropt = AH->ropt;
|
|
+ bool skipped_some;
|
|
+ TocEntry *next_work_item;
|
|
+
|
|
+ ahlog(AH, 2, "entering restore_toc_entries_prefork\n");
|
|
+
|
|
+ /* Adjust dependency information */
|
|
+ fix_dependencies(AH);
|
|
+
|
|
+ /*
|
|
+ * Do all the early stuff in a single connection in the parent. There's no
|
|
+ * great point in running it in parallel, in fact it will actually run
|
|
+ * faster in a single connection because we avoid all the connection and
|
|
+ * setup overhead. Also, pre-9.2 pg_dump versions were not very good
|
|
+ * about showing all the dependencies of SECTION_PRE_DATA items, so we do
|
|
+ * not risk trying to process them out-of-order.
|
|
+ *
|
|
+ * Note: as of 9.2, it should be guaranteed that all PRE_DATA items appear
|
|
+ * before DATA items, and all DATA items before POST_DATA items. That is
|
|
+ * not certain to be true in older archives, though, so this loop is coded
|
|
+ * to not assume it.
|
|
+ */
|
|
+ skipped_some = false;
|
|
+ for (next_work_item = AH->toc->next; next_work_item != AH->toc; next_work_item = next_work_item->next)
|
|
+ {
|
|
+ /* NB: process-or-continue logic must be the inverse of loop below */
|
|
+ if (next_work_item->section != SECTION_PRE_DATA)
|
|
+ {
|
|
+ /* DATA and POST_DATA items are just ignored for now */
|
|
+ if (next_work_item->section == SECTION_DATA ||
|
|
+ next_work_item->section == SECTION_POST_DATA)
|
|
+ {
|
|
+ skipped_some = true;
|
|
+ continue;
|
|
+ }
|
|
+ else
|
|
+ {
|
|
+ /*
|
|
+ * SECTION_NONE items, such as comments, can be processed now
|
|
+ * if we are still in the PRE_DATA part of the archive. Once
|
|
+ * we've skipped any items, we have to consider whether the
|
|
+ * comment's dependencies are satisfied, so skip it for now.
|
|
+ */
|
|
+ if (skipped_some)
|
|
+ continue;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ ahlog(AH, 1, "processing item %d %s %s\n",
|
|
+ next_work_item->dumpId,
|
|
+ next_work_item->desc, next_work_item->tag);
|
|
+
|
|
+ (void) restore_toc_entry(AH, next_work_item, ropt, false);
|
|
+
|
|
+ /* there should be no touch of ready_list here, so pass NULL */
|
|
+ reduce_dependencies(AH, next_work_item, NULL);
|
|
+ }
|
|
+
|
|
+ /*
|
|
+ * Now close parent connection in prep for parallel steps. We do this
|
|
+ * mainly to ensure that we don't exceed the specified number of parallel
|
|
+ * connections.
|
|
+ */
|
|
+ DisconnectDatabase(&AH->public);
|
|
+
|
|
+ /* blow away any transient state from the old connection */
|
|
+ if (AH->currUser)
|
|
+ free(AH->currUser);
|
|
+ AH->currUser = NULL;
|
|
+ if (AH->currSchema)
|
|
+ free(AH->currSchema);
|
|
+ AH->currSchema = NULL;
|
|
+ if (AH->currTablespace)
|
|
+ free(AH->currTablespace);
|
|
+ AH->currTablespace = NULL;
|
|
+ AH->currWithOids = -1;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Main engine for parallel restore.
|
|
+ *
|
|
+ * Work is done in three phases.
|
|
+ * First we process all SECTION_PRE_DATA tocEntries, in a single connection,
|
|
+ * just as for a standard restore. This is done in restore_toc_entries_prefork().
|
|
+ * Second we process the remaining non-ACL steps in parallel worker children
|
|
+ * (threads on Windows, processes on Unix), these fork off and set up their
|
|
+ * connections before we call restore_toc_entries_parallel_forked.
|
|
+ * Finally we process all the ACL entries in a single connection (that happens
|
|
+ * back in RestoreArchive).
|
|
+ */
|
|
+static void
|
|
+restore_toc_entries_parallel(ArchiveHandle *AH, ParallelState *pstate,
|
|
+ TocEntry *pending_list)
|
|
+{
|
|
+ int work_status;
|
|
+ bool skipped_some;
|
|
+ TocEntry ready_list;
|
|
+ TocEntry *next_work_item;
|
|
+ int ret_child;
|
|
+
|
|
+ ahlog(AH, 2, "entering restore_toc_entries_parallel\n");
|
|
+
|
|
+ /*
|
|
+ * Initialize the lists of ready items, the list for pending items has
|
|
+ * already been initialized in the caller. After this setup, the pending
|
|
+ * list is everything that needs to be done but is blocked by one or more
|
|
+ * dependencies, while the ready list contains items that have no
|
|
+ * remaining dependencies. Note: we don't yet filter out entries that
|
|
+ * aren't going to be restored. They might participate in dependency
|
|
+ * chains connecting entries that should be restored, so we treat them as
|
|
+ * live until we actually process them.
|
|
+ */
|
|
+ par_list_header_init(&ready_list);
|
|
+ skipped_some = false;
|
|
+ for (next_work_item = AH->toc->next; next_work_item != AH->toc; next_work_item = next_work_item->next)
|
|
+ {
|
|
+ /* NB: process-or-continue logic must be the inverse of loop above */
|
|
+ if (next_work_item->section == SECTION_PRE_DATA)
|
|
+ {
|
|
+ /* All PRE_DATA items were dealt with above */
|
|
+ continue;
|
|
+ }
|
|
+ if (next_work_item->section == SECTION_DATA ||
|
|
+ next_work_item->section == SECTION_POST_DATA)
|
|
+ {
|
|
+ /* set this flag at same point that previous loop did */
|
|
+ skipped_some = true;
|
|
+ }
|
|
+ else
|
|
+ {
|
|
+ /* SECTION_NONE items must be processed if previous loop didn't */
|
|
+ if (!skipped_some)
|
|
+ continue;
|
|
+ }
|
|
+
|
|
+ if (next_work_item->depCount > 0)
|
|
+ par_list_append(pending_list, next_work_item);
|
|
+ else
|
|
+ par_list_append(&ready_list, next_work_item);
|
|
+ }
|
|
+
|
|
+ /*
|
|
+ * main parent loop
|
|
+ *
|
|
+ * Keep going until there is no worker still running AND there is no work
|
|
+ * left to be done.
|
|
+ */
|
|
+
|
|
+ ahlog(AH, 1, "entering main parallel loop\n");
|
|
+
|
|
+ while ((next_work_item = get_next_work_item(AH, &ready_list, pstate)) != NULL ||
|
|
+ !IsEveryWorkerIdle(pstate))
|
|
+ {
|
|
+ if (next_work_item != NULL)
|
|
+ {
|
|
+ /* If not to be restored, don't waste time launching a worker */
|
|
+ if ((next_work_item->reqs & (REQ_SCHEMA | REQ_DATA)) == 0 ||
|
|
+ _tocEntryIsACL(next_work_item))
|
|
+ {
|
|
+ ahlog(AH, 1, "skipping item %d %s %s\n",
|
|
+ next_work_item->dumpId,
|
|
+ next_work_item->desc, next_work_item->tag);
|
|
+
|
|
+ par_list_remove(next_work_item);
|
|
+ reduce_dependencies(AH, next_work_item, &ready_list);
|
|
+
|
|
+ continue;
|
|
+ }
|
|
+
|
|
+ ahlog(AH, 1, "launching item %d %s %s\n",
|
|
+ next_work_item->dumpId,
|
|
+ next_work_item->desc, next_work_item->tag);
|
|
+
|
|
+ par_list_remove(next_work_item);
|
|
+
|
|
+ Assert(GetIdleWorker(pstate) != NO_SLOT);
|
|
+ DispatchJobForTocEntry(AH, pstate, next_work_item, ACT_RESTORE);
|
|
+ }
|
|
+ else
|
|
+ {
|
|
+ /* at least one child is working and we have nothing ready. */
|
|
+ Assert(!IsEveryWorkerIdle(pstate));
|
|
+ }
|
|
+
|
|
+ for (;;)
|
|
+ {
|
|
+ int nTerm = 0;
|
|
+
|
|
+ /*
|
|
+ * In order to reduce dependencies as soon as possible and
|
|
+ * especially to reap the status of workers who are working on
|
|
+ * items that pending items depend on, we do a non-blocking check
|
|
+ * for ended workers first.
|
|
+ *
|
|
+ * However, if we do not have any other work items currently that
|
|
+ * workers can work on, we do not busy-loop here but instead
|
|
+ * really wait for at least one worker to terminate. Hence we call
|
|
+ * ListenToWorkers(..., ..., do_wait = true) in this case.
|
|
+ */
|
|
+ ListenToWorkers(AH, pstate, !next_work_item);
|
|
+
|
|
+ while ((ret_child = ReapWorkerStatus(pstate, &work_status)) != NO_SLOT)
|
|
+ {
|
|
+ nTerm++;
|
|
+ mark_work_done(AH, &ready_list, ret_child, work_status, pstate);
|
|
+ }
|
|
+
|
|
+ /*
|
|
+ * We need to make sure that we have an idle worker before
|
|
+ * re-running the loop. If nTerm > 0 we already have that (quick
|
|
+ * check).
|
|
+ */
|
|
+ if (nTerm > 0)
|
|
+ break;
|
|
+
|
|
+ /* if nobody terminated, explicitly check for an idle worker */
|
|
+ if (GetIdleWorker(pstate) != NO_SLOT)
|
|
+ break;
|
|
+
|
|
+ /*
|
|
+ * If we have no idle worker, read the result of one or more
|
|
+ * workers and loop the loop to call ReapWorkerStatus() on them.
|
|
+ */
|
|
+ ListenToWorkers(AH, pstate, true);
|
|
+ }
|
|
+ }
|
|
+
|
|
+ ahlog(AH, 1, "finished main parallel loop\n");
|
|
+}
|
|
+
|
|
+static void
|
|
+restore_toc_entries_postfork(ArchiveHandle *AH, TocEntry *pending_list)
|
|
+{
|
|
+ RestoreOptions *ropt = AH->ropt;
|
|
+ TocEntry *te;
|
|
+
|
|
+ ahlog(AH, 2, "entering restore_toc_entries_postfork\n");
|
|
+
|
|
+ /*
|
|
+ * Now reconnect the single parent connection.
|
|
+ */
|
|
+ ConnectDatabase((Archive *) AH, ropt->dbname,
|
|
+ ropt->pghost, ropt->pgport, ropt->username,
|
|
+ ropt->promptPassword);
|
|
+
|
|
+ _doSetFixedOutputState(AH);
|
|
+
|
|
+ /*
|
|
+ * Make sure there is no non-ACL work left due to, say, circular
|
|
+ * dependencies, or some other pathological condition. If so, do it in the
|
|
+ * single parent connection.
|
|
+ */
|
|
+ for (te = pending_list->par_next; te != pending_list; te = te->par_next)
|
|
+ {
|
|
+ ahlog(AH, 1, "processing missed item %d %s %s\n",
|
|
+ te->dumpId, te->desc, te->tag);
|
|
+ (void) restore_toc_entry(AH, te, ropt, false);
|
|
+ }
|
|
+
|
|
+ /* The ACLs will be handled back in RestoreArchive. */
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Check if te1 has an exclusive lock requirement for an item that te2 also
|
|
+ * requires, whether or not te2's requirement is for an exclusive lock.
|
|
+ */
|
|
+static bool
|
|
+has_lock_conflicts(TocEntry *te1, TocEntry *te2)
|
|
+{
|
|
+ int j,
|
|
+ k;
|
|
+
|
|
+ for (j = 0; j < te1->nLockDeps; j++)
|
|
+ {
|
|
+ for (k = 0; k < te2->nDeps; k++)
|
|
+ {
|
|
+ if (te1->lockDeps[j] == te2->dependencies[k])
|
|
+ return true;
|
|
+ }
|
|
+ }
|
|
+ return false;
|
|
+}
|
|
+
|
|
+
|
|
+/*
|
|
+ * Initialize the header of a parallel-processing list.
|
|
+ *
|
|
+ * These are circular lists with a dummy TocEntry as header, just like the
|
|
+ * main TOC list; but we use separate list links so that an entry can be in
|
|
+ * the main TOC list as well as in a parallel-processing list.
|
|
+ */
|
|
+static void
|
|
+par_list_header_init(TocEntry *l)
|
|
+{
|
|
+ l->par_prev = l->par_next = l;
|
|
+}
|
|
+
|
|
+/* Append te to the end of the parallel-processing list headed by l */
|
|
+static void
|
|
+par_list_append(TocEntry *l, TocEntry *te)
|
|
+{
|
|
+ te->par_prev = l->par_prev;
|
|
+ l->par_prev->par_next = te;
|
|
+ l->par_prev = te;
|
|
+ te->par_next = l;
|
|
+}
|
|
+
|
|
+/* Remove te from whatever parallel-processing list it's in */
|
|
+static void
|
|
+par_list_remove(TocEntry *te)
|
|
+{
|
|
+ te->par_prev->par_next = te->par_next;
|
|
+ te->par_next->par_prev = te->par_prev;
|
|
+ te->par_prev = NULL;
|
|
+ te->par_next = NULL;
|
|
+}
|
|
+
|
|
+
|
|
+/*
|
|
+ * Find the next work item (if any) that is capable of being run now.
|
|
+ *
|
|
+ * To qualify, the item must have no remaining dependencies
|
|
+ * and no requirements for locks that are incompatible with
|
|
+ * items currently running. Items in the ready_list are known to have
|
|
+ * no remaining dependencies, but we have to check for lock conflicts.
|
|
+ *
|
|
+ * Note that the returned item has *not* been removed from ready_list.
|
|
+ * The caller must do that after successfully dispatching the item.
|
|
+ *
|
|
+ * pref_non_data is for an alternative selection algorithm that gives
|
|
+ * preference to non-data items if there is already a data load running.
|
|
+ * It is currently disabled.
|
|
+ */
|
|
+static TocEntry *
|
|
+get_next_work_item(ArchiveHandle *AH, TocEntry *ready_list,
|
|
+ ParallelState *pstate)
|
|
+{
|
|
+ bool pref_non_data = false; /* or get from AH->ropt */
|
|
+ TocEntry *data_te = NULL;
|
|
+ TocEntry *te;
|
|
+ int i,
|
|
+ k;
|
|
+
|
|
+ /*
|
|
+ * Bogus heuristics for pref_non_data
|
|
+ */
|
|
+ if (pref_non_data)
|
|
+ {
|
|
+ int count = 0;
|
|
+
|
|
+ for (k = 0; k < pstate->numWorkers; k++)
|
|
+ if (pstate->parallelSlot[k].args->te != NULL &&
|
|
+ pstate->parallelSlot[k].args->te->section == SECTION_DATA)
|
|
+ count++;
|
|
+ if (pstate->numWorkers == 0 || count * 4 < pstate->numWorkers)
|
|
+ pref_non_data = false;
|
|
+ }
|
|
+
|
|
+ /*
|
|
+ * Search the ready_list until we find a suitable item.
|
|
+ */
|
|
+ for (te = ready_list->par_next; te != ready_list; te = te->par_next)
|
|
+ {
|
|
+ bool conflicts = false;
|
|
+
|
|
+ /*
|
|
+ * Check to see if the item would need exclusive lock on something
|
|
+ * that a currently running item also needs lock on, or vice versa. If
|
|
+ * so, we don't want to schedule them together.
|
|
+ */
|
|
+ for (i = 0; i < pstate->numWorkers && !conflicts; i++)
|
|
+ {
|
|
+ TocEntry *running_te;
|
|
+
|
|
+ if (pstate->parallelSlot[i].workerStatus != WRKR_WORKING)
|
|
+ continue;
|
|
+ running_te = pstate->parallelSlot[i].args->te;
|
|
+
|
|
+ if (has_lock_conflicts(te, running_te) ||
|
|
+ has_lock_conflicts(running_te, te))
|
|
+ {
|
|
+ conflicts = true;
|
|
+ break;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ if (conflicts)
|
|
+ continue;
|
|
+
|
|
+ if (pref_non_data && te->section == SECTION_DATA)
|
|
+ {
|
|
+ if (data_te == NULL)
|
|
+ data_te = te;
|
|
+ continue;
|
|
+ }
|
|
+
|
|
+ /* passed all tests, so this item can run */
|
|
+ return te;
|
|
+ }
|
|
+
|
|
+ if (data_te != NULL)
|
|
+ return data_te;
|
|
+
|
|
+ ahlog(AH, 2, "no item ready\n");
|
|
+ return NULL;
|
|
+}
|
|
+
|
|
+
|
|
+/*
|
|
+ * Restore a single TOC item in parallel with others
|
|
+ *
|
|
+ * this is run in the worker, i.e. in a thread (Windows) or a separate process
|
|
+ * (everything else). A worker process executes several such work items during
|
|
+ * a parallel backup or restore. Once we terminate here and report back that
|
|
+ * our work is finished, the master process will assign us a new work item.
|
|
+ */
|
|
+int
|
|
+parallel_restore(ParallelArgs *args)
|
|
+{
|
|
+ ArchiveHandle *AH = args->AH;
|
|
+ TocEntry *te = args->te;
|
|
+ RestoreOptions *ropt = AH->ropt;
|
|
+ int status;
|
|
+
|
|
+ _doSetFixedOutputState(AH);
|
|
+
|
|
+ Assert(AH->connection != NULL);
|
|
+
|
|
+ AH->public.n_errors = 0;
|
|
+
|
|
+ /* Restore the TOC item */
|
|
+ status = restore_toc_entry(AH, te, ropt, true);
|
|
+
|
|
+ return status;
|
|
+}
|
|
+
|
|
+
|
|
+/*
|
|
+ * Housekeeping to be done after a step has been parallel restored.
|
|
+ *
|
|
+ * Clear the appropriate slot, free all the extra memory we allocated,
|
|
+ * update status, and reduce the dependency count of any dependent items.
|
|
+ */
|
|
+static void
|
|
+mark_work_done(ArchiveHandle *AH, TocEntry *ready_list,
|
|
+ int worker, int status,
|
|
+ ParallelState *pstate)
|
|
+{
|
|
+ TocEntry *te = NULL;
|
|
+
|
|
+ te = pstate->parallelSlot[worker].args->te;
|
|
+
|
|
+ if (te == NULL)
|
|
+ exit_horribly(modulename, "could not find slot of finished worker\n");
|
|
+
|
|
+ ahlog(AH, 1, "finished item %d %s %s\n",
|
|
+ te->dumpId, te->desc, te->tag);
|
|
+
|
|
+ if (status == WORKER_CREATE_DONE)
|
|
+ mark_create_done(AH, te);
|
|
+ else if (status == WORKER_INHIBIT_DATA)
|
|
+ {
|
|
+ inhibit_data_for_failed_table(AH, te);
|
|
+ AH->public.n_errors++;
|
|
+ }
|
|
+ else if (status == WORKER_IGNORED_ERRORS)
|
|
+ AH->public.n_errors++;
|
|
+ else if (status != 0)
|
|
+ exit_horribly(modulename, "worker process failed: exit code %d\n",
|
|
+ status);
|
|
+
|
|
+ reduce_dependencies(AH, te, ready_list);
|
|
+}
|
|
+
|
|
+
|
|
+/*
|
|
+ * Process the dependency information into a form useful for parallel restore.
|
|
+ *
|
|
+ * This function takes care of fixing up some missing or badly designed
|
|
+ * dependencies, and then prepares subsidiary data structures that will be
|
|
+ * used in the main parallel-restore logic, including:
|
|
+ * 1. We build the revDeps[] arrays of incoming dependency dumpIds.
|
|
+ * 2. We set up depCount fields that are the number of as-yet-unprocessed
|
|
+ * dependencies for each TOC entry.
|
|
+ *
|
|
+ * We also identify locking dependencies so that we can avoid trying to
|
|
+ * schedule conflicting items at the same time.
|
|
+ */
|
|
+static void
|
|
+fix_dependencies(ArchiveHandle *AH)
|
|
+{
|
|
+ TocEntry *te;
|
|
+ int i;
|
|
+
|
|
+ /*
|
|
+ * Initialize the depCount/revDeps/nRevDeps fields, and make sure the TOC
|
|
+ * items are marked as not being in any parallel-processing list.
|
|
+ */
|
|
+ for (te = AH->toc->next; te != AH->toc; te = te->next)
|
|
+ {
|
|
+ te->depCount = te->nDeps;
|
|
+ te->revDeps = NULL;
|
|
+ te->nRevDeps = 0;
|
|
+ te->par_prev = NULL;
|
|
+ te->par_next = NULL;
|
|
+ }
|
|
+
|
|
+ /*
|
|
+ * POST_DATA items that are shown as depending on a table need to be
|
|
+ * re-pointed to depend on that table's data, instead. This ensures they
|
|
+ * won't get scheduled until the data has been loaded.
|
|
+ */
|
|
+ repoint_table_dependencies(AH);
|
|
+
|
|
+ /*
|
|
+ * Pre-8.4 versions of pg_dump neglected to set up a dependency from BLOB
|
|
+ * COMMENTS to BLOBS. Cope. (We assume there's only one BLOBS and only
|
|
+ * one BLOB COMMENTS in such files.)
|
|
+ */
|
|
+ if (AH->version < K_VERS_1_11)
|
|
+ {
|
|
+ for (te = AH->toc->next; te != AH->toc; te = te->next)
|
|
+ {
|
|
+ if (strcmp(te->desc, "BLOB COMMENTS") == 0 && te->nDeps == 0)
|
|
+ {
|
|
+ TocEntry *te2;
|
|
+
|
|
+ for (te2 = AH->toc->next; te2 != AH->toc; te2 = te2->next)
|
|
+ {
|
|
+ if (strcmp(te2->desc, "BLOBS") == 0)
|
|
+ {
|
|
+ te->dependencies = (DumpId *) pg_malloc(sizeof(DumpId));
|
|
+ te->dependencies[0] = te2->dumpId;
|
|
+ te->nDeps++;
|
|
+ te->depCount++;
|
|
+ break;
|
|
+ }
|
|
+ }
|
|
+ break;
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+
|
|
+ /*
|
|
+ * At this point we start to build the revDeps reverse-dependency arrays,
|
|
+ * so all changes of dependencies must be complete.
|
|
+ */
|
|
+
|
|
+ /*
|
|
+ * Count the incoming dependencies for each item. Also, it is possible
|
|
+ * that the dependencies list items that are not in the archive at all
|
|
+ * (that should not happen in 9.2 and later, but is highly likely in older
|
|
+ * archives). Subtract such items from the depCounts.
|
|
+ */
|
|
+ for (te = AH->toc->next; te != AH->toc; te = te->next)
|
|
+ {
|
|
+ for (i = 0; i < te->nDeps; i++)
|
|
+ {
|
|
+ DumpId depid = te->dependencies[i];
|
|
+
|
|
+ if (depid <= AH->maxDumpId && AH->tocsByDumpId[depid] != NULL)
|
|
+ AH->tocsByDumpId[depid]->nRevDeps++;
|
|
+ else
|
|
+ te->depCount--;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ /*
|
|
+ * Allocate space for revDeps[] arrays, and reset nRevDeps so we can use
|
|
+ * it as a counter below.
|
|
+ */
|
|
+ for (te = AH->toc->next; te != AH->toc; te = te->next)
|
|
+ {
|
|
+ if (te->nRevDeps > 0)
|
|
+ te->revDeps = (DumpId *) pg_malloc(te->nRevDeps * sizeof(DumpId));
|
|
+ te->nRevDeps = 0;
|
|
+ }
|
|
+
|
|
+ /*
|
|
+ * Build the revDeps[] arrays of incoming-dependency dumpIds. This had
|
|
+ * better agree with the loops above.
|
|
+ */
|
|
+ for (te = AH->toc->next; te != AH->toc; te = te->next)
|
|
+ {
|
|
+ for (i = 0; i < te->nDeps; i++)
|
|
+ {
|
|
+ DumpId depid = te->dependencies[i];
|
|
+
|
|
+ if (depid <= AH->maxDumpId && AH->tocsByDumpId[depid] != NULL)
|
|
+ {
|
|
+ TocEntry *otherte = AH->tocsByDumpId[depid];
|
|
+
|
|
+ otherte->revDeps[otherte->nRevDeps++] = te->dumpId;
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+
|
|
+ /*
|
|
+ * Lastly, work out the locking dependencies.
|
|
+ */
|
|
+ for (te = AH->toc->next; te != AH->toc; te = te->next)
|
|
+ {
|
|
+ te->lockDeps = NULL;
|
|
+ te->nLockDeps = 0;
|
|
+ identify_locking_dependencies(AH, te);
|
|
+ }
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Change dependencies on table items to depend on table data items instead,
|
|
+ * but only in POST_DATA items.
|
|
+ */
|
|
+static void
|
|
+repoint_table_dependencies(ArchiveHandle *AH)
|
|
+{
|
|
+ TocEntry *te;
|
|
+ int i;
|
|
+ DumpId olddep;
|
|
+
|
|
+ for (te = AH->toc->next; te != AH->toc; te = te->next)
|
|
+ {
|
|
+ if (te->section != SECTION_POST_DATA)
|
|
+ continue;
|
|
+ for (i = 0; i < te->nDeps; i++)
|
|
+ {
|
|
+ olddep = te->dependencies[i];
|
|
+ if (olddep <= AH->maxDumpId &&
|
|
+ AH->tableDataId[olddep] != 0)
|
|
+ {
|
|
+ te->dependencies[i] = AH->tableDataId[olddep];
|
|
+ ahlog(AH, 2, "transferring dependency %d -> %d to %d\n",
|
|
+ te->dumpId, olddep, AH->tableDataId[olddep]);
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Identify which objects we'll need exclusive lock on in order to restore
|
|
+ * the given TOC entry (*other* than the one identified by the TOC entry
|
|
+ * itself). Record their dump IDs in the entry's lockDeps[] array.
|
|
+ */
|
|
+static void
|
|
+identify_locking_dependencies(ArchiveHandle *AH, TocEntry *te)
|
|
+{
|
|
+ DumpId *lockids;
|
|
+ int nlockids;
|
|
+ int i;
|
|
+
|
|
+ /* Quick exit if no dependencies at all */
|
|
+ if (te->nDeps == 0)
|
|
+ return;
|
|
+
|
|
+ /* Exit if this entry doesn't need exclusive lock on other objects */
|
|
+ if (!(strcmp(te->desc, "CONSTRAINT") == 0 ||
|
|
+ strcmp(te->desc, "CHECK CONSTRAINT") == 0 ||
|
|
+ strcmp(te->desc, "FK CONSTRAINT") == 0 ||
|
|
+ strcmp(te->desc, "RULE") == 0 ||
|
|
+ strcmp(te->desc, "TRIGGER") == 0))
|
|
+ return;
|
|
+
|
|
+ /*
|
|
+ * We assume the entry requires exclusive lock on each TABLE or TABLE DATA
|
|
+ * item listed among its dependencies. Originally all of these would have
|
|
+ * been TABLE items, but repoint_table_dependencies would have repointed
|
|
+ * them to the TABLE DATA items if those are present (which they might not
|
|
+ * be, eg in a schema-only dump). Note that all of the entries we are
|
|
+ * processing here are POST_DATA; otherwise there might be a significant
|
|
+ * difference between a dependency on a table and a dependency on its
|
|
+ * data, so that closer analysis would be needed here.
|
|
+ */
|
|
+ lockids = (DumpId *) pg_malloc(te->nDeps * sizeof(DumpId));
|
|
+ nlockids = 0;
|
|
+ for (i = 0; i < te->nDeps; i++)
|
|
+ {
|
|
+ DumpId depid = te->dependencies[i];
|
|
+
|
|
+ if (depid <= AH->maxDumpId && AH->tocsByDumpId[depid] != NULL &&
|
|
+ ((strcmp(AH->tocsByDumpId[depid]->desc, "TABLE DATA") == 0) ||
|
|
+ strcmp(AH->tocsByDumpId[depid]->desc, "TABLE") == 0))
|
|
+ lockids[nlockids++] = depid;
|
|
+ }
|
|
+
|
|
+ if (nlockids == 0)
|
|
+ {
|
|
+ free(lockids);
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ te->lockDeps = pg_realloc(lockids, nlockids * sizeof(DumpId));
|
|
+ te->nLockDeps = nlockids;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Remove the specified TOC entry from the depCounts of items that depend on
|
|
+ * it, thereby possibly making them ready-to-run. Any pending item that
|
|
+ * becomes ready should be moved to the ready list.
|
|
+ */
|
|
+static void
|
|
+reduce_dependencies(ArchiveHandle *AH, TocEntry *te, TocEntry *ready_list)
|
|
+{
|
|
+ int i;
|
|
+
|
|
+ ahlog(AH, 2, "reducing dependencies for %d\n", te->dumpId);
|
|
+
|
|
+ for (i = 0; i < te->nRevDeps; i++)
|
|
+ {
|
|
+ TocEntry *otherte = AH->tocsByDumpId[te->revDeps[i]];
|
|
+
|
|
+ otherte->depCount--;
|
|
+ if (otherte->depCount == 0 && otherte->par_prev != NULL)
|
|
+ {
|
|
+ /* It must be in the pending list, so remove it ... */
|
|
+ par_list_remove(otherte);
|
|
+ /* ... and add to ready_list */
|
|
+ par_list_append(ready_list, otherte);
|
|
+ }
|
|
+ }
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Set the created flag on the DATA member corresponding to the given
|
|
+ * TABLE member
|
|
+ */
|
|
+static void
|
|
+mark_create_done(ArchiveHandle *AH, TocEntry *te)
|
|
+{
|
|
+ if (AH->tableDataId[te->dumpId] != 0)
|
|
+ {
|
|
+ TocEntry *ted = AH->tocsByDumpId[AH->tableDataId[te->dumpId]];
|
|
+
|
|
+ ted->created = true;
|
|
+ }
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Mark the DATA member corresponding to the given TABLE member
|
|
+ * as not wanted
|
|
+ */
|
|
+static void
|
|
+inhibit_data_for_failed_table(ArchiveHandle *AH, TocEntry *te)
|
|
+{
|
|
+ ahlog(AH, 1, "table \"%s\" could not be created, will not restore its data\n",
|
|
+ te->tag);
|
|
+
|
|
+ if (AH->tableDataId[te->dumpId] != 0)
|
|
+ {
|
|
+ TocEntry *ted = AH->tocsByDumpId[AH->tableDataId[te->dumpId]];
|
|
+
|
|
+ ted->reqs = 0;
|
|
+ }
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Clone and de-clone routines used in parallel restoration.
|
|
+ *
|
|
+ * Enough of the structure is cloned to ensure that there is no
|
|
+ * conflict between different threads each with their own clone.
|
|
+ */
|
|
+ArchiveHandle *
|
|
+CloneArchive(ArchiveHandle *AH)
|
|
+{
|
|
+ ArchiveHandle *clone;
|
|
+
|
|
+ /* Make a "flat" copy */
|
|
+ clone = (ArchiveHandle *) pg_malloc(sizeof(ArchiveHandle));
|
|
+ memcpy(clone, AH, sizeof(ArchiveHandle));
|
|
+
|
|
+ /* Handle format-independent fields */
|
|
+ memset(&(clone->sqlparse), 0, sizeof(clone->sqlparse));
|
|
+
|
|
+ /* The clone will have its own connection, so disregard connection state */
|
|
+ clone->connection = NULL;
|
|
+ clone->currUser = NULL;
|
|
+ clone->currSchema = NULL;
|
|
+ clone->currTablespace = NULL;
|
|
+ clone->currWithOids = -1;
|
|
+
|
|
+ /* savedPassword must be local in case we change it while connecting */
|
|
+ if (clone->savedPassword)
|
|
+ clone->savedPassword = pg_strdup(clone->savedPassword);
|
|
+
|
|
+ /* clone has its own error count, too */
|
|
+ clone->public.n_errors = 0;
|
|
+
|
|
+ /*
|
|
+ * Connect our new clone object to the database: In parallel restore the
|
|
+ * parent is already disconnected, because we can connect the worker
|
|
+ * processes independently to the database (no snapshot sync required). In
|
|
+ * parallel backup we clone the parent's existing connection.
|
|
+ */
|
|
+ if (AH->mode == archModeRead)
|
|
+ {
|
|
+ RestoreOptions *ropt = AH->ropt;
|
|
+
|
|
+ Assert(AH->connection == NULL);
|
|
+ /* this also sets clone->connection */
|
|
+ ConnectDatabase((Archive *) clone, ropt->dbname,
|
|
+ ropt->pghost, ropt->pgport, ropt->username,
|
|
+ ropt->promptPassword);
|
|
+ }
|
|
+ else
|
|
+ {
|
|
+ char *dbname;
|
|
+ char *pghost;
|
|
+ char *pgport;
|
|
+ char *username;
|
|
+ const char *encname;
|
|
+
|
|
+ Assert(AH->connection != NULL);
|
|
+
|
|
+ /*
|
|
+ * Even though we are technically accessing the parent's database
|
|
+ * object here, these functions are fine to be called like that
|
|
+ * because all just return a pointer and do not actually send/receive
|
|
+ * any data to/from the database.
|
|
+ */
|
|
+ dbname = PQdb(AH->connection);
|
|
+ pghost = PQhost(AH->connection);
|
|
+ pgport = PQport(AH->connection);
|
|
+ username = PQuser(AH->connection);
|
|
+ encname = pg_encoding_to_char(AH->public.encoding);
|
|
+
|
|
+ /* this also sets clone->connection */
|
|
+ ConnectDatabase((Archive *) clone, dbname, pghost, pgport, username, TRI_NO);
|
|
+
|
|
+ /*
|
|
+ * Set the same encoding, whatever we set here is what we got from
|
|
+ * pg_encoding_to_char(), so we really shouldn't run into an error
|
|
+ * setting that very same value. Also see the comment in
|
|
+ * SetupConnection().
|
|
+ */
|
|
+ PQsetClientEncoding(clone->connection, encname);
|
|
+ }
|
|
+
|
|
+ /* Let the format-specific code have a chance too */
|
|
+ (clone->ClonePtr) (clone);
|
|
+
|
|
+ Assert(clone->connection != NULL);
|
|
+ return clone;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Release clone-local storage.
|
|
+ *
|
|
+ * Note: we assume any clone-local connection was already closed.
|
|
+ */
|
|
+void
|
|
+DeCloneArchive(ArchiveHandle *AH)
|
|
+{
|
|
+ /* Clear format-specific state */
|
|
+ (AH->DeClonePtr) (AH);
|
|
+
|
|
+ /* Clear state allocated by CloneArchive */
|
|
+ if (AH->sqlparse.curCmd)
|
|
+ destroyPQExpBuffer(AH->sqlparse.curCmd);
|
|
+
|
|
+ /* Clear any connection-local state */
|
|
+ if (AH->currUser)
|
|
+ free(AH->currUser);
|
|
+ if (AH->currSchema)
|
|
+ free(AH->currSchema);
|
|
+ if (AH->currTablespace)
|
|
+ free(AH->currTablespace);
|
|
+ if (AH->savedPassword)
|
|
+ free(AH->savedPassword);
|
|
+
|
|
+ free(AH);
|
|
+}
--- /dev/null
+++ pglogical-2.2.2/pglogical_dump/pg_backup_archiver.h
@@ -0,0 +1,441 @@
+/*-------------------------------------------------------------------------
+ *
+ * pg_backup_archiver.h
+ *
+ * Private interface to the pg_dump archiver routines.
+ * It is NOT intended that these routines be called by any
+ * dumper directly.
+ *
+ * See the headers to pg_restore for more details.
+ *
+ * Copyright (c) 2000, Philip Warner
+ * Rights are granted to use this software in any way so long
+ * as this notice is not removed.
+ *
+ * The author is not responsible for loss or damages that may
+ * result from it's use.
+ *
+ *
+ * IDENTIFICATION
+ * src/bin/pg_dump/pg_backup_archiver.h
+ *
+ *-------------------------------------------------------------------------
+ */
|
|
+
|
|
+#ifndef __PG_BACKUP_ARCHIVE__
|
|
+#define __PG_BACKUP_ARCHIVE__
|
|
+
|
|
+#include "compat.h"
|
|
+
|
|
+#include "postgres_fe.h"
|
|
+
|
|
+#include <time.h>
|
|
+
|
|
+#include "pg_backup.h"
|
|
+
|
|
+#include "libpq-fe.h"
|
|
+#include "pqexpbuffer.h"
|
|
+
|
|
+#define LOBBUFSIZE 16384
|
|
+
|
|
+/*
|
|
+ * Note: zlib.h must be included *after* libpq-fe.h, because the latter may
|
|
+ * include ssl.h, which has a naming conflict with zlib.h.
|
|
+ */
|
|
+#ifdef HAVE_LIBZ
|
|
+#include <zlib.h>
|
|
+#define GZCLOSE(fh) gzclose(fh)
|
|
+#define GZWRITE(p, s, n, fh) gzwrite(fh, p, (n) * (s))
|
|
+#define GZREAD(p, s, n, fh) gzread(fh, p, (n) * (s))
|
|
+#define GZEOF(fh) gzeof(fh)
|
|
+#else
|
|
+#define GZCLOSE(fh) fclose(fh)
|
|
+#define GZWRITE(p, s, n, fh) (fwrite(p, s, n, fh) * (s))
|
|
+#define GZREAD(p, s, n, fh) fread(p, s, n, fh)
|
|
+#define GZEOF(fh) feof(fh)
|
|
+/* this is just the redefinition of a libz constant */
|
|
+#define Z_DEFAULT_COMPRESSION (-1)
|
|
+
|
|
+typedef struct _z_stream
|
|
+{
|
|
+ void *next_in;
|
|
+ void *next_out;
|
|
+ size_t avail_in;
|
|
+ size_t avail_out;
|
|
+} z_stream;
|
|
+typedef z_stream *z_streamp;
|
|
+#endif
|
|
+
|
|
+/* Current archive version number (the format we can output) */
|
|
+#define K_VERS_MAJOR 1
|
|
+#define K_VERS_MINOR 12
|
|
+#define K_VERS_REV 0
|
|
+
|
|
+/* Data block types */
|
|
+#define BLK_DATA 1
|
|
+#define BLK_BLOBS 3
|
|
+
|
|
+/* Historical version numbers (checked in code) */
|
|
+#define K_VERS_1_0 (( (1 * 256 + 0) * 256 + 0) * 256 + 0)
|
|
+#define K_VERS_1_2 (( (1 * 256 + 2) * 256 + 0) * 256 + 0) /* Allow No ZLIB */
|
|
+#define K_VERS_1_3 (( (1 * 256 + 3) * 256 + 0) * 256 + 0) /* BLOBs */
|
|
+#define K_VERS_1_4 (( (1 * 256 + 4) * 256 + 0) * 256 + 0) /* Date & name in header */
|
|
+#define K_VERS_1_5 (( (1 * 256 + 5) * 256 + 0) * 256 + 0) /* Handle dependencies */
|
|
+#define K_VERS_1_6 (( (1 * 256 + 6) * 256 + 0) * 256 + 0) /* Schema field in TOCs */
|
|
+#define K_VERS_1_7 (( (1 * 256 + 7) * 256 + 0) * 256 + 0) /* File Offset size in
|
|
+ * header */
|
|
+#define K_VERS_1_8 (( (1 * 256 + 8) * 256 + 0) * 256 + 0) /* change interpretation
|
|
+ * of ID numbers and
|
|
+ * dependencies */
|
|
+#define K_VERS_1_9 (( (1 * 256 + 9) * 256 + 0) * 256 + 0) /* add default_with_oids
|
|
+ * tracking */
|
|
+#define K_VERS_1_10 (( (1 * 256 + 10) * 256 + 0) * 256 + 0) /* add tablespace */
|
|
+#define K_VERS_1_11 (( (1 * 256 + 11) * 256 + 0) * 256 + 0) /* add toc section
|
|
+ * indicator */
|
|
+#define K_VERS_1_12 (( (1 * 256 + 12) * 256 + 0) * 256 + 0) /* add separate BLOB
|
|
+ * entries */
|
|
+
|
|
+/* Newest format we can read */
|
|
+#define K_VERS_MAX (( (1 * 256 + 12) * 256 + 255) * 256 + 0)
|
|
+
|
|
+
|
|
+/* Flags to indicate disposition of offsets stored in files */
|
|
+#define K_OFFSET_POS_NOT_SET 1
|
|
+#define K_OFFSET_POS_SET 2
|
|
+#define K_OFFSET_NO_DATA 3
|
|
+
|
|
+/*
|
|
+ * Special exit values from worker children. We reserve 0 for normal
|
|
+ * success; 1 and other small values should be interpreted as crashes.
|
|
+ */
|
|
+#define WORKER_OK 0
|
|
+#define WORKER_CREATE_DONE 10
|
|
+#define WORKER_INHIBIT_DATA 11
|
|
+#define WORKER_IGNORED_ERRORS 12
|
|
+
|
|
+struct _archiveHandle;
|
|
+struct _tocEntry;
|
|
+struct _restoreList;
|
|
+struct ParallelArgs;
|
|
+struct ParallelState;
|
|
+
|
|
+#define READ_ERROR_EXIT(fd) \
|
|
+ do { \
|
|
+ if (feof(fd)) \
|
|
+ exit_horribly(modulename, \
|
|
+ "could not read from input file: end of file\n"); \
|
|
+ else \
|
|
+ exit_horribly(modulename, \
|
|
+ "could not read from input file: %s\n", strerror(errno)); \
|
|
+ } while (0)
|
|
+
|
|
+#define WRITE_ERROR_EXIT \
|
|
+ do { \
|
|
+ exit_horribly(modulename, "could not write to output file: %s\n", \
|
|
+ strerror(errno)); \
|
|
+ } while (0)
|
|
+
|
|
+typedef enum T_Action
|
|
+{
|
|
+ ACT_DUMP,
|
|
+ ACT_RESTORE
|
|
+} T_Action;
|
|
+
|
|
+typedef void (*ClosePtr) (struct _archiveHandle * AH);
|
|
+typedef void (*ReopenPtr) (struct _archiveHandle * AH);
|
|
+typedef void (*ArchiveEntryPtr) (struct _archiveHandle * AH, struct _tocEntry * te);
|
|
+
|
|
+typedef void (*StartDataPtr) (struct _archiveHandle * AH, struct _tocEntry * te);
|
|
+typedef void (*WriteDataPtr) (struct _archiveHandle * AH, const void *data, size_t dLen);
|
|
+typedef void (*EndDataPtr) (struct _archiveHandle * AH, struct _tocEntry * te);
|
|
+
|
|
+typedef void (*StartBlobsPtr) (struct _archiveHandle * AH, struct _tocEntry * te);
|
|
+typedef void (*StartBlobPtr) (struct _archiveHandle * AH, struct _tocEntry * te, Oid oid);
|
|
+typedef void (*EndBlobPtr) (struct _archiveHandle * AH, struct _tocEntry * te, Oid oid);
|
|
+typedef void (*EndBlobsPtr) (struct _archiveHandle * AH, struct _tocEntry * te);
|
|
+
|
|
+typedef int (*WriteBytePtr) (struct _archiveHandle * AH, const int i);
|
|
+typedef int (*ReadBytePtr) (struct _archiveHandle * AH);
|
|
+typedef void (*WriteBufPtr) (struct _archiveHandle * AH, const void *c, size_t len);
|
|
+typedef void (*ReadBufPtr) (struct _archiveHandle * AH, void *buf, size_t len);
|
|
+typedef void (*SaveArchivePtr) (struct _archiveHandle * AH);
|
|
+typedef void (*WriteExtraTocPtr) (struct _archiveHandle * AH, struct _tocEntry * te);
|
|
+typedef void (*ReadExtraTocPtr) (struct _archiveHandle * AH, struct _tocEntry * te);
|
|
+typedef void (*PrintExtraTocPtr) (struct _archiveHandle * AH, struct _tocEntry * te);
|
|
+typedef void (*PrintTocDataPtr) (struct _archiveHandle * AH, struct _tocEntry * te, RestoreOptions *ropt);
|
|
+
|
|
+typedef void (*ClonePtr) (struct _archiveHandle * AH);
|
|
+typedef void (*DeClonePtr) (struct _archiveHandle * AH);
|
|
+
|
|
+typedef char *(*WorkerJobRestorePtr) (struct _archiveHandle * AH, struct _tocEntry * te);
|
|
+typedef char *(*WorkerJobDumpPtr) (struct _archiveHandle * AH, struct _tocEntry * te);
|
|
+typedef char *(*MasterStartParallelItemPtr) (struct _archiveHandle * AH, struct _tocEntry * te,
|
|
+ T_Action act);
|
|
+typedef int (*MasterEndParallelItemPtr) (struct _archiveHandle * AH, struct _tocEntry * te,
|
|
+ const char *str, T_Action act);
|
|
+
|
|
+typedef size_t (*CustomOutPtr) (struct _archiveHandle * AH, const void *buf, size_t len);
|
|
+
|
|
+typedef enum
|
|
+{
|
|
+ SQL_SCAN = 0, /* normal */
|
|
+ SQL_IN_SINGLE_QUOTE, /* '...' literal */
|
|
+ SQL_IN_DOUBLE_QUOTE /* "..." identifier */
|
|
+} sqlparseState;
|
|
+
|
|
+typedef struct
|
|
+{
|
|
+ sqlparseState state; /* see above */
|
|
+ bool backSlash; /* next char is backslash quoted? */
|
|
+ PQExpBuffer curCmd; /* incomplete line (NULL if not created) */
|
|
+} sqlparseInfo;
|
|
+
|
|
+typedef enum
|
|
+{
|
|
+ STAGE_NONE = 0,
|
|
+ STAGE_INITIALIZING,
|
|
+ STAGE_PROCESSING,
|
|
+ STAGE_FINALIZING
|
|
+} ArchiverStage;
|
|
+
|
|
+typedef enum
|
|
+{
|
|
+ OUTPUT_SQLCMDS = 0, /* emitting general SQL commands */
|
|
+ OUTPUT_COPYDATA, /* writing COPY data */
|
|
+ OUTPUT_OTHERDATA /* writing data as INSERT commands */
|
|
+} ArchiverOutput;
|
|
+
|
|
+typedef enum
|
|
+{
|
|
+ REQ_SCHEMA = 0x01, /* want schema */
|
|
+ REQ_DATA = 0x02, /* want data */
|
|
+ REQ_SPECIAL = 0x04 /* for special TOC entries */
|
|
+} teReqs;
|
|
+
|
|
+typedef struct _archiveHandle
|
|
+{
|
|
+ Archive public; /* Public part of archive */
|
|
+ char vmaj; /* Version of file */
|
|
+ char vmin;
|
|
+ char vrev;
|
|
+ int version; /* Conveniently formatted version */
|
|
+
|
|
+ char *archiveRemoteVersion; /* When reading an archive, the
|
|
+ * version of the dumped DB */
|
|
+ char *archiveDumpVersion; /* When reading an archive, the
|
|
+ * version of the dumper */
|
|
+
|
|
+ int debugLevel; /* Used for logging (currently only by
|
|
+ * --verbose) */
|
|
+ size_t intSize; /* Size of an integer in the archive */
|
|
+ size_t offSize; /* Size of a file offset in the archive -
|
|
+ * Added V1.7 */
|
|
+ ArchiveFormat format; /* Archive format */
|
|
+
|
|
+ sqlparseInfo sqlparse; /* state for parsing INSERT data */
|
|
+
|
|
+ time_t createDate; /* Date archive created */
|
|
+
|
|
+ /*
|
|
+ * Fields used when discovering the header. A format can always get the
|
|
+ * previously read bytes from here...
|
|
+ */
|
|
+ int readHeader; /* Used if file header has been read already */
|
|
+ char *lookahead; /* Buffer used when reading header to discover
|
|
+ * format */
|
|
+ size_t lookaheadSize; /* Size of allocated buffer */
|
|
+ size_t lookaheadLen; /* Length of data in lookahead */
|
|
+ pgoff_t lookaheadPos; /* Current read position in lookahead buffer */
|
|
+
|
|
+ ArchiveEntryPtr ArchiveEntryPtr; /* Called for each metadata object */
|
|
+ StartDataPtr StartDataPtr; /* Called when table data is about to be
|
|
+ * dumped */
|
|
+ WriteDataPtr WriteDataPtr; /* Called to send some table data to the
|
|
+ * archive */
|
|
+ EndDataPtr EndDataPtr; /* Called when table data dump is finished */
|
|
+ WriteBytePtr WriteBytePtr; /* Write a byte to output */
|
|
+ ReadBytePtr ReadBytePtr; /* Read a byte from an archive */
|
|
+ WriteBufPtr WriteBufPtr; /* Write a buffer of output to the archive */
|
|
+ ReadBufPtr ReadBufPtr; /* Read a buffer of input from the archive */
|
|
+ ClosePtr ClosePtr; /* Close the archive */
|
|
+ ReopenPtr ReopenPtr; /* Reopen the archive */
|
|
+ WriteExtraTocPtr WriteExtraTocPtr; /* Write extra TOC entry data
|
|
+ * associated with the current archive
|
|
+ * format */
|
|
+ ReadExtraTocPtr ReadExtraTocPtr; /* Read extra info associated with
|
|
+ * archive format */
|
|
+ PrintExtraTocPtr PrintExtraTocPtr; /* Extra TOC info for format */
|
|
+ PrintTocDataPtr PrintTocDataPtr;
|
|
+
|
|
+ StartBlobsPtr StartBlobsPtr;
|
|
+ EndBlobsPtr EndBlobsPtr;
|
|
+ StartBlobPtr StartBlobPtr;
|
|
+ EndBlobPtr EndBlobPtr;
|
|
+
|
|
+ MasterStartParallelItemPtr MasterStartParallelItemPtr;
|
|
+ MasterEndParallelItemPtr MasterEndParallelItemPtr;
|
|
+
|
|
+ SetupWorkerPtr SetupWorkerPtr;
|
|
+ WorkerJobDumpPtr WorkerJobDumpPtr;
|
|
+ WorkerJobRestorePtr WorkerJobRestorePtr;
|
|
+
|
|
+ ClonePtr ClonePtr; /* Clone format-specific fields */
|
|
+ DeClonePtr DeClonePtr; /* Clean up cloned fields */
|
|
+
|
|
+ CustomOutPtr CustomOutPtr; /* Alternative script output routine */
|
|
+
|
|
+ /* Stuff for direct DB connection */
|
|
+ char *archdbname; /* DB name *read* from archive */
|
|
+ enum trivalue promptPassword;
|
|
+ char *savedPassword; /* password for ropt->username, if known */
|
|
+ char *use_role;
|
|
+ PGconn *connection;
|
|
+ int connectToDB; /* Flag to indicate if direct DB connection is
|
|
+ * required */
|
|
+ ArchiverOutput outputKind; /* Flag for what we're currently writing */
|
|
+ bool pgCopyIn; /* Currently in libpq 'COPY IN' mode. */
|
|
+
|
|
+ int loFd; /* BLOB fd */
|
|
+ int writingBlob; /* Flag */
|
|
+ int blobCount; /* # of blobs restored */
|
|
+
|
|
+ char *fSpec; /* Archive File Spec */
|
|
+ FILE *FH; /* General purpose file handle */
|
|
+ void *OF;
|
|
+ int gzOut; /* Output file */
|
|
+
|
|
+ struct _tocEntry *toc; /* Header of circular list of TOC entries */
|
|
+ int tocCount; /* Number of TOC entries */
|
|
+ DumpId maxDumpId; /* largest DumpId among all TOC entries */
|
|
+
|
|
+ /* arrays created after the TOC list is complete: */
|
|
+ struct _tocEntry **tocsByDumpId; /* TOCs indexed by dumpId */
|
|
+ DumpId *tableDataId; /* TABLE DATA ids, indexed by table dumpId */
|
|
+
|
|
+ struct _tocEntry *currToc; /* Used when dumping data */
|
|
+ int compression; /* Compression requested on open. Possible
|
|
+ * values for compression: -1
|
|
+ * Z_DEFAULT_COMPRESSION 0 COMPRESSION_NONE
|
|
+ * 1-9 levels for gzip compression */
|
|
+ ArchiveMode mode; /* File mode - r or w */
|
|
+ void *formatData; /* Header data specific to file format */
|
|
+
|
|
+ RestoreOptions *ropt; /* Used to check restore options in ahwrite
|
|
+ * etc */
|
|
+
|
|
+ /* these vars track state to avoid sending redundant SET commands */
|
|
+ char *currUser; /* current username, or NULL if unknown */
|
|
+ char *currSchema; /* current schema, or NULL */
|
|
+ char *currTablespace; /* current tablespace, or NULL */
|
|
+ bool currWithOids; /* current default_with_oids setting */
|
|
+
|
|
+ void *lo_buf;
|
|
+ size_t lo_buf_used;
|
|
+ size_t lo_buf_size;
|
|
+
|
|
+ int noTocComments;
|
|
+ ArchiverStage stage;
|
|
+ ArchiverStage lastErrorStage;
|
|
+ struct _tocEntry *currentTE;
|
|
+ struct _tocEntry *lastErrorTE;
|
|
+} ArchiveHandle;
|
|
+
|
|
+typedef struct _tocEntry
|
|
+{
|
|
+ struct _tocEntry *prev;
|
|
+ struct _tocEntry *next;
|
|
+ CatalogId catalogId;
|
|
+ DumpId dumpId;
|
|
+ teSection section;
|
|
+ bool hadDumper; /* Archiver was passed a dumper routine (used
|
|
+ * in restore) */
|
|
+ char *tag; /* index tag */
|
|
+ char *namespace; /* null or empty string if not in a schema */
|
|
+ char *tablespace; /* null if not in a tablespace; empty string
|
|
+ * means use database default */
|
|
+ char *owner;
|
|
+ bool withOids; /* Used only by "TABLE" tags */
|
|
+ char *desc;
|
|
+ char *defn;
|
|
+ char *dropStmt;
|
|
+ char *copyStmt;
|
|
+ DumpId *dependencies; /* dumpIds of objects this one depends on */
|
|
+ int nDeps; /* number of dependencies */
|
|
+
|
|
+ DataDumperPtr dataDumper; /* Routine to dump data for object */
|
|
+ void *dataDumperArg; /* Arg for above routine */
|
|
+ void *formatData; /* TOC Entry data specific to file format */
|
|
+
|
|
+ /* working state while dumping/restoring */
|
|
+ teReqs reqs; /* do we need schema and/or data of object */
|
|
+ bool created; /* set for DATA member if TABLE was created */
|
|
+
|
|
+ /* working state (needed only for parallel restore) */
|
|
+ struct _tocEntry *par_prev; /* list links for pending/ready items; */
|
|
+ struct _tocEntry *par_next; /* these are NULL if not in either list */
|
|
+ int depCount; /* number of dependencies not yet restored */
|
|
+ DumpId *revDeps; /* dumpIds of objects depending on this one */
|
|
+ int nRevDeps; /* number of such dependencies */
|
|
+ DumpId *lockDeps; /* dumpIds of objects this one needs lock on */
|
|
+ int nLockDeps; /* number of such dependencies */
|
|
+} TocEntry;
|
|
+
|
|
+extern int parallel_restore(struct ParallelArgs *args);
|
|
+extern void on_exit_close_archive(Archive *AHX);
|
|
+
|
|
+extern void warn_or_exit_horribly(ArchiveHandle *AH, const char *modulename, const char *fmt,...) __attribute__((format(PG_PRINTF_ATTRIBUTE, 3, 4)));
|
|
+
|
|
+extern void WriteTOC(ArchiveHandle *AH);
|
|
+extern void ReadTOC(ArchiveHandle *AH);
|
|
+extern void WriteHead(ArchiveHandle *AH);
|
|
+extern void ReadHead(ArchiveHandle *AH);
|
|
+extern void WriteToc(ArchiveHandle *AH);
|
|
+extern void ReadToc(ArchiveHandle *AH);
|
|
+extern void WriteDataChunks(ArchiveHandle *AH, struct ParallelState *pstate);
|
|
+extern void WriteDataChunksForTocEntry(ArchiveHandle *AH, TocEntry *te);
|
|
+extern ArchiveHandle *CloneArchive(ArchiveHandle *AH);
|
|
+extern void DeCloneArchive(ArchiveHandle *AH);
|
|
+
|
|
+extern teReqs TocIDRequired(ArchiveHandle *AH, DumpId id);
|
|
+TocEntry *getTocEntryByDumpId(ArchiveHandle *AH, DumpId id);
|
|
+extern bool checkSeek(FILE *fp);
|
|
+
|
|
+#define appendStringLiteralAHX(buf,str,AH) \
|
|
+ appendStringLiteral(buf, str, (AH)->public.encoding, (AH)->public.std_strings)
|
|
+
|
|
+#define appendByteaLiteralAHX(buf,str,len,AH) \
|
|
+ appendByteaLiteral(buf, str, len, (AH)->public.std_strings)
|
|
+
|
|
+/*
|
|
+ * Mandatory routines for each supported format
|
|
+ */
|
|
+
|
|
+extern size_t WriteInt(ArchiveHandle *AH, int i);
|
|
+extern int ReadInt(ArchiveHandle *AH);
|
|
+extern char *ReadStr(ArchiveHandle *AH);
|
|
+extern size_t WriteStr(ArchiveHandle *AH, const char *s);
|
|
+
|
|
+int ReadOffset(ArchiveHandle *, pgoff_t *);
|
|
+size_t WriteOffset(ArchiveHandle *, pgoff_t, int);
|
|
+
|
|
+extern void StartRestoreBlobs(ArchiveHandle *AH);
|
|
+extern void StartRestoreBlob(ArchiveHandle *AH, Oid oid, bool drop);
|
|
+extern void EndRestoreBlob(ArchiveHandle *AH, Oid oid);
|
|
+extern void EndRestoreBlobs(ArchiveHandle *AH);
|
|
+
|
|
+extern void InitArchiveFmt_Custom(ArchiveHandle *AH);
|
|
+extern void InitArchiveFmt_Null(ArchiveHandle *AH);
|
|
+extern void InitArchiveFmt_Directory(ArchiveHandle *AH);
|
|
+extern void InitArchiveFmt_Tar(ArchiveHandle *AH);
|
|
+
|
|
+extern bool isValidTarHeader(char *header);
|
|
+
|
|
+extern int ReconnectToServer(ArchiveHandle *AH, const char *dbname, const char *newUser);
|
|
+extern void DropBlobIfExists(ArchiveHandle *AH, Oid oid);
|
|
+
|
|
+void ahwrite(const void *ptr, size_t size, size_t nmemb, ArchiveHandle *AH);
|
|
+int ahprintf(ArchiveHandle *AH, const char *fmt,...) __attribute__((format(PG_PRINTF_ATTRIBUTE, 2, 3)));
|
|
+
|
|
+void ahlog(ArchiveHandle *AH, int level, const char *fmt,...) __attribute__((format(PG_PRINTF_ATTRIBUTE, 3, 4)));
|
|
+
|
|
+#endif
|
|
--- /dev/null
|
|
+++ pglogical-2.2.2/pglogical_dump/pg_backup_custom.c
|
|
@@ -0,0 +1,995 @@
|
|
+/*-------------------------------------------------------------------------
|
|
+ *
|
|
+ * pg_backup_custom.c
|
|
+ *
|
|
+ * Implements the custom output format.
|
|
+ *
|
|
+ * The comments with the routines in this code are a good place to
|
|
+ * understand how to write a new format.
|
|
+ *
|
|
+ * See the headers to pg_restore for more details.
|
|
+ *
|
|
+ * Copyright (c) 2000, Philip Warner
|
|
+ * Rights are granted to use this software in any way so long
|
|
+ * as this notice is not removed.
|
|
+ *
|
|
+ * The author is not responsible for loss or damages that may result from its use,
|
|
+ * and any liability will be limited to the time taken to fix any
|
|
+ * related bug.
|
|
+ *
|
|
+ *
|
|
+ * IDENTIFICATION
|
|
+ * src/bin/pg_dump/pg_backup_custom.c
|
|
+ *
|
|
+ *-------------------------------------------------------------------------
|
|
+ */
|
|
+
|
|
+#include "compress_io.h"
|
|
+#include "parallel.h"
|
|
+#include "pg_backup_utils.h"
|
|
+
|
|
+/*--------
|
|
+ * Routines in the format interface
|
|
+ *--------
|
|
+ */
|
|
+
|
|
+static void _ArchiveEntry(ArchiveHandle *AH, TocEntry *te);
|
|
+static void _StartData(ArchiveHandle *AH, TocEntry *te);
|
|
+static void _WriteData(ArchiveHandle *AH, const void *data, size_t dLen);
|
|
+static void _EndData(ArchiveHandle *AH, TocEntry *te);
|
|
+static int _WriteByte(ArchiveHandle *AH, const int i);
|
|
+static int _ReadByte(ArchiveHandle *);
|
|
+static void _WriteBuf(ArchiveHandle *AH, const void *buf, size_t len);
|
|
+static void _ReadBuf(ArchiveHandle *AH, void *buf, size_t len);
|
|
+static void _CloseArchive(ArchiveHandle *AH);
|
|
+static void _ReopenArchive(ArchiveHandle *AH);
|
|
+static void _PrintTocData(ArchiveHandle *AH, TocEntry *te, RestoreOptions *ropt);
|
|
+static void _WriteExtraToc(ArchiveHandle *AH, TocEntry *te);
|
|
+static void _ReadExtraToc(ArchiveHandle *AH, TocEntry *te);
|
|
+static void _PrintExtraToc(ArchiveHandle *AH, TocEntry *te);
|
|
+
|
|
+static void _PrintData(ArchiveHandle *AH);
|
|
+static void _skipData(ArchiveHandle *AH);
|
|
+static void _skipBlobs(ArchiveHandle *AH);
|
|
+
|
|
+static void _StartBlobs(ArchiveHandle *AH, TocEntry *te);
|
|
+static void _StartBlob(ArchiveHandle *AH, TocEntry *te, Oid oid);
|
|
+static void _EndBlob(ArchiveHandle *AH, TocEntry *te, Oid oid);
|
|
+static void _EndBlobs(ArchiveHandle *AH, TocEntry *te);
|
|
+static void _LoadBlobs(ArchiveHandle *AH, bool drop);
|
|
+static void _Clone(ArchiveHandle *AH);
|
|
+static void _DeClone(ArchiveHandle *AH);
|
|
+
|
|
+static char *_MasterStartParallelItem(ArchiveHandle *AH, TocEntry *te, T_Action act);
|
|
+static int _MasterEndParallelItem(ArchiveHandle *AH, TocEntry *te, const char *str, T_Action act);
|
|
+char *_WorkerJobRestoreCustom(ArchiveHandle *AH, TocEntry *te);
|
|
+
|
|
+typedef struct
|
|
+{
|
|
+ CompressorState *cs;
|
|
+ int hasSeek;
|
|
+ pgoff_t filePos;
|
|
+ pgoff_t dataStart;
|
|
+} lclContext;
|
|
+
|
|
+typedef struct
|
|
+{
|
|
+ int dataState;
|
|
+ pgoff_t dataPos;
|
|
+} lclTocEntry;
|
|
+
|
|
+
|
|
+/*------
|
|
+ * Static declarations
|
|
+ *------
|
|
+ */
|
|
+static void _readBlockHeader(ArchiveHandle *AH, int *type, int *id);
|
|
+static pgoff_t _getFilePos(ArchiveHandle *AH, lclContext *ctx);
|
|
+
|
|
+static void _CustomWriteFunc(ArchiveHandle *AH, const char *buf, size_t len);
|
|
+static size_t _CustomReadFunc(ArchiveHandle *AH, char **buf, size_t *buflen);
|
|
+
|
|
+/* translator: this is a module name */
|
|
+static const char *modulename = gettext_noop("custom archiver");
|
|
+
|
|
+
|
|
+
|
|
+/*
|
|
+ * Init routine required by ALL formats. This is a global routine
|
|
+ * and should be declared in pg_backup_archiver.h
|
|
+ *
|
|
+ * Its task is to create any extra archive context (using AH->formatData),
|
|
+ * and to initialize the supported function pointers.
|
|
+ *
|
|
+ * It should also prepare whatever its input source is for reading/writing,
|
|
+ * and in the case of a read mode connection, it should load the Header & TOC.
|
|
+ */
|
|
+void
|
|
+InitArchiveFmt_Custom(ArchiveHandle *AH)
|
|
+{
|
|
+ lclContext *ctx;
|
|
+
|
|
+ /* Assuming static functions, this can be copied for each format. */
|
|
+ AH->ArchiveEntryPtr = _ArchiveEntry;
|
|
+ AH->StartDataPtr = _StartData;
|
|
+ AH->WriteDataPtr = _WriteData;
|
|
+ AH->EndDataPtr = _EndData;
|
|
+ AH->WriteBytePtr = _WriteByte;
|
|
+ AH->ReadBytePtr = _ReadByte;
|
|
+ AH->WriteBufPtr = _WriteBuf;
|
|
+ AH->ReadBufPtr = _ReadBuf;
|
|
+ AH->ClosePtr = _CloseArchive;
|
|
+ AH->ReopenPtr = _ReopenArchive;
|
|
+ AH->PrintTocDataPtr = _PrintTocData;
|
|
+ AH->ReadExtraTocPtr = _ReadExtraToc;
|
|
+ AH->WriteExtraTocPtr = _WriteExtraToc;
|
|
+ AH->PrintExtraTocPtr = _PrintExtraToc;
|
|
+
|
|
+ AH->StartBlobsPtr = _StartBlobs;
|
|
+ AH->StartBlobPtr = _StartBlob;
|
|
+ AH->EndBlobPtr = _EndBlob;
|
|
+ AH->EndBlobsPtr = _EndBlobs;
|
|
+ AH->ClonePtr = _Clone;
|
|
+ AH->DeClonePtr = _DeClone;
|
|
+
|
|
+ AH->MasterStartParallelItemPtr = _MasterStartParallelItem;
|
|
+ AH->MasterEndParallelItemPtr = _MasterEndParallelItem;
|
|
+
|
|
+ /* no parallel dump in the custom archive, only parallel restore */
|
|
+ AH->WorkerJobDumpPtr = NULL;
|
|
+ AH->WorkerJobRestorePtr = _WorkerJobRestoreCustom;
|
|
+
|
|
+ /* Set up a private area. */
|
|
+ ctx = (lclContext *) pg_malloc0(sizeof(lclContext));
|
|
+ AH->formatData = (void *) ctx;
|
|
+
|
|
+ /* Initialize LO buffering */
|
|
+ AH->lo_buf_size = LOBBUFSIZE;
|
|
+ AH->lo_buf = (void *) pg_malloc(LOBBUFSIZE);
|
|
+
|
|
+ ctx->filePos = 0;
|
|
+
|
|
+ /*
|
|
+ * Now open the file
|
|
+ */
|
|
+ if (AH->mode == archModeWrite)
|
|
+ {
|
|
+ if (AH->fSpec && strcmp(AH->fSpec, "") != 0)
|
|
+ {
|
|
+ AH->FH = fopen(AH->fSpec, PG_BINARY_W);
|
|
+ if (!AH->FH)
|
|
+ exit_horribly(modulename, "could not open output file \"%s\": %s\n",
|
|
+ AH->fSpec, strerror(errno));
|
|
+ }
|
|
+ else
|
|
+ {
|
|
+ AH->FH = stdout;
|
|
+ if (!AH->FH)
|
|
+ exit_horribly(modulename, "could not open output file: %s\n",
|
|
+ strerror(errno));
|
|
+ }
|
|
+
|
|
+ ctx->hasSeek = checkSeek(AH->FH);
|
|
+ }
|
|
+ else
|
|
+ {
|
|
+ if (AH->fSpec && strcmp(AH->fSpec, "") != 0)
|
|
+ {
|
|
+ AH->FH = fopen(AH->fSpec, PG_BINARY_R);
|
|
+ if (!AH->FH)
|
|
+ exit_horribly(modulename, "could not open input file \"%s\": %s\n",
|
|
+ AH->fSpec, strerror(errno));
|
|
+ }
|
|
+ else
|
|
+ {
|
|
+ AH->FH = stdin;
|
|
+ if (!AH->FH)
|
|
+ exit_horribly(modulename, "could not open input file: %s\n",
|
|
+ strerror(errno));
|
|
+ }
|
|
+
|
|
+ ctx->hasSeek = checkSeek(AH->FH);
|
|
+
|
|
+ ReadHead(AH);
|
|
+ ReadToc(AH);
|
|
+ ctx->dataStart = _getFilePos(AH, ctx);
|
|
+ }
|
|
+
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Called by the Archiver when the dumper creates a new TOC entry.
|
|
+ *
|
|
+ * Optional.
|
|
+ *
|
|
+ * Set up extra format-related TOC data.
|
|
+*/
|
|
+static void
|
|
+_ArchiveEntry(ArchiveHandle *AH, TocEntry *te)
|
|
+{
|
|
+ lclTocEntry *ctx;
|
|
+
|
|
+ ctx = (lclTocEntry *) pg_malloc0(sizeof(lclTocEntry));
|
|
+ if (te->dataDumper)
|
|
+ ctx->dataState = K_OFFSET_POS_NOT_SET;
|
|
+ else
|
|
+ ctx->dataState = K_OFFSET_NO_DATA;
|
|
+
|
|
+ te->formatData = (void *) ctx;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Called by the Archiver to save any extra format-related TOC entry
|
|
+ * data.
|
|
+ *
|
|
+ * Optional.
|
|
+ *
|
|
+ * Use the Archiver routines to write data - they are endian-agnostic, and
|
|
+ * maintain other important file information.
|
|
+ */
|
|
+static void
|
|
+_WriteExtraToc(ArchiveHandle *AH, TocEntry *te)
|
|
+{
|
|
+ lclTocEntry *ctx = (lclTocEntry *) te->formatData;
|
|
+
|
|
+ WriteOffset(AH, ctx->dataPos, ctx->dataState);
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Called by the Archiver to read any extra format-related TOC data.
|
|
+ *
|
|
+ * Optional.
|
|
+ *
|
|
+ * Needs to match the order defined in _WriteExtraToc, and should also
|
|
+ * use the Archiver input routines.
|
|
+ */
|
|
+static void
|
|
+_ReadExtraToc(ArchiveHandle *AH, TocEntry *te)
|
|
+{
|
|
+ lclTocEntry *ctx = (lclTocEntry *) te->formatData;
|
|
+
|
|
+ if (ctx == NULL)
|
|
+ {
|
|
+ ctx = (lclTocEntry *) pg_malloc0(sizeof(lclTocEntry));
|
|
+ te->formatData = (void *) ctx;
|
|
+ }
|
|
+
|
|
+ ctx->dataState = ReadOffset(AH, &(ctx->dataPos));
|
|
+
|
|
+ /*
|
|
+ * Prior to V1.7 (pg7.3), we dumped the data size as an int; now we don't
|
|
+ * dump it at all.
|
|
+ */
|
|
+ if (AH->version < K_VERS_1_7)
|
|
+ ReadInt(AH);
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Called by the Archiver when restoring an archive to output a comment
|
|
+ * that includes useful information about the TOC entry.
|
|
+ *
|
|
+ * Optional.
|
|
+ *
|
|
+ */
|
|
+static void
|
|
+_PrintExtraToc(ArchiveHandle *AH, TocEntry *te)
|
|
+{
|
|
+ lclTocEntry *ctx = (lclTocEntry *) te->formatData;
|
|
+
|
|
+ if (AH->public.verbose)
|
|
+ ahprintf(AH, "-- Data Pos: " INT64_FORMAT "\n",
|
|
+ (int64) ctx->dataPos);
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Called by the archiver when saving TABLE DATA (not schema). This routine
|
|
+ * should save whatever format-specific information is needed to read
|
|
+ * the archive back.
|
|
+ *
|
|
+ * It is called just prior to the dumper's 'DataDumper' routine being called.
|
|
+ *
|
|
+ * Optional, but strongly recommended.
|
|
+ *
|
|
+ */
|
|
+static void
|
|
+_StartData(ArchiveHandle *AH, TocEntry *te)
|
|
+{
|
|
+ lclContext *ctx = (lclContext *) AH->formatData;
|
|
+ lclTocEntry *tctx = (lclTocEntry *) te->formatData;
|
|
+
|
|
+ tctx->dataPos = _getFilePos(AH, ctx);
|
|
+ tctx->dataState = K_OFFSET_POS_SET;
|
|
+
|
|
+ _WriteByte(AH, BLK_DATA); /* Block type */
|
|
+ WriteInt(AH, te->dumpId); /* For sanity check */
|
|
+
|
|
+ ctx->cs = AllocateCompressor(AH->compression, _CustomWriteFunc);
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Called by archiver when dumper calls WriteData. This routine is
|
|
+ * called for both BLOB and TABLE data; it is the responsibility of
|
|
+ * the format to manage each kind of data using StartBlob/StartData.
|
|
+ *
|
|
+ * It should only be called from within a DataDumper routine.
|
|
+ *
|
|
+ * Mandatory.
|
|
+ */
|
|
+static void
|
|
+_WriteData(ArchiveHandle *AH, const void *data, size_t dLen)
|
|
+{
|
|
+ lclContext *ctx = (lclContext *) AH->formatData;
|
|
+ CompressorState *cs = ctx->cs;
|
|
+
|
|
+ if (dLen > 0)
|
|
+ /* WriteDataToArchive() internally throws write errors */
|
|
+ WriteDataToArchive(AH, cs, data, dLen);
|
|
+
|
|
+ return;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Called by the archiver when a dumper's 'DataDumper' routine has
|
|
+ * finished.
|
|
+ *
|
|
+ * Optional.
|
|
+ *
|
|
+ */
|
|
+static void
|
|
+_EndData(ArchiveHandle *AH, TocEntry *te)
|
|
+{
|
|
+ lclContext *ctx = (lclContext *) AH->formatData;
|
|
+
|
|
+ EndCompressor(AH, ctx->cs);
|
|
+ /* Send the end marker */
|
|
+ WriteInt(AH, 0);
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Called by the archiver when starting to save all BLOB DATA (not schema).
|
|
+ * This routine should save whatever format-specific information is needed
|
|
+ * to read the BLOBs back into memory.
|
|
+ *
|
|
+ * It is called just prior to the dumper's DataDumper routine.
|
|
+ *
|
|
+ * Optional, but strongly recommended.
|
|
+ */
|
|
+static void
|
|
+_StartBlobs(ArchiveHandle *AH, TocEntry *te)
|
|
+{
|
|
+ lclContext *ctx = (lclContext *) AH->formatData;
|
|
+ lclTocEntry *tctx = (lclTocEntry *) te->formatData;
|
|
+
|
|
+ tctx->dataPos = _getFilePos(AH, ctx);
|
|
+ tctx->dataState = K_OFFSET_POS_SET;
|
|
+
|
|
+ _WriteByte(AH, BLK_BLOBS); /* Block type */
|
|
+ WriteInt(AH, te->dumpId); /* For sanity check */
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Called by the archiver when the dumper calls StartBlob.
|
|
+ *
|
|
+ * Mandatory.
|
|
+ *
|
|
+ * Must save the passed OID for retrieval at restore-time.
|
|
+ */
|
|
+static void
|
|
+_StartBlob(ArchiveHandle *AH, TocEntry *te, Oid oid)
|
|
+{
|
|
+ lclContext *ctx = (lclContext *) AH->formatData;
|
|
+
|
|
+ if (oid == 0)
|
|
+ exit_horribly(modulename, "invalid OID for large object\n");
|
|
+
|
|
+ WriteInt(AH, oid);
|
|
+
|
|
+ ctx->cs = AllocateCompressor(AH->compression, _CustomWriteFunc);
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Called by the archiver when the dumper calls EndBlob.
|
|
+ *
|
|
+ * Optional.
|
|
+ */
|
|
+static void
|
|
+_EndBlob(ArchiveHandle *AH, TocEntry *te, Oid oid)
|
|
+{
|
|
+ lclContext *ctx = (lclContext *) AH->formatData;
|
|
+
|
|
+ EndCompressor(AH, ctx->cs);
|
|
+ /* Send the end marker */
|
|
+ WriteInt(AH, 0);
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Called by the archiver when finishing saving all BLOB DATA.
|
|
+ *
|
|
+ * Optional.
|
|
+ */
|
|
+static void
|
|
+_EndBlobs(ArchiveHandle *AH, TocEntry *te)
|
|
+{
|
|
+ /* Write out a fake zero OID to mark end-of-blobs. */
|
|
+ WriteInt(AH, 0);
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Print data for a given TOC entry
|
|
+ */
|
|
+static void
|
|
+_PrintTocData(ArchiveHandle *AH, TocEntry *te, RestoreOptions *ropt)
|
|
+{
|
|
+ lclContext *ctx = (lclContext *) AH->formatData;
|
|
+ lclTocEntry *tctx = (lclTocEntry *) te->formatData;
|
|
+ int blkType;
|
|
+ int id;
|
|
+
|
|
+ if (tctx->dataState == K_OFFSET_NO_DATA)
|
|
+ return;
|
|
+
|
|
+ if (!ctx->hasSeek || tctx->dataState == K_OFFSET_POS_NOT_SET)
|
|
+ {
|
|
+ /*
|
|
+ * We cannot seek directly to the desired block. Instead, skip over
|
|
+ * block headers until we find the one we want. This could fail if we
|
|
+ * are asked to restore items out-of-order.
|
|
+ */
|
|
+ _readBlockHeader(AH, &blkType, &id);
|
|
+
|
|
+ while (blkType != EOF && id != te->dumpId)
|
|
+ {
|
|
+ switch (blkType)
|
|
+ {
|
|
+ case BLK_DATA:
|
|
+ _skipData(AH);
|
|
+ break;
|
|
+
|
|
+ case BLK_BLOBS:
|
|
+ _skipBlobs(AH);
|
|
+ break;
|
|
+
|
|
+ default: /* Always have a default */
|
|
+ exit_horribly(modulename,
|
|
+ "unrecognized data block type (%d) while searching archive\n",
|
|
+ blkType);
|
|
+ break;
|
|
+ }
|
|
+ _readBlockHeader(AH, &blkType, &id);
|
|
+ }
|
|
+ }
|
|
+ else
|
|
+ {
|
|
+ /* We can just seek to the place we need to be. */
|
|
+ if (fseeko(AH->FH, tctx->dataPos, SEEK_SET) != 0)
|
|
+ exit_horribly(modulename, "error during file seek: %s\n",
|
|
+ strerror(errno));
|
|
+
|
|
+ _readBlockHeader(AH, &blkType, &id);
|
|
+ }
|
|
+
|
|
+ /* Produce suitable failure message if we fell off end of file */
|
|
+ if (blkType == EOF)
|
|
+ {
|
|
+ if (tctx->dataState == K_OFFSET_POS_NOT_SET)
|
|
+ exit_horribly(modulename, "could not find block ID %d in archive -- "
|
|
+ "possibly due to out-of-order restore request, "
|
|
+ "which cannot be handled due to lack of data offsets in archive\n",
|
|
+ te->dumpId);
|
|
+ else if (!ctx->hasSeek)
|
|
+ exit_horribly(modulename, "could not find block ID %d in archive -- "
|
|
+ "possibly due to out-of-order restore request, "
|
|
+ "which cannot be handled due to non-seekable input file\n",
|
|
+ te->dumpId);
|
|
+ else /* huh, the dataPos led us to EOF? */
|
|
+ exit_horribly(modulename, "could not find block ID %d in archive -- "
|
|
+ "possibly corrupt archive\n",
|
|
+ te->dumpId);
|
|
+ }
|
|
+
|
|
+ /* Are we sane? */
|
|
+ if (id != te->dumpId)
|
|
+ exit_horribly(modulename, "found unexpected block ID (%d) when reading data -- expected %d\n",
|
|
+ id, te->dumpId);
|
|
+
|
|
+ switch (blkType)
|
|
+ {
|
|
+ case BLK_DATA:
|
|
+ _PrintData(AH);
|
|
+ break;
|
|
+
|
|
+ case BLK_BLOBS:
|
|
+ _LoadBlobs(AH, ropt->dropSchema);
|
|
+ break;
|
|
+
|
|
+ default: /* Always have a default */
|
|
+ exit_horribly(modulename, "unrecognized data block type %d while restoring archive\n",
|
|
+ blkType);
|
|
+ break;
|
|
+ }
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Print data from current file position.
|
|
+*/
|
|
+static void
|
|
+_PrintData(ArchiveHandle *AH)
|
|
+{
|
|
+ ReadDataFromArchive(AH, AH->compression, _CustomReadFunc);
|
|
+}
|
|
+
|
|
+static void
|
|
+_LoadBlobs(ArchiveHandle *AH, bool drop)
|
|
+{
|
|
+ Oid oid;
|
|
+
|
|
+ StartRestoreBlobs(AH);
|
|
+
|
|
+ oid = ReadInt(AH);
|
|
+ while (oid != 0)
|
|
+ {
|
|
+ StartRestoreBlob(AH, oid, drop);
|
|
+ _PrintData(AH);
|
|
+ EndRestoreBlob(AH, oid);
|
|
+ oid = ReadInt(AH);
|
|
+ }
|
|
+
|
|
+ EndRestoreBlobs(AH);
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Skip the BLOBs from the current file position.
|
|
+ * BLOBs are written sequentially as data blocks (see below).
|
|
+ * Each BLOB is preceded by its original OID.
|
|
+ * A zero OID indicates the end of the BLOBs.
|
|
+ */
|
|
+static void
|
|
+_skipBlobs(ArchiveHandle *AH)
|
|
+{
|
|
+ Oid oid;
|
|
+
|
|
+ oid = ReadInt(AH);
|
|
+ while (oid != 0)
|
|
+ {
|
|
+ _skipData(AH);
|
|
+ oid = ReadInt(AH);
|
|
+ }
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Skip data from current file position.
|
|
+ * Data blocks are formatted as an integer length, followed by data.
|
|
+ * A zero length denotes the end of the block.
|
|
+*/
|
|
+static void
|
|
+_skipData(ArchiveHandle *AH)
|
|
+{
|
|
+ lclContext *ctx = (lclContext *) AH->formatData;
|
|
+ size_t blkLen;
|
|
+ char *buf = NULL;
|
|
+ int buflen = 0;
|
|
+ size_t cnt;
|
|
+
|
|
+ blkLen = ReadInt(AH);
|
|
+ while (blkLen != 0)
|
|
+ {
|
|
+ if (blkLen > buflen)
|
|
+ {
|
|
+ if (buf)
|
|
+ free(buf);
|
|
+ buf = (char *) pg_malloc(blkLen);
|
|
+ buflen = blkLen;
|
|
+ }
|
|
+ if ((cnt = fread(buf, 1, blkLen, AH->FH)) != blkLen)
|
|
+ {
|
|
+ if (feof(AH->FH))
|
|
+ exit_horribly(modulename,
|
|
+ "could not read from input file: end of file\n");
|
|
+ else
|
|
+ exit_horribly(modulename,
|
|
+ "could not read from input file: %s\n", strerror(errno));
|
|
+ }
|
|
+
|
|
+ ctx->filePos += blkLen;
|
|
+
|
|
+ blkLen = ReadInt(AH);
|
|
+ }
|
|
+
|
|
+ if (buf)
|
|
+ free(buf);
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Write a byte of data to the archive.
|
|
+ *
|
|
+ * Mandatory.
|
|
+ *
|
|
+ * Called by the archiver to do integer & byte output to the archive.
|
|
+ */
|
|
+static int
|
|
+_WriteByte(ArchiveHandle *AH, const int i)
|
|
+{
|
|
+ lclContext *ctx = (lclContext *) AH->formatData;
|
|
+ int res;
|
|
+
|
|
+ if ((res = fputc(i, AH->FH)) == EOF)
|
|
+ WRITE_ERROR_EXIT;
|
|
+ ctx->filePos += 1;
|
|
+
|
|
+ return 1;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Read a byte of data from the archive.
|
|
+ *
|
|
+ * Mandatory
|
|
+ *
|
|
+ * Called by the archiver to read bytes & integers from the archive.
|
|
+ * EOF should be treated as a fatal error.
|
|
+ */
|
|
+static int
|
|
+_ReadByte(ArchiveHandle *AH)
|
|
+{
|
|
+ lclContext *ctx = (lclContext *) AH->formatData;
|
|
+ int res;
|
|
+
|
|
+ res = getc(AH->FH);
|
|
+ if (res == EOF)
|
|
+ READ_ERROR_EXIT(AH->FH);
|
|
+ ctx->filePos += 1;
|
|
+ return res;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Write a buffer of data to the archive.
|
|
+ *
|
|
+ * Mandatory.
|
|
+ *
|
|
+ * Called by the archiver to write a block of bytes to the archive.
|
|
+ */
|
|
+static void
|
|
+_WriteBuf(ArchiveHandle *AH, const void *buf, size_t len)
|
|
+{
|
|
+ lclContext *ctx = (lclContext *) AH->formatData;
|
|
+
|
|
+ if (fwrite(buf, 1, len, AH->FH) != len)
|
|
+ WRITE_ERROR_EXIT;
|
|
+ ctx->filePos += len;
|
|
+
|
|
+ return;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Read a block of bytes from the archive.
|
|
+ *
|
|
+ * Mandatory.
|
|
+ *
|
|
+ * Called by the archiver to read a block of bytes from the archive
|
|
+ */
|
|
+static void
|
|
+_ReadBuf(ArchiveHandle *AH, void *buf, size_t len)
|
|
+{
|
|
+ lclContext *ctx = (lclContext *) AH->formatData;
|
|
+
|
|
+ if (fread(buf, 1, len, AH->FH) != len)
|
|
+ READ_ERROR_EXIT(AH->FH);
|
|
+ ctx->filePos += len;
|
|
+
|
|
+ return;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Close the archive.
|
|
+ *
|
|
+ * Mandatory.
|
|
+ *
|
|
+ * When writing the archive, this is the routine that actually starts
|
|
+ * the process of saving it to files. No data should be written prior
|
|
+ * to this point, since the user could sort the TOC after creating it.
|
|
+ *
|
|
+ * If an archive is to be written, this routine must call:
|
|
+ * WriteHead to save the archive header
|
|
+ * WriteToc to save the TOC entries
|
|
+ * WriteDataChunks to save all DATA & BLOBs.
|
|
+ *
|
|
+ */
|
|
+static void
|
|
+_CloseArchive(ArchiveHandle *AH)
|
|
+{
|
|
+ lclContext *ctx = (lclContext *) AH->formatData;
|
|
+ pgoff_t tpos;
|
|
+
|
|
+ if (AH->mode == archModeWrite)
|
|
+ {
|
|
+ WriteHead(AH);
|
|
+ /* Remember TOC's seek position for use below */
|
|
+ tpos = ftello(AH->FH);
|
|
+ if (tpos < 0 && ctx->hasSeek)
|
|
+ exit_horribly(modulename, "could not determine seek position in archive file: %s\n",
|
|
+ strerror(errno));
|
|
+ WriteToc(AH);
|
|
+ ctx->dataStart = _getFilePos(AH, ctx);
|
|
+ WriteDataChunks(AH, NULL);
|
|
+
|
|
+ /*
|
|
+ * If possible, re-write the TOC in order to update the data offset
|
|
+ * information. This is not essential, as pg_restore can cope in most
|
|
+ * cases without it; but it can make pg_restore significantly faster
|
|
+ * in some situations (especially parallel restore).
|
|
+ */
|
|
+ if (ctx->hasSeek &&
|
|
+ fseeko(AH->FH, tpos, SEEK_SET) == 0)
|
|
+ WriteToc(AH);
|
|
+ }
|
|
+
|
|
+ if (fclose(AH->FH) != 0)
|
|
+ exit_horribly(modulename, "could not close archive file: %s\n", strerror(errno));
|
|
+
|
|
+ AH->FH = NULL;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Reopen the archive's file handle.
|
|
+ *
|
|
+ * We close the original file handle, except on Windows. (The difference
|
|
+ * is because on Windows, this is used within a multithreading context,
|
|
+ * and we don't want a thread closing the parent file handle.)
|
|
+ */
|
|
+static void
|
|
+_ReopenArchive(ArchiveHandle *AH)
|
|
+{
|
|
+ lclContext *ctx = (lclContext *) AH->formatData;
|
|
+ pgoff_t tpos;
|
|
+
|
|
+ if (AH->mode == archModeWrite)
|
|
+ exit_horribly(modulename, "can only reopen input archives\n");
|
|
+
|
|
+ /*
|
|
+ * These two cases are user-facing errors since they represent unsupported
|
|
+ * (but not invalid) use-cases. Word the error messages appropriately.
|
|
+ */
|
|
+ if (AH->fSpec == NULL || strcmp(AH->fSpec, "") == 0)
|
|
+ exit_horribly(modulename, "parallel restore from standard input is not supported\n");
|
|
+ if (!ctx->hasSeek)
|
|
+ exit_horribly(modulename, "parallel restore from non-seekable file is not supported\n");
|
|
+
|
|
+ tpos = ftello(AH->FH);
|
|
+ if (tpos < 0)
|
|
+ exit_horribly(modulename, "could not determine seek position in archive file: %s\n",
|
|
+ strerror(errno));
|
|
+
|
|
+#ifndef WIN32
|
|
+ if (fclose(AH->FH) != 0)
|
|
+ exit_horribly(modulename, "could not close archive file: %s\n",
|
|
+ strerror(errno));
|
|
+#endif
|
|
+
|
|
+ AH->FH = fopen(AH->fSpec, PG_BINARY_R);
|
|
+ if (!AH->FH)
|
|
+ exit_horribly(modulename, "could not open input file \"%s\": %s\n",
|
|
+ AH->fSpec, strerror(errno));
|
|
+
|
|
+ if (fseeko(AH->FH, tpos, SEEK_SET) != 0)
|
|
+ exit_horribly(modulename, "could not set seek position in archive file: %s\n",
|
|
+ strerror(errno));
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Clone format-specific fields during parallel restoration.
|
|
+ */
|
|
+static void
|
|
+_Clone(ArchiveHandle *AH)
|
|
+{
|
|
+ lclContext *ctx = (lclContext *) AH->formatData;
|
|
+
|
|
+ AH->formatData = (lclContext *) pg_malloc(sizeof(lclContext));
|
|
+ memcpy(AH->formatData, ctx, sizeof(lclContext));
|
|
+ ctx = (lclContext *) AH->formatData;
|
|
+
|
|
+ /* sanity check, shouldn't happen */
|
|
+ if (ctx->cs != NULL)
|
|
+ exit_horribly(modulename, "compressor active\n");
|
|
+
|
|
+ /*
|
|
+ * Note: we do not make a local lo_buf because we expect at most one BLOBS
|
|
+ * entry per archive, so no parallelism is possible. Likewise,
|
|
+ * TOC-entry-local state isn't an issue because any one TOC entry is
|
|
+ * touched by just one worker child.
|
|
+ */
|
|
+}
|
|
+
|
|
+static void
|
|
+_DeClone(ArchiveHandle *AH)
|
|
+{
|
|
+ lclContext *ctx = (lclContext *) AH->formatData;
|
|
+
|
|
+ free(ctx);
|
|
+}
|
|
+
|
|
+/*
|
|
+ * This function is executed in the child of a parallel backup for the
|
|
+ * custom format archive and dumps the actual data.
|
|
+ */
|
|
+char *
|
|
+_WorkerJobRestoreCustom(ArchiveHandle *AH, TocEntry *te)
|
|
+{
|
|
+ /*
|
|
+ * short fixed-size string + some ID so far, this needs to be malloc'ed
|
|
+ * instead of static because we work with threads on Windows
|
|
+ */
|
|
+ const int buflen = 64;
|
|
+ char *buf = (char *) pg_malloc(buflen);
|
|
+ ParallelArgs pargs;
|
|
+ int status;
|
|
+
|
|
+ pargs.AH = AH;
|
|
+ pargs.te = te;
|
|
+
|
|
+ status = parallel_restore(&pargs);
|
|
+
|
|
+ snprintf(buf, buflen, "OK RESTORE %d %d %d", te->dumpId, status,
|
|
+ status == WORKER_IGNORED_ERRORS ? AH->public.n_errors : 0);
|
|
+
|
|
+ return buf;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * This function is executed in the parent process. Depending on the desired
|
|
+ * action (dump or restore) it creates a string that is understood by the
|
|
+ * _WorkerJobDump /_WorkerJobRestore functions of the dump format.
|
|
+ */
|
|
+static char *
|
|
+_MasterStartParallelItem(ArchiveHandle *AH, TocEntry *te, T_Action act)
|
|
+{
|
|
+ /*
|
|
+ * A static char is okay here, even on Windows because we call this
|
|
+ * function only from one process (the master).
|
|
+ */
|
|
+ static char buf[64]; /* short fixed-size string + number */
|
|
+
|
|
+ /* no parallel dump in the custom archive format */
|
|
+ Assert(act == ACT_RESTORE);
|
|
+
|
|
+ snprintf(buf, sizeof(buf), "RESTORE %d", te->dumpId);
|
|
+
|
|
+ return buf;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * This function is executed in the parent process. It analyzes the response of
|
|
+ * the _WorkerJobDump / _WorkerJobRestore functions of the dump format.
|
|
+ */
|
|
+static int
|
|
+_MasterEndParallelItem(ArchiveHandle *AH, TocEntry *te, const char *str, T_Action act)
|
|
+{
|
|
+ DumpId dumpId;
|
|
+ int nBytes,
|
|
+ status,
|
|
+ n_errors;
|
|
+
|
|
+ /* no parallel dump in the custom archive */
|
|
+ Assert(act == ACT_RESTORE);
|
|
+
|
|
+ sscanf(str, "%u %u %u%n", &dumpId, &status, &n_errors, &nBytes);
|
|
+
|
|
+ Assert(nBytes == strlen(str));
|
|
+ Assert(dumpId == te->dumpId);
|
|
+
|
|
+ AH->public.n_errors += n_errors;
|
|
+
|
|
+ return status;
|
|
+}
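+
+/*
+ * Taken together, the two routines above and _WorkerJobRestoreCustom()
+ * form the custom format's half of the parallel-restore exchange: the master
+ * issues "RESTORE <dumpId>", the worker runs parallel_restore() for that TOC
+ * entry and answers "OK RESTORE <dumpId> <status> <n_errors>", and the
+ * master adds the reported error count to AH->public.n_errors.
+ */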
|
|
+
|
|
+/*--------------------------------------------------
|
|
+ * END OF FORMAT CALLBACKS
|
|
+ *--------------------------------------------------
|
|
+ */
|
|
+
|
|
+/*
|
|
+ * Get the current position in the archive file.
|
|
+ */
|
|
+static pgoff_t
|
|
+_getFilePos(ArchiveHandle *AH, lclContext *ctx)
|
|
+{
|
|
+ pgoff_t pos;
|
|
+
|
|
+ if (ctx->hasSeek)
|
|
+ {
|
|
+ /*
|
|
+ * Prior to 1.7 (pg7.3) we relied on the internally maintained
|
|
+ * pointer. Now we rely on ftello() always, unless the file has been
|
|
+ * found to not support it. For debugging purposes, print a warning
|
|
+ * if the internal pointer disagrees, so that we're more likely to
|
|
+ * notice if something's broken about the internal position tracking.
|
|
+ */
|
|
+ pos = ftello(AH->FH);
|
|
+ if (pos < 0)
|
|
+ exit_horribly(modulename, "could not determine seek position in archive file: %s\n",
|
|
+ strerror(errno));
|
|
+
|
|
+ if (pos != ctx->filePos)
|
|
+ write_msg(modulename, "WARNING: ftell mismatch with expected position -- ftell used\n");
|
|
+ }
|
|
+ else
|
|
+ pos = ctx->filePos;
|
|
+ return pos;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Read a data block header. The format changed in V1.3, so we
|
|
+ * centralize the code here for simplicity. Returns *type = EOF
|
|
+ * if at EOF.
|
|
+ */
|
|
+static void
|
|
+_readBlockHeader(ArchiveHandle *AH, int *type, int *id)
|
|
+{
|
|
+ lclContext *ctx = (lclContext *) AH->formatData;
|
|
+ int byt;
|
|
+
|
|
+ /*
|
|
+ * Note: if we are at EOF with a pre-1.3 input file, we'll exit_horribly
|
|
+ * inside ReadInt rather than returning EOF. It doesn't seem worth
|
|
+ * jumping through hoops to deal with that case better, because no such
|
|
+ * files are likely to exist in the wild: only some 7.1 development
|
|
+ * versions of pg_dump ever generated such files.
|
|
+ */
|
|
+ if (AH->version < K_VERS_1_3)
|
|
+ *type = BLK_DATA;
|
|
+ else
|
|
+ {
|
|
+ byt = getc(AH->FH);
|
|
+ *type = byt;
|
|
+ if (byt == EOF)
|
|
+ {
|
|
+ *id = 0; /* don't return an uninitialized value */
|
|
+ return;
|
|
+ }
|
|
+ ctx->filePos += 1;
|
|
+ }
|
|
+
|
|
+ *id = ReadInt(AH);
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Callback function for WriteDataToArchive. Writes one block of (compressed)
|
|
+ * data to the archive.
|
|
+ */
|
|
+static void
|
|
+_CustomWriteFunc(ArchiveHandle *AH, const char *buf, size_t len)
|
|
+{
|
|
+ /* never write 0-byte blocks (this should not happen) */
|
|
+ if (len > 0)
|
|
+ {
|
|
+ WriteInt(AH, len);
|
|
+ _WriteBuf(AH, buf, len);
|
|
+ }
|
|
+ return;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Callback function for ReadDataFromArchive. To keep things simple, we
|
|
+ * always read one compressed block at a time.
|
|
+ */
|
|
+static size_t
|
|
+_CustomReadFunc(ArchiveHandle *AH, char **buf, size_t *buflen)
|
|
+{
|
|
+ size_t blkLen;
|
|
+
|
|
+ /* Read length */
|
|
+ blkLen = ReadInt(AH);
|
|
+ if (blkLen == 0)
|
|
+ return 0;
|
|
+
|
|
+ /* If the caller's buffer is not large enough, allocate a bigger one */
|
|
+ if (blkLen > *buflen)
|
|
+ {
|
|
+ free(*buf);
|
|
+ *buf = (char *) pg_malloc(blkLen);
|
|
+ *buflen = blkLen;
|
|
+ }
|
|
+
|
|
+ /* exits app on read errors */
|
|
+ _ReadBuf(AH, *buf, blkLen);
|
|
+
|
|
+ return blkLen;
|
|
+}
|
|
--- /dev/null
|
|
+++ pglogical-2.2.2/pglogical_dump/pg_backup_db.c
|
|
@@ -0,0 +1,632 @@
|
|
+/*-------------------------------------------------------------------------
|
|
+ *
|
|
+ * pg_backup_db.c
|
|
+ *
|
|
+ * Implements the basic DB functions used by the archiver.
|
|
+ *
|
|
+ * IDENTIFICATION
|
|
+ * src/bin/pg_dump/pg_backup_db.c
|
|
+ *
|
|
+ *-------------------------------------------------------------------------
|
|
+ */
|
|
+
|
|
+#include "pg_backup_db.h"
|
|
+#include "pg_backup_utils.h"
|
|
+#include "dumputils.h"
|
|
+#include "parallel.h"
|
|
+
|
|
+#include <unistd.h>
|
|
+#include <ctype.h>
|
|
+#ifdef HAVE_TERMIOS_H
|
|
+#include <termios.h>
|
|
+#endif
|
|
+
|
|
+
|
|
+#define DB_MAX_ERR_STMT 128
|
|
+
|
|
+/* translator: this is a module name */
|
|
+static const char *modulename = gettext_noop("archiver (db)");
|
|
+
|
|
+static void _check_database_version(ArchiveHandle *AH);
|
|
+static PGconn *_connectDB(ArchiveHandle *AH, const char *newdbname, const char *newUser);
|
|
+static void notice_processor(void *arg, const char *message);
|
|
+
|
|
+static void
|
|
+_check_database_version(ArchiveHandle *AH)
|
|
+{
|
|
+ const char *remoteversion_str;
|
|
+ int remoteversion;
|
|
+
|
|
+ remoteversion_str = PQparameterStatus(AH->connection, "server_version");
|
|
+ remoteversion = PQserverVersion(AH->connection);
|
|
+ if (remoteversion == 0 || !remoteversion_str)
|
|
+ exit_horribly(modulename, "could not get server_version from libpq\n");
|
|
+
|
|
+ AH->public.remoteVersionStr = pg_strdup(remoteversion_str);
|
|
+ AH->public.remoteVersion = remoteversion;
|
|
+ if (!AH->archiveRemoteVersion)
|
|
+ AH->archiveRemoteVersion = AH->public.remoteVersionStr;
|
|
+
|
|
+ if (remoteversion != PG_VERSION_NUM
|
|
+ && (remoteversion < AH->public.minRemoteVersion ||
|
|
+ remoteversion > AH->public.maxRemoteVersion))
|
|
+ {
|
|
+ write_msg(NULL, "server version: %s; %s version: %s\n",
|
|
+ remoteversion_str, progname, PG_VERSION);
|
|
+ exit_horribly(NULL, "aborting because of server version mismatch\n");
|
|
+ }
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Reconnect to the server. If dbname is not NULL, use that database,
|
|
+ * else the one associated with the archive handle. If username is
|
|
+ * not NULL, use that user name, else the one from the handle. If
|
|
+ * both the database and the user match the existing connection already,
|
|
+ * nothing will be done.
|
|
+ *
|
|
+ * Returns 1 in any case.
|
|
+ */
|
|
+int
|
|
+ReconnectToServer(ArchiveHandle *AH, const char *dbname, const char *username)
|
|
+{
|
|
+ PGconn *newConn;
|
|
+ const char *newdbname;
|
|
+ const char *newusername;
|
|
+
|
|
+ if (!dbname)
|
|
+ newdbname = PQdb(AH->connection);
|
|
+ else
|
|
+ newdbname = dbname;
|
|
+
|
|
+ if (!username)
|
|
+ newusername = PQuser(AH->connection);
|
|
+ else
|
|
+ newusername = username;
|
|
+
|
|
+ /* Let's see if the request is already satisfied */
|
|
+ if (strcmp(newdbname, PQdb(AH->connection)) == 0 &&
|
|
+ strcmp(newusername, PQuser(AH->connection)) == 0)
|
|
+ return 1;
|
|
+
|
|
+ newConn = _connectDB(AH, newdbname, newusername);
|
|
+
|
|
+ PQfinish(AH->connection);
|
|
+ AH->connection = newConn;
|
|
+
|
|
+ return 1;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Connect to the db again.
|
|
+ *
|
|
+ * Note: it's not really all that sensible to use a single-entry password
|
|
+ * cache if the username keeps changing. In current usage, however, the
|
|
+ * username never does change, so one savedPassword is sufficient. We do
|
|
+ * update the cache on the off chance that the password has changed since the
|
|
+ * start of the run.
|
|
+ */
|
|
+static PGconn *
|
|
+_connectDB(ArchiveHandle *AH, const char *reqdb, const char *requser)
|
|
+{
|
|
+ PGconn *newConn;
|
|
+ const char *newdb;
|
|
+ const char *newuser;
|
|
+ char *password;
|
|
+ bool new_pass;
|
|
+
|
|
+ if (!reqdb)
|
|
+ newdb = PQdb(AH->connection);
|
|
+ else
|
|
+ newdb = reqdb;
|
|
+
|
|
+ if (!requser || strlen(requser) == 0)
|
|
+ newuser = PQuser(AH->connection);
|
|
+ else
|
|
+ newuser = requser;
|
|
+
|
|
+ ahlog(AH, 1, "connecting to database \"%s\" as user \"%s\"\n",
|
|
+ newdb, newuser);
|
|
+
|
|
+ password = AH->savedPassword ? pg_strdup(AH->savedPassword) : NULL;
|
|
+
|
|
+ if (AH->promptPassword == TRI_YES && password == NULL)
|
|
+ {
|
|
+ password = simple_prompt("Password: ", 100, false);
|
|
+ if (password == NULL)
|
|
+ exit_horribly(modulename, "out of memory\n");
|
|
+ }
|
|
+
|
|
+ do
|
|
+ {
|
|
+ const char *keywords[7];
|
|
+ const char *values[7];
|
|
+
|
|
+ keywords[0] = "host";
|
|
+ values[0] = PQhost(AH->connection);
|
|
+ keywords[1] = "port";
|
|
+ values[1] = PQport(AH->connection);
|
|
+ keywords[2] = "user";
|
|
+ values[2] = newuser;
|
|
+ keywords[3] = "password";
|
|
+ values[3] = password;
|
|
+ keywords[4] = "dbname";
|
|
+ values[4] = newdb;
|
|
+ keywords[5] = "fallback_application_name";
|
|
+ values[5] = progname;
|
|
+ keywords[6] = NULL;
|
|
+ values[6] = NULL;
|
|
+
|
|
+ new_pass = false;
|
|
+ newConn = PQconnectdbParams(keywords, values, true);
|
|
+
|
|
+ if (!newConn)
|
|
+ exit_horribly(modulename, "failed to reconnect to database\n");
|
|
+
|
|
+ if (PQstatus(newConn) == CONNECTION_BAD)
|
|
+ {
|
|
+ if (!PQconnectionNeedsPassword(newConn))
|
|
+ exit_horribly(modulename, "could not reconnect to database: %s",
|
|
+ PQerrorMessage(newConn));
|
|
+ PQfinish(newConn);
|
|
+
|
|
+ if (password)
|
|
+ fprintf(stderr, "Password incorrect\n");
|
|
+
|
|
+ fprintf(stderr, "Connecting to %s as %s\n",
|
|
+ newdb, newuser);
|
|
+
|
|
+ if (password)
|
|
+ free(password);
|
|
+
|
|
+ if (AH->promptPassword != TRI_NO)
|
|
+ password = simple_prompt("Password: ", 100, false);
|
|
+ else
|
|
+ exit_horribly(modulename, "connection needs password\n");
|
|
+
|
|
+ if (password == NULL)
|
|
+ exit_horribly(modulename, "out of memory\n");
|
|
+ new_pass = true;
|
|
+ }
|
|
+ } while (new_pass);
|
|
+
|
|
+ /*
|
|
+ * We want to remember the connection's actual password, whether or not we got
|
|
+ * it by prompting. So we don't just store the password variable.
|
|
+ */
|
|
+ if (PQconnectionUsedPassword(newConn))
|
|
+ {
|
|
+ if (AH->savedPassword)
|
|
+ free(AH->savedPassword);
|
|
+ AH->savedPassword = pg_strdup(PQpass(newConn));
|
|
+ }
|
|
+ if (password)
|
|
+ free(password);
|
|
+
|
|
+ /* check for version mismatch */
|
|
+ _check_database_version(AH);
|
|
+
|
|
+ PQsetNoticeProcessor(newConn, notice_processor, NULL);
|
|
+
|
|
+ return newConn;
|
|
+}
|
|
+
|
|
+
|
|
+/*
|
|
+ * Make a database connection with the given parameters. The
|
|
+ * connection handle is returned, the parameters are stored in AHX.
|
|
+ * An interactive password prompt is automatically issued if required.
|
|
+ *
|
|
+ * Note: it's not really all that sensible to use a single-entry password
|
|
+ * cache if the username keeps changing. In current usage, however, the
|
|
+ * username never does change, so one savedPassword is sufficient.
|
|
+ */
|
|
+void
|
|
+ConnectDatabase(Archive *AHX,
|
|
+ const char *dbname,
|
|
+ const char *pghost,
|
|
+ const char *pgport,
|
|
+ const char *username,
|
|
+ enum trivalue prompt_password)
|
|
+{
|
|
+ ArchiveHandle *AH = (ArchiveHandle *) AHX;
|
|
+ char *password;
|
|
+ bool new_pass;
|
|
+
|
|
+ if (AH->connection)
|
|
+ exit_horribly(modulename, "already connected to a database\n");
|
|
+
|
|
+ password = AH->savedPassword ? pg_strdup(AH->savedPassword) : NULL;
|
|
+
|
|
+ if (prompt_password == TRI_YES && password == NULL)
|
|
+ {
|
|
+ password = simple_prompt("Password: ", 100, false);
|
|
+ if (password == NULL)
|
|
+ exit_horribly(modulename, "out of memory\n");
|
|
+ }
|
|
+ AH->promptPassword = prompt_password;
|
|
+
|
|
+ /*
|
|
+ * Start the connection. Loop until we have a password if requested by
|
|
+ * backend.
|
|
+ */
|
|
+ do
|
|
+ {
|
|
+ const char *keywords[7];
|
|
+ const char *values[7];
|
|
+
|
|
+ keywords[0] = "host";
|
|
+ values[0] = pghost;
|
|
+ keywords[1] = "port";
|
|
+ values[1] = pgport;
|
|
+ keywords[2] = "user";
|
|
+ values[2] = username;
|
|
+ keywords[3] = "password";
|
|
+ values[3] = password;
|
|
+ keywords[4] = "dbname";
|
|
+ values[4] = dbname;
|
|
+ keywords[5] = "fallback_application_name";
|
|
+ values[5] = progname;
|
|
+ keywords[6] = NULL;
|
|
+ values[6] = NULL;
|
|
+
|
|
+ new_pass = false;
|
|
+ AH->connection = PQconnectdbParams(keywords, values, true);
|
|
+
|
|
+ if (!AH->connection)
|
|
+ exit_horribly(modulename, "failed to connect to database\n");
|
|
+
|
|
+ if (PQstatus(AH->connection) == CONNECTION_BAD &&
|
|
+ PQconnectionNeedsPassword(AH->connection) &&
|
|
+ password == NULL &&
|
|
+ prompt_password != TRI_NO)
|
|
+ {
|
|
+ PQfinish(AH->connection);
|
|
+ password = simple_prompt("Password: ", 100, false);
|
|
+ if (password == NULL)
|
|
+ exit_horribly(modulename, "out of memory\n");
|
|
+ new_pass = true;
|
|
+ }
|
|
+ } while (new_pass);
|
|
+
|
|
+ /* check to see that the backend connection was successfully made */
|
|
+ if (PQstatus(AH->connection) == CONNECTION_BAD)
|
|
+ exit_horribly(modulename, "connection to database \"%s\" failed: %s",
|
|
+ PQdb(AH->connection) ? PQdb(AH->connection) : "",
|
|
+ PQerrorMessage(AH->connection));
|
|
+
|
|
+ /*
|
|
+ * We want to remember the connection's actual password, whether or not we got
|
|
+ * it by prompting. So we don't just store the password variable.
|
|
+ */
|
|
+ if (PQconnectionUsedPassword(AH->connection))
|
|
+ {
|
|
+ if (AH->savedPassword)
|
|
+ free(AH->savedPassword);
|
|
+ AH->savedPassword = pg_strdup(PQpass(AH->connection));
|
|
+ }
|
|
+ if (password)
|
|
+ free(password);
|
|
+
|
|
+ /* check for version mismatch */
|
|
+ _check_database_version(AH);
|
|
+
|
|
+ PQsetNoticeProcessor(AH->connection, notice_processor, NULL);
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Close the connection to the database and also cancel the query if we
|
|
+ * have one running.
|
|
+ */
|
|
+void
|
|
+DisconnectDatabase(Archive *AHX)
|
|
+{
|
|
+ ArchiveHandle *AH = (ArchiveHandle *) AHX;
|
|
+ PGcancel *cancel;
|
|
+ char errbuf[1];
|
|
+
|
|
+ if (!AH->connection)
|
|
+ return;
|
|
+
|
|
+ if (PQtransactionStatus(AH->connection) == PQTRANS_ACTIVE)
|
|
+ {
|
|
+ if ((cancel = PQgetCancel(AH->connection)))
|
|
+ {
|
|
+ PQcancel(cancel, errbuf, sizeof(errbuf));
|
|
+ PQfreeCancel(cancel);
|
|
+ }
|
|
+ }
|
|
+
|
|
+ PQfinish(AH->connection);
|
|
+ AH->connection = NULL;
|
|
+}
|
|
+
|
|
+PGconn *
|
|
+GetConnection(Archive *AHX)
|
|
+{
|
|
+ ArchiveHandle *AH = (ArchiveHandle *) AHX;
|
|
+
|
|
+ return AH->connection;
|
|
+}
|
|
+
|
|
+static void
|
|
+notice_processor(void *arg, const char *message)
|
|
+{
|
|
+ write_msg(NULL, "%s", message);
|
|
+}
|
|
+
|
|
+/* Like exit_horribly(), but with a complaint about a particular query. */
|
|
+static void
|
|
+die_on_query_failure(ArchiveHandle *AH, const char *modulename, const char *query)
|
|
+{
|
|
+ write_msg(modulename, "query failed: %s",
|
|
+ PQerrorMessage(AH->connection));
|
|
+ exit_horribly(modulename, "query was: %s\n", query);
|
|
+}
|
|
+
|
|
+void
|
|
+ExecuteSqlStatement(Archive *AHX, const char *query)
|
|
+{
|
|
+ ArchiveHandle *AH = (ArchiveHandle *) AHX;
|
|
+ PGresult *res;
|
|
+
|
|
+ res = PQexec(AH->connection, query);
|
|
+ if (PQresultStatus(res) != PGRES_COMMAND_OK)
|
|
+ die_on_query_failure(AH, modulename, query);
|
|
+ PQclear(res);
|
|
+}
|
|
+
|
|
+PGresult *
|
|
+ExecuteSqlQuery(Archive *AHX, const char *query, ExecStatusType status)
|
|
+{
|
|
+ ArchiveHandle *AH = (ArchiveHandle *) AHX;
|
|
+ PGresult *res;
|
|
+
|
|
+ res = PQexec(AH->connection, query);
|
|
+ if (PQresultStatus(res) != status)
|
|
+ die_on_query_failure(AH, modulename, query);
|
|
+ return res;
|
|
+}
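+
+/*
+ * The two helpers above differ only in how the result is handled:
+ * ExecuteSqlStatement() insists on PGRES_COMMAND_OK and discards the
+ * PGresult, while ExecuteSqlQuery() checks against a caller-supplied status
+ * and returns the result, leaving it to the caller to PQclear() it.
+ */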
|
|
+
|
|
+/*
|
|
+ * Convenience function to send a query.
|
|
+ * Monitors result to detect COPY statements
|
|
+ */
|
|
+static void
|
|
+ExecuteSqlCommand(ArchiveHandle *AH, const char *qry, const char *desc)
|
|
+{
|
|
+ PGconn *conn = AH->connection;
|
|
+ PGresult *res;
|
|
+ char errStmt[DB_MAX_ERR_STMT];
|
|
+
|
|
+#ifdef NOT_USED
|
|
+ fprintf(stderr, "Executing: '%s'\n\n", qry);
|
|
+#endif
|
|
+ res = PQexec(conn, qry);
|
|
+
|
|
+ switch (PQresultStatus(res))
|
|
+ {
|
|
+ case PGRES_COMMAND_OK:
|
|
+ case PGRES_TUPLES_OK:
|
|
+ case PGRES_EMPTY_QUERY:
|
|
+ /* A-OK */
|
|
+ break;
|
|
+ case PGRES_COPY_IN:
|
|
+ /* Assume this is an expected result */
|
|
+ AH->pgCopyIn = true;
|
|
+ break;
|
|
+ default:
|
|
+ /* trouble */
|
|
+ strncpy(errStmt, qry, DB_MAX_ERR_STMT);
|
|
+ if (errStmt[DB_MAX_ERR_STMT - 1] != '\0')
|
|
+ {
|
|
+ errStmt[DB_MAX_ERR_STMT - 4] = '.';
|
|
+ errStmt[DB_MAX_ERR_STMT - 3] = '.';
|
|
+ errStmt[DB_MAX_ERR_STMT - 2] = '.';
|
|
+ errStmt[DB_MAX_ERR_STMT - 1] = '\0';
|
|
+ }
|
|
+ warn_or_exit_horribly(AH, modulename, "%s: %s Command was: %s\n",
|
|
+ desc, PQerrorMessage(conn), errStmt);
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ PQclear(res);
|
|
+}
|
|
+
|
|
+
|
|
+/*
|
|
+ * Process non-COPY table data (that is, INSERT commands).
|
|
+ *
|
|
+ * The commands have been run together as one long string for compressibility,
|
|
+ * and we are receiving them in bufferloads with arbitrary boundaries, so we
|
|
+ * have to locate command boundaries and save partial commands across calls.
|
|
+ * All state must be kept in AH->sqlparse, not in local variables of this
|
|
+ * routine. We assume that AH->sqlparse was filled with zeroes when created.
|
|
+ *
|
|
+ * We have to lex the data to the extent of identifying literals and quoted
|
|
+ * identifiers, so that we can recognize statement-terminating semicolons.
|
|
+ * We assume that INSERT data will not contain SQL comments, E'' literals,
|
|
+ * or dollar-quoted strings, so this is much simpler than a full SQL lexer.
|
|
+ *
|
|
+ * Note: when restoring from a pre-9.0 dump file, this code is also used to
|
|
+ * process BLOB COMMENTS data, which has the same problem of containing
|
|
+ * multiple SQL commands that might be split across bufferloads. Fortunately,
|
|
+ * that data won't contain anything complicated to lex either.
|
|
+ */
|
|
+static void
|
|
+ExecuteSimpleCommands(ArchiveHandle *AH, const char *buf, size_t bufLen)
|
|
+{
|
|
+ const char *qry = buf;
|
|
+ const char *eos = buf + bufLen;
|
|
+
|
|
+ /* initialize command buffer if first time through */
|
|
+ if (AH->sqlparse.curCmd == NULL)
|
|
+ AH->sqlparse.curCmd = createPQExpBuffer();
|
|
+
|
|
+ for (; qry < eos; qry++)
|
|
+ {
|
|
+ char ch = *qry;
|
|
+
|
|
+ /* For neatness, we skip any newlines between commands */
|
|
+ if (!(ch == '\n' && AH->sqlparse.curCmd->len == 0))
|
|
+ appendPQExpBufferChar(AH->sqlparse.curCmd, ch);
|
|
+
|
|
+ switch (AH->sqlparse.state)
|
|
+ {
|
|
+ case SQL_SCAN: /* Default state == 0, set in _allocAH */
|
|
+ if (ch == ';')
|
|
+ {
|
|
+ /*
|
|
+ * We've found the end of a statement. Send it and reset
|
|
+ * the buffer.
|
|
+ */
|
|
+ ExecuteSqlCommand(AH, AH->sqlparse.curCmd->data,
|
|
+ "could not execute query");
|
|
+ resetPQExpBuffer(AH->sqlparse.curCmd);
|
|
+ }
|
|
+ else if (ch == '\'')
|
|
+ {
|
|
+ AH->sqlparse.state = SQL_IN_SINGLE_QUOTE;
|
|
+ AH->sqlparse.backSlash = false;
|
|
+ }
|
|
+ else if (ch == '"')
|
|
+ {
|
|
+ AH->sqlparse.state = SQL_IN_DOUBLE_QUOTE;
|
|
+ }
|
|
+ break;
|
|
+
|
|
+ case SQL_IN_SINGLE_QUOTE:
|
|
+ /* We needn't handle '' specially */
|
|
+ if (ch == '\'' && !AH->sqlparse.backSlash)
|
|
+ AH->sqlparse.state = SQL_SCAN;
|
|
+ else if (ch == '\\' && !AH->public.std_strings)
|
|
+ AH->sqlparse.backSlash = !AH->sqlparse.backSlash;
|
|
+ else
|
|
+ AH->sqlparse.backSlash = false;
|
|
+ break;
|
|
+
|
|
+ case SQL_IN_DOUBLE_QUOTE:
|
|
+ /* We needn't handle "" specially */
|
|
+ if (ch == '"')
|
|
+ AH->sqlparse.state = SQL_SCAN;
|
|
+ break;
|
|
+ }
|
|
+ }
|
|
+}
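/*
 * A minimal standalone sketch of the statement-splitting idea used by
 * ExecuteSimpleCommands() above: scan a buffer, track whether we are inside
 * a single- or double-quoted token, and treat a semicolon found outside
 * quotes as a statement boundary.  The input string and the "dispatch" via
 * puts() are illustrative only; the real code keeps its state in
 * AH->sqlparse so it survives across bufferloads, and also handles
 * backslash escapes when standard_conforming_strings is off.
 */
#include <stdio.h>
#include <string.h>

enum scan_state { SCAN, IN_SQUOTE, IN_DQUOTE };

static void
split_statements(const char *buf, size_t len)
{
	enum scan_state state = SCAN;
	char		cur[1024];
	size_t		n = 0;

	for (size_t i = 0; i < len; i++)
	{
		char		ch = buf[i];

		/* skip newlines between statements, as the real code does */
		if (!(ch == '\n' && n == 0) && n < sizeof(cur) - 1)
			cur[n++] = ch;

		switch (state)
		{
			case SCAN:
				if (ch == ';')
				{
					cur[n] = '\0';
					puts(cur);	/* here the real code executes the statement */
					n = 0;
				}
				else if (ch == '\'')
					state = IN_SQUOTE;
				else if (ch == '"')
					state = IN_DQUOTE;
				break;
			case IN_SQUOTE:
				if (ch == '\'')
					state = SCAN;
				break;
			case IN_DQUOTE:
				if (ch == '"')
					state = SCAN;
				break;
		}
	}
}

int
main(void)
{
	const char *data = "INSERT INTO t VALUES ('a;b');\nINSERT INTO t VALUES (2);\n";

	split_statements(data, strlen(data));
	return 0;
}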
+
|
|
+
|
|
+/*
|
|
+ * Implement ahwrite() for direct-to-DB restore
|
|
+ */
|
|
+int
|
|
+ExecuteSqlCommandBuf(ArchiveHandle *AH, const char *buf, size_t bufLen)
|
|
+{
|
|
+ if (AH->outputKind == OUTPUT_COPYDATA)
|
|
+ {
|
|
+ /*
|
|
+ * COPY data.
|
|
+ *
|
|
+ * We drop the data on the floor if libpq has failed to enter COPY
|
|
+ * mode; this allows us to behave reasonably when trying to continue
|
|
+ * after an error in a COPY command.
|
|
+ */
|
|
+ if (AH->pgCopyIn &&
|
|
+ PQputCopyData(AH->connection, buf, bufLen) <= 0)
|
|
+ exit_horribly(modulename, "error returned by PQputCopyData: %s",
|
|
+ PQerrorMessage(AH->connection));
|
|
+ }
|
|
+ else if (AH->outputKind == OUTPUT_OTHERDATA)
|
|
+ {
|
|
+ /*
|
|
+ * Table data expressed as INSERT commands; or, in old dump files,
|
|
+ * BLOB COMMENTS data (which is expressed as COMMENT ON commands).
|
|
+ */
|
|
+ ExecuteSimpleCommands(AH, buf, bufLen);
|
|
+ }
|
|
+ else
|
|
+ {
|
|
+ /*
|
|
+ * General SQL commands; we assume that commands will not be split
|
|
+ * across calls.
|
|
+ *
|
|
+ * In most cases the data passed to us will be a null-terminated
|
|
+ * string, but if it's not, we have to add a trailing null.
|
|
+ */
|
|
+ if (buf[bufLen] == '\0')
|
|
+ ExecuteSqlCommand(AH, buf, "could not execute query");
|
|
+ else
|
|
+ {
|
|
+ char *str = (char *) pg_malloc(bufLen + 1);
|
|
+
|
|
+ memcpy(str, buf, bufLen);
|
|
+ str[bufLen] = '\0';
|
|
+ ExecuteSqlCommand(AH, str, "could not execute query");
|
|
+ free(str);
|
|
+ }
|
|
+ }
|
|
+
|
|
+ return bufLen;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Terminate a COPY operation during direct-to-DB restore
|
|
+ */
|
|
+void
|
|
+EndDBCopyMode(ArchiveHandle *AH, TocEntry *te)
|
|
+{
|
|
+ if (AH->pgCopyIn)
|
|
+ {
|
|
+ PGresult *res;
|
|
+
|
|
+ if (PQputCopyEnd(AH->connection, NULL) <= 0)
|
|
+ exit_horribly(modulename, "error returned by PQputCopyEnd: %s",
|
|
+ PQerrorMessage(AH->connection));
|
|
+
|
|
+ /* Check command status and return to normal libpq state */
|
|
+ res = PQgetResult(AH->connection);
|
|
+ if (PQresultStatus(res) != PGRES_COMMAND_OK)
|
|
+ warn_or_exit_horribly(AH, modulename, "COPY failed for table \"%s\": %s",
|
|
+ te->tag, PQerrorMessage(AH->connection));
|
|
+ PQclear(res);
|
|
+
|
|
+ AH->pgCopyIn = false;
|
|
+ }
|
|
+}
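/*
 * Standalone sketch of the libpq COPY-in sequence that the routines above
 * cooperate on: ExecuteSqlCommand() notices PGRES_COPY_IN and sets
 * AH->pgCopyIn, ExecuteSqlCommandBuf() streams rows with PQputCopyData(),
 * and EndDBCopyMode() finishes with PQputCopyEnd() + PQgetResult().  The
 * connection string and table name below are placeholders.
 */
#include <stdio.h>
#include <string.h>
#include <libpq-fe.h>

int
main(void)
{
	PGconn	   *conn = PQconnectdb("dbname=postgres");	/* placeholder DSN */
	PGresult   *res;
	const char *rows = "1\tone\n2\ttwo\n";

	if (PQstatus(conn) != CONNECTION_OK)
	{
		fprintf(stderr, "connection failed: %s", PQerrorMessage(conn));
		return 1;
	}

	res = PQexec(conn, "COPY my_table (a, b) FROM stdin;");
	if (PQresultStatus(res) != PGRES_COPY_IN)	/* the "pgCopyIn" moment */
	{
		fprintf(stderr, "COPY did not start: %s", PQerrorMessage(conn));
		PQclear(res);
		return 1;
	}
	PQclear(res);

	/* stream data rows, then terminate the COPY */
	if (PQputCopyData(conn, rows, (int) strlen(rows)) <= 0 ||
		PQputCopyEnd(conn, NULL) <= 0)
	{
		fprintf(stderr, "COPY send failed: %s", PQerrorMessage(conn));
		return 1;
	}

	/* check the final command status, as EndDBCopyMode() does */
	res = PQgetResult(conn);
	if (PQresultStatus(res) != PGRES_COMMAND_OK)
		fprintf(stderr, "COPY failed: %s", PQerrorMessage(conn));
	PQclear(res);

	PQfinish(conn);
	return 0;
}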
+
|
|
+void
|
|
+StartTransaction(ArchiveHandle *AH)
|
|
+{
|
|
+ ExecuteSqlCommand(AH, "BEGIN", "could not start database transaction");
|
|
+}
|
|
+
|
|
+void
|
|
+CommitTransaction(ArchiveHandle *AH)
|
|
+{
|
|
+ ExecuteSqlCommand(AH, "COMMIT", "could not commit database transaction");
|
|
+}
|
|
+
|
|
+void
|
|
+DropBlobIfExists(ArchiveHandle *AH, Oid oid)
|
|
+{
|
|
+ /*
|
|
+ * If we are not restoring to a direct database connection, we have to
|
|
+ * guess about how to detect whether the blob exists. Assume new-style.
|
|
+ */
|
|
+ if (AH->connection == NULL ||
|
|
+ PQserverVersion(AH->connection) >= 90000)
|
|
+ {
|
|
+ ahprintf(AH,
|
|
+ "SELECT pg_catalog.lo_unlink(oid) "
|
|
+ "FROM pg_catalog.pg_largeobject_metadata "
|
|
+ "WHERE oid = '%u';\n",
|
|
+ oid);
|
|
+ }
|
|
+ else
|
|
+ {
|
|
+ /* Restoring to pre-9.0 server, so do it the old way */
|
|
+ ahprintf(AH,
|
|
+ "SELECT CASE WHEN EXISTS("
|
|
+ "SELECT 1 FROM pg_catalog.pg_largeobject WHERE loid = '%u'"
|
|
+ ") THEN pg_catalog.lo_unlink('%u') END;\n",
|
|
+ oid, oid);
|
|
+ }
|
|
+}
|
|
--- /dev/null
|
|
+++ pglogical-2.2.2/pglogical_dump/pg_backup_db.h
|
|
@@ -0,0 +1,24 @@
|
|
+/*
|
|
+ * Definitions for pg_backup_db.c
|
|
+ *
|
|
+ * IDENTIFICATION
|
|
+ * src/bin/pg_dump/pg_backup_db.h
|
|
+ */
|
|
+
|
|
+#ifndef PG_BACKUP_DB_H
|
|
+#define PG_BACKUP_DB_H
|
|
+
|
|
+#include "pg_backup_archiver.h"
|
|
+
|
|
+extern int ExecuteSqlCommandBuf(ArchiveHandle *AH, const char *buf, size_t bufLen);
|
|
+
|
|
+extern void ExecuteSqlStatement(Archive *AHX, const char *query);
|
|
+extern PGresult *ExecuteSqlQuery(Archive *AHX, const char *query,
|
|
+ ExecStatusType status);
|
|
+
|
|
+extern void EndDBCopyMode(ArchiveHandle *AH, struct _tocEntry * te);
|
|
+
|
|
+extern void StartTransaction(ArchiveHandle *AH);
|
|
+extern void CommitTransaction(ArchiveHandle *AH);
|
|
+
|
|
+#endif
|
|
--- /dev/null
|
|
+++ pglogical-2.2.2/pglogical_dump/pg_backup_directory.c
|
|
@@ -0,0 +1,877 @@
|
|
+/*-------------------------------------------------------------------------
|
|
+ *
|
|
+ * pg_backup_directory.c
|
|
+ *
|
|
+ * A directory format dump is a directory, which contains a "toc.dat" file
|
|
+ * for the TOC, and a separate file for each data entry, named "<oid>.dat".
|
|
+ * Large objects (BLOBs) are stored in separate files named "blob_<uid>.dat",
|
|
+ * and there's a plain-text TOC file for them called "blobs.toc". If
|
|
+ * compression is used, each data file is individually compressed and the
|
|
+ * ".gz" suffix is added to the filenames. The TOC files are never
|
|
+ * compressed by pg_dump, however they are accepted with the .gz suffix too,
|
|
+ * in case the user has manually compressed them with 'gzip'.
|
|
+ *
|
|
+ * NOTE: This format is identical to the files written in the tar file in
|
|
+ * the 'tar' format, except that we don't write the restore.sql file (TODO),
|
|
+ * and the tar format doesn't support compression. Please keep the formats in
|
|
+ * sync.
|
|
+ *
|
|
+ *
|
|
+ * Portions Copyright (c) 1996-2014, PostgreSQL Global Development Group
|
|
+ * Portions Copyright (c) 1994, Regents of the University of California
|
|
+ * Portions Copyright (c) 2000, Philip Warner
|
|
+ *
|
|
+ * Rights are granted to use this software in any way so long
|
|
+ * as this notice is not removed.
|
|
+ *
|
|
+ * The author is not responsible for loss or damages that may
+ * result from its use.
|
|
+ *
|
|
+ * IDENTIFICATION
|
|
+ * src/bin/pg_dump/pg_backup_directory.c
|
|
+ *
|
|
+ *-------------------------------------------------------------------------
|
|
+ */
|
|
+
|
|
+#include "compress_io.h"
|
|
+#include "pg_backup_utils.h"
|
|
+#include "parallel.h"
|
|
+
|
|
+#include <dirent.h>
|
|
+#include <sys/stat.h>
|
|
+
|
|
+typedef struct
|
|
+{
|
|
+ /*
|
|
+ * Our archive location. This is basically what the user specified as his
|
|
+ * backup file but of course here it is a directory.
|
|
+ */
|
|
+ char *directory;
|
|
+
|
|
+ cfp *dataFH; /* currently open data file */
|
|
+
|
|
+ cfp *blobsTocFH; /* file handle for blobs.toc */
|
|
+ ParallelState *pstate; /* for parallel backup / restore */
|
|
+} lclContext;
|
|
+
|
|
+typedef struct
|
|
+{
|
|
+ char *filename; /* filename excluding the directory (basename) */
|
|
+} lclTocEntry;
|
|
+
|
|
+/* translator: this is a module name */
|
|
+static const char *modulename = gettext_noop("directory archiver");
|
|
+
|
|
+/* prototypes for private functions */
|
|
+static void _ArchiveEntry(ArchiveHandle *AH, TocEntry *te);
|
|
+static void _StartData(ArchiveHandle *AH, TocEntry *te);
|
|
+static void _EndData(ArchiveHandle *AH, TocEntry *te);
|
|
+static void _WriteData(ArchiveHandle *AH, const void *data, size_t dLen);
|
|
+static int _WriteByte(ArchiveHandle *AH, const int i);
|
|
+static int _ReadByte(ArchiveHandle *);
|
|
+static void _WriteBuf(ArchiveHandle *AH, const void *buf, size_t len);
|
|
+static void _ReadBuf(ArchiveHandle *AH, void *buf, size_t len);
|
|
+static void _CloseArchive(ArchiveHandle *AH);
|
|
+static void _ReopenArchive(ArchiveHandle *AH);
|
|
+static void _PrintTocData(ArchiveHandle *AH, TocEntry *te, RestoreOptions *ropt);
|
|
+
|
|
+static void _WriteExtraToc(ArchiveHandle *AH, TocEntry *te);
|
|
+static void _ReadExtraToc(ArchiveHandle *AH, TocEntry *te);
|
|
+static void _PrintExtraToc(ArchiveHandle *AH, TocEntry *te);
|
|
+
|
|
+static void _StartBlobs(ArchiveHandle *AH, TocEntry *te);
|
|
+static void _StartBlob(ArchiveHandle *AH, TocEntry *te, Oid oid);
|
|
+static void _EndBlob(ArchiveHandle *AH, TocEntry *te, Oid oid);
|
|
+static void _EndBlobs(ArchiveHandle *AH, TocEntry *te);
|
|
+static void _LoadBlobs(ArchiveHandle *AH, RestoreOptions *ropt);
|
|
+
|
|
+static void _Clone(ArchiveHandle *AH);
|
|
+static void _DeClone(ArchiveHandle *AH);
|
|
+
|
|
+static char *_MasterStartParallelItem(ArchiveHandle *AH, TocEntry *te, T_Action act);
|
|
+static int _MasterEndParallelItem(ArchiveHandle *AH, TocEntry *te,
|
|
+ const char *str, T_Action act);
|
|
+static char *_WorkerJobRestoreDirectory(ArchiveHandle *AH, TocEntry *te);
|
|
+static char *_WorkerJobDumpDirectory(ArchiveHandle *AH, TocEntry *te);
|
|
+
|
|
+static void setFilePath(ArchiveHandle *AH, char *buf,
|
|
+ const char *relativeFilename);
|
|
+
|
|
+/*
|
|
+ * Init routine required by ALL formats. This is a global routine
|
|
+ * and should be declared in pg_backup_archiver.h
|
|
+ *
|
|
+ * Its task is to create any extra archive context (using AH->formatData),
|
|
+ * and to initialize the supported function pointers.
|
|
+ *
|
|
+ * It should also prepare whatever its input source is for reading/writing,
|
|
+ * and in the case of a read mode connection, it should load the Header & TOC.
|
|
+ */
|
|
+void
|
|
+InitArchiveFmt_Directory(ArchiveHandle *AH)
|
|
+{
|
|
+ lclContext *ctx;
|
|
+
|
|
+ /* Assuming static functions, this can be copied for each format. */
|
|
+ AH->ArchiveEntryPtr = _ArchiveEntry;
|
|
+ AH->StartDataPtr = _StartData;
|
|
+ AH->WriteDataPtr = _WriteData;
|
|
+ AH->EndDataPtr = _EndData;
|
|
+ AH->WriteBytePtr = _WriteByte;
|
|
+ AH->ReadBytePtr = _ReadByte;
|
|
+ AH->WriteBufPtr = _WriteBuf;
|
|
+ AH->ReadBufPtr = _ReadBuf;
|
|
+ AH->ClosePtr = _CloseArchive;
|
|
+ AH->ReopenPtr = _ReopenArchive;
|
|
+ AH->PrintTocDataPtr = _PrintTocData;
|
|
+ AH->ReadExtraTocPtr = _ReadExtraToc;
|
|
+ AH->WriteExtraTocPtr = _WriteExtraToc;
|
|
+ AH->PrintExtraTocPtr = _PrintExtraToc;
|
|
+
|
|
+ AH->StartBlobsPtr = _StartBlobs;
|
|
+ AH->StartBlobPtr = _StartBlob;
|
|
+ AH->EndBlobPtr = _EndBlob;
|
|
+ AH->EndBlobsPtr = _EndBlobs;
|
|
+
|
|
+ AH->ClonePtr = _Clone;
|
|
+ AH->DeClonePtr = _DeClone;
|
|
+
|
|
+ AH->WorkerJobRestorePtr = _WorkerJobRestoreDirectory;
|
|
+ AH->WorkerJobDumpPtr = _WorkerJobDumpDirectory;
|
|
+
|
|
+ AH->MasterStartParallelItemPtr = _MasterStartParallelItem;
|
|
+ AH->MasterEndParallelItemPtr = _MasterEndParallelItem;
|
|
+
|
|
+ /* Set up our private context */
|
|
+ ctx = (lclContext *) pg_malloc0(sizeof(lclContext));
|
|
+ AH->formatData = (void *) ctx;
|
|
+
|
|
+ ctx->dataFH = NULL;
|
|
+ ctx->blobsTocFH = NULL;
|
|
+
|
|
+ /* Initialize LO buffering */
|
|
+ AH->lo_buf_size = LOBBUFSIZE;
|
|
+ AH->lo_buf = (void *) pg_malloc(LOBBUFSIZE);
|
|
+
|
|
+ /*
|
|
+ * Now open the TOC file
|
|
+ */
|
|
+
|
|
+ if (!AH->fSpec || strcmp(AH->fSpec, "") == 0)
|
|
+ exit_horribly(modulename, "no output directory specified\n");
|
|
+
|
|
+ ctx->directory = AH->fSpec;
|
|
+
|
|
+ if (AH->mode == archModeWrite)
|
|
+ {
|
|
+ struct stat st;
|
|
+ bool is_empty = false;
|
|
+
|
|
+ /* we accept an empty existing directory */
|
|
+ if (stat(ctx->directory, &st) == 0 && S_ISDIR(st.st_mode))
|
|
+ {
|
|
+ DIR *dir = opendir(ctx->directory);
|
|
+
|
|
+ if (dir)
|
|
+ {
|
|
+ struct dirent *d;
|
|
+
|
|
+ is_empty = true;
|
|
+ while (errno = 0, (d = readdir(dir)))
|
|
+ {
|
|
+ if (strcmp(d->d_name, ".") != 0 && strcmp(d->d_name, "..") != 0)
|
|
+ {
|
|
+ is_empty = false;
|
|
+ break;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ if (errno)
|
|
+ exit_horribly(modulename, "could not read directory \"%s\": %s\n",
|
|
+ ctx->directory, strerror(errno));
|
|
+
|
|
+ if (closedir(dir))
|
|
+ exit_horribly(modulename, "could not close directory \"%s\": %s\n",
|
|
+ ctx->directory, strerror(errno));
|
|
+ }
|
|
+ }
|
|
+
|
|
+ if (!is_empty && mkdir(ctx->directory, 0700) < 0)
|
|
+ exit_horribly(modulename, "could not create directory \"%s\": %s\n",
|
|
+ ctx->directory, strerror(errno));
|
|
+ }
|
|
+ else
|
|
+ { /* Read Mode */
|
|
+ char fname[MAXPGPATH];
|
|
+ cfp *tocFH;
|
|
+
|
|
+ setFilePath(AH, fname, "toc.dat");
|
|
+
|
|
+ tocFH = cfopen_read(fname, PG_BINARY_R);
|
|
+ if (tocFH == NULL)
|
|
+ exit_horribly(modulename,
|
|
+ "could not open input file \"%s\": %s\n",
|
|
+ fname, strerror(errno));
|
|
+
|
|
+ ctx->dataFH = tocFH;
|
|
+
|
|
+ /*
|
|
+ * The TOC of a directory format dump shares the format code of the
|
|
+ * tar format.
|
|
+ */
|
|
+ AH->format = archTar;
|
|
+ ReadHead(AH);
|
|
+ AH->format = archDirectory;
|
|
+ ReadToc(AH);
|
|
+
|
|
+ /* Nothing else in the file, so close it again... */
|
|
+ if (cfclose(tocFH) != 0)
|
|
+ exit_horribly(modulename, "could not close TOC file: %s\n",
|
|
+ strerror(errno));
|
|
+ ctx->dataFH = NULL;
|
|
+ }
|
|
+}
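/*
 * Standalone sketch of the "is this directory empty?" test used above when
 * the output directory already exists: reset errno before each readdir()
 * call so that a NULL return can be told apart from a read error.  The
 * directory name in main() is only a placeholder.
 */
#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <dirent.h>

static int
dir_is_empty(const char *path)
{
	DIR		   *dir = opendir(path);
	struct dirent *d;
	int			empty = 1;

	if (dir == NULL)
		return -1;				/* cannot open */

	while (errno = 0, (d = readdir(dir)) != NULL)
	{
		if (strcmp(d->d_name, ".") != 0 && strcmp(d->d_name, "..") != 0)
		{
			empty = 0;			/* found a real entry */
			break;
		}
	}
	if (errno != 0)
		empty = -1;				/* readdir() failed part-way through */

	closedir(dir);
	return empty;
}

int
main(void)
{
	printf("empty: %d\n", dir_is_empty("/tmp"));	/* placeholder path */
	return 0;
}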
+
|
|
+/*
|
|
+ * Called by the Archiver when the dumper creates a new TOC entry.
|
|
+ *
|
|
+ * We determine the filename for this entry.
|
|
+*/
|
|
+static void
|
|
+_ArchiveEntry(ArchiveHandle *AH, TocEntry *te)
|
|
+{
|
|
+ lclTocEntry *tctx;
|
|
+ char fn[MAXPGPATH];
|
|
+
|
|
+ tctx = (lclTocEntry *) pg_malloc0(sizeof(lclTocEntry));
|
|
+ if (te->dataDumper)
|
|
+ {
|
|
+ snprintf(fn, MAXPGPATH, "%d.dat", te->dumpId);
|
|
+ tctx->filename = pg_strdup(fn);
|
|
+ }
|
|
+ else if (strcmp(te->desc, "BLOBS") == 0)
|
|
+ tctx->filename = pg_strdup("blobs.toc");
|
|
+ else
|
|
+ tctx->filename = NULL;
|
|
+
|
|
+ te->formatData = (void *) tctx;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Called by the Archiver to save any extra format-related TOC entry
|
|
+ * data.
|
|
+ *
|
|
+ * Use the Archiver routines to write data - they are non-endian, and
|
|
+ * maintain other important file information.
|
|
+ */
|
|
+static void
|
|
+_WriteExtraToc(ArchiveHandle *AH, TocEntry *te)
|
|
+{
|
|
+ lclTocEntry *tctx = (lclTocEntry *) te->formatData;
|
|
+
|
|
+ /*
|
|
+ * A dumpable object has set tctx->filename, any other object has not.
|
|
+ * (see _ArchiveEntry).
|
|
+ */
|
|
+ if (tctx->filename)
|
|
+ WriteStr(AH, tctx->filename);
|
|
+ else
|
|
+ WriteStr(AH, "");
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Called by the Archiver to read any extra format-related TOC data.
|
|
+ *
|
|
+ * Needs to match the order defined in _WriteExtraToc, and should also
|
|
+ * use the Archiver input routines.
|
|
+ */
|
|
+static void
|
|
+_ReadExtraToc(ArchiveHandle *AH, TocEntry *te)
|
|
+{
|
|
+ lclTocEntry *tctx = (lclTocEntry *) te->formatData;
|
|
+
|
|
+ if (tctx == NULL)
|
|
+ {
|
|
+ tctx = (lclTocEntry *) pg_malloc0(sizeof(lclTocEntry));
|
|
+ te->formatData = (void *) tctx;
|
|
+ }
|
|
+
|
|
+ tctx->filename = ReadStr(AH);
|
|
+ if (strlen(tctx->filename) == 0)
|
|
+ {
|
|
+ free(tctx->filename);
|
|
+ tctx->filename = NULL;
|
|
+ }
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Called by the Archiver when restoring an archive to output a comment
|
|
+ * that includes useful information about the TOC entry.
|
|
+ */
|
|
+static void
|
|
+_PrintExtraToc(ArchiveHandle *AH, TocEntry *te)
|
|
+{
|
|
+ lclTocEntry *tctx = (lclTocEntry *) te->formatData;
|
|
+
|
|
+ if (AH->public.verbose && tctx->filename)
|
|
+ ahprintf(AH, "-- File: %s\n", tctx->filename);
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Called by the archiver when saving TABLE DATA (not schema). This routine
|
|
+ * should save whatever format-specific information is needed to read
|
|
+ * the archive back.
|
|
+ *
|
|
+ * It is called just prior to the dumper's 'DataDumper' routine being called.
|
|
+ *
|
|
+ * We create the data file for writing.
|
|
+ */
|
|
+static void
|
|
+_StartData(ArchiveHandle *AH, TocEntry *te)
|
|
+{
|
|
+ lclTocEntry *tctx = (lclTocEntry *) te->formatData;
|
|
+ lclContext *ctx = (lclContext *) AH->formatData;
|
|
+ char fname[MAXPGPATH];
|
|
+
|
|
+ setFilePath(AH, fname, tctx->filename);
|
|
+
|
|
+ ctx->dataFH = cfopen_write(fname, PG_BINARY_W, AH->compression);
|
|
+ if (ctx->dataFH == NULL)
|
|
+ exit_horribly(modulename, "could not open output file \"%s\": %s\n",
|
|
+ fname, strerror(errno));
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Called by archiver when dumper calls WriteData. This routine is
|
|
+ * called for both BLOB and TABLE data; it is the responsibility of
|
|
+ * the format to manage each kind of data using StartBlob/StartData.
|
|
+ *
|
|
+ * It should only be called from within a DataDumper routine.
|
|
+ *
|
|
+ * We write the data to the open data file.
|
|
+ */
|
|
+static void
|
|
+_WriteData(ArchiveHandle *AH, const void *data, size_t dLen)
|
|
+{
|
|
+ lclContext *ctx = (lclContext *) AH->formatData;
|
|
+
|
|
+ /* Are we aborting? */
|
|
+ checkAborting(AH);
|
|
+
|
|
+ if (dLen > 0 && cfwrite(data, dLen, ctx->dataFH) != dLen)
|
|
+ WRITE_ERROR_EXIT;
|
|
+
|
|
+ return;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Called by the archiver when a dumper's 'DataDumper' routine has
|
|
+ * finished.
|
|
+ *
|
|
+ * We close the data file.
|
|
+ */
|
|
+static void
|
|
+_EndData(ArchiveHandle *AH, TocEntry *te)
|
|
+{
|
|
+ lclContext *ctx = (lclContext *) AH->formatData;
|
|
+
|
|
+ /* Close the file */
|
|
+ cfclose(ctx->dataFH);
|
|
+
|
|
+ ctx->dataFH = NULL;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Print data for a given file (can be a BLOB as well)
|
|
+ */
|
|
+static void
|
|
+_PrintFileData(ArchiveHandle *AH, char *filename, RestoreOptions *ropt)
|
|
+{
|
|
+ size_t cnt;
|
|
+ char *buf;
|
|
+ size_t buflen;
|
|
+ cfp *cfp;
|
|
+
|
|
+ if (!filename)
|
|
+ return;
|
|
+
|
|
+ cfp = cfopen_read(filename, PG_BINARY_R);
|
|
+
|
|
+ if (!cfp)
|
|
+ exit_horribly(modulename, "could not open input file \"%s\": %s\n",
|
|
+ filename, strerror(errno));
|
|
+
|
|
+ buf = pg_malloc(ZLIB_OUT_SIZE);
|
|
+ buflen = ZLIB_OUT_SIZE;
|
|
+
|
|
+ while ((cnt = cfread(buf, buflen, cfp)))
|
|
+ ahwrite(buf, 1, cnt, AH);
|
|
+
|
|
+ free(buf);
|
|
+ if (cfclose(cfp) !=0)
|
|
+ exit_horribly(modulename, "could not close data file: %s\n",
|
|
+ strerror(errno));
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Print data for a given TOC entry
|
|
+*/
|
|
+static void
|
|
+_PrintTocData(ArchiveHandle *AH, TocEntry *te, RestoreOptions *ropt)
|
|
+{
|
|
+ lclTocEntry *tctx = (lclTocEntry *) te->formatData;
|
|
+
|
|
+ if (!tctx->filename)
|
|
+ return;
|
|
+
|
|
+ if (strcmp(te->desc, "BLOBS") == 0)
|
|
+ _LoadBlobs(AH, ropt);
|
|
+ else
|
|
+ {
|
|
+ char fname[MAXPGPATH];
|
|
+
|
|
+ setFilePath(AH, fname, tctx->filename);
|
|
+ _PrintFileData(AH, fname, ropt);
|
|
+ }
|
|
+}
|
|
+
|
|
+static void
|
|
+_LoadBlobs(ArchiveHandle *AH, RestoreOptions *ropt)
|
|
+{
|
|
+ Oid oid;
|
|
+ lclContext *ctx = (lclContext *) AH->formatData;
|
|
+ char fname[MAXPGPATH];
|
|
+ char line[MAXPGPATH];
|
|
+
|
|
+ StartRestoreBlobs(AH);
|
|
+
|
|
+ setFilePath(AH, fname, "blobs.toc");
|
|
+
|
|
+ ctx->blobsTocFH = cfopen_read(fname, PG_BINARY_R);
|
|
+
|
|
+ if (ctx->blobsTocFH == NULL)
|
|
+ exit_horribly(modulename, "could not open large object TOC file \"%s\" for input: %s\n",
|
|
+ fname, strerror(errno));
|
|
+
|
|
+ /* Read the blobs TOC file line-by-line, and process each blob */
|
|
+ while ((cfgets(ctx->blobsTocFH, line, MAXPGPATH)) != NULL)
|
|
+ {
|
|
+ char fname[MAXPGPATH];
|
|
+ char path[MAXPGPATH];
|
|
+
|
|
+ /* Can't overflow because line and fname are the same length. */
|
|
+ if (sscanf(line, "%u %s\n", &oid, fname) != 2)
|
|
+ exit_horribly(modulename, "invalid line in large object TOC file \"%s\": \"%s\"\n",
|
|
+ fname, line);
|
|
+
|
|
+ StartRestoreBlob(AH, oid, ropt->dropSchema);
|
|
+ snprintf(path, MAXPGPATH, "%s/%s", ctx->directory, fname);
|
|
+ _PrintFileData(AH, path, ropt);
|
|
+ EndRestoreBlob(AH, oid);
|
|
+ }
|
|
+ if (!cfeof(ctx->blobsTocFH))
|
|
+ exit_horribly(modulename, "error reading large object TOC file \"%s\"\n",
|
|
+ fname);
|
|
+
|
|
+ if (cfclose(ctx->blobsTocFH) != 0)
|
|
+ exit_horribly(modulename, "could not close large object TOC file \"%s\": %s\n",
|
|
+ fname, strerror(errno));
|
|
+
|
|
+ ctx->blobsTocFH = NULL;
|
|
+
|
|
+ EndRestoreBlobs(AH);
|
|
+}
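/*
 * Sketch of the one-line-per-blob format shared by _EndBlob() (which appends
 * "<oid> blob_<oid>.dat" lines to blobs.toc) and _LoadBlobs() above (which
 * parses them back with sscanf).  The OID value is an example only, and the
 * bounded %63s here is just a sketch-level safety margin.
 */
#include <stdio.h>

int
main(void)
{
	char		line[64];
	unsigned int oid_out = 16391;
	unsigned int oid_in;
	char		fname[64];

	/* writer side: one entry per large object */
	snprintf(line, sizeof(line), "%u blob_%u.dat\n", oid_out, oid_out);

	/* reader side: recover the OID and the data file name */
	if (sscanf(line, "%u %63s", &oid_in, fname) == 2)
		printf("restore OID %u from %s\n", oid_in, fname);
	return 0;
}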
+
|
|
+
|
|
+/*
|
|
+ * Write a byte of data to the archive.
|
|
+ * Called by the archiver to do integer & byte output to the archive.
|
|
+ * These routines are only used to read & write the headers & TOC.
|
|
+ */
|
|
+static int
|
|
+_WriteByte(ArchiveHandle *AH, const int i)
|
|
+{
|
|
+ unsigned char c = (unsigned char) i;
|
|
+ lclContext *ctx = (lclContext *) AH->formatData;
|
|
+
|
|
+ if (cfwrite(&c, 1, ctx->dataFH) != 1)
|
|
+ WRITE_ERROR_EXIT;
|
|
+
|
|
+ return 1;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Read a byte of data from the archive.
|
|
+ * Called by the archiver to read bytes & integers from the archive.
|
|
+ * These routines are only used to read & write headers & TOC.
|
|
+ * EOF should be treated as a fatal error.
|
|
+ */
|
|
+static int
|
|
+_ReadByte(ArchiveHandle *AH)
|
|
+{
|
|
+ lclContext *ctx = (lclContext *) AH->formatData;
|
|
+
|
|
+ return cfgetc(ctx->dataFH);
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Write a buffer of data to the archive.
|
|
+ * Called by the archiver to write a block of bytes to the TOC or a data file.
|
|
+ */
|
|
+static void
|
|
+_WriteBuf(ArchiveHandle *AH, const void *buf, size_t len)
|
|
+{
|
|
+ lclContext *ctx = (lclContext *) AH->formatData;
|
|
+
|
|
+ /* Are we aborting? */
|
|
+ checkAborting(AH);
|
|
+
|
|
+ if (cfwrite(buf, len, ctx->dataFH) != len)
|
|
+ WRITE_ERROR_EXIT;
|
|
+
|
|
+ return;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Read a block of bytes from the archive.
|
|
+ *
|
|
+ * Called by the archiver to read a block of bytes from the archive
|
|
+ */
|
|
+static void
|
|
+_ReadBuf(ArchiveHandle *AH, void *buf, size_t len)
|
|
+{
|
|
+ lclContext *ctx = (lclContext *) AH->formatData;
|
|
+
|
|
+ /*
|
|
+ * If there was an I/O error, we already exited in cfread(), so here we
|
|
+ * exit on short reads.
|
|
+ */
|
|
+ if (cfread(buf, len, ctx->dataFH) != len)
|
|
+ exit_horribly(modulename,
|
|
+ "could not read from input file: end of file\n");
|
|
+
|
|
+ return;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Close the archive.
|
|
+ *
|
|
+ * When writing the archive, this is the routine that actually starts
|
|
+ * the process of saving it to files. No data should be written prior
|
|
+ * to this point, since the user could sort the TOC after creating it.
|
|
+ *
|
|
+ * If an archive is to be written, this routine must call:
|
|
+ * WriteHead to save the archive header
|
|
+ * WriteToc to save the TOC entries
|
|
+ * WriteDataChunks to save all DATA & BLOBs.
|
|
+ */
|
|
+static void
|
|
+_CloseArchive(ArchiveHandle *AH)
|
|
+{
|
|
+ lclContext *ctx = (lclContext *) AH->formatData;
|
|
+
|
|
+ if (AH->mode == archModeWrite)
|
|
+ {
|
|
+ cfp *tocFH;
|
|
+ char fname[MAXPGPATH];
|
|
+
|
|
+ setFilePath(AH, fname, "toc.dat");
|
|
+
|
|
+ /* this will actually fork the processes for a parallel backup */
|
|
+ ctx->pstate = ParallelBackupStart(AH, NULL);
|
|
+
|
|
+ /* The TOC is always created uncompressed */
|
|
+ tocFH = cfopen_write(fname, PG_BINARY_W, 0);
|
|
+ if (tocFH == NULL)
|
|
+ exit_horribly(modulename, "could not open output file \"%s\": %s\n",
|
|
+ fname, strerror(errno));
|
|
+ ctx->dataFH = tocFH;
|
|
+
|
|
+ /*
|
|
+ * Write 'tar' in the format field of the toc.dat file. The directory
|
|
+ * is compatible with 'tar', so there's no point having a different
|
|
+ * format code for it.
|
|
+ */
|
|
+ AH->format = archTar;
|
|
+ WriteHead(AH);
|
|
+ AH->format = archDirectory;
|
|
+ WriteToc(AH);
|
|
+ if (cfclose(tocFH) != 0)
|
|
+ exit_horribly(modulename, "could not close TOC file: %s\n",
|
|
+ strerror(errno));
|
|
+ WriteDataChunks(AH, ctx->pstate);
|
|
+
|
|
+ ParallelBackupEnd(AH, ctx->pstate);
|
|
+ }
|
|
+ AH->FH = NULL;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Reopen the archive's file handle.
|
|
+ */
|
|
+static void
|
|
+_ReopenArchive(ArchiveHandle *AH)
|
|
+{
|
|
+ /*
|
|
+ * Our TOC is in memory, our data files are opened by each child anyway as
|
|
+ * they are separate. We support reopening the archive by just doing
|
|
+ * nothing.
|
|
+ */
|
|
+}
|
|
+
|
|
+/*
|
|
+ * BLOB support
|
|
+ */
|
|
+
|
|
+/*
|
|
+ * Called by the archiver when starting to save all BLOB DATA (not schema).
|
|
+ * It is called just prior to the dumper's DataDumper routine.
|
|
+ *
|
|
+ * We open the large object TOC file here, so that we can append a line to
|
|
+ * it for each blob.
|
|
+ */
|
|
+static void
|
|
+_StartBlobs(ArchiveHandle *AH, TocEntry *te)
|
|
+{
|
|
+ lclContext *ctx = (lclContext *) AH->formatData;
|
|
+ char fname[MAXPGPATH];
|
|
+
|
|
+ setFilePath(AH, fname, "blobs.toc");
|
|
+
|
|
+ /* The blob TOC file is never compressed */
|
|
+ ctx->blobsTocFH = cfopen_write(fname, "ab", 0);
|
|
+ if (ctx->blobsTocFH == NULL)
|
|
+ exit_horribly(modulename, "could not open output file \"%s\": %s\n",
|
|
+ fname, strerror(errno));
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Called by the archiver when we're about to start dumping a blob.
|
|
+ *
|
|
+ * We create a file to write the blob to.
|
|
+ */
|
|
+static void
|
|
+_StartBlob(ArchiveHandle *AH, TocEntry *te, Oid oid)
|
|
+{
|
|
+ lclContext *ctx = (lclContext *) AH->formatData;
|
|
+ char fname[MAXPGPATH];
|
|
+
|
|
+ snprintf(fname, MAXPGPATH, "%s/blob_%u.dat", ctx->directory, oid);
|
|
+
|
|
+ ctx->dataFH = cfopen_write(fname, PG_BINARY_W, AH->compression);
|
|
+
|
|
+ if (ctx->dataFH == NULL)
|
|
+ exit_horribly(modulename, "could not open output file \"%s\": %s\n",
|
|
+ fname, strerror(errno));
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Called by the archiver when the dumper is finished writing a blob.
|
|
+ *
|
|
+ * We close the blob file and write an entry to the blob TOC file for it.
|
|
+ */
|
|
+static void
|
|
+_EndBlob(ArchiveHandle *AH, TocEntry *te, Oid oid)
|
|
+{
|
|
+ lclContext *ctx = (lclContext *) AH->formatData;
|
|
+ char buf[50];
|
|
+ int len;
|
|
+
|
|
+ /* Close the BLOB data file itself */
|
|
+ cfclose(ctx->dataFH);
|
|
+ ctx->dataFH = NULL;
|
|
+
|
|
+ /* register the blob in blobs.toc */
|
|
+ len = snprintf(buf, sizeof(buf), "%u blob_%u.dat\n", oid, oid);
|
|
+ if (cfwrite(buf, len, ctx->blobsTocFH) != len)
|
|
+ exit_horribly(modulename, "could not write to blobs TOC file\n");
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Called by the archiver when finishing saving all BLOB DATA.
|
|
+ *
|
|
+ * We close the blobs TOC file.
|
|
+ */
|
|
+static void
|
|
+_EndBlobs(ArchiveHandle *AH, TocEntry *te)
|
|
+{
|
|
+ lclContext *ctx = (lclContext *) AH->formatData;
|
|
+
|
|
+ cfclose(ctx->blobsTocFH);
|
|
+ ctx->blobsTocFH = NULL;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Gets a relative file name and prepends the output directory, writing the
|
|
+ * result to buf. The caller needs to make sure that buf is MAXPGPATH bytes
|
|
+ * big. Can't use a static char[MAXPGPATH] inside the function because we run
|
|
+ * multithreaded on Windows.
|
|
+ */
|
|
+static void
|
|
+setFilePath(ArchiveHandle *AH, char *buf, const char *relativeFilename)
|
|
+{
|
|
+ lclContext *ctx = (lclContext *) AH->formatData;
|
|
+ char *dname;
|
|
+
|
|
+ dname = ctx->directory;
|
|
+
|
|
+ if (strlen(dname) + 1 + strlen(relativeFilename) + 1 > MAXPGPATH)
|
|
+ exit_horribly(modulename, "file name too long: \"%s\"\n", dname);
|
|
+
|
|
+ strcpy(buf, dname);
|
|
+ strcat(buf, "/");
|
|
+ strcat(buf, relativeFilename);
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Clone format-specific fields during parallel restoration.
|
|
+ */
|
|
+static void
|
|
+_Clone(ArchiveHandle *AH)
|
|
+{
|
|
+ lclContext *ctx = (lclContext *) AH->formatData;
|
|
+
|
|
+ AH->formatData = (lclContext *) pg_malloc(sizeof(lclContext));
|
|
+ memcpy(AH->formatData, ctx, sizeof(lclContext));
|
|
+ ctx = (lclContext *) AH->formatData;
|
|
+
|
|
+ /*
|
|
+ * Note: we do not make a local lo_buf because we expect at most one BLOBS
|
|
+ * entry per archive, so no parallelism is possible. Likewise,
|
|
+ * TOC-entry-local state isn't an issue because any one TOC entry is
|
|
+ * touched by just one worker child.
|
|
+ */
|
|
+
|
|
+ /*
|
|
+ * We also don't copy the ParallelState pointer (pstate), only the master
|
|
+ * process ever writes to it.
|
|
+ */
|
|
+}
|
|
+
|
|
+static void
|
|
+_DeClone(ArchiveHandle *AH)
|
|
+{
|
|
+ lclContext *ctx = (lclContext *) AH->formatData;
|
|
+
|
|
+ free(ctx);
|
|
+}
|
|
+
|
|
+/*
|
|
+ * This function is executed in the parent process. Depending on the desired
|
|
+ * action (dump or restore) it creates a string that is understood by the
|
|
+ * _WorkerJobDump /_WorkerJobRestore functions of the dump format.
|
|
+ */
|
|
+static char *
|
|
+_MasterStartParallelItem(ArchiveHandle *AH, TocEntry *te, T_Action act)
|
|
+{
|
|
+ /*
|
|
+ * A static char is okay here, even on Windows because we call this
|
|
+ * function only from one process (the master).
|
|
+ */
|
|
+ static char buf[64];
|
|
+
|
|
+ if (act == ACT_DUMP)
|
|
+ snprintf(buf, sizeof(buf), "DUMP %d", te->dumpId);
|
|
+ else if (act == ACT_RESTORE)
|
|
+ snprintf(buf, sizeof(buf), "RESTORE %d", te->dumpId);
|
|
+
|
|
+ return buf;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * This function is executed in the child of a parallel backup for the
|
|
+ * directory archive and dumps the actual data.
|
|
+ *
|
|
+ * We are currently returning only the DumpId so theoretically we could
+ * make this function return an int (or a DumpId). However, to
|
|
+ * facilitate further enhancements and because sooner or later we need to
|
|
+ * convert this to a string and send it via a message anyway, we stick with
|
|
+ * char *. It is parsed on the other side by the _EndMasterParallel()
|
|
+ * function of the respective dump format.
|
|
+ */
|
|
+static char *
|
|
+_WorkerJobDumpDirectory(ArchiveHandle *AH, TocEntry *te)
|
|
+{
|
|
+ /*
|
|
+ * short fixed-size string + some ID so far, this needs to be malloc'ed
|
|
+ * instead of static because we work with threads on windows
|
|
+ */
|
|
+ const int buflen = 64;
|
|
+ char *buf = (char *) pg_malloc(buflen);
|
|
+ lclTocEntry *tctx = (lclTocEntry *) te->formatData;
|
|
+
|
|
+ /* This should never happen */
|
|
+ if (!tctx)
|
|
+ exit_horribly(modulename, "error during backup\n");
|
|
+
|
|
+ /*
|
|
+ * This function returns void. We either fail and die horribly or
|
|
+ * succeed... A failure will be detected by the parent when the child dies
|
|
+ * unexpectedly.
|
|
+ */
|
|
+ WriteDataChunksForTocEntry(AH, te);
|
|
+
|
|
+ snprintf(buf, buflen, "OK DUMP %d", te->dumpId);
|
|
+
|
|
+ return buf;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * This function is executed in the child of a parallel backup for the
|
|
+ * directory archive and dumps the actual data.
|
|
+ */
|
|
+static char *
|
|
+_WorkerJobRestoreDirectory(ArchiveHandle *AH, TocEntry *te)
|
|
+{
|
|
+ /*
|
|
+ * short fixed-size string + some ID so far, this needs to be malloc'ed
|
|
+ * instead of static because we work with threads on windows
|
|
+ */
|
|
+ const int buflen = 64;
|
|
+ char *buf = (char *) pg_malloc(buflen);
|
|
+ ParallelArgs pargs;
|
|
+ int status;
|
|
+
|
|
+ pargs.AH = AH;
|
|
+ pargs.te = te;
|
|
+
|
|
+ status = parallel_restore(&pargs);
|
|
+
|
|
+ snprintf(buf, buflen, "OK RESTORE %d %d %d", te->dumpId, status,
|
|
+ status == WORKER_IGNORED_ERRORS ? AH->public.n_errors : 0);
|
|
+
|
|
+ return buf;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * This function is executed in the parent process. It analyzes the response of
|
|
+ * the _WorkerJobDumpDirectory/_WorkerJobRestoreDirectory functions of the
|
|
+ * respective dump format.
|
|
+ */
|
|
+static int
|
|
+_MasterEndParallelItem(ArchiveHandle *AH, TocEntry *te, const char *str, T_Action act)
|
|
+{
|
|
+ DumpId dumpId;
|
|
+ int nBytes,
|
|
+ n_errors;
|
|
+ int status = 0;
|
|
+
|
|
+ if (act == ACT_DUMP)
|
|
+ {
|
|
+ sscanf(str, "%u%n", &dumpId, &nBytes);
|
|
+
|
|
+ Assert(dumpId == te->dumpId);
|
|
+ Assert(nBytes == strlen(str));
|
|
+ }
|
|
+ else if (act == ACT_RESTORE)
|
|
+ {
|
|
+ sscanf(str, "%u %u %u%n", &dumpId, &status, &n_errors, &nBytes);
|
|
+
|
|
+ Assert(dumpId == te->dumpId);
|
|
+ Assert(nBytes == strlen(str));
|
|
+
|
|
+ AH->public.n_errors += n_errors;
|
|
+ }
|
|
+
|
|
+ return status;
|
|
+}
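/*
 * Sketch of the small text protocol between the master and the
 * directory-format workers above: the master hands out "DUMP <id>" or
 * "RESTORE <id>" items, and a worker answers "OK DUMP <id>" or
 * "OK RESTORE <id> <status> <n_errors>".  The Asserts in
 * _MasterEndParallelItem() imply the "OK DUMP " / "OK RESTORE " prefix has
 * already been stripped by the caller, so only the numbers remain; the %n
 * conversion checks that the whole string was consumed.  The reply string
 * below is an example only.
 */
#include <stdio.h>
#include <string.h>

int
main(void)
{
	const char *reply = "7 0 0";	/* dumpId, status, n_errors */
	unsigned int dumpId;
	int			status,
				n_errors,
				nBytes;

	if (sscanf(reply, "%u %d %d%n", &dumpId, &status, &n_errors, &nBytes) == 3 &&
		nBytes == (int) strlen(reply))
		printf("item %u finished with status %d (%d errors)\n",
			   dumpId, status, n_errors);
	return 0;
}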
--- /dev/null
|
|
+++ pglogical-2.2.2/pglogical_dump/pg_backup_null.c
|
|
@@ -0,0 +1,233 @@
|
|
+/*-------------------------------------------------------------------------
|
|
+ *
|
|
+ * pg_backup_null.c
|
|
+ *
|
|
+ * Implementation of an archive that is never saved; it is used by
|
|
+ * pg_dump to output a plain text SQL script instead of saving
|
|
+ * a real archive.
|
|
+ *
|
|
+ * See the headers to pg_restore for more details.
|
|
+ *
|
|
+ * Copyright (c) 2000, Philip Warner
|
|
+ * Rights are granted to use this software in any way so long
|
|
+ * as this notice is not removed.
|
|
+ *
|
|
+ * The author is not responsible for loss or damages that may
+ * result from its use.
|
|
+ *
|
|
+ *
|
|
+ * IDENTIFICATION
|
|
+ * src/bin/pg_dump/pg_backup_null.c
|
|
+ *
|
|
+ *-------------------------------------------------------------------------
|
|
+ */
|
|
+
|
|
+#include "pg_backup_archiver.h"
|
|
+#include "pg_backup_utils.h"
|
|
+#include "parallel.h"
|
|
+
|
|
+#include <unistd.h> /* for dup */
|
|
+
|
|
+#include "libpq/libpq-fs.h"
|
|
+
|
|
+static void _WriteData(ArchiveHandle *AH, const void *data, size_t dLen);
|
|
+static void _WriteBlobData(ArchiveHandle *AH, const void *data, size_t dLen);
|
|
+static void _EndData(ArchiveHandle *AH, TocEntry *te);
|
|
+static int _WriteByte(ArchiveHandle *AH, const int i);
|
|
+static void _WriteBuf(ArchiveHandle *AH, const void *buf, size_t len);
|
|
+static void _CloseArchive(ArchiveHandle *AH);
|
|
+static void _PrintTocData(ArchiveHandle *AH, TocEntry *te, RestoreOptions *ropt);
|
|
+static void _StartBlobs(ArchiveHandle *AH, TocEntry *te);
|
|
+static void _StartBlob(ArchiveHandle *AH, TocEntry *te, Oid oid);
|
|
+static void _EndBlob(ArchiveHandle *AH, TocEntry *te, Oid oid);
|
|
+static void _EndBlobs(ArchiveHandle *AH, TocEntry *te);
|
|
+
|
|
+
|
|
+/*
|
|
+ * Initializer
|
|
+ */
|
|
+void
|
|
+InitArchiveFmt_Null(ArchiveHandle *AH)
|
|
+{
|
|
+ /* Assuming static functions, this can be copied for each format. */
|
|
+ AH->WriteDataPtr = _WriteData;
|
|
+ AH->EndDataPtr = _EndData;
|
|
+ AH->WriteBytePtr = _WriteByte;
|
|
+ AH->WriteBufPtr = _WriteBuf;
|
|
+ AH->ClosePtr = _CloseArchive;
|
|
+ AH->ReopenPtr = NULL;
|
|
+ AH->PrintTocDataPtr = _PrintTocData;
|
|
+
|
|
+ AH->StartBlobsPtr = _StartBlobs;
|
|
+ AH->StartBlobPtr = _StartBlob;
|
|
+ AH->EndBlobPtr = _EndBlob;
|
|
+ AH->EndBlobsPtr = _EndBlobs;
|
|
+ AH->ClonePtr = NULL;
|
|
+ AH->DeClonePtr = NULL;
|
|
+
|
|
+ /* Initialize LO buffering */
|
|
+ AH->lo_buf_size = LOBBUFSIZE;
|
|
+ AH->lo_buf = (void *) pg_malloc(LOBBUFSIZE);
|
|
+
|
|
+ /*
|
|
+ * Now prevent reading...
|
|
+ */
|
|
+ if (AH->mode == archModeRead)
|
|
+ exit_horribly(NULL, "this format cannot be read\n");
|
|
+}
|
|
+
|
|
+/*
|
|
+ * - Start a new TOC entry
|
|
+ */
|
|
+
|
|
+/*
|
|
+ * Called by dumper via archiver from within a data dump routine
|
|
+ */
|
|
+static void
|
|
+_WriteData(ArchiveHandle *AH, const void *data, size_t dLen)
|
|
+{
|
|
+ /* Just send it to output, ahwrite() already errors on failure */
|
|
+ ahwrite(data, 1, dLen, AH);
|
|
+ return;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Called by dumper via archiver from within a data dump routine
|
|
+ * We substitute this for _WriteData while emitting a BLOB
|
|
+ */
|
|
+static void
|
|
+_WriteBlobData(ArchiveHandle *AH, const void *data, size_t dLen)
|
|
+{
|
|
+ if (dLen > 0)
|
|
+ {
|
|
+ PQExpBuffer buf = createPQExpBuffer();
|
|
+
|
|
+ appendByteaLiteralAHX(buf,
|
|
+ (const unsigned char *) data,
|
|
+ dLen,
|
|
+ AH);
|
|
+
|
|
+ ahprintf(AH, "SELECT pg_catalog.lowrite(0, %s);\n", buf->data);
|
|
+
|
|
+ destroyPQExpBuffer(buf);
|
|
+ }
|
|
+ return;
|
|
+}
|
|
+
|
|
+static void
|
|
+_EndData(ArchiveHandle *AH, TocEntry *te)
|
|
+{
|
|
+ ahprintf(AH, "\n\n");
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Called by the archiver when starting to save all BLOB DATA (not schema).
|
|
+ * This routine should save whatever format-specific information is needed
|
|
+ * to read the BLOBs back into memory.
|
|
+ *
|
|
+ * It is called just prior to the dumper's DataDumper routine.
|
|
+ *
|
|
+ * Optional, but strongly recommended.
|
|
+ */
|
|
+static void
|
|
+_StartBlobs(ArchiveHandle *AH, TocEntry *te)
|
|
+{
|
|
+ ahprintf(AH, "BEGIN;\n\n");
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Called by the archiver when the dumper calls StartBlob.
|
|
+ *
|
|
+ * Mandatory.
|
|
+ *
|
|
+ * Must save the passed OID for retrieval at restore-time.
|
|
+ */
|
|
+static void
|
|
+_StartBlob(ArchiveHandle *AH, TocEntry *te, Oid oid)
|
|
+{
|
|
+ bool old_blob_style = (AH->version < K_VERS_1_12);
|
|
+
|
|
+ if (oid == 0)
|
|
+ exit_horribly(NULL, "invalid OID for large object\n");
|
|
+
|
|
+ /* With an old archive we must do drop and create logic here */
|
|
+ if (old_blob_style && AH->ropt->dropSchema)
|
|
+ DropBlobIfExists(AH, oid);
|
|
+
|
|
+ if (old_blob_style)
|
|
+ ahprintf(AH, "SELECT pg_catalog.lo_open(pg_catalog.lo_create('%u'), %d);\n",
|
|
+ oid, INV_WRITE);
|
|
+ else
|
|
+ ahprintf(AH, "SELECT pg_catalog.lo_open('%u', %d);\n",
|
|
+ oid, INV_WRITE);
|
|
+
|
|
+ AH->WriteDataPtr = _WriteBlobData;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Called by the archiver when the dumper calls EndBlob.
|
|
+ *
|
|
+ * Optional.
|
|
+ */
|
|
+static void
|
|
+_EndBlob(ArchiveHandle *AH, TocEntry *te, Oid oid)
|
|
+{
|
|
+ AH->WriteDataPtr = _WriteData;
|
|
+
|
|
+ ahprintf(AH, "SELECT pg_catalog.lo_close(0);\n\n");
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Called by the archiver when finishing saving all BLOB DATA.
|
|
+ *
|
|
+ * Optional.
|
|
+ */
|
|
+static void
|
|
+_EndBlobs(ArchiveHandle *AH, TocEntry *te)
|
|
+{
|
|
+ ahprintf(AH, "COMMIT;\n\n");
|
|
+}
|
|
+
|
|
+/*------
|
|
+ * Called as part of a RestoreArchive call; for the NULL archive, this
|
|
+ * just sends the data for a given TOC entry to the output.
|
|
+ *------
|
|
+ */
|
|
+static void
|
|
+_PrintTocData(ArchiveHandle *AH, TocEntry *te, RestoreOptions *ropt)
|
|
+{
|
|
+ if (te->dataDumper)
|
|
+ {
|
|
+ AH->currToc = te;
|
|
+
|
|
+ if (strcmp(te->desc, "BLOBS") == 0)
|
|
+ _StartBlobs(AH, te);
|
|
+
|
|
+ (*te->dataDumper) ((Archive *) AH, te->dataDumperArg);
|
|
+
|
|
+ if (strcmp(te->desc, "BLOBS") == 0)
|
|
+ _EndBlobs(AH, te);
|
|
+
|
|
+ AH->currToc = NULL;
|
|
+ }
|
|
+}
|
|
+
|
|
+static int
|
|
+_WriteByte(ArchiveHandle *AH, const int i)
|
|
+{
|
|
+ /* Don't do anything */
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static void
|
|
+_WriteBuf(ArchiveHandle *AH, const void *buf, size_t len)
|
|
+{
|
|
+ /* Don't do anything */
|
|
+ return;
|
|
+}
|
|
+
|
|
+static void
|
|
+_CloseArchive(ArchiveHandle *AH)
|
|
+{
|
|
+ /* Nothing to do */
|
|
+}
|
|
--- /dev/null
|
|
+++ pglogical-2.2.2/pglogical_dump/pg_backup_tar.c
|
|
@@ -0,0 +1,1306 @@
|
|
+/*-------------------------------------------------------------------------
|
|
+ *
|
|
+ * pg_backup_tar.c
|
|
+ *
|
|
+ * This file is copied from the 'files' format file, but dumps data into
|
|
+ * one temp file then sends it to the output TAR archive.
|
|
+ *
|
|
+ * The tar format also includes a 'restore.sql' script which is there for
|
|
+ * the benefit of humans. This script is never used by pg_restore.
|
|
+ *
|
|
+ * NOTE: If you untar the created 'tar' file, the resulting files are
|
|
+ * compatible with the 'directory' format. Please keep the two formats in
|
|
+ * sync.
|
|
+ *
|
|
+ * See the headers to pg_backup_directory & pg_restore for more details.
|
|
+ *
|
|
+ * Copyright (c) 2000, Philip Warner
|
|
+ * Rights are granted to use this software in any way so long
|
|
+ * as this notice is not removed.
|
|
+ *
|
|
+ * The author is not responsible for loss or damages that may
+ * result from its use.
|
|
+ *
|
|
+ *
|
|
+ * IDENTIFICATION
|
|
+ * src/bin/pg_dump/pg_backup_tar.c
|
|
+ *
|
|
+ *-------------------------------------------------------------------------
|
|
+ */
|
|
+
|
|
+#include "pg_backup.h"
|
|
+#include "pg_backup_archiver.h"
|
|
+#include "pg_backup_tar.h"
|
|
+#include "pg_backup_utils.h"
|
|
+#include "parallel.h"
|
|
+#include "pgtar.h"
|
|
+
|
|
+#include <sys/stat.h>
|
|
+#include <ctype.h>
|
|
+#include <limits.h>
|
|
+#include <unistd.h>
|
|
+
|
|
+static void _ArchiveEntry(ArchiveHandle *AH, TocEntry *te);
|
|
+static void _StartData(ArchiveHandle *AH, TocEntry *te);
|
|
+static void _WriteData(ArchiveHandle *AH, const void *data, size_t dLen);
|
|
+static void _EndData(ArchiveHandle *AH, TocEntry *te);
|
|
+static int _WriteByte(ArchiveHandle *AH, const int i);
|
|
+static int _ReadByte(ArchiveHandle *);
|
|
+static void _WriteBuf(ArchiveHandle *AH, const void *buf, size_t len);
|
|
+static void _ReadBuf(ArchiveHandle *AH, void *buf, size_t len);
|
|
+static void _CloseArchive(ArchiveHandle *AH);
|
|
+static void _PrintTocData(ArchiveHandle *AH, TocEntry *te, RestoreOptions *ropt);
|
|
+static void _WriteExtraToc(ArchiveHandle *AH, TocEntry *te);
|
|
+static void _ReadExtraToc(ArchiveHandle *AH, TocEntry *te);
|
|
+static void _PrintExtraToc(ArchiveHandle *AH, TocEntry *te);
|
|
+
|
|
+static void _StartBlobs(ArchiveHandle *AH, TocEntry *te);
|
|
+static void _StartBlob(ArchiveHandle *AH, TocEntry *te, Oid oid);
|
|
+static void _EndBlob(ArchiveHandle *AH, TocEntry *te, Oid oid);
|
|
+static void _EndBlobs(ArchiveHandle *AH, TocEntry *te);
|
|
+
|
|
+#define K_STD_BUF_SIZE 1024
|
|
+
|
|
+
|
|
+typedef struct
|
|
+{
|
|
+#ifdef HAVE_LIBZ
|
|
+ gzFile zFH;
|
|
+#else
|
|
+ FILE *zFH;
|
|
+#endif
|
|
+ FILE *nFH;
|
|
+ FILE *tarFH;
|
|
+ FILE *tmpFH;
|
|
+ char *targetFile;
|
|
+ char mode;
|
|
+ pgoff_t pos;
|
|
+ pgoff_t fileLen;
|
|
+ ArchiveHandle *AH;
|
|
+} TAR_MEMBER;
|
|
+
|
|
+typedef struct
|
|
+{
|
|
+ int hasSeek;
|
|
+ pgoff_t filePos;
|
|
+ TAR_MEMBER *blobToc;
|
|
+ FILE *tarFH;
|
|
+ pgoff_t tarFHpos;
|
|
+ pgoff_t tarNextMember;
|
|
+ TAR_MEMBER *FH;
|
|
+ int isSpecialScript;
|
|
+ TAR_MEMBER *scriptTH;
|
|
+} lclContext;
|
|
+
|
|
+typedef struct
|
|
+{
|
|
+ TAR_MEMBER *TH;
|
|
+ char *filename;
|
|
+} lclTocEntry;
|
|
+
|
|
+/* translator: this is a module name */
|
|
+static const char *modulename = gettext_noop("tar archiver");
|
|
+
|
|
+static void _LoadBlobs(ArchiveHandle *AH, RestoreOptions *ropt);
|
|
+
|
|
+static TAR_MEMBER *tarOpen(ArchiveHandle *AH, const char *filename, char mode);
|
|
+static void tarClose(ArchiveHandle *AH, TAR_MEMBER *TH);
|
|
+
|
|
+#ifdef __NOT_USED__
|
|
+static char *tarGets(char *buf, size_t len, TAR_MEMBER *th);
|
|
+#endif
|
|
+static int tarPrintf(ArchiveHandle *AH, TAR_MEMBER *th, const char *fmt,...) __attribute__((format(PG_PRINTF_ATTRIBUTE, 3, 4)));
|
|
+
|
|
+static void _tarAddFile(ArchiveHandle *AH, TAR_MEMBER *th);
|
|
+static TAR_MEMBER *_tarPositionTo(ArchiveHandle *AH, const char *filename);
|
|
+static size_t tarRead(void *buf, size_t len, TAR_MEMBER *th);
|
|
+static size_t tarWrite(const void *buf, size_t len, TAR_MEMBER *th);
|
|
+static void _tarWriteHeader(TAR_MEMBER *th);
|
|
+static int _tarGetHeader(ArchiveHandle *AH, TAR_MEMBER *th);
|
|
+static size_t _tarReadRaw(ArchiveHandle *AH, void *buf, size_t len, TAR_MEMBER *th, FILE *fh);
|
|
+
|
|
+static size_t _scriptOut(ArchiveHandle *AH, const void *buf, size_t len);
|
|
+
|
|
+/*
|
|
+ * Initializer
|
|
+ */
|
|
+void
|
|
+InitArchiveFmt_Tar(ArchiveHandle *AH)
|
|
+{
|
|
+ lclContext *ctx;
|
|
+
|
|
+ /* Assuming static functions, this can be copied for each format. */
|
|
+ AH->ArchiveEntryPtr = _ArchiveEntry;
|
|
+ AH->StartDataPtr = _StartData;
|
|
+ AH->WriteDataPtr = _WriteData;
|
|
+ AH->EndDataPtr = _EndData;
|
|
+ AH->WriteBytePtr = _WriteByte;
|
|
+ AH->ReadBytePtr = _ReadByte;
|
|
+ AH->WriteBufPtr = _WriteBuf;
|
|
+ AH->ReadBufPtr = _ReadBuf;
|
|
+ AH->ClosePtr = _CloseArchive;
|
|
+ AH->ReopenPtr = NULL;
|
|
+ AH->PrintTocDataPtr = _PrintTocData;
|
|
+ AH->ReadExtraTocPtr = _ReadExtraToc;
|
|
+ AH->WriteExtraTocPtr = _WriteExtraToc;
|
|
+ AH->PrintExtraTocPtr = _PrintExtraToc;
|
|
+
|
|
+ AH->StartBlobsPtr = _StartBlobs;
|
|
+ AH->StartBlobPtr = _StartBlob;
|
|
+ AH->EndBlobPtr = _EndBlob;
|
|
+ AH->EndBlobsPtr = _EndBlobs;
|
|
+ AH->ClonePtr = NULL;
|
|
+ AH->DeClonePtr = NULL;
|
|
+
|
|
+ AH->MasterStartParallelItemPtr = NULL;
|
|
+ AH->MasterEndParallelItemPtr = NULL;
|
|
+
|
|
+ AH->WorkerJobDumpPtr = NULL;
|
|
+ AH->WorkerJobRestorePtr = NULL;
|
|
+
|
|
+ /*
|
|
+ * Set up some special context used in compressing data.
|
|
+ */
|
|
+ ctx = (lclContext *) pg_malloc0(sizeof(lclContext));
|
|
+ AH->formatData = (void *) ctx;
|
|
+ ctx->filePos = 0;
|
|
+ ctx->isSpecialScript = 0;
|
|
+
|
|
+ /* Initialize LO buffering */
|
|
+ AH->lo_buf_size = LOBBUFSIZE;
|
|
+ AH->lo_buf = (void *) pg_malloc(LOBBUFSIZE);
|
|
+
|
|
+ /*
|
|
+ * Now open the tar file, and load the TOC if we're in read mode.
|
|
+ */
|
|
+ if (AH->mode == archModeWrite)
|
|
+ {
|
|
+ if (AH->fSpec && strcmp(AH->fSpec, "") != 0)
|
|
+ {
|
|
+ ctx->tarFH = fopen(AH->fSpec, PG_BINARY_W);
|
|
+ if (ctx->tarFH == NULL)
|
|
+ exit_horribly(modulename,
|
|
+ "could not open TOC file \"%s\" for output: %s\n",
|
|
+ AH->fSpec, strerror(errno));
|
|
+ }
|
|
+ else
|
|
+ {
|
|
+ ctx->tarFH = stdout;
|
|
+ if (ctx->tarFH == NULL)
|
|
+ exit_horribly(modulename,
|
|
+ "could not open TOC file for output: %s\n",
|
|
+ strerror(errno));
|
|
+ }
|
|
+
|
|
+ ctx->tarFHpos = 0;
|
|
+
|
|
+ /*
|
|
+ * Make unbuffered since we will dup() it, and the buffers screw each
|
|
+ * other
|
|
+ */
|
|
+ /* setvbuf(ctx->tarFH, NULL, _IONBF, 0); */
|
|
+
|
|
+ ctx->hasSeek = checkSeek(ctx->tarFH);
|
|
+
|
|
+ /*
|
|
+ * We don't support compression because reading the files back is not
|
|
+ * possible since gzdopen uses buffered IO which totally screws file
|
|
+ * positioning.
|
|
+ */
|
|
+ if (AH->compression != 0)
|
|
+ exit_horribly(modulename,
|
|
+ "compression is not supported by tar archive format\n");
|
|
+ }
|
|
+ else
|
|
+ { /* Read Mode */
|
|
+ if (AH->fSpec && strcmp(AH->fSpec, "") != 0)
|
|
+ {
|
|
+ ctx->tarFH = fopen(AH->fSpec, PG_BINARY_R);
|
|
+ if (ctx->tarFH == NULL)
|
|
+ exit_horribly(modulename, "could not open TOC file \"%s\" for input: %s\n",
|
|
+ AH->fSpec, strerror(errno));
|
|
+ }
|
|
+ else
|
|
+ {
|
|
+ ctx->tarFH = stdin;
|
|
+ if (ctx->tarFH == NULL)
|
|
+ exit_horribly(modulename, "could not open TOC file for input: %s\n",
|
|
+ strerror(errno));
|
|
+ }
|
|
+
|
|
+ /*
|
|
+ * Make unbuffered since we will dup() it, and the buffers screw each
|
|
+ * other
|
|
+ */
|
|
+ /* setvbuf(ctx->tarFH, NULL, _IONBF, 0); */
|
|
+
|
|
+ ctx->tarFHpos = 0;
|
|
+
|
|
+ ctx->hasSeek = checkSeek(ctx->tarFH);
|
|
+
|
|
+ /*
|
|
+ * Forcibly unmark the header as read since we use the lookahead
|
|
+ * buffer
|
|
+ */
|
|
+ AH->readHeader = 0;
|
|
+
|
|
+ ctx->FH = (void *) tarOpen(AH, "toc.dat", 'r');
|
|
+ ReadHead(AH);
|
|
+ ReadToc(AH);
|
|
+ tarClose(AH, ctx->FH); /* Nothing else in the file... */
|
|
+ }
|
|
+}
|
|
+
|
|
+/*
|
|
+ * - Start a new TOC entry
|
|
+ * Setup the output file name.
|
|
+ */
|
|
+static void
|
|
+_ArchiveEntry(ArchiveHandle *AH, TocEntry *te)
|
|
+{
|
|
+ lclTocEntry *ctx;
|
|
+ char fn[K_STD_BUF_SIZE];
|
|
+
|
|
+ ctx = (lclTocEntry *) pg_malloc0(sizeof(lclTocEntry));
|
|
+ if (te->dataDumper != NULL)
|
|
+ {
|
|
+#ifdef HAVE_LIBZ
|
|
+ if (AH->compression == 0)
|
|
+ sprintf(fn, "%d.dat", te->dumpId);
|
|
+ else
|
|
+ sprintf(fn, "%d.dat.gz", te->dumpId);
|
|
+#else
|
|
+ sprintf(fn, "%d.dat", te->dumpId);
|
|
+#endif
|
|
+ ctx->filename = pg_strdup(fn);
|
|
+ }
|
|
+ else
|
|
+ {
|
|
+ ctx->filename = NULL;
|
|
+ ctx->TH = NULL;
|
|
+ }
|
|
+ te->formatData = (void *) ctx;
|
|
+}
|
|
+
|
|
+static void
|
|
+_WriteExtraToc(ArchiveHandle *AH, TocEntry *te)
|
|
+{
|
|
+ lclTocEntry *ctx = (lclTocEntry *) te->formatData;
|
|
+
|
|
+ if (ctx->filename)
|
|
+ WriteStr(AH, ctx->filename);
|
|
+ else
|
|
+ WriteStr(AH, "");
|
|
+}
|
|
+
|
|
+static void
|
|
+_ReadExtraToc(ArchiveHandle *AH, TocEntry *te)
|
|
+{
|
|
+ lclTocEntry *ctx = (lclTocEntry *) te->formatData;
|
|
+
|
|
+ if (ctx == NULL)
|
|
+ {
|
|
+ ctx = (lclTocEntry *) pg_malloc0(sizeof(lclTocEntry));
|
|
+ te->formatData = (void *) ctx;
|
|
+ }
|
|
+
|
|
+ ctx->filename = ReadStr(AH);
|
|
+ if (strlen(ctx->filename) == 0)
|
|
+ {
|
|
+ free(ctx->filename);
|
|
+ ctx->filename = NULL;
|
|
+ }
|
|
+ ctx->TH = NULL;
|
|
+}
|
|
+
|
|
+static void
|
|
+_PrintExtraToc(ArchiveHandle *AH, TocEntry *te)
|
|
+{
|
|
+ lclTocEntry *ctx = (lclTocEntry *) te->formatData;
|
|
+
|
|
+ if (AH->public.verbose && ctx->filename != NULL)
|
|
+ ahprintf(AH, "-- File: %s\n", ctx->filename);
|
|
+}
|
|
+
|
|
+static void
|
|
+_StartData(ArchiveHandle *AH, TocEntry *te)
|
|
+{
|
|
+ lclTocEntry *tctx = (lclTocEntry *) te->formatData;
|
|
+
|
|
+ tctx->TH = tarOpen(AH, tctx->filename, 'w');
|
|
+}
|
|
+
|
|
+static TAR_MEMBER *
|
|
+tarOpen(ArchiveHandle *AH, const char *filename, char mode)
|
|
+{
|
|
+ lclContext *ctx = (lclContext *) AH->formatData;
|
|
+ TAR_MEMBER *tm;
|
|
+
|
|
+#ifdef HAVE_LIBZ
|
|
+ char fmode[10];
|
|
+#endif
|
|
+
|
|
+ if (mode == 'r')
|
|
+ {
|
|
+ tm = _tarPositionTo(AH, filename);
|
|
+ if (!tm) /* Not found */
|
|
+ {
|
|
+ if (filename)
|
|
+ {
|
|
+ /*
|
|
+ * Couldn't find the requested file. Future: do SEEK(0) and
|
|
+ * retry.
|
|
+ */
|
|
+ exit_horribly(modulename, "could not find file \"%s\" in archive\n", filename);
|
|
+ }
|
|
+ else
|
|
+ {
|
|
+ /* Any file OK, none left, so return NULL */
|
|
+ return NULL;
|
|
+ }
|
|
+ }
|
|
+
|
|
+#ifdef HAVE_LIBZ
|
|
+
|
|
+ if (AH->compression == 0)
|
|
+ tm->nFH = ctx->tarFH;
|
|
+ else
|
|
+ exit_horribly(modulename, "compression is not supported by tar archive format\n");
|
|
+ /* tm->zFH = gzdopen(dup(fileno(ctx->tarFH)), "rb"); */
|
|
+#else
|
|
+ tm->nFH = ctx->tarFH;
|
|
+#endif
|
|
+ }
|
|
+ else
|
|
+ {
|
|
+ int old_umask;
|
|
+
|
|
+ tm = pg_malloc0(sizeof(TAR_MEMBER));
|
|
+
|
|
+ /*
|
|
+ * POSIX does not require, but permits, tmpfile() to restrict file
|
|
+ * permissions. Given an OS crash after we write data, the filesystem
|
|
+ * might retain the data but forget tmpfile()'s unlink(). If so, the
|
|
+ * file mode protects confidentiality of the data written.
|
|
+ */
|
|
+ old_umask = umask(S_IRWXG | S_IRWXO);
|
|
+
|
|
+#ifndef WIN32
|
|
+ tm->tmpFH = tmpfile();
|
|
+#else
|
|
+
|
|
+ /*
|
|
+ * On WIN32, tmpfile() generates a filename in the root directory,
|
|
+ * which requires administrative permissions on certain systems. Loop
|
|
+ * until we find a unique file name we can create.
|
|
+ */
|
|
+ while (1)
|
|
+ {
|
|
+ char *name;
|
|
+ int fd;
|
|
+
|
|
+ name = _tempnam(NULL, "pg_temp_");
|
|
+ if (name == NULL)
|
|
+ break;
|
|
+ fd = open(name, O_RDWR | O_CREAT | O_EXCL | O_BINARY |
|
|
+ O_TEMPORARY, S_IRUSR | S_IWUSR);
|
|
+ free(name);
|
|
+
|
|
+ if (fd != -1) /* created a file */
|
|
+ {
|
|
+ tm->tmpFH = fdopen(fd, "w+b");
|
|
+ break;
|
|
+ }
|
|
+ else if (errno != EEXIST) /* failure other than file exists */
|
|
+ break;
|
|
+ }
|
|
+#endif
|
|
+
|
|
+ if (tm->tmpFH == NULL)
|
|
+ exit_horribly(modulename, "could not generate temporary file name: %s\n", strerror(errno));
|
|
+
|
|
+ umask(old_umask);
|
|
+
|
|
+#ifdef HAVE_LIBZ
|
|
+
|
|
+ if (AH->compression != 0)
|
|
+ {
|
|
+ sprintf(fmode, "wb%d", AH->compression);
|
|
+ tm->zFH = gzdopen(dup(fileno(tm->tmpFH)), fmode);
|
|
+ if (tm->zFH == NULL)
|
|
+ exit_horribly(modulename, "could not open temporary file\n");
|
|
+ }
|
|
+ else
|
|
+ tm->nFH = tm->tmpFH;
|
|
+#else
|
|
+
|
|
+ tm->nFH = tm->tmpFH;
|
|
+#endif
|
|
+
|
|
+ tm->AH = AH;
|
|
+ tm->targetFile = pg_strdup(filename);
|
|
+ }
|
|
+
|
|
+ tm->mode = mode;
|
|
+ tm->tarFH = ctx->tarFH;
|
|
+
|
|
+ return tm;
|
|
+}
|
|
+
|
|
+static void
|
|
+tarClose(ArchiveHandle *AH, TAR_MEMBER *th)
|
|
+{
|
|
+ /*
|
|
+ * Close the GZ file since we dup'd. This will flush the buffers.
|
|
+ */
|
|
+ if (AH->compression != 0)
|
|
+ if (GZCLOSE(th->zFH) != 0)
|
|
+ exit_horribly(modulename, "could not close tar member\n");
|
|
+
|
|
+ if (th->mode == 'w')
|
|
+ _tarAddFile(AH, th); /* This will close the temp file */
|
|
+
|
|
+ /*
|
|
+ * else Nothing to do for normal read since we don't dup() normal file
|
|
+ * handle, and we don't use temp files.
|
|
+ */
|
|
+
|
|
+ if (th->targetFile)
|
|
+ free(th->targetFile);
|
|
+
|
|
+ th->nFH = NULL;
|
|
+ th->zFH = NULL;
|
|
+}
|
|
+
|
|
+#ifdef __NOT_USED__
|
|
+static char *
|
|
+tarGets(char *buf, size_t len, TAR_MEMBER *th)
|
|
+{
|
|
+ char *s;
|
|
+ size_t cnt = 0;
|
|
+ char c = ' ';
|
|
+ int eof = 0;
|
|
+
|
|
+ /* Can't read past logical EOF */
|
|
+ if (len > (th->fileLen - th->pos))
|
|
+ len = th->fileLen - th->pos;
|
|
+
|
|
+ while (cnt < len && c != '\n')
|
|
+ {
|
|
+ if (_tarReadRaw(th->AH, &c, 1, th, NULL) <= 0)
|
|
+ {
|
|
+ eof = 1;
|
|
+ break;
|
|
+ }
|
|
+ buf[cnt++] = c;
|
|
+ }
|
|
+
|
|
+ if (eof && cnt == 0)
|
|
+ s = NULL;
|
|
+ else
|
|
+ {
|
|
+ buf[cnt++] = '\0';
|
|
+ s = buf;
|
|
+ }
|
|
+
|
|
+ if (s)
|
|
+ {
|
|
+ len = strlen(s);
|
|
+ th->pos += len;
|
|
+ }
|
|
+
|
|
+ return s;
|
|
+}
|
|
+#endif
|
|
+
|
|
+/*
|
|
+ * Just read bytes from the archive. This is the low level read routine
|
|
+ * that is used for ALL reads on a tar file.
|
|
+ */
|
|
+static size_t
|
|
+_tarReadRaw(ArchiveHandle *AH, void *buf, size_t len, TAR_MEMBER *th, FILE *fh)
|
|
+{
|
|
+ lclContext *ctx = (lclContext *) AH->formatData;
|
|
+ size_t avail;
|
|
+ size_t used = 0;
|
|
+ size_t res = 0;
|
|
+
|
|
+ avail = AH->lookaheadLen - AH->lookaheadPos;
|
|
+ if (avail > 0)
|
|
+ {
|
|
+ /* We have some lookahead bytes to use */
|
|
+ if (avail >= len) /* Just use the lookahead buffer */
|
|
+ used = len;
|
|
+ else
|
|
+ used = avail;
|
|
+
|
|
+ /* Copy, and adjust buffer pos */
|
|
+ memcpy(buf, AH->lookahead + AH->lookaheadPos, used);
|
|
+ AH->lookaheadPos += used;
|
|
+
|
|
+ /* Adjust required length */
|
|
+ len -= used;
|
|
+ }
|
|
+
|
|
+ /* Read the file if len > 0 */
|
|
+ if (len > 0)
|
|
+ {
|
|
+ if (fh)
|
|
+ {
|
|
+ res = fread(&((char *) buf)[used], 1, len, fh);
|
|
+ if (res != len && !feof(fh))
|
|
+ READ_ERROR_EXIT(fh);
|
|
+ }
|
|
+ else if (th)
|
|
+ {
|
|
+ if (th->zFH)
|
|
+ {
|
|
+ res = GZREAD(&((char *) buf)[used], 1, len, th->zFH);
|
|
+ if (res != len && !GZEOF(th->zFH))
|
|
+ exit_horribly(modulename,
|
|
+ "could not read from input file: %s\n", strerror(errno));
|
|
+ }
|
|
+ else
|
|
+ {
|
|
+ res = fread(&((char *) buf)[used], 1, len, th->nFH);
|
|
+ if (res != len && !feof(th->nFH))
|
|
+ READ_ERROR_EXIT(th->nFH);
|
|
+ }
|
|
+ }
|
|
+ else
|
|
+ exit_horribly(modulename, "internal error -- neither th nor fh specified in tarReadRaw()\n");
|
|
+ }
|
|
+
|
|
+ ctx->tarFHpos += res + used;
|
|
+
|
|
+ return (res + used);
|
|
+}
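/*
 * Standalone sketch of the read pattern in _tarReadRaw() above: satisfy a
 * request first from a lookahead buffer left over from header peeking, then
 * fall back to the underlying stream for whatever is still missing.  The
 * struct and field names are illustrative, not the real ArchiveHandle
 * members.
 */
#include <stdio.h>
#include <string.h>

typedef struct
{
	char		lookahead[512];
	size_t		lookaheadLen;
	size_t		lookaheadPos;
	FILE	   *fh;
} ReadSource;

static size_t
read_with_lookahead(ReadSource *src, void *buf, size_t len)
{
	size_t		avail = src->lookaheadLen - src->lookaheadPos;
	size_t		used = 0;
	size_t		res = 0;

	if (avail > 0)
	{
		/* take as much as possible from the lookahead buffer first */
		used = (avail >= len) ? len : avail;
		memcpy(buf, src->lookahead + src->lookaheadPos, used);
		src->lookaheadPos += used;
		len -= used;
	}
	if (len > 0)
		res = fread((char *) buf + used, 1, len, src->fh);

	return used + res;
}

int
main(void)
{
	ReadSource	src = {"HEADER:", 7, 0, NULL};
	char		out[32];
	size_t		n;

	src.fh = stdin;				/* the rest of the "archive" comes from stdin */
	n = read_with_lookahead(&src, out, sizeof(out) - 1);
	out[n] = '\0';
	printf("read %zu bytes: %s\n", n, out);
	return 0;
}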
+
|
|
+static size_t
|
|
+tarRead(void *buf, size_t len, TAR_MEMBER *th)
|
|
+{
|
|
+ size_t res;
|
|
+
|
|
+ if (th->pos + len > th->fileLen)
|
|
+ len = th->fileLen - th->pos;
|
|
+
|
|
+ if (len <= 0)
|
|
+ return 0;
|
|
+
|
|
+ res = _tarReadRaw(th->AH, buf, len, th, NULL);
|
|
+
|
|
+ th->pos += res;
|
|
+
|
|
+ return res;
|
|
+}
|
|
+
|
|
+static size_t
|
|
+tarWrite(const void *buf, size_t len, TAR_MEMBER *th)
|
|
+{
|
|
+ size_t res;
|
|
+
|
|
+ if (th->zFH != NULL)
|
|
+ res = GZWRITE(buf, 1, len, th->zFH);
|
|
+ else
|
|
+ res = fwrite(buf, 1, len, th->nFH);
|
|
+
|
|
+ th->pos += res;
|
|
+ return res;
|
|
+}
|
|
+
|
|
+static void
|
|
+_WriteData(ArchiveHandle *AH, const void *data, size_t dLen)
|
|
+{
|
|
+ lclTocEntry *tctx = (lclTocEntry *) AH->currToc->formatData;
|
|
+
|
|
+ if (tarWrite(data, dLen, tctx->TH) != dLen)
|
|
+ WRITE_ERROR_EXIT;
|
|
+
|
|
+ return;
|
|
+}
|
|
+
|
|
+static void
|
|
+_EndData(ArchiveHandle *AH, TocEntry *te)
|
|
+{
|
|
+ lclTocEntry *tctx = (lclTocEntry *) te->formatData;
|
|
+
|
|
+ /* Close the file */
|
|
+ tarClose(AH, tctx->TH);
|
|
+ tctx->TH = NULL;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Print data for a given file
|
|
+ */
|
|
+static void
|
|
+_PrintFileData(ArchiveHandle *AH, char *filename, RestoreOptions *ropt)
|
|
+{
|
|
+ lclContext *ctx = (lclContext *) AH->formatData;
|
|
+ char buf[4096];
|
|
+ size_t cnt;
|
|
+ TAR_MEMBER *th;
|
|
+
|
|
+ if (!filename)
|
|
+ return;
|
|
+
|
|
+ th = tarOpen(AH, filename, 'r');
|
|
+ ctx->FH = th;
|
|
+
|
|
+ while ((cnt = tarRead(buf, 4095, th)) > 0)
|
|
+ {
|
|
+ buf[cnt] = '\0';
|
|
+ ahwrite(buf, 1, cnt, AH);
|
|
+ }
|
|
+
|
|
+ tarClose(AH, th);
|
|
+}
|
|
+
|
|
+
|
|
+/*
|
|
+ * Print data for a given TOC entry
|
|
+*/
|
|
+static void
|
|
+_PrintTocData(ArchiveHandle *AH, TocEntry *te, RestoreOptions *ropt)
|
|
+{
|
|
+ lclContext *ctx = (lclContext *) AH->formatData;
|
|
+ lclTocEntry *tctx = (lclTocEntry *) te->formatData;
|
|
+ int pos1;
|
|
+
|
|
+ if (!tctx->filename)
|
|
+ return;
|
|
+
|
|
+ /*
|
|
+ * If we're writing the special restore.sql script, emit a suitable
|
|
+ * command to include each table's data from the corresponding file.
|
|
+ *
|
|
+ * In the COPY case this is a bit klugy because the regular COPY command
|
|
+ * was already printed before we get control.
|
|
+ */
|
|
+ if (ctx->isSpecialScript)
|
|
+ {
|
|
+ if (te->copyStmt)
|
|
+ {
|
|
+ /* Abort the COPY FROM stdin */
|
|
+ ahprintf(AH, "\\.\n");
|
|
+
|
|
+ /*
|
|
+ * The COPY statement should look like "COPY ... FROM stdin;\n",
|
|
+ * see dumpTableData().
|
|
+ */
|
|
+ pos1 = (int) strlen(te->copyStmt) - 13;
|
|
+ if (pos1 < 6 || strncmp(te->copyStmt, "COPY ", 5) != 0 ||
|
|
+ strcmp(te->copyStmt + pos1, " FROM stdin;\n") != 0)
|
|
+ exit_horribly(modulename,
|
|
+ "unexpected COPY statement syntax: \"%s\"\n",
|
|
+ te->copyStmt);
|
|
+
|
|
+ /* Emit all but the FROM part ... */
|
|
+ ahwrite(te->copyStmt, 1, pos1, AH);
|
|
+ /* ... and insert modified FROM */
|
|
+ ahprintf(AH, " FROM '$$PATH$$/%s';\n\n", tctx->filename);
|
|
+ }
|
|
+ else
|
|
+ {
|
|
+ /* --inserts mode, no worries, just include the data file */
|
|
+ ahprintf(AH, "\\i $$PATH$$/%s\n\n", tctx->filename);
|
|
+ }
|
|
+
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ if (strcmp(te->desc, "BLOBS") == 0)
|
|
+ _LoadBlobs(AH, ropt);
|
|
+ else
|
|
+ _PrintFileData(AH, tctx->filename, ropt);
|
|
+}
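Editorial note, a sketch rather than part of the patch: when writing restore.sql, the branch above trims the already-emitted COPY command back to just before its " FROM stdin;" tail and substitutes a FROM '$$PATH$$/...' file reference. With a hypothetical statement and data file name:

/* " FROM stdin;\n" is exactly 13 characters long, hence the magic number. */
const char *copyStmt = "COPY public.t (a, b) FROM stdin;\n";   /* hypothetical */
int pos1 = (int) strlen(copyStmt) - 13;        /* index of " FROM stdin;\n" */

/* restore.sql then receives the first pos1 bytes, "COPY public.t (a, b)", */
/* followed by the rewritten tail, " FROM '$$PATH$$/3116.dat';".           */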
|
|
+
|
|
+static void
|
|
+_LoadBlobs(ArchiveHandle *AH, RestoreOptions *ropt)
|
|
+{
|
|
+ Oid oid;
|
|
+ lclContext *ctx = (lclContext *) AH->formatData;
|
|
+ TAR_MEMBER *th;
|
|
+ size_t cnt;
|
|
+ bool foundBlob = false;
|
|
+ char buf[4096];
|
|
+
|
|
+ StartRestoreBlobs(AH);
|
|
+
|
|
+ th = tarOpen(AH, NULL, 'r'); /* Open next file */
|
|
+ while (th != NULL)
|
|
+ {
|
|
+ ctx->FH = th;
|
|
+
|
|
+ if (strncmp(th->targetFile, "blob_", 5) == 0)
|
|
+ {
|
|
+ oid = atooid(&th->targetFile[5]);
|
|
+ if (oid != 0)
|
|
+ {
|
|
+ ahlog(AH, 1, "restoring large object with OID %u\n", oid);
|
|
+
|
|
+ StartRestoreBlob(AH, oid, ropt->dropSchema);
|
|
+
|
|
+ while ((cnt = tarRead(buf, 4095, th)) > 0)
|
|
+ {
|
|
+ buf[cnt] = '\0';
|
|
+ ahwrite(buf, 1, cnt, AH);
|
|
+ }
|
|
+ EndRestoreBlob(AH, oid);
|
|
+ foundBlob = true;
|
|
+ }
|
|
+ tarClose(AH, th);
|
|
+ }
|
|
+ else
|
|
+ {
|
|
+ tarClose(AH, th);
|
|
+
|
|
+ /*
|
|
+ * Once we have found the first blob, stop at the first non-blob
|
|
+ * entry (which will be 'blobs.toc'). This coding would eat all
|
|
+ * the rest of the archive if there are no blobs ... but this
|
|
+ * function shouldn't be called at all in that case.
|
|
+ */
|
|
+ if (foundBlob)
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ th = tarOpen(AH, NULL, 'r');
|
|
+ }
|
|
+ EndRestoreBlobs(AH);
|
|
+}
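Editorial note (sketch only, reusing declarations already visible in this file): _LoadBlobs() identifies large-object members purely by the file-name convention _StartBlob() uses when the archive is written, blob_<oid>.dat with an optional .gz suffix. For a hypothetical member name:

const char *member = "blob_16385.dat";   /* hypothetical member name */
Oid oid = 0;

if (strncmp(member, "blob_", 5) == 0)
    oid = atooid(&member[5]);            /* 16385; anything unparsable yields 0 and is skipped */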
|
|
+
|
|
+
|
|
+static int
|
|
+_WriteByte(ArchiveHandle *AH, const int i)
|
|
+{
|
|
+ lclContext *ctx = (lclContext *) AH->formatData;
|
|
+ char b = i; /* Avoid endian problems */
|
|
+
|
|
+ if (tarWrite(&b, 1, ctx->FH) != 1)
|
|
+ WRITE_ERROR_EXIT;
|
|
+
|
|
+ ctx->filePos += 1;
|
|
+ return 1;
|
|
+}
|
|
+
|
|
+static int
|
|
+_ReadByte(ArchiveHandle *AH)
|
|
+{
|
|
+ lclContext *ctx = (lclContext *) AH->formatData;
|
|
+ size_t res;
|
|
+ unsigned char c;
|
|
+
|
|
+ res = tarRead(&c, 1, ctx->FH);
|
|
+ if (res != 1)
|
|
+ /* We already would have exited for errors on reads, must be EOF */
|
|
+ exit_horribly(modulename,
|
|
+ "could not read from input file: end of file\n");
|
|
+ ctx->filePos += 1;
|
|
+ return c;
|
|
+}
|
|
+
|
|
+static void
|
|
+_WriteBuf(ArchiveHandle *AH, const void *buf, size_t len)
|
|
+{
|
|
+ lclContext *ctx = (lclContext *) AH->formatData;
|
|
+
|
|
+ if (tarWrite(buf, len, ctx->FH) != len)
|
|
+ WRITE_ERROR_EXIT;
|
|
+
|
|
+ ctx->filePos += len;
|
|
+}
|
|
+
|
|
+static void
|
|
+_ReadBuf(ArchiveHandle *AH, void *buf, size_t len)
|
|
+{
|
|
+ lclContext *ctx = (lclContext *) AH->formatData;
|
|
+
|
|
+ if (tarRead(buf, len, ctx->FH) != len)
|
|
+ /* We already would have exited for errors on reads, must be EOF */
|
|
+ exit_horribly(modulename,
|
|
+ "could not read from input file: end of file\n");
|
|
+
|
|
+ ctx->filePos += len;
|
|
+ return;
|
|
+}
|
|
+
|
|
+static void
|
|
+_CloseArchive(ArchiveHandle *AH)
|
|
+{
|
|
+ lclContext *ctx = (lclContext *) AH->formatData;
|
|
+ TAR_MEMBER *th;
|
|
+ RestoreOptions *ropt;
|
|
+ RestoreOptions *savRopt;
|
|
+ int savVerbose,
|
|
+ i;
|
|
+
|
|
+ if (AH->mode == archModeWrite)
|
|
+ {
|
|
+ /*
|
|
+ * Write the Header & TOC to the archive FIRST
|
|
+ */
|
|
+ th = tarOpen(AH, "toc.dat", 'w');
|
|
+ ctx->FH = th;
|
|
+ WriteHead(AH);
|
|
+ WriteToc(AH);
|
|
+ tarClose(AH, th); /* Not needed any more */
|
|
+
|
|
+ /*
|
|
+ * Now send the data (tables & blobs)
|
|
+ */
|
|
+ WriteDataChunks(AH, NULL);
|
|
+
|
|
+ /*
|
|
+ * Now this format wants to append a script which does a full restore
|
|
+ * if the files have been extracted.
|
|
+ */
|
|
+ th = tarOpen(AH, "restore.sql", 'w');
|
|
+
|
|
+ tarPrintf(AH, th, "--\n"
|
|
+ "-- NOTE:\n"
|
|
+ "--\n"
|
|
+ "-- File paths need to be edited. Search for $$PATH$$ and\n"
|
|
+ "-- replace it with the path to the directory containing\n"
|
|
+ "-- the extracted data files.\n"
|
|
+ "--\n");
|
|
+
|
|
+ AH->CustomOutPtr = _scriptOut;
|
|
+
|
|
+ ctx->isSpecialScript = 1;
|
|
+ ctx->scriptTH = th;
|
|
+
|
|
+ ropt = NewRestoreOptions();
|
|
+ memcpy(ropt, AH->ropt, sizeof(RestoreOptions));
|
|
+ ropt->filename = NULL;
|
|
+ ropt->dropSchema = 1;
|
|
+ ropt->compression = 0;
|
|
+ ropt->superuser = NULL;
|
|
+ ropt->suppressDumpWarnings = true;
|
|
+
|
|
+ savRopt = AH->ropt;
|
|
+ AH->ropt = ropt;
|
|
+
|
|
+ savVerbose = AH->public.verbose;
|
|
+ AH->public.verbose = 0;
|
|
+
|
|
+ RestoreArchive((Archive *) AH);
|
|
+
|
|
+ AH->ropt = savRopt;
|
|
+ AH->public.verbose = savVerbose;
|
|
+
|
|
+ tarClose(AH, th);
|
|
+
|
|
+ ctx->isSpecialScript = 0;
|
|
+
|
|
+ /*
|
|
+ * EOF marker for tar files is two blocks of NULLs.
|
|
+ */
|
|
+ for (i = 0; i < 512 * 2; i++)
|
|
+ {
|
|
+ if (fputc(0, ctx->tarFH) == EOF)
|
|
+ WRITE_ERROR_EXIT;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ AH->FH = NULL;
|
|
+}
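Editorial note: the final loop above emits the conventional tar end-of-archive marker, two 512-byte blocks of zero bytes. An equivalent single-call sketch, assuming the same ctx->tarFH stream and macros from this file:

static const char zeroes[2 * 512];      /* 1024 NUL bytes, zero-initialized */

if (fwrite(zeroes, 1, sizeof(zeroes), ctx->tarFH) != sizeof(zeroes))
    WRITE_ERROR_EXIT;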
|
|
+
|
|
+static size_t
|
|
+_scriptOut(ArchiveHandle *AH, const void *buf, size_t len)
|
|
+{
|
|
+ lclContext *ctx = (lclContext *) AH->formatData;
|
|
+
|
|
+ return tarWrite(buf, len, ctx->scriptTH);
|
|
+}
|
|
+
|
|
+/*
|
|
+ * BLOB support
|
|
+ */
|
|
+
|
|
+/*
|
|
+ * Called by the archiver when starting to save all BLOB DATA (not schema).
|
|
+ * This routine should save whatever format-specific information is needed
|
|
+ * to read the BLOBs back into memory.
|
|
+ *
|
|
+ * It is called just prior to the dumper's DataDumper routine.
|
|
+ *
|
|
+ * Optional, but strongly recommended.
|
|
+ *
|
|
+ */
|
|
+static void
|
|
+_StartBlobs(ArchiveHandle *AH, TocEntry *te)
|
|
+{
|
|
+ lclContext *ctx = (lclContext *) AH->formatData;
|
|
+ char fname[K_STD_BUF_SIZE];
|
|
+
|
|
+ sprintf(fname, "blobs.toc");
|
|
+ ctx->blobToc = tarOpen(AH, fname, 'w');
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Called by the archiver when the dumper calls StartBlob.
|
|
+ *
|
|
+ * Mandatory.
|
|
+ *
|
|
+ * Must save the passed OID for retrieval at restore-time.
|
|
+ */
|
|
+static void
|
|
+_StartBlob(ArchiveHandle *AH, TocEntry *te, Oid oid)
|
|
+{
|
|
+ lclContext *ctx = (lclContext *) AH->formatData;
|
|
+ lclTocEntry *tctx = (lclTocEntry *) te->formatData;
|
|
+ char fname[255];
|
|
+ char *sfx;
|
|
+
|
|
+ if (oid == 0)
|
|
+ exit_horribly(modulename, "invalid OID for large object (%u)\n", oid);
|
|
+
|
|
+ if (AH->compression != 0)
|
|
+ sfx = ".gz";
|
|
+ else
|
|
+ sfx = "";
|
|
+
|
|
+ sprintf(fname, "blob_%u.dat%s", oid, sfx);
|
|
+
|
|
+ tarPrintf(AH, ctx->blobToc, "%u %s\n", oid, fname);
|
|
+
|
|
+ tctx->TH = tarOpen(AH, fname, 'w');
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Called by the archiver when the dumper calls EndBlob.
|
|
+ *
|
|
+ * Optional.
|
|
+ *
|
|
+ */
|
|
+static void
|
|
+_EndBlob(ArchiveHandle *AH, TocEntry *te, Oid oid)
|
|
+{
|
|
+ lclTocEntry *tctx = (lclTocEntry *) te->formatData;
|
|
+
|
|
+ tarClose(AH, tctx->TH);
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Called by the archiver when finishing saving all BLOB DATA.
|
|
+ *
|
|
+ * Optional.
|
|
+ *
|
|
+ */
|
|
+static void
|
|
+_EndBlobs(ArchiveHandle *AH, TocEntry *te)
|
|
+{
|
|
+ lclContext *ctx = (lclContext *) AH->formatData;
|
|
+
|
|
+ /* Write out a fake zero OID to mark end-of-blobs. */
|
|
+ /* WriteInt(AH, 0); */
|
|
+
|
|
+ tarClose(AH, ctx->blobToc);
|
|
+}
|
|
+
|
|
+
|
|
+
|
|
+/*------------
|
|
+ * TAR Support
|
|
+ *------------
|
|
+ */
|
|
+
|
|
+static int
|
|
+tarPrintf(ArchiveHandle *AH, TAR_MEMBER *th, const char *fmt,...)
|
|
+{
|
|
+ char *p;
|
|
+ size_t len = 128; /* initial assumption about buffer size */
|
|
+ size_t cnt;
|
|
+
|
|
+ for (;;)
|
|
+ {
|
|
+ va_list args;
|
|
+
|
|
+ /* Allocate work buffer. */
|
|
+ p = (char *) pg_malloc(len);
|
|
+
|
|
+ /* Try to format the data. */
|
|
+ va_start(args, fmt);
|
|
+ cnt = pvsnprintf(p, len, fmt, args);
|
|
+ va_end(args);
|
|
+
|
|
+ if (cnt < len)
|
|
+ break; /* success */
|
|
+
|
|
+ /* Release buffer and loop around to try again with larger len. */
|
|
+ free(p);
|
|
+ len = cnt;
|
|
+ }
|
|
+
|
|
+ cnt = tarWrite(p, cnt, th);
|
|
+ free(p);
|
|
+ return (int) cnt;
|
|
+}
|
|
+
+bool
+isValidTarHeader(char *header)
+{
+ int sum;
+ int chk = tarChecksum(header);
+
+ sum = read_tar_number(&header[148], 8);
+
+ if (sum != chk)
+ return false;
+
+ /* POSIX tar format */
+ if (memcmp(&header[257], "ustar\0", 6) == 0 &&
+ memcmp(&header[263], "00", 2) == 0)
+ return true;
+ /* GNU tar format */
+ if (memcmp(&header[257], "ustar  \0", 8) == 0)
+ return true;
+ /* not-quite-POSIX format written by pre-9.3 pg_dump */
+ if (memcmp(&header[257], "ustar00\0", 8) == 0)
+ return true;
+
+ return false;
+}
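Editorial note, a sketch rather than the patch's own code: tarChecksum(), which isValidTarHeader() compares against the stored value, follows the ustar rule of summing all 512 header bytes while treating the 8-byte checksum field at offset 148 as if it contained ASCII spaces. A minimal reimplementation for illustration:

static int
sketch_tar_checksum(const unsigned char *h)
{
    int sum = 0;
    int i;

    for (i = 0; i < 512; i++)
        sum += (i >= 148 && i < 156) ? ' ' : h[i];   /* checksum field counts as spaces */
    return sum;
}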
+
|
|
+/* Given the member, write the TAR header & copy the file */
|
|
+static void
|
|
+_tarAddFile(ArchiveHandle *AH, TAR_MEMBER *th)
|
|
+{
|
|
+ lclContext *ctx = (lclContext *) AH->formatData;
|
|
+ FILE *tmp = th->tmpFH; /* Grab it for convenience */
|
|
+ char buf[32768];
|
|
+ size_t cnt;
|
|
+ pgoff_t len = 0;
|
|
+ size_t res;
|
|
+ size_t i,
|
|
+ pad;
|
|
+
|
|
+ /*
|
|
+ * Find file len & go back to start.
|
|
+ */
|
|
+ fseeko(tmp, 0, SEEK_END);
|
|
+ th->fileLen = ftello(tmp);
|
|
+ if (th->fileLen < 0)
|
|
+ exit_horribly(modulename, "could not determine seek position in archive file: %s\n",
|
|
+ strerror(errno));
|
|
+ fseeko(tmp, 0, SEEK_SET);
|
|
+
|
|
+ _tarWriteHeader(th);
|
|
+
|
|
+ while ((cnt = fread(buf, 1, sizeof(buf), tmp)) > 0)
|
|
+ {
|
|
+ if ((res = fwrite(buf, 1, cnt, th->tarFH)) != cnt)
|
|
+ WRITE_ERROR_EXIT;
|
|
+ len += res;
|
|
+ }
|
|
+ if (!feof(tmp))
|
|
+ READ_ERROR_EXIT(tmp);
|
|
+
|
|
+ if (fclose(tmp) != 0) /* This *should* delete it... */
|
|
+ exit_horribly(modulename, "could not close temporary file: %s\n",
|
|
+ strerror(errno));
|
|
+
|
|
+ if (len != th->fileLen)
|
|
+ {
|
|
+ char buf1[32],
|
|
+ buf2[32];
|
|
+
|
|
+ snprintf(buf1, sizeof(buf1), INT64_FORMAT, (int64) len);
|
|
+ snprintf(buf2, sizeof(buf2), INT64_FORMAT, (int64) th->fileLen);
|
|
+ exit_horribly(modulename, "actual file length (%s) does not match expected (%s)\n",
|
|
+ buf1, buf2);
|
|
+ }
|
|
+
|
|
+ pad = ((len + 511) & ~511) - len;
|
|
+ for (i = 0; i < pad; i++)
|
|
+ {
|
|
+ if (fputc('\0', th->tarFH) == EOF)
|
|
+ WRITE_ERROR_EXIT;
|
|
+ }
|
|
+
|
|
+ ctx->tarFHpos += len + pad;
|
|
+}
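Editorial note: the pad computation above rounds the member length up to the next 512-byte boundary, as tar requires. A worked example with a hypothetical length:

size_t len = 1300;                           /* hypothetical member length */
size_t pad = ((len + 511) & ~511) - len;     /* 1536 - 1300 == 236 zero bytes */

A length that is already a multiple of 512 gets no padding at all.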
|
|
+
|
|
+/* Locate the file in the archive, read header and position to data */
|
|
+static TAR_MEMBER *
|
|
+_tarPositionTo(ArchiveHandle *AH, const char *filename)
|
|
+{
|
|
+ lclContext *ctx = (lclContext *) AH->formatData;
|
|
+ TAR_MEMBER *th = pg_malloc0(sizeof(TAR_MEMBER));
|
|
+ char c;
|
|
+ char header[512];
|
|
+ size_t i,
|
|
+ len,
|
|
+ blks;
|
|
+ int id;
|
|
+
|
|
+ th->AH = AH;
|
|
+
|
|
+ /* Go to end of current file, if any */
|
|
+ if (ctx->tarFHpos != 0)
|
|
+ {
|
|
+ char buf1[100],
|
|
+ buf2[100];
|
|
+
|
|
+ snprintf(buf1, sizeof(buf1), INT64_FORMAT, (int64) ctx->tarFHpos);
|
|
+ snprintf(buf2, sizeof(buf2), INT64_FORMAT, (int64) ctx->tarNextMember);
|
|
+ ahlog(AH, 4, "moving from position %s to next member at file position %s\n",
|
|
+ buf1, buf2);
|
|
+
|
|
+ while (ctx->tarFHpos < ctx->tarNextMember)
|
|
+ _tarReadRaw(AH, &c, 1, NULL, ctx->tarFH);
|
|
+ }
|
|
+
|
|
+ {
|
|
+ char buf[100];
|
|
+
|
|
+ snprintf(buf, sizeof(buf), INT64_FORMAT, (int64) ctx->tarFHpos);
|
|
+ ahlog(AH, 4, "now at file position %s\n", buf);
|
|
+ }
|
|
+
|
|
+ /* We are at the start of the file, or at the next member */
|
|
+
|
|
+ /* Get the header */
|
|
+ if (!_tarGetHeader(AH, th))
|
|
+ {
|
|
+ if (filename)
|
|
+ exit_horribly(modulename, "could not find header for file \"%s\" in tar archive\n", filename);
|
|
+ else
|
|
+ {
|
|
+ /*
|
|
+ * We're just scanning the archive for the next file, so return
|
|
+ * null
|
|
+ */
|
|
+ free(th);
|
|
+ return NULL;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ while (filename != NULL && strcmp(th->targetFile, filename) != 0)
|
|
+ {
|
|
+ ahlog(AH, 4, "skipping tar member %s\n", th->targetFile);
|
|
+
|
|
+ id = atoi(th->targetFile);
|
|
+ if ((TocIDRequired(AH, id) & REQ_DATA) != 0)
|
|
+ exit_horribly(modulename, "restoring data out of order is not supported in this archive format: "
|
|
+ "\"%s\" is required, but comes before \"%s\" in the archive file.\n",
|
|
+ th->targetFile, filename);
|
|
+
|
|
+ /* Header doesn't match, so read to next header */
|
|
+ len = ((th->fileLen + 511) & ~511); /* Padded length */
|
|
+ blks = len >> 9; /* # of 512 byte blocks */
|
|
+
|
|
+ for (i = 0; i < blks; i++)
|
|
+ _tarReadRaw(AH, &header[0], 512, NULL, ctx->tarFH);
|
|
+
|
|
+ if (!_tarGetHeader(AH, th))
|
|
+ exit_horribly(modulename, "could not find header for file \"%s\" in tar archive\n", filename);
|
|
+ }
|
|
+
|
|
+ ctx->tarNextMember = ctx->tarFHpos + ((th->fileLen + 511) & ~511);
|
|
+ th->pos = 0;
|
|
+
|
|
+ return th;
|
|
+}
|
|
+
|
|
+/* Read & verify a header */
|
|
+static int
|
|
+_tarGetHeader(ArchiveHandle *AH, TAR_MEMBER *th)
|
|
+{
|
|
+ lclContext *ctx = (lclContext *) AH->formatData;
|
|
+ char h[512];
|
|
+ char tag[100 + 1];
|
|
+ int sum,
|
|
+ chk;
|
|
+ pgoff_t len;
|
|
+ pgoff_t hPos;
|
|
+ bool gotBlock = false;
|
|
+
|
|
+ while (!gotBlock)
|
|
+ {
|
|
+ /* Save the pos for reporting purposes */
|
|
+ hPos = ctx->tarFHpos;
|
|
+
|
|
+ /* Read a 512 byte block, return EOF, exit if short */
|
|
+ len = _tarReadRaw(AH, h, 512, NULL, ctx->tarFH);
|
|
+ if (len == 0) /* EOF */
|
|
+ return 0;
|
|
+
|
|
+ if (len != 512)
|
|
+ exit_horribly(modulename,
|
|
+ ngettext("incomplete tar header found (%lu byte)\n",
|
|
+ "incomplete tar header found (%lu bytes)\n",
|
|
+ len),
|
|
+ (unsigned long) len);
|
|
+
|
|
+ /* Calc checksum */
|
|
+ chk = tarChecksum(h);
|
|
+ sum = read_tar_number(&h[148], 8);
|
|
+
|
|
+ /*
|
|
+ * If the checksum failed, see if it is a null block. If so, silently
|
|
+ * continue to the next block.
|
|
+ */
|
|
+ if (chk == sum)
|
|
+ gotBlock = true;
|
|
+ else
|
|
+ {
|
|
+ int i;
|
|
+
|
|
+ for (i = 0; i < 512; i++)
|
|
+ {
|
|
+ if (h[i] != 0)
|
|
+ {
|
|
+ gotBlock = true;
|
|
+ break;
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+
|
|
+ /* Name field is 100 bytes, might not be null-terminated */
|
|
+ strlcpy(tag, &h[0], 100 + 1);
|
|
+
|
|
+ len = read_tar_number(&h[124], 12);
|
|
+
|
|
+ {
|
|
+ char posbuf[32];
|
|
+ char lenbuf[32];
|
|
+
|
|
+ snprintf(posbuf, sizeof(posbuf), UINT64_FORMAT, (uint64) hPos);
|
|
+ snprintf(lenbuf, sizeof(lenbuf), UINT64_FORMAT, (uint64) len);
|
|
+ ahlog(AH, 3, "TOC Entry %s at %s (length %s, checksum %d)\n",
|
|
+ tag, posbuf, lenbuf, sum);
|
|
+ }
|
|
+
|
|
+ if (chk != sum)
|
|
+ {
|
|
+ char posbuf[32];
|
|
+
|
|
+ snprintf(posbuf, sizeof(posbuf), UINT64_FORMAT,
|
|
+ (uint64) ftello(ctx->tarFH));
|
|
+ exit_horribly(modulename,
|
|
+ "corrupt tar header found in %s "
|
|
+ "(expected %d, computed %d) file position %s\n",
|
|
+ tag, sum, chk, posbuf);
|
|
+ }
|
|
+
|
|
+ th->targetFile = pg_strdup(tag);
|
|
+ th->fileLen = len;
|
|
+
|
|
+ return 1;
|
|
+}
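Editorial note (sketch): the length that _tarGetHeader() records comes from the 12-byte size field at offset 124, stored as an octal ASCII string, which is what read_tar_number() decodes. For example:

const char size_field[12] = "00000001750";   /* octal digits, NUL-terminated */
long size = strtol(size_field, NULL, 8);     /* == 1000 bytes */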
|
|
+
|
|
+
|
|
+static void
|
|
+_tarWriteHeader(TAR_MEMBER *th)
|
|
+{
|
|
+ char h[512];
|
|
+
|
|
+ tarCreateHeader(h, th->targetFile, NULL, th->fileLen,
|
|
+ 0600, 04000, 02000, time(NULL));
|
|
+
|
|
+ /* Now write the completed header. */
|
|
+ if (fwrite(h, 1, 512, th->tarFH) != 512)
|
|
+ WRITE_ERROR_EXIT;
|
|
+}
--- /dev/null
+++ pglogical-2.2.2/pglogical_dump/pg_backup_tar.h
@@ -0,0 +1,37 @@
+/*
+ * src/bin/pg_dump/pg_backup_tar.h
+ *
+ * TAR Header (see "ustar interchange format" in POSIX 1003.1)
+ *
+ * Offset Length Contents
+ * 0 100 bytes File name ('\0' terminated, 99 maximum length)
+ * 100 8 bytes File mode (in octal ascii)
+ * 108 8 bytes User ID (in octal ascii)
+ * 116 8 bytes Group ID (in octal ascii)
+ * 124 12 bytes File size (in octal ascii)
+ * 136 12 bytes Modify time (Unix timestamp in octal ascii)
+ * 148 8 bytes Header checksum (in octal ascii)
+ * 156 1 byte Type flag (see below)
+ * 157 100 bytes Linkname, if symlink ('\0' terminated, 99 maximum length)
+ * 257 6 bytes Magic ("ustar\0")
+ * 263 2 bytes Version ("00")
+ * 265 32 bytes User name ('\0' terminated, 31 maximum length)
+ * 297 32 bytes Group name ('\0' terminated, 31 maximum length)
+ * 329 8 bytes Major device ID (in octal ascii)
+ * 337 8 bytes Minor device ID (in octal ascii)
+ * 345 155 bytes File name prefix (not used in our implementation)
+ * 500 12 bytes Padding
+ *
+ * 512 (s+p)bytes File contents, padded out to 512-byte boundary
+ */
+
+/* The type flag defines the type of file */
+#define LF_OLDNORMAL '\0' /* Normal disk file, Unix compatible */
+#define LF_NORMAL '0' /* Normal disk file */
+#define LF_LINK '1' /* Link to previously dumped file */
+#define LF_SYMLINK '2' /* Symbolic link */
+#define LF_CHR '3' /* Character special file */
+#define LF_BLK '4' /* Block special file */
+#define LF_DIR '5' /* Directory */
+#define LF_FIFO '6' /* FIFO special file */
+#define LF_CONTIG '7' /* Contiguous file */
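Editorial note, illustrative only and not part of the patch: the offsets documented above can be written down as a 512-byte C overlay; every numeric field is an octal ASCII string rather than a binary integer.

struct ustar_header_sketch
{
    char name[100];      /* offset   0 */
    char mode[8];        /* offset 100 */
    char uid[8];         /* offset 108 */
    char gid[8];         /* offset 116 */
    char size[12];       /* offset 124 */
    char mtime[12];      /* offset 136 */
    char chksum[8];      /* offset 148 */
    char typeflag;       /* offset 156 */
    char linkname[100];  /* offset 157 */
    char magic[6];       /* offset 257 */
    char version[2];     /* offset 263 */
    char uname[32];      /* offset 265 */
    char gname[32];      /* offset 297 */
    char devmajor[8];    /* offset 329 */
    char devminor[8];    /* offset 337 */
    char prefix[155];    /* offset 345 */
    char padding[12];    /* offset 500 */
};                       /* 512 bytes in total */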
--- /dev/null
|
|
+++ pglogical-2.2.2/pglogical_dump/pg_backup_utils.c
|
|
@@ -0,0 +1,126 @@
|
|
+/*-------------------------------------------------------------------------
|
|
+ *
|
|
+ * pg_backup_utils.c
|
|
+ * Utility routines shared by pg_dump and pg_restore
|
|
+ *
|
|
+ *
|
|
+ * Portions Copyright (c) 1996-2014, PostgreSQL Global Development Group
|
|
+ * Portions Copyright (c) 1994, Regents of the University of California
|
|
+ *
|
|
+ * src/bin/pg_dump/pg_backup_utils.c
|
|
+ *
|
|
+ *-------------------------------------------------------------------------
|
|
+ */
|
|
+#include "postgres_fe.h"
|
|
+
|
|
+#include "pg_backup_utils.h"
|
|
+#include "parallel.h"
|
|
+
|
|
+/* Globals exported by this file */
|
|
+const char *progname = NULL;
|
|
+
|
|
+#define MAX_ON_EXIT_NICELY 20
|
|
+
|
|
+static struct
|
|
+{
|
|
+ on_exit_nicely_callback function;
|
|
+ void *arg;
|
|
+} on_exit_nicely_list[MAX_ON_EXIT_NICELY];
|
|
+
|
|
+static int on_exit_nicely_index;
|
|
+
|
|
+/*
|
|
+ * Parse a --section=foo command line argument.
|
|
+ *
|
|
+ * Set or update the bitmask in *dumpSections according to arg.
|
|
+ * dumpSections is initialised as DUMP_UNSECTIONED by pg_dump and
|
|
+ * pg_restore so they can know if this has even been called.
|
|
+ */
|
|
+void
|
|
+set_dump_section(const char *arg, int *dumpSections)
|
|
+{
|
|
+ /* if this is the first call, clear all the bits */
|
|
+ if (*dumpSections == DUMP_UNSECTIONED)
|
|
+ *dumpSections = 0;
|
|
+
|
|
+ if (strcmp(arg, "pre-data") == 0)
|
|
+ *dumpSections |= DUMP_PRE_DATA;
|
|
+ else if (strcmp(arg, "data") == 0)
|
|
+ *dumpSections |= DUMP_DATA;
|
|
+ else if (strcmp(arg, "post-data") == 0)
|
|
+ *dumpSections |= DUMP_POST_DATA;
|
|
+ else
|
|
+ {
|
|
+ fprintf(stderr, _("%s: unrecognized section name: \"%s\"\n"),
|
|
+ progname, arg);
|
|
+ fprintf(stderr, _("Try \"%s --help\" for more information.\n"),
|
|
+ progname);
|
|
+ exit_nicely(1);
|
|
+ }
|
|
+}
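Editorial note (usage sketch): --section may be given more than once; the bits simply accumulate, starting from the DUMP_UNSECTIONED sentinel defined in pg_backup_utils.h:

int sections = DUMP_UNSECTIONED;

set_dump_section("pre-data", &sections);   /* sections == DUMP_PRE_DATA */
set_dump_section("data", &sections);       /* sections == DUMP_PRE_DATA | DUMP_DATA */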
|
|
+
|
|
+
|
|
+/*
|
|
+ * Write a printf-style message to stderr.
|
|
+ *
|
|
+ * The program name is prepended, if "progname" has been set.
|
|
+ * Also, if modulename isn't NULL, that's included too.
|
|
+ * Note that we'll try to translate the modulename and the fmt string.
|
|
+ */
|
|
+void
|
|
+write_msg(const char *modulename, const char *fmt,...)
|
|
+{
|
|
+ va_list ap;
|
|
+
|
|
+ va_start(ap, fmt);
|
|
+ vwrite_msg(modulename, fmt, ap);
|
|
+ va_end(ap);
|
|
+}
|
|
+
|
|
+/*
|
|
+ * As write_msg, but pass a va_list not variable arguments.
|
|
+ */
|
|
+void
|
|
+vwrite_msg(const char *modulename, const char *fmt, va_list ap)
|
|
+{
|
|
+ if (progname)
|
|
+ {
|
|
+ if (modulename)
|
|
+ fprintf(stderr, "%s: [%s] ", progname, _(modulename));
|
|
+ else
|
|
+ fprintf(stderr, "%s: ", progname);
|
|
+ }
|
|
+ vfprintf(stderr, _(fmt), ap);
|
|
+}
|
|
+
|
|
+/* Register a callback to be run when exit_nicely is invoked. */
|
|
+void
|
|
+on_exit_nicely(on_exit_nicely_callback function, void *arg)
|
|
+{
|
|
+ if (on_exit_nicely_index >= MAX_ON_EXIT_NICELY)
|
|
+ exit_horribly(NULL, "out of on_exit_nicely slots\n");
|
|
+ on_exit_nicely_list[on_exit_nicely_index].function = function;
|
|
+ on_exit_nicely_list[on_exit_nicely_index].arg = arg;
|
|
+ on_exit_nicely_index++;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Run accumulated on_exit_nicely callbacks in reverse order and then exit
|
|
+ * quietly. This needs to be thread-safe.
|
|
+ */
|
|
+void
|
|
+exit_nicely(int code)
|
|
+{
|
|
+ int i;
|
|
+
|
|
+ for (i = on_exit_nicely_index - 1; i >= 0; i--)
|
|
+ (*on_exit_nicely_list[i].function) (code,
|
|
+ on_exit_nicely_list[i].arg);
|
|
+
|
|
+#ifdef WIN32
|
|
+ if (parallel_init_done && GetCurrentThreadId() != mainThreadId)
|
|
+ ExitThread(code);
|
|
+#endif
|
|
+
|
|
+ exit(code);
|
|
+}
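Editorial note (usage sketch, assuming the declarations in this file): callbacks registered with on_exit_nicely() run in reverse order of registration once exit_nicely() is reached:

static void
report(int code, void *arg)
{
    fprintf(stderr, "cleanup: %s (exit code %d)\n", (const char *) arg, code);
}

/* ... later, in some setup code ... */
on_exit_nicely(report, (void *) "close the archive");   /* runs second */
on_exit_nicely(report, (void *) "remove temp files");   /* runs first */
exit_nicely(1);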
|
|
--- /dev/null
|
|
+++ pglogical-2.2.2/pglogical_dump/pg_backup_utils.h
|
|
@@ -0,0 +1,40 @@
|
|
+/*-------------------------------------------------------------------------
|
|
+ *
|
|
+ * pg_backup_utils.h
|
|
+ * Utility routines shared by pg_dump and pg_restore.
|
|
+ *
|
|
+ *
|
|
+ * Portions Copyright (c) 1996-2014, PostgreSQL Global Development Group
|
|
+ * Portions Copyright (c) 1994, Regents of the University of California
|
|
+ *
|
|
+ * src/bin/pg_dump/pg_backup_utils.h
|
|
+ *
|
|
+ *-------------------------------------------------------------------------
|
|
+ */
|
|
+
|
|
+#ifndef PG_BACKUP_UTILS_H
|
|
+#define PG_BACKUP_UTILS_H
|
|
+
|
|
+typedef enum /* bits returned by set_dump_section */
|
|
+{
|
|
+ DUMP_PRE_DATA = 0x01,
|
|
+ DUMP_DATA = 0x02,
|
|
+ DUMP_POST_DATA = 0x04,
|
|
+ DUMP_UNSECTIONED = 0xff
|
|
+} DumpSections;
|
|
+
|
|
+typedef void (*on_exit_nicely_callback) (int code, void *arg);
|
|
+
|
|
+extern const char *progname;
|
|
+
|
|
+extern void set_dump_section(const char *arg, int *dumpSections);
|
|
+extern void
|
|
+write_msg(const char *modulename, const char *fmt,...)
|
|
+__attribute__((format(PG_PRINTF_ATTRIBUTE, 2, 3)));
|
|
+extern void
|
|
+vwrite_msg(const char *modulename, const char *fmt, va_list ap)
|
|
+__attribute__((format(PG_PRINTF_ATTRIBUTE, 2, 0)));
|
|
+extern void on_exit_nicely(on_exit_nicely_callback function, void *arg);
|
|
+extern void exit_nicely(int code) __attribute__((noreturn));
|
|
+
|
|
+#endif /* PG_BACKUP_UTILS_H */
|
|
--- /dev/null
|
|
+++ pglogical-2.2.2/pglogical_dump/pg_dump.c
|
|
@@ -0,0 +1,15811 @@
|
|
+/*-------------------------------------------------------------------------
|
|
+ *
|
|
+ * pg_dump.c
|
|
+ * pg_dump is a utility for dumping out a postgres database
|
|
+ * into a script file.
|
|
+ *
|
|
+ * Portions Copyright (c) 1996-2014, PostgreSQL Global Development Group
|
|
+ * Portions Copyright (c) 1994, Regents of the University of California
|
|
+ *
|
|
+ * pg_dump will read the system catalogs in a database and dump out a
|
|
+ * script that reproduces the schema in terms of SQL that is understood
|
|
+ * by PostgreSQL
|
|
+ *
|
|
+ * Note that pg_dump runs in a transaction-snapshot mode transaction,
|
|
+ * so it sees a consistent snapshot of the database including system
|
|
+ * catalogs. However, it relies in part on various specialized backend
|
|
+ * functions like pg_get_indexdef(), and those things tend to look at
|
|
+ * the currently committed state. So it is possible to get 'cache
|
|
+ * lookup failed' error if someone performs DDL changes while a dump is
|
|
+ * happening. The window for this sort of thing is from the acquisition
|
|
+ * of the transaction snapshot to getSchemaData() (when pg_dump acquires
|
|
+ * AccessShareLock on every table it intends to dump). It isn't very large,
|
|
+ * but it can happen.
|
|
+ *
|
|
+ * http://archives.postgresql.org/pgsql-bugs/2010-02/msg00187.php
|
|
+ *
|
|
+ * IDENTIFICATION
|
|
+ * src/bin/pg_dump/pg_dump.c
|
|
+ *
|
|
+ *-------------------------------------------------------------------------
|
|
+ */
|
|
+
|
|
+#include "postgres_fe.h"
|
|
+
|
|
+#include <unistd.h>
|
|
+#include <ctype.h>
|
|
+#ifdef ENABLE_NLS
|
|
+#include <locale.h>
|
|
+#endif
|
|
+#ifdef HAVE_TERMIOS_H
|
|
+#include <termios.h>
|
|
+#endif
|
|
+
|
|
+#include "getopt_long.h"
|
|
+
|
|
+#include "access/attnum.h"
|
|
+#include "access/sysattr.h"
|
|
+#include "access/transam.h"
|
|
+#include "catalog/pg_cast.h"
|
|
+#include "catalog/pg_class.h"
|
|
+#include "catalog/pg_default_acl.h"
|
|
+#include "catalog/pg_event_trigger.h"
|
|
+#include "catalog/pg_largeobject.h"
|
|
+#include "catalog/pg_largeobject_metadata.h"
|
|
+#include "catalog/pg_proc.h"
|
|
+#include "catalog/pg_trigger.h"
|
|
+#include "catalog/pg_type.h"
|
|
+#include "libpq/libpq-fs.h"
|
|
+
|
|
+#include "pg_backup_archiver.h"
|
|
+#include "pg_backup_db.h"
|
|
+#include "pg_backup_utils.h"
|
|
+#include "dumputils.h"
|
|
+#include "parallel.h"
|
|
+
|
|
+
|
|
+typedef struct
|
|
+{
|
|
+ const char *descr; /* comment for an object */
|
|
+ Oid classoid; /* object class (catalog OID) */
|
|
+ Oid objoid; /* object OID */
|
|
+ int objsubid; /* subobject (table column #) */
|
|
+} CommentItem;
|
|
+
|
|
+typedef struct
|
|
+{
|
|
+ const char *provider; /* label provider of this security label */
|
|
+ const char *label; /* security label for an object */
|
|
+ Oid classoid; /* object class (catalog OID) */
|
|
+ Oid objoid; /* object OID */
|
|
+ int objsubid; /* subobject (table column #) */
|
|
+} SecLabelItem;
|
|
+
|
|
+/* global decls */
|
|
+bool g_verbose; /* User wants verbose narration of our
|
|
+ * activities. */
|
|
+
|
|
+/* various user-settable parameters */
|
|
+static bool schemaOnly;
|
|
+static bool dataOnly;
|
|
+static int dumpSections; /* bitmask of chosen sections */
|
|
+static bool aclsSkip;
|
|
+static const char *lockWaitTimeout;
|
|
+
|
|
+/* subquery used to convert user ID (eg, datdba) to user name */
|
|
+static const char *username_subquery;
|
|
+
|
|
+/* obsolete as of 7.3: */
|
|
+static Oid g_last_builtin_oid; /* value of the last builtin oid */
|
|
+
|
|
+/*
|
|
+ * Object inclusion/exclusion lists
|
|
+ *
|
|
+ * The string lists record the patterns given by command-line switches,
|
|
+ * which we then convert to lists of OIDs of matching objects.
|
|
+ */
|
|
+static SimpleStringList schema_include_patterns = {NULL, NULL};
|
|
+static SimpleOidList schema_include_oids = {NULL, NULL};
|
|
+static SimpleStringList schema_exclude_patterns = {NULL, NULL};
|
|
+static SimpleOidList schema_exclude_oids = {NULL, NULL};
|
|
+
|
|
+static SimpleStringList table_include_patterns = {NULL, NULL};
|
|
+static SimpleOidList table_include_oids = {NULL, NULL};
|
|
+static SimpleStringList table_exclude_patterns = {NULL, NULL};
|
|
+static SimpleOidList table_exclude_oids = {NULL, NULL};
|
|
+static SimpleStringList tabledata_exclude_patterns = {NULL, NULL};
|
|
+static SimpleOidList tabledata_exclude_oids = {NULL, NULL};
|
|
+
|
|
+/* default, if no "inclusion" switches appear, is to dump everything */
|
|
+static bool include_everything = true;
|
|
+
|
|
+char g_opaque_type[10]; /* name for the opaque type */
|
|
+
|
|
+/* placeholders for the delimiters for comments */
|
|
+char g_comment_start[10];
|
|
+char g_comment_end[10];
|
|
+
|
|
+static const CatalogId nilCatalogId = {0, 0};
|
|
+
|
|
+/* flags for various command-line long options */
|
|
+static int binary_upgrade = 0;
|
|
+static int disable_dollar_quoting = 0;
|
|
+static int dump_inserts = 0;
|
|
+static int column_inserts = 0;
|
|
+static int if_exists = 0;
|
|
+static int no_security_labels = 0;
|
|
+static int no_synchronized_snapshots = 0;
|
|
+static int no_unlogged_table_data = 0;
|
|
+static int serializable_deferrable = 0;
|
|
+
|
|
+
|
|
+static void help(const char *progname);
|
|
+static void setup_connection(Archive *AH, const char *dumpencoding,
|
|
+ const char *dumpsnapshot, char *use_role);
|
|
+static ArchiveFormat parseArchiveFormat(const char *format, ArchiveMode *mode);
|
|
+static void expand_schema_name_patterns(Archive *fout,
|
|
+ SimpleStringList *patterns,
|
|
+ SimpleOidList *oids);
|
|
+static void expand_table_name_patterns(Archive *fout,
|
|
+ SimpleStringList *patterns,
|
|
+ SimpleOidList *oids);
|
|
+static NamespaceInfo *findNamespace(Archive *fout, Oid nsoid, Oid objoid);
|
|
+static void dumpTableData(Archive *fout, TableDataInfo *tdinfo);
|
|
+static void refreshMatViewData(Archive *fout, TableDataInfo *tdinfo);
|
|
+static void guessConstraintInheritance(TableInfo *tblinfo, int numTables);
|
|
+static void dumpComment(Archive *fout, const char *target,
|
|
+ const char *namespace, const char *owner,
|
|
+ CatalogId catalogId, int subid, DumpId dumpId);
|
|
+static int findComments(Archive *fout, Oid classoid, Oid objoid,
|
|
+ CommentItem **items);
|
|
+static int collectComments(Archive *fout, CommentItem **items);
|
|
+static void dumpSecLabel(Archive *fout, const char *target,
|
|
+ const char *namespace, const char *owner,
|
|
+ CatalogId catalogId, int subid, DumpId dumpId);
|
|
+static int findSecLabels(Archive *fout, Oid classoid, Oid objoid,
|
|
+ SecLabelItem **items);
|
|
+static int collectSecLabels(Archive *fout, SecLabelItem **items);
|
|
+static void dumpDumpableObject(Archive *fout, DumpableObject *dobj);
|
|
+static void dumpNamespace(Archive *fout, NamespaceInfo *nspinfo);
|
|
+static void dumpExtension(Archive *fout, ExtensionInfo *extinfo);
|
|
+static void dumpType(Archive *fout, TypeInfo *tyinfo);
|
|
+static void dumpBaseType(Archive *fout, TypeInfo *tyinfo);
|
|
+static void dumpEnumType(Archive *fout, TypeInfo *tyinfo);
|
|
+static void dumpRangeType(Archive *fout, TypeInfo *tyinfo);
|
|
+static void dumpUndefinedType(Archive *fout, TypeInfo *tyinfo);
|
|
+static void dumpDomain(Archive *fout, TypeInfo *tyinfo);
|
|
+static void dumpCompositeType(Archive *fout, TypeInfo *tyinfo);
|
|
+static void dumpCompositeTypeColComments(Archive *fout, TypeInfo *tyinfo);
|
|
+static void dumpShellType(Archive *fout, ShellTypeInfo *stinfo);
|
|
+static void dumpProcLang(Archive *fout, ProcLangInfo *plang);
|
|
+static void dumpFunc(Archive *fout, FuncInfo *finfo);
|
|
+static void dumpCast(Archive *fout, CastInfo *cast);
|
|
+static void dumpOpr(Archive *fout, OprInfo *oprinfo);
|
|
+static void dumpOpclass(Archive *fout, OpclassInfo *opcinfo);
|
|
+static void dumpOpfamily(Archive *fout, OpfamilyInfo *opfinfo);
|
|
+static void dumpCollation(Archive *fout, CollInfo *convinfo);
|
|
+static void dumpConversion(Archive *fout, ConvInfo *convinfo);
|
|
+static void dumpRule(Archive *fout, RuleInfo *rinfo);
|
|
+static void dumpAgg(Archive *fout, AggInfo *agginfo);
|
|
+static void dumpTrigger(Archive *fout, TriggerInfo *tginfo);
|
|
+static void dumpEventTrigger(Archive *fout, EventTriggerInfo *evtinfo);
|
|
+static void dumpTable(Archive *fout, TableInfo *tbinfo);
|
|
+static void dumpTableSchema(Archive *fout, TableInfo *tbinfo);
|
|
+static void dumpAttrDef(Archive *fout, AttrDefInfo *adinfo);
|
|
+static void dumpSequence(Archive *fout, TableInfo *tbinfo);
|
|
+static void dumpSequenceData(Archive *fout, TableDataInfo *tdinfo);
|
|
+static void dumpIndex(Archive *fout, IndxInfo *indxinfo);
|
|
+static void dumpConstraint(Archive *fout, ConstraintInfo *coninfo);
|
|
+static void dumpTableConstraintComment(Archive *fout, ConstraintInfo *coninfo);
|
|
+static void dumpTSParser(Archive *fout, TSParserInfo *prsinfo);
|
|
+static void dumpTSDictionary(Archive *fout, TSDictInfo *dictinfo);
|
|
+static void dumpTSTemplate(Archive *fout, TSTemplateInfo *tmplinfo);
|
|
+static void dumpTSConfig(Archive *fout, TSConfigInfo *cfginfo);
|
|
+static void dumpForeignDataWrapper(Archive *fout, FdwInfo *fdwinfo);
|
|
+static void dumpForeignServer(Archive *fout, ForeignServerInfo *srvinfo);
|
|
+static void dumpUserMappings(Archive *fout,
|
|
+ const char *servername, const char *namespace,
|
|
+ const char *owner, CatalogId catalogId, DumpId dumpId);
|
|
+static void dumpDefaultACL(Archive *fout, DefaultACLInfo *daclinfo);
|
|
+
|
|
+static void dumpACL(Archive *fout, CatalogId objCatId, DumpId objDumpId,
|
|
+ const char *type, const char *name, const char *subname,
|
|
+ const char *tag, const char *nspname, const char *owner,
|
|
+ const char *acls);
|
|
+
|
|
+static void getDependencies(Archive *fout);
|
|
+static void BuildArchiveDependencies(Archive *fout);
|
|
+static void findDumpableDependencies(ArchiveHandle *AH, DumpableObject *dobj,
|
|
+ DumpId **dependencies, int *nDeps, int *allocDeps);
|
|
+
|
|
+static DumpableObject *createBoundaryObjects(void);
|
|
+static void addBoundaryDependencies(DumpableObject **dobjs, int numObjs,
|
|
+ DumpableObject *boundaryObjs);
|
|
+
|
|
+static void getDomainConstraints(Archive *fout, TypeInfo *tyinfo);
|
|
+static void getTableData(TableInfo *tblinfo, int numTables, bool oids);
|
|
+static void makeTableDataInfo(TableInfo *tbinfo, bool oids);
|
|
+static void buildMatViewRefreshDependencies(Archive *fout);
|
|
+static void getTableDataFKConstraints(void);
|
|
+static char *format_function_arguments(FuncInfo *finfo, char *funcargs,
|
|
+ bool is_agg);
|
|
+static char *format_function_arguments_old(Archive *fout,
|
|
+ FuncInfo *finfo, int nallargs,
|
|
+ char **allargtypes,
|
|
+ char **argmodes,
|
|
+ char **argnames);
|
|
+static char *format_function_signature(Archive *fout,
|
|
+ FuncInfo *finfo, bool honor_quotes);
|
|
+static char *convertRegProcReference(Archive *fout,
|
|
+ const char *proc);
|
|
+static char *convertOperatorReference(Archive *fout, const char *opr);
|
|
+static const char *convertTSFunction(Archive *fout, Oid funcOid);
|
|
+static Oid findLastBuiltinOid_V71(Archive *fout, const char *);
|
|
+static Oid findLastBuiltinOid_V70(Archive *fout);
|
|
+static void selectSourceSchema(Archive *fout, const char *schemaName);
|
|
+static char *getFormattedTypeName(Archive *fout, Oid oid, OidOptions opts);
|
|
+static char *myFormatType(const char *typname, int32 typmod);
|
|
+static void getBlobs(Archive *fout);
|
|
+static void dumpBlob(Archive *fout, BlobInfo *binfo);
|
|
+static int dumpBlobs(Archive *fout, void *arg);
|
|
+static void dumpDatabase(Archive *AH);
|
|
+static void dumpEncoding(Archive *AH);
|
|
+static void dumpStdStrings(Archive *AH);
|
|
+static void binary_upgrade_set_type_oids_by_type_oid(Archive *fout,
|
|
+ PQExpBuffer upgrade_buffer, Oid pg_type_oid);
|
|
+static bool binary_upgrade_set_type_oids_by_rel_oid(Archive *fout,
|
|
+ PQExpBuffer upgrade_buffer, Oid pg_rel_oid);
|
|
+static void binary_upgrade_set_pg_class_oids(Archive *fout,
|
|
+ PQExpBuffer upgrade_buffer,
|
|
+ Oid pg_class_oid, bool is_index);
|
|
+static void binary_upgrade_extension_member(PQExpBuffer upgrade_buffer,
|
|
+ DumpableObject *dobj,
|
|
+ const char *objlabel);
|
|
+static const char *getAttrName(int attrnum, TableInfo *tblInfo);
|
|
+static const char *fmtCopyColumnList(const TableInfo *ti, PQExpBuffer buffer);
|
|
+static char *get_synchronized_snapshot(Archive *fout);
|
|
+static PGresult *ExecuteSqlQueryForSingleRow(Archive *fout, char *query);
|
|
+static void setupDumpWorker(Archive *AHX, RestoreOptions *ropt);
|
|
+
|
|
+
|
|
+int
|
|
+main(int argc, char **argv)
|
|
+{
|
|
+ int c;
|
|
+ const char *filename = NULL;
|
|
+ const char *format = "p";
|
|
+ const char *dbname = NULL;
|
|
+ const char *pghost = NULL;
|
|
+ const char *pgport = NULL;
|
|
+ const char *username = NULL;
|
|
+ const char *dumpencoding = NULL;
|
|
+ const char *dumpsnapshot = NULL;
|
|
+ bool oids = false;
|
|
+ TableInfo *tblinfo;
|
|
+ int numTables;
|
|
+ DumpableObject **dobjs;
|
|
+ int numObjs;
|
|
+ DumpableObject *boundaryObjs;
|
|
+ int i;
|
|
+ int numWorkers = 1;
|
|
+ enum trivalue prompt_password = TRI_DEFAULT;
|
|
+ int compressLevel = -1;
|
|
+ int plainText = 0;
|
|
+ int outputClean = 0;
|
|
+ int outputCreateDB = 0;
|
|
+ bool outputBlobs = false;
|
|
+ int outputNoOwner = 0;
|
|
+ char *outputSuperuser = NULL;
|
|
+ char *use_role = NULL;
|
|
+ int optindex;
|
|
+ RestoreOptions *ropt;
|
|
+ ArchiveFormat archiveFormat = archUnknown;
|
|
+ ArchiveMode archiveMode;
|
|
+ Archive *fout; /* the script file */
|
|
+
|
|
+ static int disable_triggers = 0;
|
|
+ static int outputNoTablespaces = 0;
|
|
+ static int use_setsessauth = 0;
|
|
+
|
|
+ static struct option long_options[] = {
|
|
+ {"data-only", no_argument, NULL, 'a'},
|
|
+ {"blobs", no_argument, NULL, 'b'},
|
|
+ {"clean", no_argument, NULL, 'c'},
|
|
+ {"create", no_argument, NULL, 'C'},
|
|
+ {"dbname", required_argument, NULL, 'd'},
|
|
+ {"file", required_argument, NULL, 'f'},
|
|
+ {"format", required_argument, NULL, 'F'},
|
|
+ {"host", required_argument, NULL, 'h'},
|
|
+ {"ignore-version", no_argument, NULL, 'i'},
|
|
+ {"jobs", 1, NULL, 'j'},
|
|
+ {"no-reconnect", no_argument, NULL, 'R'},
|
|
+ {"oids", no_argument, NULL, 'o'},
|
|
+ {"no-owner", no_argument, NULL, 'O'},
|
|
+ {"port", required_argument, NULL, 'p'},
|
|
+ {"schema", required_argument, NULL, 'n'},
|
|
+ {"exclude-schema", required_argument, NULL, 'N'},
|
|
+ {"schema-only", no_argument, NULL, 's'},
|
|
+ {"superuser", required_argument, NULL, 'S'},
|
|
+ {"table", required_argument, NULL, 't'},
|
|
+ {"exclude-table", required_argument, NULL, 'T'},
|
|
+ {"no-password", no_argument, NULL, 'w'},
|
|
+ {"password", no_argument, NULL, 'W'},
|
|
+ {"username", required_argument, NULL, 'U'},
|
|
+ {"verbose", no_argument, NULL, 'v'},
|
|
+ {"no-privileges", no_argument, NULL, 'x'},
|
|
+ {"no-acl", no_argument, NULL, 'x'},
|
|
+ {"compress", required_argument, NULL, 'Z'},
|
|
+ {"encoding", required_argument, NULL, 'E'},
|
|
+ {"help", no_argument, NULL, '?'},
|
|
+ {"version", no_argument, NULL, 'V'},
|
|
+
|
|
+ /*
|
|
+ * the following options don't have an equivalent short option letter
|
|
+ */
|
|
+ {"attribute-inserts", no_argument, &column_inserts, 1},
|
|
+ {"binary-upgrade", no_argument, &binary_upgrade, 1},
|
|
+ {"column-inserts", no_argument, &column_inserts, 1},
|
|
+ {"disable-dollar-quoting", no_argument, &disable_dollar_quoting, 1},
|
|
+ {"disable-triggers", no_argument, &disable_triggers, 1},
|
|
+ {"exclude-table-data", required_argument, NULL, 4},
|
|
+ {"if-exists", no_argument, &if_exists, 1},
|
|
+ {"inserts", no_argument, &dump_inserts, 1},
|
|
+ {"lock-wait-timeout", required_argument, NULL, 2},
|
|
+ {"no-tablespaces", no_argument, &outputNoTablespaces, 1},
|
|
+ {"quote-all-identifiers", no_argument, &quote_all_identifiers, 1},
|
|
+ {"role", required_argument, NULL, 3},
|
|
+ {"section", required_argument, NULL, 5},
|
|
+ {"serializable-deferrable", no_argument, &serializable_deferrable, 1},
|
|
+ {"snapshot", required_argument, NULL, 6},
|
|
+ {"use-set-session-authorization", no_argument, &use_setsessauth, 1},
|
|
+ {"no-security-labels", no_argument, &no_security_labels, 1},
|
|
+ {"no-synchronized-snapshots", no_argument, &no_synchronized_snapshots, 1},
|
|
+ {"no-unlogged-table-data", no_argument, &no_unlogged_table_data, 1},
|
|
+
|
|
+ {NULL, 0, NULL, 0}
|
|
+ };
|
|
+
|
|
+ set_pglocale_pgservice(argv[0], PG_TEXTDOMAIN("pg_dump"));
|
|
+
|
|
+ /*
|
|
+ * Initialize what we need for parallel execution, especially for thread
|
|
+ * support on Windows.
|
|
+ */
|
|
+ init_parallel_dump_utils();
|
|
+
|
|
+ g_verbose = false;
|
|
+
|
|
+ strcpy(g_comment_start, "-- ");
|
|
+ g_comment_end[0] = '\0';
|
|
+ strcpy(g_opaque_type, "opaque");
|
|
+
|
|
+ dataOnly = schemaOnly = false;
|
|
+ dumpSections = DUMP_UNSECTIONED;
|
|
+ lockWaitTimeout = NULL;
|
|
+
|
|
+ progname = get_progname(argv[0]);
|
|
+
|
|
+ /* Set default options based on progname */
|
|
+ if (strcmp(progname, "pg_backup") == 0)
|
|
+ format = "c";
|
|
+
|
|
+ if (argc > 1)
|
|
+ {
|
|
+ if (strcmp(argv[1], "--help") == 0 || strcmp(argv[1], "-?") == 0)
|
|
+ {
|
|
+ help(progname);
|
|
+ exit_nicely(0);
|
|
+ }
|
|
+ if (strcmp(argv[1], "--version") == 0 || strcmp(argv[1], "-V") == 0)
|
|
+ {
|
|
+ puts("pg_dump (PostgreSQL) " PG_VERSION);
|
|
+ exit_nicely(0);
|
|
+ }
|
|
+ }
|
|
+
|
|
+ while ((c = getopt_long(argc, argv, "abcCd:E:f:F:h:ij:n:N:oOp:RsS:t:T:U:vwWxZ:",
|
|
+ long_options, &optindex)) != -1)
|
|
+ {
|
|
+ switch (c)
|
|
+ {
|
|
+ case 'a': /* Dump data only */
|
|
+ dataOnly = true;
|
|
+ break;
|
|
+
|
|
+ case 'b': /* Dump blobs */
|
|
+ outputBlobs = true;
|
|
+ break;
|
|
+
|
|
+ case 'c': /* clean (i.e., drop) schema prior to create */
|
|
+ outputClean = 1;
|
|
+ break;
|
|
+
|
|
+ case 'C': /* Create DB */
|
|
+ outputCreateDB = 1;
|
|
+ break;
|
|
+
|
|
+ case 'd': /* database name */
|
|
+ dbname = pg_strdup(optarg);
|
|
+ break;
|
|
+
|
|
+ case 'E': /* Dump encoding */
|
|
+ dumpencoding = pg_strdup(optarg);
|
|
+ break;
|
|
+
|
|
+ case 'f':
|
|
+ filename = pg_strdup(optarg);
|
|
+ break;
|
|
+
|
|
+ case 'F':
|
|
+ format = pg_strdup(optarg);
|
|
+ break;
|
|
+
|
|
+ case 'h': /* server host */
|
|
+ pghost = pg_strdup(optarg);
|
|
+ break;
|
|
+
|
|
+ case 'i':
|
|
+ /* ignored, deprecated option */
|
|
+ break;
|
|
+
|
|
+ case 'j': /* number of dump jobs */
|
|
+ numWorkers = atoi(optarg);
|
|
+ break;
|
|
+
|
|
+ case 'n': /* include schema(s) */
|
|
+ simple_string_list_append(&schema_include_patterns, optarg);
|
|
+ include_everything = false;
|
|
+ break;
|
|
+
|
|
+ case 'N': /* exclude schema(s) */
|
|
+ simple_string_list_append(&schema_exclude_patterns, optarg);
|
|
+ break;
|
|
+
|
|
+ case 'o': /* Dump oids */
|
|
+ oids = true;
|
|
+ break;
|
|
+
|
|
+ case 'O': /* Don't reconnect to match owner */
|
|
+ outputNoOwner = 1;
|
|
+ break;
|
|
+
|
|
+ case 'p': /* server port */
|
|
+ pgport = pg_strdup(optarg);
|
|
+ break;
|
|
+
|
|
+ case 'R':
|
|
+ /* no-op, still accepted for backwards compatibility */
|
|
+ break;
|
|
+
|
|
+ case 's': /* dump schema only */
|
|
+ schemaOnly = true;
|
|
+ break;
|
|
+
|
|
+ case 'S': /* Username for superuser in plain text output */
|
|
+ outputSuperuser = pg_strdup(optarg);
|
|
+ break;
|
|
+
|
|
+ case 't': /* include table(s) */
|
|
+ simple_string_list_append(&table_include_patterns, optarg);
|
|
+ include_everything = false;
|
|
+ break;
|
|
+
|
|
+ case 'T': /* exclude table(s) */
|
|
+ simple_string_list_append(&table_exclude_patterns, optarg);
|
|
+ break;
|
|
+
|
|
+ case 'U':
|
|
+ username = pg_strdup(optarg);
|
|
+ break;
|
|
+
|
|
+ case 'v': /* verbose */
|
|
+ g_verbose = true;
|
|
+ break;
|
|
+
|
|
+ case 'w':
|
|
+ prompt_password = TRI_NO;
|
|
+ break;
|
|
+
|
|
+ case 'W':
|
|
+ prompt_password = TRI_YES;
|
|
+ break;
|
|
+
|
|
+ case 'x': /* skip ACL dump */
|
|
+ aclsSkip = true;
|
|
+ break;
|
|
+
|
|
+ case 'Z': /* Compression Level */
|
|
+ compressLevel = atoi(optarg);
|
|
+ if (compressLevel < 0 || compressLevel > 9)
|
|
+ {
|
|
+ write_msg(NULL, "compression level must be in range 0..9\n");
|
|
+ exit_nicely(1);
|
|
+ }
|
|
+ break;
|
|
+
|
|
+ case 0:
|
|
+ /* This covers the long options. */
|
|
+ break;
|
|
+
|
|
+ case 2: /* lock-wait-timeout */
|
|
+ lockWaitTimeout = pg_strdup(optarg);
|
|
+ break;
|
|
+
|
|
+ case 3: /* SET ROLE */
|
|
+ use_role = pg_strdup(optarg);
|
|
+ break;
|
|
+
|
|
+ case 4: /* exclude table(s) data */
|
|
+ simple_string_list_append(&tabledata_exclude_patterns, optarg);
|
|
+ break;
|
|
+
|
|
+ case 5: /* section */
|
|
+ set_dump_section(optarg, &dumpSections);
|
|
+ break;
|
|
+
|
|
+ case 6: /* snapshot */
|
|
+ dumpsnapshot = pg_strdup(optarg);
|
|
+ break;
|
|
+
|
|
+ default:
|
|
+ fprintf(stderr, _("Try \"%s --help\" for more information.\n"), progname);
|
|
+ exit_nicely(1);
|
|
+ }
|
|
+ }
|
|
+
|
|
+ /*
|
|
+ * Non-option argument specifies database name as long as it wasn't
|
|
+ * already specified with -d / --dbname
|
|
+ */
|
|
+ if (optind < argc && dbname == NULL)
|
|
+ dbname = argv[optind++];
|
|
+
|
|
+ /* Complain if any arguments remain */
|
|
+ if (optind < argc)
|
|
+ {
|
|
+ fprintf(stderr, _("%s: too many command-line arguments (first is \"%s\")\n"),
|
|
+ progname, argv[optind]);
|
|
+ fprintf(stderr, _("Try \"%s --help\" for more information.\n"),
|
|
+ progname);
|
|
+ exit_nicely(1);
|
|
+ }
|
|
+
|
|
+ /* --column-inserts implies --inserts */
|
|
+ if (column_inserts)
|
|
+ dump_inserts = 1;
|
|
+
|
|
+ if (dataOnly && schemaOnly)
|
|
+ {
|
|
+ write_msg(NULL, "options -s/--schema-only and -a/--data-only cannot be used together\n");
|
|
+ exit_nicely(1);
|
|
+ }
|
|
+
|
|
+ if (dataOnly && outputClean)
|
|
+ {
|
|
+ write_msg(NULL, "options -c/--clean and -a/--data-only cannot be used together\n");
|
|
+ exit_nicely(1);
|
|
+ }
|
|
+
|
|
+ if (dump_inserts && oids)
|
|
+ {
|
|
+ write_msg(NULL, "options --inserts/--column-inserts and -o/--oids cannot be used together\n");
|
|
+ write_msg(NULL, "(The INSERT command cannot set OIDs.)\n");
|
|
+ exit_nicely(1);
|
|
+ }
|
|
+
|
|
+ if (if_exists && !outputClean)
|
|
+ exit_horribly(NULL, "option --if-exists requires option -c/--clean\n");
|
|
+
|
|
+ /* Identify archive format to emit */
|
|
+ archiveFormat = parseArchiveFormat(format, &archiveMode);
|
|
+
|
|
+ /* archiveFormat specific setup */
|
|
+ if (archiveFormat == archNull)
|
|
+ plainText = 1;
|
|
+
|
|
+ /* Custom and directory formats are compressed by default, others not */
|
|
+ if (compressLevel == -1)
|
|
+ {
|
|
+ if (archiveFormat == archCustom || archiveFormat == archDirectory)
|
|
+ compressLevel = Z_DEFAULT_COMPRESSION;
|
|
+ else
|
|
+ compressLevel = 0;
|
|
+ }
|
|
+
|
|
+ /*
|
|
+ * On Windows we can only have at most MAXIMUM_WAIT_OBJECTS (= 64 usually)
|
|
+ * parallel jobs because that's the maximum limit for the
|
|
+ * WaitForMultipleObjects() call.
|
|
+ */
|
|
+ if (numWorkers <= 0
|
|
+#ifdef WIN32
|
|
+ || numWorkers > MAXIMUM_WAIT_OBJECTS
|
|
+#endif
|
|
+ )
|
|
+ exit_horribly(NULL, "%s: invalid number of parallel jobs\n", progname);
|
|
+
|
|
+ /* Parallel backup only in the directory archive format so far */
|
|
+ if (archiveFormat != archDirectory && numWorkers > 1)
|
|
+ exit_horribly(NULL, "parallel backup only supported by the directory format\n");
|
|
+
|
|
+ /* Open the output file */
|
|
+ fout = CreateArchive(filename, archiveFormat, compressLevel, archiveMode,
|
|
+ setupDumpWorker);
|
|
+
|
|
+ /* Register the cleanup hook */
|
|
+ on_exit_close_archive(fout);
|
|
+
|
|
+ if (fout == NULL)
|
|
+ exit_horribly(NULL, "could not open output file \"%s\" for writing\n", filename);
|
|
+
|
|
+ /* Let the archiver know how noisy to be */
|
|
+ fout->verbose = g_verbose;
|
|
+
|
|
+ /*
|
|
+ * We allow the server to be back to 7.0, and up to any minor release of
|
|
+ * our own major version. (See also version check in pg_dumpall.c.)
|
|
+ */
|
|
+ fout->minRemoteVersion = 70000;
|
|
+ fout->maxRemoteVersion = (PG_VERSION_NUM / 100) * 100 + 99;
|
|
+
|
|
+ fout->numWorkers = numWorkers;
|
|
+
|
|
+ /*
|
|
+ * Open the database using the Archiver, so it knows about it. Errors mean
|
|
+ * death.
|
|
+ */
|
|
+ ConnectDatabase(fout, dbname, pghost, pgport, username, prompt_password);
|
|
+ setup_connection(fout, dumpencoding, dumpsnapshot, use_role);
|
|
+
|
|
+ /*
|
|
+ * Disable security label support if server version < v9.1.x (prevents
|
|
+ * access to nonexistent pg_seclabel catalog)
|
|
+ */
|
|
+ if (fout->remoteVersion < 90100)
|
|
+ no_security_labels = 1;
|
|
+
|
|
+ /*
|
|
+ * When running against 9.0 or later, check if we are in recovery mode,
|
|
+ * which means we are on a hot standby.
|
|
+ */
|
|
+ if (fout->remoteVersion >= 90000)
|
|
+ {
|
|
+ PGresult *res = ExecuteSqlQueryForSingleRow(fout, "SELECT pg_catalog.pg_is_in_recovery()");
|
|
+
|
|
+ if (strcmp(PQgetvalue(res, 0, 0), "t") == 0)
|
|
+ {
|
|
+ /*
|
|
+ * On hot standby slaves, never try to dump unlogged table data,
|
|
+ * since it will just throw an error.
|
|
+ */
|
|
+ no_unlogged_table_data = true;
|
|
+ }
|
|
+ PQclear(res);
|
|
+ }
|
|
+
|
|
+ /* Select the appropriate subquery to convert user IDs to names */
|
|
+ if (fout->remoteVersion >= 80100)
|
|
+ username_subquery = "SELECT rolname FROM pg_catalog.pg_roles WHERE oid =";
|
|
+ else if (fout->remoteVersion >= 70300)
|
|
+ username_subquery = "SELECT usename FROM pg_catalog.pg_user WHERE usesysid =";
|
|
+ else
|
|
+ username_subquery = "SELECT usename FROM pg_user WHERE usesysid =";
|
|
+
|
|
+ /* check the version for the synchronized snapshots feature */
|
|
+ if (numWorkers > 1 && fout->remoteVersion < 90200
|
|
+ && !no_synchronized_snapshots)
|
|
+ exit_horribly(NULL,
|
|
+ "Synchronized snapshots are not supported by this server version.\n"
|
|
+ "Run with --no-synchronized-snapshots instead if you do not need\n"
|
|
+ "synchronized snapshots.\n");
|
|
+
|
|
+ /* check the version when a snapshot is explicitly specified by user */
|
|
+ if (dumpsnapshot && fout->remoteVersion < 90200)
|
|
+ exit_horribly(NULL,
|
|
+ "Exported snapshots are not supported by this server version.\n");
|
|
+
|
|
+ /* Find the last built-in OID, if needed */
|
|
+ if (fout->remoteVersion < 70300)
|
|
+ {
|
|
+ if (fout->remoteVersion >= 70100)
|
|
+ g_last_builtin_oid = findLastBuiltinOid_V71(fout,
|
|
+ PQdb(GetConnection(fout)));
|
|
+ else
|
|
+ g_last_builtin_oid = findLastBuiltinOid_V70(fout);
|
|
+ if (g_verbose)
|
|
+ write_msg(NULL, "last built-in OID is %u\n", g_last_builtin_oid);
|
|
+ }
|
|
+
|
|
+ /* Expand schema selection patterns into OID lists */
|
|
+ if (schema_include_patterns.head != NULL)
|
|
+ {
|
|
+ expand_schema_name_patterns(fout, &schema_include_patterns,
|
|
+ &schema_include_oids);
|
|
+ if (schema_include_oids.head == NULL)
|
|
+ exit_horribly(NULL, "No matching schemas were found\n");
|
|
+ }
|
|
+ expand_schema_name_patterns(fout, &schema_exclude_patterns,
|
|
+ &schema_exclude_oids);
|
|
+ /* non-matching exclusion patterns aren't an error */
|
|
+
|
|
+ /* Expand table selection patterns into OID lists */
|
|
+ if (table_include_patterns.head != NULL)
|
|
+ {
|
|
+ expand_table_name_patterns(fout, &table_include_patterns,
|
|
+ &table_include_oids);
|
|
+ if (table_include_oids.head == NULL)
|
|
+ exit_horribly(NULL, "No matching tables were found\n");
|
|
+ }
|
|
+ expand_table_name_patterns(fout, &table_exclude_patterns,
|
|
+ &table_exclude_oids);
|
|
+
|
|
+ expand_table_name_patterns(fout, &tabledata_exclude_patterns,
|
|
+ &tabledata_exclude_oids);
|
|
+
|
|
+ /* non-matching exclusion patterns aren't an error */
|
|
+
|
|
+ /*
|
|
+ * Dumping blobs is now default unless we saw an inclusion switch or -s
|
|
+ * ... but even if we did see one of these, -b turns it back on.
|
|
+ */
|
|
+ if (include_everything && !schemaOnly)
|
|
+ outputBlobs = true;
|
|
+
|
|
+ /*
|
|
+ * Now scan the database and create DumpableObject structs for all the
|
|
+ * objects we intend to dump.
|
|
+ */
|
|
+ tblinfo = getSchemaData(fout, &numTables);
|
|
+
|
|
+ if (fout->remoteVersion < 80400)
|
|
+ guessConstraintInheritance(tblinfo, numTables);
|
|
+
|
|
+ if (!schemaOnly)
|
|
+ {
|
|
+ getTableData(tblinfo, numTables, oids);
|
|
+ buildMatViewRefreshDependencies(fout);
|
|
+ if (dataOnly)
|
|
+ getTableDataFKConstraints();
|
|
+ }
|
|
+
|
|
+ if (outputBlobs)
|
|
+ getBlobs(fout);
|
|
+
|
|
+ /*
|
|
+ * Collect dependency data to assist in ordering the objects.
|
|
+ */
|
|
+ getDependencies(fout);
|
|
+
|
|
+ /* Lastly, create dummy objects to represent the section boundaries */
|
|
+ boundaryObjs = createBoundaryObjects();
|
|
+
|
|
+ /* Get pointers to all the known DumpableObjects */
|
|
+ getDumpableObjects(&dobjs, &numObjs);
|
|
+
|
|
+ /*
|
|
+ * Add dummy dependencies to enforce the dump section ordering.
|
|
+ */
|
|
+ addBoundaryDependencies(dobjs, numObjs, boundaryObjs);
|
|
+
|
|
+ /*
|
|
+ * Sort the objects into a safe dump order (no forward references).
|
|
+ *
|
|
+ * In 7.3 or later, we can rely on dependency information to help us
|
|
+ * determine a safe order, so the initial sort is mostly for cosmetic
|
|
+ * purposes: we sort by name to ensure that logically identical schemas
|
|
+ * will dump identically. Before 7.3 we don't have dependencies and we
|
|
+ * use OID ordering as an (unreliable) guide to creation order.
|
|
+ */
|
|
+ if (fout->remoteVersion >= 70300)
|
|
+ sortDumpableObjectsByTypeName(dobjs, numObjs);
|
|
+ else
|
|
+ sortDumpableObjectsByTypeOid(dobjs, numObjs);
|
|
+
|
|
+ /* If we do a parallel dump, we want the largest tables to go first */
|
|
+ if (archiveFormat == archDirectory && numWorkers > 1)
|
|
+ sortDataAndIndexObjectsBySize(dobjs, numObjs);
|
|
+
|
|
+ sortDumpableObjects(dobjs, numObjs,
|
|
+ boundaryObjs[0].dumpId, boundaryObjs[1].dumpId);
|
|
+
|
|
+ /*
|
|
+ * Create archive TOC entries for all the objects to be dumped, in a safe
|
|
+ * order.
|
|
+ */
|
|
+
|
|
+ /* First the special ENCODING and STDSTRINGS entries. */
|
|
+ dumpEncoding(fout);
|
|
+ dumpStdStrings(fout);
|
|
+
|
|
+ /* The database item is always next, unless we don't want it at all */
|
|
+ if (include_everything && !dataOnly)
|
|
+ dumpDatabase(fout);
|
|
+
|
|
+ /* Now the rearrangeable objects. */
|
|
+ for (i = 0; i < numObjs; i++)
|
|
+ dumpDumpableObject(fout, dobjs[i]);
|
|
+
|
|
+ /*
|
|
+ * Set up options info to ensure we dump what we want.
|
|
+ */
|
|
+ ropt = NewRestoreOptions();
|
|
+ ropt->filename = filename;
|
|
+ ropt->dropSchema = outputClean;
|
|
+ ropt->dataOnly = dataOnly;
|
|
+ ropt->schemaOnly = schemaOnly;
|
|
+ ropt->if_exists = if_exists;
|
|
+ ropt->dumpSections = dumpSections;
|
|
+ ropt->aclsSkip = aclsSkip;
|
|
+ ropt->superuser = outputSuperuser;
|
|
+ ropt->createDB = outputCreateDB;
|
|
+ ropt->noOwner = outputNoOwner;
|
|
+ ropt->noTablespace = outputNoTablespaces;
|
|
+ ropt->disable_triggers = disable_triggers;
|
|
+ ropt->use_setsessauth = use_setsessauth;
|
|
+
|
|
+ if (compressLevel == -1)
|
|
+ ropt->compression = 0;
|
|
+ else
|
|
+ ropt->compression = compressLevel;
|
|
+
|
|
+ ropt->suppressDumpWarnings = true; /* We've already shown them */
|
|
+
|
|
+ SetArchiveRestoreOptions(fout, ropt);
|
|
+
|
|
+ /*
|
|
+ * The archive's TOC entries are now marked as to which ones will actually
|
|
+ * be output, so we can set up their dependency lists properly. This isn't
|
|
+ * necessary for plain-text output, though.
|
|
+ */
|
|
+ if (!plainText)
|
|
+ BuildArchiveDependencies(fout);
|
|
+
|
|
+ /*
|
|
+ * And finally we can do the actual output.
|
|
+ *
|
|
+ * Note: for non-plain-text output formats, the output file is written
|
|
+ * inside CloseArchive(). This is, um, bizarre; but not worth changing
|
|
+ * right now.
|
|
+ */
|
|
+ if (plainText)
|
|
+ RestoreArchive(fout);
|
|
+
|
|
+ CloseArchive(fout);
|
|
+
|
|
+ exit_nicely(0);
|
|
+}
|
|
+
|
|
+
|
|
+static void
|
|
+help(const char *progname)
|
|
+{
|
|
+ printf(_("%s dumps a database as a text file or to other formats.\n\n"), progname);
|
|
+ printf(_("Usage:\n"));
|
|
+ printf(_(" %s [OPTION]... [DBNAME]\n"), progname);
|
|
+
|
|
+ printf(_("\nGeneral options:\n"));
|
|
+ printf(_(" -f, --file=FILENAME output file or directory name\n"));
|
|
+ printf(_(" -F, --format=c|d|t|p output file format (custom, directory, tar,\n"
|
|
+ " plain text (default))\n"));
|
|
+ printf(_(" -j, --jobs=NUM use this many parallel jobs to dump\n"));
|
|
+ printf(_(" -v, --verbose verbose mode\n"));
|
|
+ printf(_(" -V, --version output version information, then exit\n"));
|
|
+ printf(_(" -Z, --compress=0-9 compression level for compressed formats\n"));
|
|
+ printf(_(" --lock-wait-timeout=TIMEOUT fail after waiting TIMEOUT for a table lock\n"));
|
|
+ printf(_(" -?, --help show this help, then exit\n"));
|
|
+
|
|
+ printf(_("\nOptions controlling the output content:\n"));
|
|
+ printf(_(" -a, --data-only dump only the data, not the schema\n"));
|
|
+ printf(_(" -b, --blobs include large objects in dump\n"));
|
|
+ printf(_(" -c, --clean clean (drop) database objects before recreating\n"));
+ printf(_(" -C, --create include commands to create database in dump\n"));
+ printf(_(" -E, --encoding=ENCODING dump the data in encoding ENCODING\n"));
+ printf(_(" -n, --schema=SCHEMA dump the named schema(s) only\n"));
+ printf(_(" -N, --exclude-schema=SCHEMA do NOT dump the named schema(s)\n"));
+ printf(_(" -o, --oids include OIDs in dump\n"));
+ printf(_(" -O, --no-owner skip restoration of object ownership in\n"
+ " plain-text format\n"));
+ printf(_(" -s, --schema-only dump only the schema, no data\n"));
+ printf(_(" -S, --superuser=NAME superuser user name to use in plain-text format\n"));
+ printf(_(" -t, --table=TABLE dump the named table(s) only\n"));
+ printf(_(" -T, --exclude-table=TABLE do NOT dump the named table(s)\n"));
+ printf(_(" -x, --no-privileges do not dump privileges (grant/revoke)\n"));
+ printf(_(" --binary-upgrade for use by upgrade utilities only\n"));
+ printf(_(" --column-inserts dump data as INSERT commands with column names\n"));
+ printf(_(" --disable-dollar-quoting disable dollar quoting, use SQL standard quoting\n"));
+ printf(_(" --disable-triggers disable triggers during data-only restore\n"));
+ printf(_(" --exclude-table-data=TABLE do NOT dump data for the named table(s)\n"));
+ printf(_(" --if-exists use IF EXISTS when dropping objects\n"));
+ printf(_(" --inserts dump data as INSERT commands, rather than COPY\n"));
+ printf(_(" --no-security-labels do not dump security label assignments\n"));
+ printf(_(" --no-synchronized-snapshots do not use synchronized snapshots in parallel jobs\n"));
+ printf(_(" --no-tablespaces do not dump tablespace assignments\n"));
+ printf(_(" --no-unlogged-table-data do not dump unlogged table data\n"));
+ printf(_(" --quote-all-identifiers quote all identifiers, even if not key words\n"));
+ printf(_(" --section=SECTION dump named section (pre-data, data, or post-data)\n"));
+ printf(_(" --serializable-deferrable wait until the dump can run without anomalies\n"));
+ printf(_(" --snapshot=SNAPSHOT use given synchronous snapshot for the dump\n"));
+ printf(_(" --use-set-session-authorization\n"
+ " use SET SESSION AUTHORIZATION commands instead of\n"
+ " ALTER OWNER commands to set ownership\n"));
+
+ printf(_("\nConnection options:\n"));
+ printf(_(" -d, --dbname=DBNAME database to dump\n"));
+ printf(_(" -h, --host=HOSTNAME database server host or socket directory\n"));
+ printf(_(" -p, --port=PORT database server port number\n"));
+ printf(_(" -U, --username=NAME connect as specified database user\n"));
+ printf(_(" -w, --no-password never prompt for password\n"));
+ printf(_(" -W, --password force password prompt (should happen automatically)\n"));
+ printf(_(" --role=ROLENAME do SET ROLE before dump\n"));
+
+ printf(_("\nIf no database name is supplied, then the PGDATABASE environment\n"
+ "variable value is used.\n\n"));
+ printf(_("Report bugs to <pgsql-bugs@postgresql.org>.\n"));
+}
+
|
|
+static void
|
|
+setup_connection(Archive *AH, const char *dumpencoding,
|
|
+ const char *dumpsnapshot, char *use_role)
|
|
+{
|
|
+ PGconn *conn = GetConnection(AH);
|
|
+ const char *std_strings;
|
|
+
|
|
+ /*
|
|
+ * Set the client encoding if requested. If dumpencoding == NULL then
|
|
+ * either it hasn't been requested or we're a cloned connection and then
|
|
+ * this has already been set in CloneArchive according to the original
|
|
+ * connection encoding.
|
|
+ */
|
|
+ if (dumpencoding)
|
|
+ {
|
|
+ if (PQsetClientEncoding(conn, dumpencoding) < 0)
|
|
+ exit_horribly(NULL, "invalid client encoding \"%s\" specified\n",
|
|
+ dumpencoding);
|
|
+ }
|
|
+
|
|
+ /*
|
|
+ * Get the active encoding and the standard_conforming_strings setting, so
|
|
+ * we know how to escape strings.
|
|
+ */
|
|
+ AH->encoding = PQclientEncoding(conn);
|
|
+
|
|
+ std_strings = PQparameterStatus(conn, "standard_conforming_strings");
|
|
+ AH->std_strings = (std_strings && strcmp(std_strings, "on") == 0);
|
|
+
|
|
+ /* Set the role if requested */
|
|
+ if (!use_role && AH->use_role)
|
|
+ use_role = AH->use_role;
|
|
+
|
|
+ /* Set the role if requested */
|
|
+ if (use_role && AH->remoteVersion >= 80100)
|
|
+ {
|
|
+ PQExpBuffer query = createPQExpBuffer();
|
|
+
|
|
+ appendPQExpBuffer(query, "SET ROLE %s", fmtId(use_role));
|
|
+ ExecuteSqlStatement(AH, query->data);
|
|
+ destroyPQExpBuffer(query);
|
|
+
|
|
+ /* save this for later use on parallel connections */
|
|
+ if (!AH->use_role)
|
|
+ AH->use_role = strdup(use_role);
|
|
+ }
|
|
+
|
|
+ /* Set the datestyle to ISO to ensure the dump's portability */
|
|
+ ExecuteSqlStatement(AH, "SET DATESTYLE = ISO");
|
|
+
|
|
+ /* Likewise, avoid using sql_standard intervalstyle */
|
|
+ if (AH->remoteVersion >= 80400)
|
|
+ ExecuteSqlStatement(AH, "SET INTERVALSTYLE = POSTGRES");
|
|
+
|
|
+ /*
|
|
+ * If supported, set extra_float_digits so that we can dump float data
|
|
+ * exactly (given correctly implemented float I/O code, anyway)
|
|
+ */
|
|
+ if (AH->remoteVersion >= 90000)
|
|
+ ExecuteSqlStatement(AH, "SET extra_float_digits TO 3");
|
|
+ else if (AH->remoteVersion >= 70400)
|
|
+ ExecuteSqlStatement(AH, "SET extra_float_digits TO 2");
|
|
+
|
|
+ /*
|
|
+ * If synchronized scanning is supported, disable it, to prevent
|
|
+ * unpredictable changes in row ordering across a dump and reload.
|
|
+ */
|
|
+ if (AH->remoteVersion >= 80300)
|
|
+ ExecuteSqlStatement(AH, "SET synchronize_seqscans TO off");
|
|
+
|
|
+ /*
|
|
+ * Disable timeouts if supported.
|
|
+ */
|
|
+ if (AH->remoteVersion >= 70300)
|
|
+ ExecuteSqlStatement(AH, "SET statement_timeout = 0");
|
|
+ if (AH->remoteVersion >= 90300)
|
|
+ ExecuteSqlStatement(AH, "SET lock_timeout = 0");
|
|
+
|
|
+ /*
|
|
+ * Quote all identifiers, if requested.
|
|
+ */
|
|
+ if (quote_all_identifiers && AH->remoteVersion >= 90100)
|
|
+ ExecuteSqlStatement(AH, "SET quote_all_identifiers = true");
|
|
+
|
|
+ /*
|
|
+ * Start transaction-snapshot mode transaction to dump consistent data.
|
|
+ */
|
|
+ ExecuteSqlStatement(AH, "BEGIN");
|
|
+ if (AH->remoteVersion >= 90100)
|
|
+ {
|
|
+ /*
|
|
+ * To support the combination of serializable_deferrable with the jobs
|
|
+ * option we use REPEATABLE READ for the worker connections that are
|
|
+ * passed a snapshot. As long as the snapshot is acquired in a
|
|
+ * SERIALIZABLE, READ ONLY, DEFERRABLE transaction, its use within a
|
|
+ * REPEATABLE READ transaction provides the appropriate integrity
|
|
+ * guarantees. This is a kluge, but safe for back-patching.
|
|
+ */
|
|
+ if (serializable_deferrable && AH->sync_snapshot_id == NULL)
|
|
+ ExecuteSqlStatement(AH,
|
|
+ "SET TRANSACTION ISOLATION LEVEL "
|
|
+ "SERIALIZABLE, READ ONLY, DEFERRABLE");
|
|
+ else
|
|
+ ExecuteSqlStatement(AH,
|
|
+ "SET TRANSACTION ISOLATION LEVEL "
|
|
+ "REPEATABLE READ, READ ONLY");
|
|
+ }
|
|
+ else if (AH->remoteVersion >= 70400)
|
|
+ {
|
|
+ /* note: comma was not accepted in SET TRANSACTION before 8.0 */
|
|
+ ExecuteSqlStatement(AH,
|
|
+ "SET TRANSACTION ISOLATION LEVEL "
|
|
+ "SERIALIZABLE READ ONLY");
|
|
+ }
|
|
+ else
|
|
+ ExecuteSqlStatement(AH,
|
|
+ "SET TRANSACTION ISOLATION LEVEL SERIALIZABLE");
|
|
+
|
|
+
|
|
+ /*
|
|
+ * define an export snapshot, either chosen by user or needed for
|
|
+ * parallel dump.
|
|
+ */
|
|
+ if (dumpsnapshot)
|
|
+ AH->sync_snapshot_id = strdup(dumpsnapshot);
|
|
+
|
|
+
|
|
+ if (AH->sync_snapshot_id)
|
|
+ {
|
|
+ PQExpBuffer query = createPQExpBuffer();
|
|
+ appendPQExpBuffer(query, "SET TRANSACTION SNAPSHOT ");
|
|
+ appendStringLiteralConn(query, AH->sync_snapshot_id, conn);
|
|
+ ExecuteSqlStatement(AH, query->data);
|
|
+ destroyPQExpBuffer(query);
|
|
+ }
|
|
+ else if (AH->numWorkers > 1 &&
|
|
+ AH->remoteVersion >= 90200 &&
|
|
+ !no_synchronized_snapshots)
|
|
+ AH->sync_snapshot_id = get_synchronized_snapshot(AH);
|
|
+}
|
|
+
|
|
+static void
|
|
+setupDumpWorker(Archive *AHX, RestoreOptions *ropt)
|
|
+{
|
|
+ setup_connection(AHX, NULL, NULL, NULL);
|
|
+}
|
|
+
|
|
+static char *
|
|
+get_synchronized_snapshot(Archive *fout)
|
|
+{
|
|
+ char *query = "SELECT pg_export_snapshot()";
|
|
+ char *result;
|
|
+ PGresult *res;
|
|
+
|
|
+ res = ExecuteSqlQueryForSingleRow(fout, query);
|
|
+ result = strdup(PQgetvalue(res, 0, 0));
|
|
+ PQclear(res);
|
|
+
|
|
+ return result;
|
|
+}
|
|
+
|
|
+static ArchiveFormat
|
|
+parseArchiveFormat(const char *format, ArchiveMode *mode)
|
|
+{
|
|
+ ArchiveFormat archiveFormat;
|
|
+
|
|
+ *mode = archModeWrite;
|
|
+
|
|
+ if (pg_strcasecmp(format, "a") == 0 || pg_strcasecmp(format, "append") == 0)
|
|
+ {
|
|
+ /* This is used by pg_dumpall, and is not documented */
|
|
+ archiveFormat = archNull;
|
|
+ *mode = archModeAppend;
|
|
+ }
|
|
+ else if (pg_strcasecmp(format, "c") == 0)
|
|
+ archiveFormat = archCustom;
|
|
+ else if (pg_strcasecmp(format, "custom") == 0)
|
|
+ archiveFormat = archCustom;
|
|
+ else if (pg_strcasecmp(format, "d") == 0)
|
|
+ archiveFormat = archDirectory;
|
|
+ else if (pg_strcasecmp(format, "directory") == 0)
|
|
+ archiveFormat = archDirectory;
|
|
+ else if (pg_strcasecmp(format, "p") == 0)
|
|
+ archiveFormat = archNull;
|
|
+ else if (pg_strcasecmp(format, "plain") == 0)
|
|
+ archiveFormat = archNull;
|
|
+ else if (pg_strcasecmp(format, "t") == 0)
|
|
+ archiveFormat = archTar;
|
|
+ else if (pg_strcasecmp(format, "tar") == 0)
|
|
+ archiveFormat = archTar;
|
|
+ else
|
|
+ exit_horribly(NULL, "invalid output format \"%s\" specified\n", format);
|
|
+ return archiveFormat;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Find the OIDs of all schemas matching the given list of patterns,
|
|
+ * and append them to the given OID list.
|
|
+ */
|
|
+static void
|
|
+expand_schema_name_patterns(Archive *fout,
|
|
+ SimpleStringList *patterns,
|
|
+ SimpleOidList *oids)
|
|
+{
|
|
+ PQExpBuffer query;
|
|
+ PGresult *res;
|
|
+ SimpleStringListCell *cell;
|
|
+ int i;
|
|
+
|
|
+ if (patterns->head == NULL)
|
|
+ return; /* nothing to do */
|
|
+
|
|
+ if (fout->remoteVersion < 70300)
|
|
+ exit_horribly(NULL, "server version must be at least 7.3 to use schema selection switches\n");
|
|
+
|
|
+ query = createPQExpBuffer();
|
|
+
|
|
+ /*
|
|
+ * We use UNION ALL rather than UNION; this might sometimes result in
|
|
+ * duplicate entries in the OID list, but we don't care.
|
|
+ */
|
|
+
|
|
+ for (cell = patterns->head; cell; cell = cell->next)
|
|
+ {
|
|
+ if (cell != patterns->head)
|
|
+ appendPQExpBufferStr(query, "UNION ALL\n");
|
|
+ appendPQExpBuffer(query,
|
|
+ "SELECT oid FROM pg_catalog.pg_namespace n\n");
|
|
+ processSQLNamePattern(GetConnection(fout), query, cell->val, false,
|
|
+ false, NULL, "n.nspname", NULL, NULL);
|
|
+ }
|
|
+
|
|
+ res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
|
|
+
|
|
+ for (i = 0; i < PQntuples(res); i++)
|
|
+ {
|
|
+ simple_oid_list_append(oids, atooid(PQgetvalue(res, i, 0)));
|
|
+ }
|
|
+
|
|
+ PQclear(res);
|
|
+ destroyPQExpBuffer(query);
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Find the OIDs of all tables matching the given list of patterns,
|
|
+ * and append them to the given OID list.
|
|
+ */
|
|
+static void
|
|
+expand_table_name_patterns(Archive *fout,
|
|
+ SimpleStringList *patterns, SimpleOidList *oids)
|
|
+{
|
|
+ PQExpBuffer query;
|
|
+ PGresult *res;
|
|
+ SimpleStringListCell *cell;
|
|
+ int i;
|
|
+
|
|
+ if (patterns->head == NULL)
|
|
+ return; /* nothing to do */
|
|
+
|
|
+ query = createPQExpBuffer();
|
|
+
|
|
+ /*
|
|
+ * We use UNION ALL rather than UNION; this might sometimes result in
|
|
+ * duplicate entries in the OID list, but we don't care.
|
|
+ */
|
|
+
|
|
+ for (cell = patterns->head; cell; cell = cell->next)
|
|
+ {
|
|
+ if (cell != patterns->head)
|
|
+ appendPQExpBufferStr(query, "UNION ALL\n");
|
|
+ appendPQExpBuffer(query,
|
|
+ "SELECT c.oid"
|
|
+ "\nFROM pg_catalog.pg_class c"
|
|
+ "\n LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace"
|
|
+ "\nWHERE c.relkind in ('%c', '%c', '%c', '%c', '%c')\n",
|
|
+ RELKIND_RELATION, RELKIND_SEQUENCE, RELKIND_VIEW,
|
|
+ RELKIND_MATVIEW, RELKIND_FOREIGN_TABLE);
|
|
+ processSQLNamePattern(GetConnection(fout), query, cell->val, true,
|
|
+ false, "n.nspname", "c.relname", NULL,
|
|
+ "pg_catalog.pg_table_is_visible(c.oid)");
|
|
+ }
|
|
+
|
|
+ res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
|
|
+
|
|
+ for (i = 0; i < PQntuples(res); i++)
|
|
+ {
|
|
+ simple_oid_list_append(oids, atooid(PQgetvalue(res, i, 0)));
|
|
+ }
|
|
+
|
|
+ PQclear(res);
|
|
+ destroyPQExpBuffer(query);
|
|
+}
|
|
+
|
|
+/*
|
|
+ * selectDumpableNamespace: policy-setting subroutine
|
|
+ * Mark a namespace as to be dumped or not
|
|
+ */
|
|
+static void
|
|
+selectDumpableNamespace(NamespaceInfo *nsinfo)
|
|
+{
|
|
+ /*
|
|
+ * If specific tables are being dumped, do not dump any complete
|
|
+ * namespaces. If specific namespaces are being dumped, dump just those
|
|
+ * namespaces. Otherwise, dump all non-system namespaces.
|
|
+ */
|
|
+ if (table_include_oids.head != NULL)
|
|
+ nsinfo->dobj.dump = false;
|
|
+ else if (schema_include_oids.head != NULL)
|
|
+ nsinfo->dobj.dump = simple_oid_list_member(&schema_include_oids,
|
|
+ nsinfo->dobj.catId.oid);
|
|
+ else if (strncmp(nsinfo->dobj.name, "pg_", 3) == 0 ||
|
|
+ strcmp(nsinfo->dobj.name, "information_schema") == 0)
|
|
+ nsinfo->dobj.dump = false;
|
|
+ else
|
|
+ nsinfo->dobj.dump = true;
|
|
+
|
|
+ /*
|
|
+ * In any case, a namespace can be excluded by an exclusion switch
|
|
+ */
|
|
+ if (nsinfo->dobj.dump &&
|
|
+ simple_oid_list_member(&schema_exclude_oids,
|
|
+ nsinfo->dobj.catId.oid))
|
|
+ nsinfo->dobj.dump = false;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * selectDumpableTable: policy-setting subroutine
|
|
+ * Mark a table as to be dumped or not
|
|
+ */
|
|
+static void
|
|
+selectDumpableTable(TableInfo *tbinfo)
|
|
+{
|
|
+ /*
|
|
+ * If specific tables are being dumped, dump just those tables; else, dump
|
|
+ * according to the parent namespace's dump flag.
|
|
+ */
|
|
+ if (table_include_oids.head != NULL)
|
|
+ tbinfo->dobj.dump = simple_oid_list_member(&table_include_oids,
|
|
+ tbinfo->dobj.catId.oid);
|
|
+ else
|
|
+ tbinfo->dobj.dump = tbinfo->dobj.namespace->dobj.dump;
|
|
+
|
|
+ /*
|
|
+ * In any case, a table can be excluded by an exclusion switch
|
|
+ */
|
|
+ if (tbinfo->dobj.dump &&
|
|
+ simple_oid_list_member(&table_exclude_oids,
|
|
+ tbinfo->dobj.catId.oid))
|
|
+ tbinfo->dobj.dump = false;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * selectDumpableType: policy-setting subroutine
|
|
+ * Mark a type as to be dumped or not
|
|
+ *
|
|
+ * If it's a table's rowtype or an autogenerated array type, we also apply a
|
|
+ * special type code to facilitate sorting into the desired order. (We don't
|
|
+ * want to consider those to be ordinary types because that would bring tables
|
|
+ * up into the datatype part of the dump order.) We still set the object's
|
|
+ * dump flag; that's not going to cause the dummy type to be dumped, but we
|
|
+ * need it so that casts involving such types will be dumped correctly -- see
|
|
+ * dumpCast. This means the flag should be set the same as for the underlying
|
|
+ * object (the table or base type).
|
|
+ */
|
|
+static void
|
|
+selectDumpableType(TypeInfo *tyinfo)
|
|
+{
|
|
+ /* skip complex types, except for standalone composite types */
|
|
+ if (OidIsValid(tyinfo->typrelid) &&
|
|
+ tyinfo->typrelkind != RELKIND_COMPOSITE_TYPE)
|
|
+ {
|
|
+ TableInfo *tytable = findTableByOid(tyinfo->typrelid);
|
|
+
|
|
+ tyinfo->dobj.objType = DO_DUMMY_TYPE;
|
|
+ if (tytable != NULL)
|
|
+ tyinfo->dobj.dump = tytable->dobj.dump;
|
|
+ else
|
|
+ tyinfo->dobj.dump = false;
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ /* skip auto-generated array types */
|
|
+ if (tyinfo->isArray)
|
|
+ {
|
|
+ tyinfo->dobj.objType = DO_DUMMY_TYPE;
|
|
+
|
|
+ /*
|
|
+ * Fall through to set the dump flag; we assume that the subsequent
|
|
+ * rules will do the same thing as they would for the array's base
|
|
+ * type. (We cannot reliably look up the base type here, since
|
|
+ * getTypes may not have processed it yet.)
|
|
+ */
|
|
+ }
|
|
+
|
|
+ /* dump only types in dumpable namespaces */
|
|
+ if (!tyinfo->dobj.namespace->dobj.dump)
|
|
+ tyinfo->dobj.dump = false;
|
|
+ else
|
|
+ tyinfo->dobj.dump = true;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * selectDumpableDefaultACL: policy-setting subroutine
|
|
+ * Mark a default ACL as to be dumped or not
|
|
+ *
|
|
+ * For per-schema default ACLs, dump if the schema is to be dumped.
|
|
+ * Otherwise dump if we are dumping "everything". Note that dataOnly
|
|
+ * and aclsSkip are checked separately.
|
|
+ */
|
|
+static void
|
|
+selectDumpableDefaultACL(DefaultACLInfo *dinfo)
|
|
+{
|
|
+ if (dinfo->dobj.namespace)
|
|
+ dinfo->dobj.dump = dinfo->dobj.namespace->dobj.dump;
|
|
+ else
|
|
+ dinfo->dobj.dump = include_everything;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * selectDumpableCast: policy-setting subroutine
|
|
+ * Mark a cast as to be dumped or not
|
|
+ *
|
|
+ * Casts do not belong to any particular namespace (since they haven't got
|
|
+ * names), nor do they have identifiable owners. To distinguish user-defined
|
|
+ * casts from built-in ones, we must resort to checking whether the cast's
|
|
+ * OID is in the range reserved for initdb.
|
|
+ */
|
|
+static void
|
|
+selectDumpableCast(CastInfo *cast)
|
|
+{
|
|
+ if (cast->dobj.catId.oid < (Oid) FirstNormalObjectId)
|
|
+ cast->dobj.dump = false;
|
|
+ else
|
|
+ cast->dobj.dump = include_everything;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * selectDumpableExtension: policy-setting subroutine
|
|
+ * Mark an extension as to be dumped or not
|
|
+ *
|
|
+ * Normally, we dump all extensions, or none of them if include_everything
|
|
+ * is false (i.e., a --schema or --table switch was given). However, in
|
|
+ * binary-upgrade mode it's necessary to skip built-in extensions, since we
|
|
+ * assume those will already be installed in the target database. We identify
|
|
+ * such extensions by their having OIDs in the range reserved for initdb.
|
|
+ */
|
|
+static void
|
|
+selectDumpableExtension(ExtensionInfo *extinfo)
|
|
+{
|
|
+ if (binary_upgrade && extinfo->dobj.catId.oid < (Oid) FirstNormalObjectId)
|
|
+ extinfo->dobj.dump = false;
|
|
+ else
|
|
+ extinfo->dobj.dump = include_everything;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * selectDumpableObject: policy-setting subroutine
|
|
+ * Mark a generic dumpable object as to be dumped or not
|
|
+ *
|
|
+ * Use this only for object types without a special-case routine above.
|
|
+ */
|
|
+static void
|
|
+selectDumpableObject(DumpableObject *dobj)
|
|
+{
|
|
+ /*
|
|
+ * Default policy is to dump if parent namespace is dumpable, or always
|
|
+ * for non-namespace-associated items.
|
|
+ */
|
|
+ if (dobj->namespace)
|
|
+ dobj->dump = dobj->namespace->dobj.dump;
|
|
+ else
|
|
+ dobj->dump = true;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Dump a table's contents for loading using the COPY command
|
|
+ * - this routine is called by the Archiver when it wants the table
|
|
+ * to be dumped.
|
|
+ */
|
|
+
|
|
+static int
|
|
+dumpTableData_copy(Archive *fout, void *dcontext)
|
|
+{
|
|
+ TableDataInfo *tdinfo = (TableDataInfo *) dcontext;
|
|
+ TableInfo *tbinfo = tdinfo->tdtable;
|
|
+ const char *classname = tbinfo->dobj.name;
|
|
+ const bool hasoids = tbinfo->hasoids;
|
|
+ const bool oids = tdinfo->oids;
|
|
+ PQExpBuffer q = createPQExpBuffer();
|
|
+
|
|
+ /*
|
|
+ * Note: can't use getThreadLocalPQExpBuffer() here, we're calling fmtId
|
|
+ * which uses it already.
|
|
+ */
|
|
+ PQExpBuffer clistBuf = createPQExpBuffer();
|
|
+ PGconn *conn = GetConnection(fout);
|
|
+ PGresult *res;
|
|
+ int ret;
|
|
+ char *copybuf;
|
|
+ const char *column_list;
|
|
+
|
|
+ if (g_verbose)
|
|
+ write_msg(NULL, "dumping contents of table %s\n", classname);
|
|
+
|
|
+ /*
|
|
+ * Make sure we are in proper schema. We will qualify the table name
|
|
+ * below anyway (in case its name conflicts with a pg_catalog table); but
|
|
+ * this ensures reproducible results in case the table contains regproc,
|
|
+ * regclass, etc columns.
|
|
+ */
|
|
+ selectSourceSchema(fout, tbinfo->dobj.namespace->dobj.name);
|
|
+
|
|
+ /*
|
|
+ * If possible, specify the column list explicitly so that we have no
|
|
+ * possibility of retrieving data in the wrong column order. (The default
|
|
+ * column ordering of COPY will not be what we want in certain corner
|
|
+ * cases involving ADD COLUMN and inheritance.)
|
|
+ */
|
|
+ if (fout->remoteVersion >= 70300)
|
|
+ column_list = fmtCopyColumnList(tbinfo, clistBuf);
|
|
+ else
|
|
+ column_list = ""; /* can't select columns in COPY */
|
|
+
|
|
+ if (oids && hasoids)
|
|
+ {
|
|
+ appendPQExpBuffer(q, "COPY %s %s WITH OIDS TO stdout;",
|
|
+ fmtQualifiedId(fout->remoteVersion,
|
|
+ tbinfo->dobj.namespace->dobj.name,
|
|
+ classname),
|
|
+ column_list);
|
|
+ }
|
|
+ else if (tdinfo->filtercond)
|
|
+ {
|
|
+ /* Note: this syntax is only supported in 8.2 and up */
|
|
+ appendPQExpBufferStr(q, "COPY (SELECT ");
|
|
+ /* klugery to get rid of parens in column list */
|
|
+ if (strlen(column_list) > 2)
|
|
+ {
|
|
+ appendPQExpBufferStr(q, column_list + 1);
|
|
+ q->data[q->len - 1] = ' ';
|
|
+ }
|
|
+ else
|
|
+ appendPQExpBufferStr(q, "* ");
|
|
+ appendPQExpBuffer(q, "FROM %s %s) TO stdout;",
|
|
+ fmtQualifiedId(fout->remoteVersion,
|
|
+ tbinfo->dobj.namespace->dobj.name,
|
|
+ classname),
|
|
+ tdinfo->filtercond);
|
|
+ }
|
|
+ else
|
|
+ {
|
|
+ appendPQExpBuffer(q, "COPY %s %s TO stdout;",
|
|
+ fmtQualifiedId(fout->remoteVersion,
|
|
+ tbinfo->dobj.namespace->dobj.name,
|
|
+ classname),
|
|
+ column_list);
|
|
+ }
|
|
+ res = ExecuteSqlQuery(fout, q->data, PGRES_COPY_OUT);
|
|
+ PQclear(res);
|
|
+ destroyPQExpBuffer(clistBuf);
|
|
+
|
|
+ for (;;)
|
|
+ {
+		ret = PQgetCopyData(conn, &copybuf, 0);
+
|
|
+ if (ret < 0)
|
|
+ break; /* done or error */
|
|
+
|
|
+ if (copybuf)
|
|
+ {
|
|
+ WriteData(fout, copybuf, ret);
|
|
+ PQfreemem(copybuf);
|
|
+ }
|
|
+
|
|
+ /* ----------
|
|
+ * THROTTLE:
|
|
+ *
|
|
+ * There was considerable discussion in late July, 2000 regarding
|
|
+ * slowing down pg_dump when backing up large tables. Users with both
|
|
+ * slow & fast (multi-processor) machines experienced performance
|
|
+ * degradation when doing a backup.
|
|
+ *
|
|
+ * Initial attempts based on sleeping for a number of ms for each ms
|
|
+ * of work were deemed too complex, then a simple 'sleep in each loop'
|
|
+ * implementation was suggested. The latter failed because the loop
|
|
+ * was too tight. Finally, the following was implemented:
|
|
+ *
|
|
+ * If throttle is non-zero, then
|
|
+ * See how long since the last sleep.
|
|
+ * Work out how long to sleep (based on ratio).
|
|
+ * If sleep is more than 100ms, then
|
|
+ * sleep
|
|
+ * reset timer
|
|
+ * EndIf
|
|
+ * EndIf
|
|
+ *
|
|
+ * where the throttle value was the number of ms to sleep per ms of
|
|
+ * work. The calculation was done in each loop.
|
|
+ *
|
|
+ * Most of the hard work is done in the backend, and this solution
|
|
+ * still did not work particularly well: on slow machines, the ratio
|
|
+ * was 50:1, and on medium paced machines, 1:1, and on fast
|
|
+ * multi-processor machines, it had little or no effect, for reasons
|
|
+ * that were unclear.
|
|
+ *
|
|
+ * Further discussion ensued, and the proposal was dropped.
|
|
+ *
|
|
+ * For those people who want this feature, it can be implemented using
|
|
+ * gettimeofday in each loop, calculating the time since last sleep,
|
|
+ * multiplying that by the sleep ratio, then if the result is more
|
|
+ * than a preset 'minimum sleep time' (say 100ms), call the 'select'
|
|
+ * function to sleep for a subsecond period ie.
|
|
+ *
|
|
+ * select(0, NULL, NULL, NULL, &tvi);
|
|
+ *
|
|
+ * This will return after the interval specified in the structure tvi.
|
|
+ * Finally, call gettimeofday again to save the 'last sleep time'.
|
|
+ * ----------
|
|
+ */
|
|
+ }
|
|
+ archprintf(fout, "\\.\n\n\n");
|
|
+
|
|
+ if (ret == -2)
|
|
+ {
|
|
+ /* copy data transfer failed */
|
|
+ write_msg(NULL, "Dumping the contents of table \"%s\" failed: PQgetCopyData() failed.\n", classname);
|
|
+ write_msg(NULL, "Error message from server: %s", PQerrorMessage(conn));
|
|
+ write_msg(NULL, "The command was: %s\n", q->data);
|
|
+ exit_nicely(1);
|
|
+ }
|
|
+
|
|
+ /* Check command status and return to normal libpq state */
|
|
+ res = PQgetResult(conn);
|
|
+ if (PQresultStatus(res) != PGRES_COMMAND_OK)
|
|
+ {
|
|
+ write_msg(NULL, "Dumping the contents of table \"%s\" failed: PQgetResult() failed.\n", classname);
|
|
+ write_msg(NULL, "Error message from server: %s", PQerrorMessage(conn));
|
|
+ write_msg(NULL, "The command was: %s\n", q->data);
|
|
+ exit_nicely(1);
|
|
+ }
|
|
+ PQclear(res);
|
|
+
|
|
+ destroyPQExpBuffer(q);
|
|
+ return 1;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Dump table data using INSERT commands.
|
|
+ *
|
|
+ * Caution: when we restore from an archive file direct to database, the
|
|
+ * INSERT commands emitted by this function have to be parsed by
|
|
+ * pg_backup_db.c's ExecuteSimpleCommands(), which will not handle comments,
|
|
+ * E'' strings, or dollar-quoted strings. So don't emit anything like that.
|
|
+ */
|
|
+static int
|
|
+dumpTableData_insert(Archive *fout, void *dcontext)
|
|
+{
|
|
+ TableDataInfo *tdinfo = (TableDataInfo *) dcontext;
|
|
+ TableInfo *tbinfo = tdinfo->tdtable;
|
|
+ const char *classname = tbinfo->dobj.name;
|
|
+ PQExpBuffer q = createPQExpBuffer();
|
|
+ PQExpBuffer insertStmt = NULL;
|
|
+ PGresult *res;
|
|
+ int tuple;
|
|
+ int nfields;
|
|
+ int field;
|
|
+
|
|
+ /*
|
|
+ * Make sure we are in proper schema. We will qualify the table name
|
|
+ * below anyway (in case its name conflicts with a pg_catalog table); but
|
|
+ * this ensures reproducible results in case the table contains regproc,
|
|
+ * regclass, etc columns.
|
|
+ */
|
|
+ selectSourceSchema(fout, tbinfo->dobj.namespace->dobj.name);
|
|
+
|
|
+ if (fout->remoteVersion >= 70100)
|
|
+ {
|
|
+ appendPQExpBuffer(q, "DECLARE _pg_dump_cursor CURSOR FOR "
|
|
+ "SELECT * FROM ONLY %s",
|
|
+ fmtQualifiedId(fout->remoteVersion,
|
|
+ tbinfo->dobj.namespace->dobj.name,
|
|
+ classname));
|
|
+ }
|
|
+ else
|
|
+ {
|
|
+ appendPQExpBuffer(q, "DECLARE _pg_dump_cursor CURSOR FOR "
|
|
+ "SELECT * FROM %s",
|
|
+ fmtQualifiedId(fout->remoteVersion,
|
|
+ tbinfo->dobj.namespace->dobj.name,
|
|
+ classname));
|
|
+ }
|
|
+ if (tdinfo->filtercond)
|
|
+ appendPQExpBuffer(q, " %s", tdinfo->filtercond);
|
|
+
|
|
+ ExecuteSqlStatement(fout, q->data);
|
|
+
|
|
+ while (1)
|
|
+ {
|
|
+ res = ExecuteSqlQuery(fout, "FETCH 100 FROM _pg_dump_cursor",
|
|
+ PGRES_TUPLES_OK);
|
|
+ nfields = PQnfields(res);
|
|
+ for (tuple = 0; tuple < PQntuples(res); tuple++)
|
|
+ {
|
|
+ /*
|
|
+ * First time through, we build as much of the INSERT statement as
|
|
+ * possible in "insertStmt", which we can then just print for each
|
|
+ * line. If the table happens to have zero columns then this will
|
|
+ * be a complete statement, otherwise it will end in "VALUES(" and
|
|
+ * be ready to have the row's column values appended.
|
|
+ */
|
|
+ if (insertStmt == NULL)
|
|
+ {
|
|
+ insertStmt = createPQExpBuffer();
|
|
+ appendPQExpBuffer(insertStmt, "INSERT INTO %s ",
|
|
+ fmtId(classname));
|
|
+
|
|
+ /* corner case for zero-column table */
|
|
+ if (nfields == 0)
|
|
+ {
|
|
+ appendPQExpBufferStr(insertStmt, "DEFAULT VALUES;\n");
|
|
+ }
|
|
+ else
|
|
+ {
|
|
+ /* append the list of column names if required */
|
|
+ if (column_inserts)
|
|
+ {
|
|
+ appendPQExpBufferStr(insertStmt, "(");
|
|
+ for (field = 0; field < nfields; field++)
|
|
+ {
|
|
+ if (field > 0)
|
|
+ appendPQExpBufferStr(insertStmt, ", ");
|
|
+ appendPQExpBufferStr(insertStmt,
|
|
+ fmtId(PQfname(res, field)));
|
|
+ }
|
|
+ appendPQExpBufferStr(insertStmt, ") ");
|
|
+ }
|
|
+
|
|
+ appendPQExpBufferStr(insertStmt, "VALUES (");
|
|
+ }
|
|
+ }
|
|
+
|
|
+ archputs(insertStmt->data, fout);
|
|
+
|
|
+ /* if it is zero-column table then we're done */
|
|
+ if (nfields == 0)
|
|
+ continue;
|
|
+
|
|
+ for (field = 0; field < nfields; field++)
|
|
+ {
|
|
+ if (field > 0)
|
|
+ archputs(", ", fout);
|
|
+ if (PQgetisnull(res, tuple, field))
|
|
+ {
|
|
+ archputs("NULL", fout);
|
|
+ continue;
|
|
+ }
|
|
+
|
|
+ /* XXX This code is partially duplicated in ruleutils.c */
|
|
+ switch (PQftype(res, field))
|
|
+ {
|
|
+ case INT2OID:
|
|
+ case INT4OID:
|
|
+ case INT8OID:
|
|
+ case OIDOID:
|
|
+ case FLOAT4OID:
|
|
+ case FLOAT8OID:
|
|
+ case NUMERICOID:
|
|
+ {
|
|
+ /*
|
|
+ * These types are printed without quotes unless
|
|
+ * they contain values that aren't accepted by the
|
|
+ * scanner unquoted (e.g., 'NaN'). Note that
|
|
+ * strtod() and friends might accept NaN, so we
|
|
+ * can't use that to test.
|
|
+ *
|
|
+ * In reality we only need to defend against
|
|
+ * infinity and NaN, so we need not get too crazy
|
|
+ * about pattern matching here.
|
|
+ */
|
|
+ const char *s = PQgetvalue(res, tuple, field);
|
|
+
|
|
+ if (strspn(s, "0123456789 +-eE.") == strlen(s))
|
|
+ archputs(s, fout);
|
|
+ else
|
|
+ archprintf(fout, "'%s'", s);
|
|
+ }
|
|
+ break;
|
|
+
|
|
+ case BITOID:
|
|
+ case VARBITOID:
|
|
+ archprintf(fout, "B'%s'",
|
|
+ PQgetvalue(res, tuple, field));
|
|
+ break;
|
|
+
|
|
+ case BOOLOID:
|
|
+ if (strcmp(PQgetvalue(res, tuple, field), "t") == 0)
|
|
+ archputs("true", fout);
|
|
+ else
|
|
+ archputs("false", fout);
|
|
+ break;
|
|
+
|
|
+ default:
|
|
+ /* All other types are printed as string literals. */
|
|
+ resetPQExpBuffer(q);
|
|
+ appendStringLiteralAH(q,
|
|
+ PQgetvalue(res, tuple, field),
|
|
+ fout);
|
|
+ archputs(q->data, fout);
|
|
+ break;
|
|
+ }
|
|
+ }
|
|
+ archputs(");\n", fout);
|
|
+ }
|
|
+
|
|
+ if (PQntuples(res) <= 0)
|
|
+ {
|
|
+ PQclear(res);
|
|
+ break;
|
|
+ }
|
|
+ PQclear(res);
|
|
+ }
|
|
+
|
|
+ archputs("\n\n", fout);
|
|
+
|
|
+ ExecuteSqlStatement(fout, "CLOSE _pg_dump_cursor");
|
|
+
|
|
+ destroyPQExpBuffer(q);
|
|
+ if (insertStmt != NULL)
|
|
+ destroyPQExpBuffer(insertStmt);
|
|
+
|
|
+ return 1;
|
|
+}
|
|
+
|
|
+
|
|
+/*
|
|
+ * dumpTableData -
|
|
+ * dump the contents of a single table
|
|
+ *
|
|
+ * Actually, this just makes an ArchiveEntry for the table contents.
|
|
+ */
|
|
+static void
|
|
+dumpTableData(Archive *fout, TableDataInfo *tdinfo)
|
|
+{
|
|
+ TableInfo *tbinfo = tdinfo->tdtable;
|
|
+ PQExpBuffer copyBuf = createPQExpBuffer();
|
|
+ PQExpBuffer clistBuf = createPQExpBuffer();
|
|
+ DataDumperPtr dumpFn;
|
|
+ char *copyStmt;
|
|
+
|
|
+ if (!dump_inserts)
|
|
+ {
|
|
+ /* Dump/restore using COPY */
|
|
+ dumpFn = dumpTableData_copy;
|
|
+ /* must use 2 steps here 'cause fmtId is nonreentrant */
|
|
+ appendPQExpBuffer(copyBuf, "COPY %s ",
|
|
+ fmtId(tbinfo->dobj.name));
|
|
+ appendPQExpBuffer(copyBuf, "%s %sFROM stdin;\n",
|
|
+ fmtCopyColumnList(tbinfo, clistBuf),
|
|
+ (tdinfo->oids && tbinfo->hasoids) ? "WITH OIDS " : "");
|
|
+ copyStmt = copyBuf->data;
|
|
+ }
|
|
+ else
|
|
+ {
|
|
+ /* Restore using INSERT */
|
|
+ dumpFn = dumpTableData_insert;
|
|
+ copyStmt = NULL;
|
|
+ }
|
|
+
|
|
+ /*
|
|
+ * Note: although the TableDataInfo is a full DumpableObject, we treat its
|
|
+ * dependency on its table as "special" and pass it to ArchiveEntry now.
|
|
+ * See comments for BuildArchiveDependencies.
|
|
+ */
|
|
+ ArchiveEntry(fout, tdinfo->dobj.catId, tdinfo->dobj.dumpId,
|
|
+ tbinfo->dobj.name, tbinfo->dobj.namespace->dobj.name,
|
|
+ NULL, tbinfo->rolname,
|
|
+ false, "TABLE DATA", SECTION_DATA,
|
|
+ "", "", copyStmt,
|
|
+ &(tbinfo->dobj.dumpId), 1,
|
|
+ dumpFn, tdinfo);
|
|
+
|
|
+ destroyPQExpBuffer(copyBuf);
|
|
+ destroyPQExpBuffer(clistBuf);
|
|
+}
|
|
+
|
|
+/*
|
|
+ * refreshMatViewData -
|
|
+ * load or refresh the contents of a single materialized view
|
|
+ *
|
|
+ * Actually, this just makes an ArchiveEntry for the REFRESH MATERIALIZED VIEW
|
|
+ * statement.
|
|
+ */
|
|
+static void
|
|
+refreshMatViewData(Archive *fout, TableDataInfo *tdinfo)
|
|
+{
|
|
+ TableInfo *tbinfo = tdinfo->tdtable;
|
|
+ PQExpBuffer q;
|
|
+
|
|
+ /* If the materialized view is not flagged as populated, skip this. */
|
|
+ if (!tbinfo->relispopulated)
|
|
+ return;
|
|
+
|
|
+ q = createPQExpBuffer();
|
|
+
|
|
+ appendPQExpBuffer(q, "REFRESH MATERIALIZED VIEW %s;\n",
|
|
+ fmtId(tbinfo->dobj.name));
|
|
+
|
|
+ ArchiveEntry(fout,
|
|
+ tdinfo->dobj.catId, /* catalog ID */
|
|
+ tdinfo->dobj.dumpId, /* dump ID */
|
|
+ tbinfo->dobj.name, /* Name */
|
|
+ tbinfo->dobj.namespace->dobj.name, /* Namespace */
|
|
+ NULL, /* Tablespace */
|
|
+ tbinfo->rolname, /* Owner */
|
|
+ false, /* with oids */
|
|
+ "MATERIALIZED VIEW DATA", /* Desc */
|
|
+ SECTION_POST_DATA, /* Section */
|
|
+ q->data, /* Create */
|
|
+ "", /* Del */
|
|
+ NULL, /* Copy */
|
|
+ tdinfo->dobj.dependencies, /* Deps */
|
|
+ tdinfo->dobj.nDeps, /* # Deps */
|
|
+ NULL, /* Dumper */
|
|
+ NULL); /* Dumper Arg */
|
|
+
|
|
+ destroyPQExpBuffer(q);
|
|
+}
|
|
+
|
|
+/*
|
|
+ * getTableData -
|
|
+ * set up dumpable objects representing the contents of tables
|
|
+ */
|
|
+static void
|
|
+getTableData(TableInfo *tblinfo, int numTables, bool oids)
|
|
+{
|
|
+ int i;
|
|
+
|
|
+ for (i = 0; i < numTables; i++)
|
|
+ {
|
|
+ if (tblinfo[i].dobj.dump)
|
|
+ makeTableDataInfo(&(tblinfo[i]), oids);
|
|
+ }
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Make a dumpable object for the data of this specific table
|
|
+ *
|
|
+ * Note: we make a TableDataInfo if and only if we are going to dump the
|
|
+ * table data; the "dump" flag in such objects isn't used.
|
|
+ */
|
|
+static void
|
|
+makeTableDataInfo(TableInfo *tbinfo, bool oids)
|
|
+{
|
|
+ TableDataInfo *tdinfo;
|
|
+
|
|
+ /*
|
|
+ * Nothing to do if we already decided to dump the table. This will
|
|
+ * happen for "config" tables.
|
|
+ */
|
|
+ if (tbinfo->dataObj != NULL)
|
|
+ return;
|
|
+
|
|
+ /* Skip VIEWs (no data to dump) */
|
|
+ if (tbinfo->relkind == RELKIND_VIEW)
|
|
+ return;
|
|
+ /* Skip FOREIGN TABLEs (no data to dump) */
|
|
+ if (tbinfo->relkind == RELKIND_FOREIGN_TABLE)
|
|
+ return;
|
|
+
|
|
+ /* Don't dump data in unlogged tables, if so requested */
|
|
+ if (tbinfo->relpersistence == RELPERSISTENCE_UNLOGGED &&
|
|
+ no_unlogged_table_data)
|
|
+ return;
|
|
+
|
|
+ /* Check that the data is not explicitly excluded */
|
|
+ if (simple_oid_list_member(&tabledata_exclude_oids,
|
|
+ tbinfo->dobj.catId.oid))
|
|
+ return;
|
|
+
|
|
+ /* OK, let's dump it */
|
|
+ tdinfo = (TableDataInfo *) pg_malloc(sizeof(TableDataInfo));
|
|
+
|
|
+ if (tbinfo->relkind == RELKIND_MATVIEW)
|
|
+ tdinfo->dobj.objType = DO_REFRESH_MATVIEW;
|
|
+ else
|
|
+ tdinfo->dobj.objType = DO_TABLE_DATA;
|
|
+
|
|
+ /*
|
|
+ * Note: use tableoid 0 so that this object won't be mistaken for
|
|
+ * something that pg_depend entries apply to.
|
|
+ */
|
|
+ tdinfo->dobj.catId.tableoid = 0;
|
|
+ tdinfo->dobj.catId.oid = tbinfo->dobj.catId.oid;
|
|
+ AssignDumpId(&tdinfo->dobj);
|
|
+ tdinfo->dobj.name = tbinfo->dobj.name;
|
|
+ tdinfo->dobj.namespace = tbinfo->dobj.namespace;
|
|
+ tdinfo->tdtable = tbinfo;
|
|
+ tdinfo->oids = oids;
|
|
+ tdinfo->filtercond = NULL; /* might get set later */
|
|
+ addObjectDependency(&tdinfo->dobj, tbinfo->dobj.dumpId);
|
|
+
|
|
+ tbinfo->dataObj = tdinfo;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * The refresh for a materialized view must be dependent on the refresh for
|
|
+ * any materialized view that this one is dependent on.
|
|
+ *
|
|
+ * This must be called after all the objects are created, but before they are
|
|
+ * sorted.
|
|
+ */
|
|
+static void
|
|
+buildMatViewRefreshDependencies(Archive *fout)
|
|
+{
|
|
+ PQExpBuffer query;
|
|
+ PGresult *res;
|
|
+ int ntups,
|
|
+ i;
|
|
+ int i_classid,
|
|
+ i_objid,
|
|
+ i_refobjid;
|
|
+
|
|
+ /* No Mat Views before 9.3. */
|
|
+ if (fout->remoteVersion < 90300)
|
|
+ return;
|
|
+
|
|
+ /* Make sure we are in proper schema */
|
|
+ selectSourceSchema(fout, "pg_catalog");
|
|
+
|
|
+ query = createPQExpBuffer();
|
|
+
|
|
+ appendPQExpBufferStr(query, "WITH RECURSIVE w AS "
|
|
+ "( "
|
|
+ "SELECT d1.objid, d2.refobjid, c2.relkind AS refrelkind "
|
|
+ "FROM pg_depend d1 "
|
|
+ "JOIN pg_class c1 ON c1.oid = d1.objid "
|
|
+ "AND c1.relkind = 'm' "
|
|
+ "JOIN pg_rewrite r1 ON r1.ev_class = d1.objid "
|
|
+ "JOIN pg_depend d2 ON d2.classid = 'pg_rewrite'::regclass "
|
|
+ "AND d2.objid = r1.oid "
|
|
+ "AND d2.refobjid <> d1.objid "
|
|
+ "JOIN pg_class c2 ON c2.oid = d2.refobjid "
|
|
+ "AND c2.relkind IN ('m','v') "
|
|
+ "WHERE d1.classid = 'pg_class'::regclass "
|
|
+ "UNION "
|
|
+ "SELECT w.objid, d3.refobjid, c3.relkind "
|
|
+ "FROM w "
|
|
+ "JOIN pg_rewrite r3 ON r3.ev_class = w.refobjid "
|
|
+ "JOIN pg_depend d3 ON d3.classid = 'pg_rewrite'::regclass "
|
|
+ "AND d3.objid = r3.oid "
|
|
+ "AND d3.refobjid <> w.refobjid "
|
|
+ "JOIN pg_class c3 ON c3.oid = d3.refobjid "
|
|
+ "AND c3.relkind IN ('m','v') "
|
|
+ ") "
|
|
+ "SELECT 'pg_class'::regclass::oid AS classid, objid, refobjid "
|
|
+ "FROM w "
|
|
+ "WHERE refrelkind = 'm'");
|
|
+
|
|
+ res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
|
|
+
|
|
+ ntups = PQntuples(res);
|
|
+
|
|
+ i_classid = PQfnumber(res, "classid");
|
|
+ i_objid = PQfnumber(res, "objid");
|
|
+ i_refobjid = PQfnumber(res, "refobjid");
|
|
+
|
|
+ for (i = 0; i < ntups; i++)
|
|
+ {
|
|
+ CatalogId objId;
|
|
+ CatalogId refobjId;
|
|
+ DumpableObject *dobj;
|
|
+ DumpableObject *refdobj;
|
|
+ TableInfo *tbinfo;
|
|
+ TableInfo *reftbinfo;
|
|
+
|
|
+ objId.tableoid = atooid(PQgetvalue(res, i, i_classid));
|
|
+ objId.oid = atooid(PQgetvalue(res, i, i_objid));
|
|
+ refobjId.tableoid = objId.tableoid;
|
|
+ refobjId.oid = atooid(PQgetvalue(res, i, i_refobjid));
|
|
+
|
|
+ dobj = findObjectByCatalogId(objId);
|
|
+ if (dobj == NULL)
|
|
+ continue;
|
|
+
|
|
+ Assert(dobj->objType == DO_TABLE);
|
|
+ tbinfo = (TableInfo *) dobj;
|
|
+ Assert(tbinfo->relkind == RELKIND_MATVIEW);
|
|
+ dobj = (DumpableObject *) tbinfo->dataObj;
|
|
+ if (dobj == NULL)
|
|
+ continue;
|
|
+ Assert(dobj->objType == DO_REFRESH_MATVIEW);
|
|
+
|
|
+ refdobj = findObjectByCatalogId(refobjId);
|
|
+ if (refdobj == NULL)
|
|
+ continue;
|
|
+
|
|
+ Assert(refdobj->objType == DO_TABLE);
|
|
+ reftbinfo = (TableInfo *) refdobj;
|
|
+ Assert(reftbinfo->relkind == RELKIND_MATVIEW);
|
|
+ refdobj = (DumpableObject *) reftbinfo->dataObj;
|
|
+ if (refdobj == NULL)
|
|
+ continue;
|
|
+ Assert(refdobj->objType == DO_REFRESH_MATVIEW);
|
|
+
|
|
+ addObjectDependency(dobj, refdobj->dumpId);
|
|
+
|
|
+ if (!reftbinfo->relispopulated)
|
|
+ tbinfo->relispopulated = false;
|
|
+ }
|
|
+
|
|
+ PQclear(res);
|
|
+
|
|
+ destroyPQExpBuffer(query);
|
|
+}
|
|
+
|
|
+/*
|
|
+ * getTableDataFKConstraints -
|
|
+ * add dump-order dependencies reflecting foreign key constraints
|
|
+ *
|
|
+ * This code is executed only in a data-only dump --- in schema+data dumps
|
|
+ * we handle foreign key issues by not creating the FK constraints until
|
|
+ * after the data is loaded. In a data-only dump, however, we want to
|
|
+ * order the table data objects in such a way that a table's referenced
|
|
+ * tables are restored first. (In the presence of circular references or
|
|
+ * self-references this may be impossible; we'll detect and complain about
|
|
+ * that during the dependency sorting step.)
|
|
+ */
|
|
+static void
|
|
+getTableDataFKConstraints(void)
|
|
+{
|
|
+ DumpableObject **dobjs;
|
|
+ int numObjs;
|
|
+ int i;
|
|
+
|
|
+ /* Search through all the dumpable objects for FK constraints */
|
|
+ getDumpableObjects(&dobjs, &numObjs);
|
|
+ for (i = 0; i < numObjs; i++)
|
|
+ {
|
|
+ if (dobjs[i]->objType == DO_FK_CONSTRAINT)
|
|
+ {
|
|
+ ConstraintInfo *cinfo = (ConstraintInfo *) dobjs[i];
|
|
+ TableInfo *ftable;
|
|
+
|
|
+ /* Not interesting unless both tables are to be dumped */
|
|
+ if (cinfo->contable == NULL ||
|
|
+ cinfo->contable->dataObj == NULL)
|
|
+ continue;
|
|
+ ftable = findTableByOid(cinfo->confrelid);
|
|
+ if (ftable == NULL ||
|
|
+ ftable->dataObj == NULL)
|
|
+ continue;
|
|
+
|
|
+ /*
|
|
+ * Okay, make referencing table's TABLE_DATA object depend on the
|
|
+ * referenced table's TABLE_DATA object.
|
|
+ */
|
|
+ addObjectDependency(&cinfo->contable->dataObj->dobj,
|
|
+ ftable->dataObj->dobj.dumpId);
|
|
+ }
|
|
+ }
|
|
+ free(dobjs);
|
|
+}
|
|
+
|
|
+
|
|
+/*
|
|
+ * guessConstraintInheritance:
|
|
+ * In pre-8.4 databases, we can't tell for certain which constraints
|
|
+ * are inherited. We assume a CHECK constraint is inherited if its name
|
|
+ * matches the name of any constraint in the parent. Originally this code
|
|
+ * tried to compare the expression texts, but that can fail for various
|
|
+ * reasons --- for example, if the parent and child tables are in different
|
|
+ * schemas, reverse-listing of function calls may produce different text
|
|
+ * (schema-qualified or not) depending on search path.
|
|
+ *
|
|
+ * In 8.4 and up we can rely on the conislocal field to decide which
|
|
+ * constraints must be dumped; much safer.
|
|
+ *
|
|
+ * This function assumes all conislocal flags were initialized to TRUE.
|
|
+ * It clears the flag on anything that seems to be inherited.
|
|
+ */
|
|
+static void
|
|
+guessConstraintInheritance(TableInfo *tblinfo, int numTables)
|
|
+{
|
|
+ int i,
|
|
+ j,
|
|
+ k;
|
|
+
|
|
+ for (i = 0; i < numTables; i++)
|
|
+ {
|
|
+ TableInfo *tbinfo = &(tblinfo[i]);
|
|
+ int numParents;
|
|
+ TableInfo **parents;
|
|
+ TableInfo *parent;
|
|
+
|
|
+ /* Sequences and views never have parents */
|
|
+ if (tbinfo->relkind == RELKIND_SEQUENCE ||
|
|
+ tbinfo->relkind == RELKIND_VIEW)
|
|
+ continue;
|
|
+
|
|
+ /* Don't bother computing anything for non-target tables, either */
|
|
+ if (!tbinfo->dobj.dump)
|
|
+ continue;
|
|
+
|
|
+ numParents = tbinfo->numParents;
|
|
+ parents = tbinfo->parents;
|
|
+
|
|
+ if (numParents == 0)
|
|
+ continue; /* nothing to see here, move along */
|
|
+
|
|
+ /* scan for inherited CHECK constraints */
|
|
+ for (j = 0; j < tbinfo->ncheck; j++)
|
|
+ {
|
|
+ ConstraintInfo *constr;
|
|
+
|
|
+ constr = &(tbinfo->checkexprs[j]);
|
|
+
|
|
+ for (k = 0; k < numParents; k++)
|
|
+ {
|
|
+ int l;
|
|
+
|
|
+ parent = parents[k];
|
|
+ for (l = 0; l < parent->ncheck; l++)
|
|
+ {
|
|
+ ConstraintInfo *pconstr = &(parent->checkexprs[l]);
|
|
+
|
|
+ if (strcmp(pconstr->dobj.name, constr->dobj.name) == 0)
|
|
+ {
|
|
+ constr->conislocal = false;
|
|
+ break;
|
|
+ }
|
|
+ }
|
|
+ if (!constr->conislocal)
|
|
+ break;
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+}
|
|
+
|
|
+
|
|
+/*
|
|
+ * dumpDatabase:
|
|
+ * dump the database definition
|
|
+ */
|
|
+static void
|
|
+dumpDatabase(Archive *fout)
|
|
+{
|
|
+ PQExpBuffer dbQry = createPQExpBuffer();
|
|
+ PQExpBuffer delQry = createPQExpBuffer();
|
|
+ PQExpBuffer creaQry = createPQExpBuffer();
|
|
+ PGconn *conn = GetConnection(fout);
|
|
+ PGresult *res;
|
|
+ int i_tableoid,
|
|
+ i_oid,
|
|
+ i_dba,
|
|
+ i_encoding,
|
|
+ i_collate,
|
|
+ i_ctype,
|
|
+ i_frozenxid,
|
|
+ i_minmxid,
|
|
+ i_tablespace;
|
|
+ CatalogId dbCatId;
|
|
+ DumpId dbDumpId;
|
|
+ const char *datname,
|
|
+ *dba,
|
|
+ *encoding,
|
|
+ *collate,
|
|
+ *ctype,
|
|
+ *tablespace;
|
|
+ uint32 frozenxid, minmxid;
|
|
+
|
|
+ datname = PQdb(conn);
|
|
+
|
|
+ if (g_verbose)
|
|
+ write_msg(NULL, "saving database definition\n");
|
|
+
|
|
+ /* Make sure we are in proper schema */
|
|
+ selectSourceSchema(fout, "pg_catalog");
|
|
+
|
|
+ /* Get the database owner and parameters from pg_database */
|
|
+ if (fout->remoteVersion >= 90300)
|
|
+ {
|
|
+ appendPQExpBuffer(dbQry, "SELECT tableoid, oid, "
|
|
+ "(%s datdba) AS dba, "
|
|
+ "pg_encoding_to_char(encoding) AS encoding, "
|
|
+ "datcollate, datctype, datfrozenxid, datminmxid, "
|
|
+ "(SELECT spcname FROM pg_tablespace t WHERE t.oid = dattablespace) AS tablespace, "
|
|
+ "shobj_description(oid, 'pg_database') AS description "
|
|
+
|
|
+ "FROM pg_database "
|
|
+ "WHERE datname = ",
|
|
+ username_subquery);
|
|
+ appendStringLiteralAH(dbQry, datname, fout);
|
|
+ }
|
|
+ else if (fout->remoteVersion >= 80400)
|
|
+ {
|
|
+ appendPQExpBuffer(dbQry, "SELECT tableoid, oid, "
|
|
+ "(%s datdba) AS dba, "
|
|
+ "pg_encoding_to_char(encoding) AS encoding, "
|
|
+ "datcollate, datctype, datfrozenxid, 0 AS datminmxid, "
|
|
+ "(SELECT spcname FROM pg_tablespace t WHERE t.oid = dattablespace) AS tablespace, "
|
|
+ "shobj_description(oid, 'pg_database') AS description "
|
|
+
|
|
+ "FROM pg_database "
|
|
+ "WHERE datname = ",
|
|
+ username_subquery);
|
|
+ appendStringLiteralAH(dbQry, datname, fout);
|
|
+ }
|
|
+ else if (fout->remoteVersion >= 80200)
|
|
+ {
|
|
+ appendPQExpBuffer(dbQry, "SELECT tableoid, oid, "
|
|
+ "(%s datdba) AS dba, "
|
|
+ "pg_encoding_to_char(encoding) AS encoding, "
|
|
+ "NULL AS datcollate, NULL AS datctype, datfrozenxid, 0 AS datminmxid, "
|
|
+ "(SELECT spcname FROM pg_tablespace t WHERE t.oid = dattablespace) AS tablespace, "
|
|
+ "shobj_description(oid, 'pg_database') AS description "
|
|
+
|
|
+ "FROM pg_database "
|
|
+ "WHERE datname = ",
|
|
+ username_subquery);
|
|
+ appendStringLiteralAH(dbQry, datname, fout);
|
|
+ }
|
|
+ else if (fout->remoteVersion >= 80000)
|
|
+ {
|
|
+ appendPQExpBuffer(dbQry, "SELECT tableoid, oid, "
|
|
+ "(%s datdba) AS dba, "
|
|
+ "pg_encoding_to_char(encoding) AS encoding, "
|
|
+ "NULL AS datcollate, NULL AS datctype, datfrozenxid, 0 AS datminmxid, "
|
|
+ "(SELECT spcname FROM pg_tablespace t WHERE t.oid = dattablespace) AS tablespace "
|
|
+ "FROM pg_database "
|
|
+ "WHERE datname = ",
|
|
+ username_subquery);
|
|
+ appendStringLiteralAH(dbQry, datname, fout);
|
|
+ }
|
|
+ else if (fout->remoteVersion >= 70100)
|
|
+ {
|
|
+ appendPQExpBuffer(dbQry, "SELECT tableoid, oid, "
|
|
+ "(%s datdba) AS dba, "
|
|
+ "pg_encoding_to_char(encoding) AS encoding, "
|
|
+ "NULL AS datcollate, NULL AS datctype, "
|
|
+ "0 AS datfrozenxid, 0 AS datminmxid, "
|
|
+ "NULL AS tablespace "
|
|
+ "FROM pg_database "
|
|
+ "WHERE datname = ",
|
|
+ username_subquery);
|
|
+ appendStringLiteralAH(dbQry, datname, fout);
|
|
+ }
|
|
+ else
|
|
+ {
|
|
+ appendPQExpBuffer(dbQry, "SELECT "
|
|
+ "(SELECT oid FROM pg_class WHERE relname = 'pg_database') AS tableoid, "
|
|
+ "oid, "
|
|
+ "(%s datdba) AS dba, "
|
|
+ "pg_encoding_to_char(encoding) AS encoding, "
|
|
+ "NULL AS datcollate, NULL AS datctype, "
|
|
+ "0 AS datfrozenxid, 0 AS datminmxid, "
|
|
+ "NULL AS tablespace "
|
|
+ "FROM pg_database "
|
|
+ "WHERE datname = ",
|
|
+ username_subquery);
|
|
+ appendStringLiteralAH(dbQry, datname, fout);
|
|
+ }
|
|
+
|
|
+ res = ExecuteSqlQueryForSingleRow(fout, dbQry->data);
|
|
+
|
|
+ i_tableoid = PQfnumber(res, "tableoid");
|
|
+ i_oid = PQfnumber(res, "oid");
|
|
+ i_dba = PQfnumber(res, "dba");
|
|
+ i_encoding = PQfnumber(res, "encoding");
|
|
+ i_collate = PQfnumber(res, "datcollate");
|
|
+ i_ctype = PQfnumber(res, "datctype");
|
|
+ i_frozenxid = PQfnumber(res, "datfrozenxid");
|
|
+ i_minmxid = PQfnumber(res, "datminmxid");
|
|
+ i_tablespace = PQfnumber(res, "tablespace");
|
|
+
|
|
+ dbCatId.tableoid = atooid(PQgetvalue(res, 0, i_tableoid));
|
|
+ dbCatId.oid = atooid(PQgetvalue(res, 0, i_oid));
|
|
+ dba = PQgetvalue(res, 0, i_dba);
|
|
+ encoding = PQgetvalue(res, 0, i_encoding);
|
|
+ collate = PQgetvalue(res, 0, i_collate);
|
|
+ ctype = PQgetvalue(res, 0, i_ctype);
|
|
+ frozenxid = atooid(PQgetvalue(res, 0, i_frozenxid));
|
|
+ minmxid = atooid(PQgetvalue(res, 0, i_minmxid));
|
|
+ tablespace = PQgetvalue(res, 0, i_tablespace);
|
|
+
|
|
+ appendPQExpBuffer(creaQry, "CREATE DATABASE %s WITH TEMPLATE = template0",
|
|
+ fmtId(datname));
|
|
+ if (strlen(encoding) > 0)
|
|
+ {
|
|
+ appendPQExpBufferStr(creaQry, " ENCODING = ");
|
|
+ appendStringLiteralAH(creaQry, encoding, fout);
|
|
+ }
|
|
+ if (strlen(collate) > 0)
|
|
+ {
|
|
+ appendPQExpBufferStr(creaQry, " LC_COLLATE = ");
|
|
+ appendStringLiteralAH(creaQry, collate, fout);
|
|
+ }
|
|
+ if (strlen(ctype) > 0)
|
|
+ {
|
|
+ appendPQExpBufferStr(creaQry, " LC_CTYPE = ");
|
|
+ appendStringLiteralAH(creaQry, ctype, fout);
|
|
+ }
|
|
+ if (strlen(tablespace) > 0 && strcmp(tablespace, "pg_default") != 0)
|
|
+ appendPQExpBuffer(creaQry, " TABLESPACE = %s",
|
|
+ fmtId(tablespace));
|
|
+ appendPQExpBufferStr(creaQry, ";\n");
|
|
+
|
|
+ if (binary_upgrade)
|
|
+ {
|
|
+ appendPQExpBufferStr(creaQry, "\n-- For binary upgrade, set datfrozenxid and datminmxid.\n");
|
|
+ appendPQExpBuffer(creaQry, "UPDATE pg_catalog.pg_database\n"
|
|
+ "SET datfrozenxid = '%u', datminmxid = '%u'\n"
|
|
+ "WHERE datname = ",
|
|
+ frozenxid, minmxid);
|
|
+ appendStringLiteralAH(creaQry, datname, fout);
|
|
+ appendPQExpBufferStr(creaQry, ";\n");
|
|
+
|
|
+ }
|
|
+
|
|
+ appendPQExpBuffer(delQry, "DROP DATABASE %s;\n",
|
|
+ fmtId(datname));
|
|
+
|
|
+ dbDumpId = createDumpId();
|
|
+
|
|
+ ArchiveEntry(fout,
|
|
+ dbCatId, /* catalog ID */
|
|
+ dbDumpId, /* dump ID */
|
|
+ datname, /* Name */
|
|
+ NULL, /* Namespace */
|
|
+ NULL, /* Tablespace */
|
|
+ dba, /* Owner */
|
|
+ false, /* with oids */
|
|
+ "DATABASE", /* Desc */
|
|
+ SECTION_PRE_DATA, /* Section */
|
|
+ creaQry->data, /* Create */
|
|
+ delQry->data, /* Del */
|
|
+ NULL, /* Copy */
|
|
+ NULL, /* Deps */
|
|
+ 0, /* # Deps */
|
|
+ NULL, /* Dumper */
|
|
+ NULL); /* Dumper Arg */
|
|
+
|
|
+ /*
|
|
+ * pg_largeobject and pg_largeobject_metadata come from the old system
|
|
+ * intact, so set their relfrozenxids and relminmxids.
|
|
+ */
|
|
+ if (binary_upgrade)
|
|
+ {
|
|
+ PGresult *lo_res;
|
|
+ PQExpBuffer loFrozenQry = createPQExpBuffer();
|
|
+ PQExpBuffer loOutQry = createPQExpBuffer();
|
|
+ int i_relfrozenxid, i_relminmxid;
|
|
+
|
|
+ /*
|
|
+ * pg_largeobject
|
|
+ */
|
|
+ if (fout->remoteVersion >= 90300)
|
|
+ appendPQExpBuffer(loFrozenQry, "SELECT relfrozenxid, relminmxid\n"
|
|
+ "FROM pg_catalog.pg_class\n"
|
|
+ "WHERE oid = %u;\n",
|
|
+ LargeObjectRelationId);
|
|
+ else
|
|
+ appendPQExpBuffer(loFrozenQry, "SELECT relfrozenxid, 0 AS relminmxid\n"
|
|
+ "FROM pg_catalog.pg_class\n"
|
|
+ "WHERE oid = %u;\n",
|
|
+ LargeObjectRelationId);
|
|
+
|
|
+ lo_res = ExecuteSqlQueryForSingleRow(fout, loFrozenQry->data);
|
|
+
|
|
+ i_relfrozenxid = PQfnumber(lo_res, "relfrozenxid");
|
|
+ i_relminmxid = PQfnumber(lo_res, "relminmxid");
|
|
+
|
|
+ appendPQExpBufferStr(loOutQry, "\n-- For binary upgrade, set pg_largeobject relfrozenxid and relminmxid\n");
|
|
+ appendPQExpBuffer(loOutQry, "UPDATE pg_catalog.pg_class\n"
|
|
+ "SET relfrozenxid = '%u', relminmxid = '%u'\n"
|
|
+ "WHERE oid = %u;\n",
|
|
+ atoi(PQgetvalue(lo_res, 0, i_relfrozenxid)),
|
|
+ atoi(PQgetvalue(lo_res, 0, i_relminmxid)),
|
|
+ LargeObjectRelationId);
|
|
+ ArchiveEntry(fout, nilCatalogId, createDumpId(),
|
|
+ "pg_largeobject", NULL, NULL, "",
|
|
+ false, "pg_largeobject", SECTION_PRE_DATA,
|
|
+ loOutQry->data, "", NULL,
|
|
+ NULL, 0,
|
|
+ NULL, NULL);
|
|
+
|
|
+ PQclear(lo_res);
|
|
+
|
|
+ /*
|
|
+ * pg_largeobject_metadata
|
|
+ */
|
|
+ if (fout->remoteVersion >= 90000)
|
|
+ {
|
|
+ resetPQExpBuffer(loFrozenQry);
|
|
+ resetPQExpBuffer(loOutQry);
|
|
+
|
|
+ if (fout->remoteVersion >= 90300)
|
|
+ appendPQExpBuffer(loFrozenQry, "SELECT relfrozenxid, relminmxid\n"
|
|
+ "FROM pg_catalog.pg_class\n"
|
|
+ "WHERE oid = %u;\n",
|
|
+ LargeObjectMetadataRelationId);
|
|
+ else
|
|
+ appendPQExpBuffer(loFrozenQry, "SELECT relfrozenxid, 0 AS relminmxid\n"
|
|
+ "FROM pg_catalog.pg_class\n"
|
|
+ "WHERE oid = %u;\n",
|
|
+ LargeObjectMetadataRelationId);
|
|
+
|
|
+ lo_res = ExecuteSqlQueryForSingleRow(fout, loFrozenQry->data);
|
|
+
|
|
+ i_relfrozenxid = PQfnumber(lo_res, "relfrozenxid");
|
|
+ i_relminmxid = PQfnumber(lo_res, "relminmxid");
|
|
+
|
|
+ appendPQExpBufferStr(loOutQry, "\n-- For binary upgrade, set pg_largeobject_metadata relfrozenxid and relminmxid\n");
|
|
+ appendPQExpBuffer(loOutQry, "UPDATE pg_catalog.pg_class\n"
|
|
+ "SET relfrozenxid = '%u', relminmxid = '%u'\n"
|
|
+ "WHERE oid = %u;\n",
|
|
+ atoi(PQgetvalue(lo_res, 0, i_relfrozenxid)),
|
|
+ atoi(PQgetvalue(lo_res, 0, i_relminmxid)),
|
|
+ LargeObjectMetadataRelationId);
|
|
+ ArchiveEntry(fout, nilCatalogId, createDumpId(),
|
|
+ "pg_largeobject_metadata", NULL, NULL, "",
|
|
+ false, "pg_largeobject_metadata", SECTION_PRE_DATA,
|
|
+ loOutQry->data, "", NULL,
|
|
+ NULL, 0,
|
|
+ NULL, NULL);
|
|
+
|
|
+ PQclear(lo_res);
|
|
+ }
|
|
+
|
|
+ destroyPQExpBuffer(loFrozenQry);
|
|
+ destroyPQExpBuffer(loOutQry);
|
|
+ }
|
|
+
|
|
+ /* Dump DB comment if any */
|
|
+ if (fout->remoteVersion >= 80200)
|
|
+ {
|
|
+ /*
|
|
+ * 8.2 keeps comments on shared objects in a shared table, so we
|
|
+ * cannot use the dumpComment used for other database objects.
|
|
+ */
|
|
+ char *comment = PQgetvalue(res, 0, PQfnumber(res, "description"));
|
|
+
|
|
+ if (comment && strlen(comment))
|
|
+ {
|
|
+ resetPQExpBuffer(dbQry);
|
|
+
|
|
+ /*
|
|
+ * Generates warning when loaded into a differently-named
|
|
+ * database.
|
|
+ */
|
|
+ appendPQExpBuffer(dbQry, "COMMENT ON DATABASE %s IS ", fmtId(datname));
|
|
+ appendStringLiteralAH(dbQry, comment, fout);
|
|
+ appendPQExpBufferStr(dbQry, ";\n");
|
|
+
|
|
+ ArchiveEntry(fout, dbCatId, createDumpId(), datname, NULL, NULL,
|
|
+ dba, false, "COMMENT", SECTION_NONE,
|
|
+ dbQry->data, "", NULL,
|
|
+ &dbDumpId, 1, NULL, NULL);
|
|
+ }
|
|
+ }
|
|
+ else
|
|
+ {
|
|
+ resetPQExpBuffer(dbQry);
|
|
+ appendPQExpBuffer(dbQry, "DATABASE %s", fmtId(datname));
|
|
+ dumpComment(fout, dbQry->data, NULL, "",
|
|
+ dbCatId, 0, dbDumpId);
|
|
+ }
|
|
+
|
|
+ /* Dump shared security label. */
|
|
+ if (!no_security_labels && fout->remoteVersion >= 90200)
|
|
+ {
|
|
+ PGresult *shres;
|
|
+ PQExpBuffer seclabelQry;
|
|
+
|
|
+ seclabelQry = createPQExpBuffer();
|
|
+
|
|
+ buildShSecLabelQuery(conn, "pg_database", dbCatId.oid, seclabelQry);
|
|
+ shres = ExecuteSqlQuery(fout, seclabelQry->data, PGRES_TUPLES_OK);
|
|
+ resetPQExpBuffer(seclabelQry);
|
|
+ emitShSecLabels(conn, shres, seclabelQry, "DATABASE", datname);
|
|
+ if (strlen(seclabelQry->data))
|
|
+ ArchiveEntry(fout, dbCatId, createDumpId(), datname, NULL, NULL,
|
|
+ dba, false, "SECURITY LABEL", SECTION_NONE,
|
|
+ seclabelQry->data, "", NULL,
|
|
+ &dbDumpId, 1, NULL, NULL);
|
|
+ destroyPQExpBuffer(seclabelQry);
|
|
+ PQclear(shres);
|
|
+ }
|
|
+
|
|
+ PQclear(res);
|
|
+
|
|
+ destroyPQExpBuffer(dbQry);
|
|
+ destroyPQExpBuffer(delQry);
|
|
+ destroyPQExpBuffer(creaQry);
|
|
+}
|
|
+
|
|
+
|
|
+/*
|
|
+ * dumpEncoding: put the correct encoding into the archive
|
|
+ */
|
|
+static void
|
|
+dumpEncoding(Archive *AH)
|
|
+{
|
|
+ const char *encname = pg_encoding_to_char(AH->encoding);
|
|
+ PQExpBuffer qry = createPQExpBuffer();
|
|
+
|
|
+ if (g_verbose)
|
|
+ write_msg(NULL, "saving encoding = %s\n", encname);
|
|
+
|
|
+ appendPQExpBufferStr(qry, "SET client_encoding = ");
|
|
+ appendStringLiteralAH(qry, encname, AH);
|
|
+ appendPQExpBufferStr(qry, ";\n");
|
|
+
|
|
+ ArchiveEntry(AH, nilCatalogId, createDumpId(),
|
|
+ "ENCODING", NULL, NULL, "",
|
|
+ false, "ENCODING", SECTION_PRE_DATA,
|
|
+ qry->data, "", NULL,
|
|
+ NULL, 0,
|
|
+ NULL, NULL);
|
|
+
|
|
+ destroyPQExpBuffer(qry);
|
|
+}
|
|
+
|
|
+
|
|
+/*
|
|
+ * dumpStdStrings: put the correct escape string behavior into the archive
|
|
+ */
|
|
+static void
|
|
+dumpStdStrings(Archive *AH)
|
|
+{
|
|
+ const char *stdstrings = AH->std_strings ? "on" : "off";
|
|
+ PQExpBuffer qry = createPQExpBuffer();
|
|
+
|
|
+ if (g_verbose)
|
|
+ write_msg(NULL, "saving standard_conforming_strings = %s\n",
|
|
+ stdstrings);
|
|
+
|
|
+ appendPQExpBuffer(qry, "SET standard_conforming_strings = '%s';\n",
|
|
+ stdstrings);
|
|
+
|
|
+ ArchiveEntry(AH, nilCatalogId, createDumpId(),
|
|
+ "STDSTRINGS", NULL, NULL, "",
|
|
+ false, "STDSTRINGS", SECTION_PRE_DATA,
|
|
+ qry->data, "", NULL,
|
|
+ NULL, 0,
|
|
+ NULL, NULL);
|
|
+
|
|
+ destroyPQExpBuffer(qry);
|
|
+}
|
|
+
|
|
+
|
|
+/*
|
|
+ * getBlobs:
|
|
+ * Collect schema-level data about large objects
|
|
+ */
|
|
+static void
|
|
+getBlobs(Archive *fout)
|
|
+{
|
|
+ PQExpBuffer blobQry = createPQExpBuffer();
|
|
+ BlobInfo *binfo;
|
|
+ DumpableObject *bdata;
|
|
+ PGresult *res;
|
|
+ int ntups;
|
|
+ int i;
|
|
+
|
|
+ /* Verbose message */
|
|
+ if (g_verbose)
|
|
+ write_msg(NULL, "reading large objects\n");
|
|
+
|
|
+ /* Make sure we are in proper schema */
|
|
+ selectSourceSchema(fout, "pg_catalog");
|
|
+
|
|
+ /* Fetch BLOB OIDs, and owner/ACL data if >= 9.0 */
|
|
+ if (fout->remoteVersion >= 90000)
|
|
+ appendPQExpBuffer(blobQry,
|
|
+ "SELECT oid, (%s lomowner) AS rolname, lomacl"
|
|
+ " FROM pg_largeobject_metadata",
|
|
+ username_subquery);
|
|
+ else if (fout->remoteVersion >= 70100)
|
|
+ appendPQExpBufferStr(blobQry,
|
|
+ "SELECT DISTINCT loid, NULL::oid, NULL::oid"
|
|
+ " FROM pg_largeobject");
|
|
+ else
|
|
+ appendPQExpBufferStr(blobQry,
|
|
+ "SELECT oid, NULL::oid, NULL::oid"
|
|
+ " FROM pg_class WHERE relkind = 'l'");
|
|
+
|
|
+ res = ExecuteSqlQuery(fout, blobQry->data, PGRES_TUPLES_OK);
|
|
+
|
|
+ ntups = PQntuples(res);
|
|
+ if (ntups > 0)
|
|
+ {
|
|
+ /*
|
|
+ * Each large object has its own BLOB archive entry.
|
|
+ */
|
|
+ binfo = (BlobInfo *) pg_malloc(ntups * sizeof(BlobInfo));
|
|
+
|
|
+ for (i = 0; i < ntups; i++)
|
|
+ {
|
|
+ binfo[i].dobj.objType = DO_BLOB;
|
|
+ binfo[i].dobj.catId.tableoid = LargeObjectRelationId;
|
|
+ binfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, 0));
|
|
+ AssignDumpId(&binfo[i].dobj);
|
|
+
|
|
+ binfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, 0));
|
|
+ if (!PQgetisnull(res, i, 1))
|
|
+ binfo[i].rolname = pg_strdup(PQgetvalue(res, i, 1));
|
|
+ else
|
|
+ binfo[i].rolname = "";
|
|
+ if (!PQgetisnull(res, i, 2))
|
|
+ binfo[i].blobacl = pg_strdup(PQgetvalue(res, i, 2));
|
|
+ else
|
|
+ binfo[i].blobacl = NULL;
|
|
+ }
|
|
+
|
|
+ /*
|
|
+ * If we have any large objects, a "BLOBS" archive entry is needed.
|
|
+ * This is just a placeholder for sorting; it carries no data now.
|
|
+ */
|
|
+ bdata = (DumpableObject *) pg_malloc(sizeof(DumpableObject));
|
|
+ bdata->objType = DO_BLOB_DATA;
|
|
+ bdata->catId = nilCatalogId;
|
|
+ AssignDumpId(bdata);
|
|
+ bdata->name = pg_strdup("BLOBS");
|
|
+ }
|
|
+
|
|
+ PQclear(res);
|
|
+ destroyPQExpBuffer(blobQry);
|
|
+}
|
|
+
|
|
+/*
|
|
+ * dumpBlob
|
|
+ *
|
|
+ * dump the definition (metadata) of the given large object
|
|
+ */
|
|
+static void
|
|
+dumpBlob(Archive *fout, BlobInfo *binfo)
|
|
+{
|
|
+ PQExpBuffer cquery = createPQExpBuffer();
|
|
+ PQExpBuffer dquery = createPQExpBuffer();
|
|
+
|
|
+ appendPQExpBuffer(cquery,
|
|
+ "SELECT pg_catalog.lo_create('%s');\n",
|
|
+ binfo->dobj.name);
|
|
+
|
|
+ appendPQExpBuffer(dquery,
|
|
+ "SELECT pg_catalog.lo_unlink('%s');\n",
|
|
+ binfo->dobj.name);
|
|
+
|
|
+ ArchiveEntry(fout, binfo->dobj.catId, binfo->dobj.dumpId,
|
|
+ binfo->dobj.name,
|
|
+ NULL, NULL,
|
|
+ binfo->rolname, false,
|
|
+ "BLOB", SECTION_PRE_DATA,
|
|
+ cquery->data, dquery->data, NULL,
|
|
+ NULL, 0,
|
|
+ NULL, NULL);
|
|
+
|
|
+ /* set up tag for comment and/or ACL */
|
|
+ resetPQExpBuffer(cquery);
|
|
+ appendPQExpBuffer(cquery, "LARGE OBJECT %s", binfo->dobj.name);
|
|
+
|
|
+ /* Dump comment if any */
|
|
+ dumpComment(fout, cquery->data,
|
|
+ NULL, binfo->rolname,
|
|
+ binfo->dobj.catId, 0, binfo->dobj.dumpId);
|
|
+
|
|
+ /* Dump security label if any */
|
|
+ dumpSecLabel(fout, cquery->data,
|
|
+ NULL, binfo->rolname,
|
|
+ binfo->dobj.catId, 0, binfo->dobj.dumpId);
|
|
+
|
|
+ /* Dump ACL if any */
|
|
+ if (binfo->blobacl)
|
|
+ dumpACL(fout, binfo->dobj.catId, binfo->dobj.dumpId, "LARGE OBJECT",
|
|
+ binfo->dobj.name, NULL, cquery->data,
|
|
+ NULL, binfo->rolname, binfo->blobacl);
|
|
+
|
|
+ destroyPQExpBuffer(cquery);
|
|
+ destroyPQExpBuffer(dquery);
|
|
+}
|
|
+
|
|
+/*
|
|
+ * dumpBlobs:
|
|
+ * dump the data contents of all large objects
|
|
+ */
|
|
+static int
|
|
+dumpBlobs(Archive *fout, void *arg)
|
|
+{
|
|
+ const char *blobQry;
|
|
+ const char *blobFetchQry;
|
|
+ PGconn *conn = GetConnection(fout);
|
|
+ PGresult *res;
|
|
+ char buf[LOBBUFSIZE];
|
|
+ int ntups;
|
|
+ int i;
|
|
+ int cnt;
|
|
+
|
|
+ if (g_verbose)
|
|
+ write_msg(NULL, "saving large objects\n");
|
|
+
|
|
+ /* Make sure we are in proper schema */
|
|
+ selectSourceSchema(fout, "pg_catalog");
|
|
+
|
|
+ /*
|
|
+ * Currently, we re-fetch all BLOB OIDs using a cursor. Consider scanning
|
|
+ * the already-in-memory dumpable objects instead...
|
|
+ */
|
|
+ if (fout->remoteVersion >= 90000)
|
|
+ blobQry = "DECLARE bloboid CURSOR FOR SELECT oid FROM pg_largeobject_metadata";
|
|
+ else if (fout->remoteVersion >= 70100)
|
|
+ blobQry = "DECLARE bloboid CURSOR FOR SELECT DISTINCT loid FROM pg_largeobject";
|
|
+ else
|
|
+ blobQry = "DECLARE bloboid CURSOR FOR SELECT oid FROM pg_class WHERE relkind = 'l'";
|
|
+
|
|
+ ExecuteSqlStatement(fout, blobQry);
|
|
+
|
|
+ /* Command to fetch from cursor */
|
|
+ blobFetchQry = "FETCH 1000 IN bloboid";
|
|
+
|
|
+ do
|
|
+ {
|
|
+ /* Do a fetch */
|
|
+ res = ExecuteSqlQuery(fout, blobFetchQry, PGRES_TUPLES_OK);
|
|
+
|
|
+ /* Process the tuples, if any */
|
|
+ ntups = PQntuples(res);
|
|
+ for (i = 0; i < ntups; i++)
|
|
+ {
|
|
+ Oid blobOid;
|
|
+ int loFd;
|
|
+
|
|
+ blobOid = atooid(PQgetvalue(res, i, 0));
|
|
+ /* Open the BLOB */
|
|
+ loFd = lo_open(conn, blobOid, INV_READ);
|
|
+ if (loFd == -1)
|
|
+ exit_horribly(NULL, "could not open large object %u: %s",
|
|
+ blobOid, PQerrorMessage(conn));
|
|
+
|
|
+ StartBlob(fout, blobOid);
|
|
+
|
|
+ /* Now read it in chunks, sending data to archive */
|
|
+ do
|
|
+ {
|
|
+ cnt = lo_read(conn, loFd, buf, LOBBUFSIZE);
|
|
+ if (cnt < 0)
|
|
+ exit_horribly(NULL, "error reading large object %u: %s",
|
|
+ blobOid, PQerrorMessage(conn));
|
|
+
|
|
+ WriteData(fout, buf, cnt);
|
|
+ } while (cnt > 0);
|
|
+
|
|
+ lo_close(conn, loFd);
|
|
+
|
|
+ EndBlob(fout, blobOid);
|
|
+ }
|
|
+
|
|
+ PQclear(res);
|
|
+ } while (ntups > 0);
|
|
+
|
|
+ return 1;
|
|
+}
|
|
+
|
|
+static void
|
|
+binary_upgrade_set_type_oids_by_type_oid(Archive *fout,
|
|
+ PQExpBuffer upgrade_buffer,
|
|
+ Oid pg_type_oid)
|
|
+{
|
|
+ PQExpBuffer upgrade_query = createPQExpBuffer();
|
|
+ PGresult *upgrade_res;
|
|
+ Oid pg_type_array_oid;
|
|
+
|
|
+ appendPQExpBufferStr(upgrade_buffer, "\n-- For binary upgrade, must preserve pg_type oid\n");
|
|
+ appendPQExpBuffer(upgrade_buffer,
|
|
+ "SELECT binary_upgrade.set_next_pg_type_oid('%u'::pg_catalog.oid);\n\n",
|
|
+ pg_type_oid);
|
|
+
|
|
+ /* we only support old >= 8.3 for binary upgrades */
|
|
+ appendPQExpBuffer(upgrade_query,
|
|
+ "SELECT typarray "
|
|
+ "FROM pg_catalog.pg_type "
|
|
+ "WHERE pg_type.oid = '%u'::pg_catalog.oid;",
|
|
+ pg_type_oid);
|
|
+
|
|
+ upgrade_res = ExecuteSqlQueryForSingleRow(fout, upgrade_query->data);
|
|
+
|
|
+ pg_type_array_oid = atooid(PQgetvalue(upgrade_res, 0, PQfnumber(upgrade_res, "typarray")));
|
|
+
|
|
+ if (OidIsValid(pg_type_array_oid))
|
|
+ {
|
|
+ appendPQExpBufferStr(upgrade_buffer,
|
|
+ "\n-- For binary upgrade, must preserve pg_type array oid\n");
|
|
+ appendPQExpBuffer(upgrade_buffer,
|
|
+ "SELECT binary_upgrade.set_next_array_pg_type_oid('%u'::pg_catalog.oid);\n\n",
|
|
+ pg_type_array_oid);
|
|
+ }
|
|
+
|
|
+ PQclear(upgrade_res);
|
|
+ destroyPQExpBuffer(upgrade_query);
|
|
+}
|
|
+
|
|
+static bool
|
|
+binary_upgrade_set_type_oids_by_rel_oid(Archive *fout,
|
|
+ PQExpBuffer upgrade_buffer,
|
|
+ Oid pg_rel_oid)
|
|
+{
|
|
+ PQExpBuffer upgrade_query = createPQExpBuffer();
|
|
+ PGresult *upgrade_res;
|
|
+ Oid pg_type_oid;
|
|
+ bool toast_set = false;
|
|
+
|
|
+ /* we only support old >= 8.3 for binary upgrades */
|
|
+ appendPQExpBuffer(upgrade_query,
|
|
+ "SELECT c.reltype AS crel, t.reltype AS trel "
|
|
+ "FROM pg_catalog.pg_class c "
|
|
+ "LEFT JOIN pg_catalog.pg_class t ON "
|
|
+ " (c.reltoastrelid = t.oid) "
|
|
+ "WHERE c.oid = '%u'::pg_catalog.oid;",
|
|
+ pg_rel_oid);
|
|
+
|
|
+ upgrade_res = ExecuteSqlQueryForSingleRow(fout, upgrade_query->data);
|
|
+
|
|
+ pg_type_oid = atooid(PQgetvalue(upgrade_res, 0, PQfnumber(upgrade_res, "crel")));
|
|
+
|
|
+ binary_upgrade_set_type_oids_by_type_oid(fout, upgrade_buffer,
|
|
+ pg_type_oid);
|
|
+
|
|
+ if (!PQgetisnull(upgrade_res, 0, PQfnumber(upgrade_res, "trel")))
|
|
+ {
|
|
+ /* Toast tables do not have pg_type array rows */
|
|
+ Oid pg_type_toast_oid = atooid(PQgetvalue(upgrade_res, 0,
|
|
+ PQfnumber(upgrade_res, "trel")));
|
|
+
|
|
+ appendPQExpBufferStr(upgrade_buffer, "\n-- For binary upgrade, must preserve pg_type toast oid\n");
|
|
+ appendPQExpBuffer(upgrade_buffer,
|
|
+ "SELECT binary_upgrade.set_next_toast_pg_type_oid('%u'::pg_catalog.oid);\n\n",
|
|
+ pg_type_toast_oid);
|
|
+
|
|
+ toast_set = true;
|
|
+ }
|
|
+
|
|
+ PQclear(upgrade_res);
|
|
+ destroyPQExpBuffer(upgrade_query);
|
|
+
|
|
+ return toast_set;
|
|
+}
|
|
+
|
|
+static void
|
|
+binary_upgrade_set_pg_class_oids(Archive *fout,
|
|
+ PQExpBuffer upgrade_buffer, Oid pg_class_oid,
|
|
+ bool is_index)
|
|
+{
|
|
+ PQExpBuffer upgrade_query = createPQExpBuffer();
|
|
+ PGresult *upgrade_res;
|
|
+ Oid pg_class_reltoastrelid;
|
|
+ Oid pg_index_indexrelid;
|
|
+
|
|
+ appendPQExpBuffer(upgrade_query,
|
|
+ "SELECT c.reltoastrelid, i.indexrelid "
|
|
+ "FROM pg_catalog.pg_class c LEFT JOIN "
|
|
+ "pg_catalog.pg_index i ON (c.reltoastrelid = i.indrelid AND i.indisvalid) "
|
|
+ "WHERE c.oid = '%u'::pg_catalog.oid;",
|
|
+ pg_class_oid);
|
|
+
|
|
+ upgrade_res = ExecuteSqlQueryForSingleRow(fout, upgrade_query->data);
|
|
+
|
|
+ pg_class_reltoastrelid = atooid(PQgetvalue(upgrade_res, 0, PQfnumber(upgrade_res, "reltoastrelid")));
|
|
+ pg_index_indexrelid = atooid(PQgetvalue(upgrade_res, 0, PQfnumber(upgrade_res, "indexrelid")));
|
|
+
|
|
+ appendPQExpBufferStr(upgrade_buffer,
|
|
+ "\n-- For binary upgrade, must preserve pg_class oids\n");
|
|
+
|
|
+ if (!is_index)
|
|
+ {
|
|
+ appendPQExpBuffer(upgrade_buffer,
|
|
+ "SELECT binary_upgrade.set_next_heap_pg_class_oid('%u'::pg_catalog.oid);\n",
|
|
+ pg_class_oid);
|
|
+ /* only tables have toast tables, not indexes */
|
|
+ if (OidIsValid(pg_class_reltoastrelid))
|
|
+ {
|
|
+ /*
|
|
+ * One complexity is that the table definition might not require
|
|
+ * the creation of a TOAST table, and the TOAST table might have
|
|
+ * been created long after table creation, when the table was
|
|
+ * loaded with wide data. By setting the TOAST oid we force
|
|
+ * creation of the TOAST heap and TOAST index by the backend so we
|
|
+ * can cleanly copy the files during binary upgrade.
|
|
+ */
|
|
+
|
|
+ appendPQExpBuffer(upgrade_buffer,
|
|
+ "SELECT binary_upgrade.set_next_toast_pg_class_oid('%u'::pg_catalog.oid);\n",
|
|
+ pg_class_reltoastrelid);
|
|
+
|
|
+ /* every toast table has an index */
|
|
+ appendPQExpBuffer(upgrade_buffer,
|
|
+ "SELECT binary_upgrade.set_next_index_pg_class_oid('%u'::pg_catalog.oid);\n",
|
|
+ pg_index_indexrelid);
|
|
+ }
|
|
+ }
|
|
+ else
|
|
+ appendPQExpBuffer(upgrade_buffer,
|
|
+ "SELECT binary_upgrade.set_next_index_pg_class_oid('%u'::pg_catalog.oid);\n",
|
|
+ pg_class_oid);
|
|
+
|
|
+ appendPQExpBufferChar(upgrade_buffer, '\n');
|
|
+
|
|
+ PQclear(upgrade_res);
|
|
+ destroyPQExpBuffer(upgrade_query);
|
|
+}
|
|
+
|
|
+/*
|
|
+ * If the DumpableObject is a member of an extension, add a suitable
|
|
+ * ALTER EXTENSION ADD command to the creation commands in upgrade_buffer.
|
|
+ */
|
|
+static void
|
|
+binary_upgrade_extension_member(PQExpBuffer upgrade_buffer,
|
|
+ DumpableObject *dobj,
|
|
+ const char *objlabel)
|
|
+{
|
|
+ DumpableObject *extobj = NULL;
|
|
+ int i;
|
|
+
|
|
+ if (!dobj->ext_member)
|
|
+ return;
|
|
+
|
|
+ /*
|
|
+ * Find the parent extension. We could avoid this search if we wanted to
|
|
+ * add a link field to DumpableObject, but the space costs of that would
|
|
+ * be considerable. We assume that member objects could only have a
|
|
+ * direct dependency on their own extension, not any others.
|
|
+ */
|
|
+ for (i = 0; i < dobj->nDeps; i++)
|
|
+ {
|
|
+ extobj = findObjectByDumpId(dobj->dependencies[i]);
|
|
+ if (extobj && extobj->objType == DO_EXTENSION)
|
|
+ break;
|
|
+ extobj = NULL;
|
|
+ }
|
|
+ if (extobj == NULL)
|
|
+ exit_horribly(NULL, "could not find parent extension for %s\n", objlabel);
|
|
+
|
|
+ appendPQExpBufferStr(upgrade_buffer,
|
|
+ "\n-- For binary upgrade, handle extension membership the hard way\n");
|
|
+ appendPQExpBuffer(upgrade_buffer, "ALTER EXTENSION %s ADD %s;\n",
|
|
+ fmtId(extobj->name),
|
|
+ objlabel);
|
|
+}
|
|
+
|
|
+/*
|
|
+ * getNamespaces:
|
|
+ * read all namespaces in the system catalogs and return them in the
|
|
+ * NamespaceInfo* structure
|
|
+ *
|
|
+ * numNamespaces is set to the number of namespaces read in
|
|
+ */
|
|
+NamespaceInfo *
|
|
+getNamespaces(Archive *fout, int *numNamespaces)
|
|
+{
|
|
+ PGresult *res;
|
|
+ int ntups;
|
|
+ int i;
|
|
+ PQExpBuffer query;
|
|
+ NamespaceInfo *nsinfo;
|
|
+ int i_tableoid;
|
|
+ int i_oid;
|
|
+ int i_nspname;
|
|
+ int i_rolname;
|
|
+ int i_nspacl;
|
|
+
|
|
+ /*
|
|
+ * Before 7.3, there are no real namespaces; create two dummy entries, one
|
|
+ * for user stuff and one for system stuff.
|
|
+ */
|
|
+ if (fout->remoteVersion < 70300)
|
|
+ {
|
|
+ nsinfo = (NamespaceInfo *) pg_malloc(2 * sizeof(NamespaceInfo));
|
|
+
|
|
+ nsinfo[0].dobj.objType = DO_NAMESPACE;
|
|
+ nsinfo[0].dobj.catId.tableoid = 0;
|
|
+ nsinfo[0].dobj.catId.oid = 0;
|
|
+ AssignDumpId(&nsinfo[0].dobj);
|
|
+ nsinfo[0].dobj.name = pg_strdup("public");
|
|
+ nsinfo[0].rolname = pg_strdup("");
|
|
+ nsinfo[0].nspacl = pg_strdup("");
|
|
+
|
|
+ selectDumpableNamespace(&nsinfo[0]);
|
|
+
|
|
+ nsinfo[1].dobj.objType = DO_NAMESPACE;
|
|
+ nsinfo[1].dobj.catId.tableoid = 0;
|
|
+ nsinfo[1].dobj.catId.oid = 1;
|
|
+ AssignDumpId(&nsinfo[1].dobj);
|
|
+ nsinfo[1].dobj.name = pg_strdup("pg_catalog");
|
|
+ nsinfo[1].rolname = pg_strdup("");
|
|
+ nsinfo[1].nspacl = pg_strdup("");
|
|
+
|
|
+ selectDumpableNamespace(&nsinfo[1]);
|
|
+
|
|
+ *numNamespaces = 2;
|
|
+
|
|
+ return nsinfo;
|
|
+ }
|
|
+
|
|
+ query = createPQExpBuffer();
|
|
+
|
|
+ /* Make sure we are in proper schema */
|
|
+ selectSourceSchema(fout, "pg_catalog");
|
|
+
|
|
+ /*
|
|
+ * we fetch all namespaces including system ones, so that every object we
|
|
+ * read in can be linked to a containing namespace.
|
|
+ */
|
|
+ appendPQExpBuffer(query, "SELECT tableoid, oid, nspname, "
|
|
+ "(%s nspowner) AS rolname, "
|
|
+ "nspacl FROM pg_namespace",
|
|
+ username_subquery);
|
|
+
|
|
+ res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
|
|
+
|
|
+ ntups = PQntuples(res);
|
|
+
|
|
+ nsinfo = (NamespaceInfo *) pg_malloc(ntups * sizeof(NamespaceInfo));
|
|
+
|
|
+ i_tableoid = PQfnumber(res, "tableoid");
|
|
+ i_oid = PQfnumber(res, "oid");
|
|
+ i_nspname = PQfnumber(res, "nspname");
|
|
+ i_rolname = PQfnumber(res, "rolname");
|
|
+ i_nspacl = PQfnumber(res, "nspacl");
|
|
+
|
|
+ for (i = 0; i < ntups; i++)
|
|
+ {
|
|
+ nsinfo[i].dobj.objType = DO_NAMESPACE;
|
|
+ nsinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
|
|
+ nsinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
|
|
+ AssignDumpId(&nsinfo[i].dobj);
|
|
+ nsinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_nspname));
|
|
+ nsinfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_rolname));
|
|
+ nsinfo[i].nspacl = pg_strdup(PQgetvalue(res, i, i_nspacl));
|
|
+
|
|
+ /* Decide whether to dump this namespace */
|
|
+ selectDumpableNamespace(&nsinfo[i]);
|
|
+
|
|
+ if (strlen(nsinfo[i].rolname) == 0)
|
|
+ write_msg(NULL, "WARNING: owner of schema \"%s\" appears to be invalid\n",
|
|
+ nsinfo[i].dobj.name);
|
|
+ }
|
|
+
|
|
+ PQclear(res);
|
|
+ destroyPQExpBuffer(query);
|
|
+
|
|
+ *numNamespaces = ntups;
|
|
+
|
|
+ return nsinfo;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * findNamespace:
|
|
+ * given a namespace OID and an object OID, look up the info read by
|
|
+ * getNamespaces
|
|
+ *
|
|
+ * NB: for pre-7.3 source database, we use object OID to guess whether it's
|
|
+ * a system object or not. In 7.3 and later there is no guessing, and we
|
|
+ * don't use objoid at all.
|
|
+ */
|
|
+static NamespaceInfo *
|
|
+findNamespace(Archive *fout, Oid nsoid, Oid objoid)
|
|
+{
|
|
+ NamespaceInfo *nsinfo;
|
|
+
|
|
+ if (fout->remoteVersion >= 70300)
|
|
+ {
|
|
+ nsinfo = findNamespaceByOid(nsoid);
|
|
+ }
|
|
+ else
|
|
+ {
|
|
+ /* This code depends on the dummy objects set up by getNamespaces. */
|
|
+ Oid i;
|
|
+
|
|
+ if (objoid > g_last_builtin_oid)
|
|
+ i = 0; /* user object */
|
|
+ else
|
|
+ i = 1; /* system object */
|
|
+ nsinfo = findNamespaceByOid(i);
|
|
+ }
|
|
+
|
|
+ if (nsinfo == NULL)
|
|
+ exit_horribly(NULL, "schema with OID %u does not exist\n", nsoid);
|
|
+
|
|
+ return nsinfo;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * getExtensions:
|
|
+ * read all extensions in the system catalogs and return them in the
|
|
+ * ExtensionInfo* structure
|
|
+ *
|
|
+ * numExtensions is set to the number of extensions read in
|
|
+ */
|
|
+ExtensionInfo *
|
|
+getExtensions(Archive *fout, int *numExtensions)
|
|
+{
|
|
+ PGresult *res;
|
|
+ int ntups;
|
|
+ int i;
|
|
+ PQExpBuffer query;
|
|
+ ExtensionInfo *extinfo;
|
|
+ int i_tableoid;
|
|
+ int i_oid;
|
|
+ int i_extname;
|
|
+ int i_nspname;
|
|
+ int i_extrelocatable;
|
|
+ int i_extversion;
|
|
+ int i_extconfig;
|
|
+ int i_extcondition;
|
|
+
|
|
+ /*
|
|
+ * Before 9.1, there are no extensions.
|
|
+ */
|
|
+ if (fout->remoteVersion < 90100)
|
|
+ {
|
|
+ *numExtensions = 0;
|
|
+ return NULL;
|
|
+ }
|
|
+
|
|
+ query = createPQExpBuffer();
|
|
+
|
|
+ /* Make sure we are in proper schema */
|
|
+ selectSourceSchema(fout, "pg_catalog");
|
|
+
|
|
+ appendPQExpBufferStr(query, "SELECT x.tableoid, x.oid, "
|
|
+ "x.extname, n.nspname, x.extrelocatable, x.extversion, x.extconfig, x.extcondition "
|
|
+ "FROM pg_extension x "
|
|
+ "JOIN pg_namespace n ON n.oid = x.extnamespace");
|
|
+
|
|
+ res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
|
|
+
|
|
+ ntups = PQntuples(res);
|
|
+
|
|
+ extinfo = (ExtensionInfo *) pg_malloc(ntups * sizeof(ExtensionInfo));
|
|
+
|
|
+ i_tableoid = PQfnumber(res, "tableoid");
|
|
+ i_oid = PQfnumber(res, "oid");
|
|
+ i_extname = PQfnumber(res, "extname");
|
|
+ i_nspname = PQfnumber(res, "nspname");
|
|
+ i_extrelocatable = PQfnumber(res, "extrelocatable");
|
|
+ i_extversion = PQfnumber(res, "extversion");
|
|
+ i_extconfig = PQfnumber(res, "extconfig");
|
|
+ i_extcondition = PQfnumber(res, "extcondition");
|
|
+
|
|
+ for (i = 0; i < ntups; i++)
|
|
+ {
|
|
+ extinfo[i].dobj.objType = DO_EXTENSION;
|
|
+ extinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
|
|
+ extinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
|
|
+ AssignDumpId(&extinfo[i].dobj);
|
|
+ extinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_extname));
|
|
+ extinfo[i].namespace = pg_strdup(PQgetvalue(res, i, i_nspname));
|
|
+ extinfo[i].relocatable = *(PQgetvalue(res, i, i_extrelocatable)) == 't';
|
|
+ extinfo[i].extversion = pg_strdup(PQgetvalue(res, i, i_extversion));
|
|
+ extinfo[i].extconfig = pg_strdup(PQgetvalue(res, i, i_extconfig));
|
|
+ extinfo[i].extcondition = pg_strdup(PQgetvalue(res, i, i_extcondition));
|
|
+
|
|
+ /* Decide whether we want to dump it */
|
|
+ selectDumpableExtension(&(extinfo[i]));
|
|
+ }
|
|
+
|
|
+ PQclear(res);
|
|
+ destroyPQExpBuffer(query);
|
|
+
|
|
+ *numExtensions = ntups;
|
|
+
|
|
+ return extinfo;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * getTypes:
|
|
+ * read all types in the system catalogs and return them in the
|
|
+ * TypeInfo* structure
|
|
+ *
|
|
+ * numTypes is set to the number of types read in
|
|
+ *
|
|
+ * NB: this must run after getFuncs() because we assume we can do
|
|
+ * findFuncByOid().
|
|
+ */
|
|
+TypeInfo *
|
|
+getTypes(Archive *fout, int *numTypes)
|
|
+{
|
|
+ PGresult *res;
|
|
+ int ntups;
|
|
+ int i;
|
|
+ PQExpBuffer query = createPQExpBuffer();
|
|
+ TypeInfo *tyinfo;
|
|
+ ShellTypeInfo *stinfo;
|
|
+ int i_tableoid;
|
|
+ int i_oid;
|
|
+ int i_typname;
|
|
+ int i_typnamespace;
|
|
+ int i_typacl;
|
|
+ int i_rolname;
|
|
+ int i_typinput;
|
|
+ int i_typoutput;
|
|
+ int i_typelem;
|
|
+ int i_typrelid;
|
|
+ int i_typrelkind;
|
|
+ int i_typtype;
|
|
+ int i_typisdefined;
|
|
+ int i_isarray;
|
|
+
|
|
+ /*
|
|
+ * we include even the built-in types because those may be used as array
|
|
+ * elements by user-defined types
|
|
+ *
|
|
+ * we filter out the built-in types when we dump out the types
|
|
+ *
|
|
+ * same approach for undefined (shell) types and array types
|
|
+ *
|
|
+ * Note: as of 8.3 we can reliably detect whether a type is an
|
|
+ * auto-generated array type by checking the element type's typarray.
|
|
+ * (Before that the test is capable of generating false positives.) We
|
|
+ * still check for name beginning with '_', though, so as to avoid the
|
|
+ * cost of the subselect probe for all standard types. This would have to
|
|
+ * be revisited if the backend ever allows renaming of array types.
|
|
+ */
|
|
+
|
|
+ /* Make sure we are in proper schema */
|
|
+ selectSourceSchema(fout, "pg_catalog");
|
|
+
|
|
+ if (fout->remoteVersion >= 90200)
|
|
+ {
|
|
+ appendPQExpBuffer(query, "SELECT tableoid, oid, typname, "
|
|
+ "typnamespace, typacl, "
|
|
+ "(%s typowner) AS rolname, "
|
|
+ "typinput::oid AS typinput, "
|
|
+ "typoutput::oid AS typoutput, typelem, typrelid, "
|
|
+ "CASE WHEN typrelid = 0 THEN ' '::\"char\" "
|
|
+ "ELSE (SELECT relkind FROM pg_class WHERE oid = typrelid) END AS typrelkind, "
|
|
+ "typtype, typisdefined, "
|
|
+ "typname[0] = '_' AND typelem != 0 AND "
|
|
+ "(SELECT typarray FROM pg_type te WHERE oid = pg_type.typelem) = oid AS isarray "
|
|
+ "FROM pg_type",
|
|
+ username_subquery);
|
|
+ }
|
|
+ else if (fout->remoteVersion >= 80300)
|
|
+ {
|
|
+ appendPQExpBuffer(query, "SELECT tableoid, oid, typname, "
|
|
+ "typnamespace, NULL AS typacl, "
|
|
+ "(%s typowner) AS rolname, "
|
|
+ "typinput::oid AS typinput, "
|
|
+ "typoutput::oid AS typoutput, typelem, typrelid, "
|
|
+ "CASE WHEN typrelid = 0 THEN ' '::\"char\" "
|
|
+ "ELSE (SELECT relkind FROM pg_class WHERE oid = typrelid) END AS typrelkind, "
|
|
+ "typtype, typisdefined, "
|
|
+ "typname[0] = '_' AND typelem != 0 AND "
|
|
+ "(SELECT typarray FROM pg_type te WHERE oid = pg_type.typelem) = oid AS isarray "
|
|
+ "FROM pg_type",
|
|
+ username_subquery);
|
|
+ }
|
|
+ else if (fout->remoteVersion >= 70300)
|
|
+ {
|
|
+ appendPQExpBuffer(query, "SELECT tableoid, oid, typname, "
|
|
+ "typnamespace, NULL AS typacl, "
|
|
+ "(%s typowner) AS rolname, "
|
|
+ "typinput::oid AS typinput, "
|
|
+ "typoutput::oid AS typoutput, typelem, typrelid, "
|
|
+ "CASE WHEN typrelid = 0 THEN ' '::\"char\" "
|
|
+ "ELSE (SELECT relkind FROM pg_class WHERE oid = typrelid) END AS typrelkind, "
|
|
+ "typtype, typisdefined, "
|
|
+ "typname[0] = '_' AND typelem != 0 AS isarray "
|
|
+ "FROM pg_type",
|
|
+ username_subquery);
|
|
+ }
|
|
+ else if (fout->remoteVersion >= 70100)
|
|
+ {
|
|
+ appendPQExpBuffer(query, "SELECT tableoid, oid, typname, "
|
|
+ "0::oid AS typnamespace, NULL AS typacl, "
|
|
+ "(%s typowner) AS rolname, "
|
|
+ "typinput::oid AS typinput, "
|
|
+ "typoutput::oid AS typoutput, typelem, typrelid, "
|
|
+ "CASE WHEN typrelid = 0 THEN ' '::\"char\" "
|
|
+ "ELSE (SELECT relkind FROM pg_class WHERE oid = typrelid) END AS typrelkind, "
|
|
+ "typtype, typisdefined, "
|
|
+ "typname[0] = '_' AND typelem != 0 AS isarray "
|
|
+ "FROM pg_type",
|
|
+ username_subquery);
|
|
+ }
|
|
+ else
|
|
+ {
|
|
+ appendPQExpBuffer(query, "SELECT "
|
|
+ "(SELECT oid FROM pg_class WHERE relname = 'pg_type') AS tableoid, "
|
|
+ "oid, typname, "
|
|
+ "0::oid AS typnamespace, NULL AS typacl, "
|
|
+ "(%s typowner) AS rolname, "
|
|
+ "typinput::oid AS typinput, "
|
|
+ "typoutput::oid AS typoutput, typelem, typrelid, "
|
|
+ "CASE WHEN typrelid = 0 THEN ' '::\"char\" "
|
|
+ "ELSE (SELECT relkind FROM pg_class WHERE oid = typrelid) END AS typrelkind, "
|
|
+ "typtype, typisdefined, "
|
|
+ "typname[0] = '_' AND typelem != 0 AS isarray "
|
|
+ "FROM pg_type",
|
|
+ username_subquery);
|
|
+ }
|
|
+
|
|
+ res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
|
|
+
|
|
+ ntups = PQntuples(res);
|
|
+
|
|
+ tyinfo = (TypeInfo *) pg_malloc(ntups * sizeof(TypeInfo));
|
|
+
|
|
+ i_tableoid = PQfnumber(res, "tableoid");
|
|
+ i_oid = PQfnumber(res, "oid");
|
|
+ i_typname = PQfnumber(res, "typname");
|
|
+ i_typnamespace = PQfnumber(res, "typnamespace");
|
|
+ i_typacl = PQfnumber(res, "typacl");
|
|
+ i_rolname = PQfnumber(res, "rolname");
|
|
+ i_typinput = PQfnumber(res, "typinput");
|
|
+ i_typoutput = PQfnumber(res, "typoutput");
|
|
+ i_typelem = PQfnumber(res, "typelem");
|
|
+ i_typrelid = PQfnumber(res, "typrelid");
|
|
+ i_typrelkind = PQfnumber(res, "typrelkind");
|
|
+ i_typtype = PQfnumber(res, "typtype");
|
|
+ i_typisdefined = PQfnumber(res, "typisdefined");
|
|
+ i_isarray = PQfnumber(res, "isarray");
|
|
+
|
|
+ for (i = 0; i < ntups; i++)
|
|
+ {
|
|
+ tyinfo[i].dobj.objType = DO_TYPE;
|
|
+ tyinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
|
|
+ tyinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
|
|
+ AssignDumpId(&tyinfo[i].dobj);
|
|
+ tyinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_typname));
|
|
+ tyinfo[i].dobj.namespace =
|
|
+ findNamespace(fout,
|
|
+ atooid(PQgetvalue(res, i, i_typnamespace)),
|
|
+ tyinfo[i].dobj.catId.oid);
|
|
+ tyinfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_rolname));
|
|
+ tyinfo[i].typacl = pg_strdup(PQgetvalue(res, i, i_typacl));
|
|
+ tyinfo[i].typelem = atooid(PQgetvalue(res, i, i_typelem));
|
|
+ tyinfo[i].typrelid = atooid(PQgetvalue(res, i, i_typrelid));
|
|
+ tyinfo[i].typrelkind = *PQgetvalue(res, i, i_typrelkind);
|
|
+ tyinfo[i].typtype = *PQgetvalue(res, i, i_typtype);
|
|
+ tyinfo[i].shellType = NULL;
|
|
+
|
|
+ if (strcmp(PQgetvalue(res, i, i_typisdefined), "t") == 0)
|
|
+ tyinfo[i].isDefined = true;
|
|
+ else
|
|
+ tyinfo[i].isDefined = false;
|
|
+
|
|
+ if (strcmp(PQgetvalue(res, i, i_isarray), "t") == 0)
|
|
+ tyinfo[i].isArray = true;
|
|
+ else
|
|
+ tyinfo[i].isArray = false;
|
|
+
|
|
+ /* Decide whether we want to dump it */
|
|
+ selectDumpableType(&tyinfo[i]);
|
|
+
|
|
+ /*
|
|
+ * If it's a domain, fetch info about its constraints, if any
|
|
+ */
|
|
+ tyinfo[i].nDomChecks = 0;
|
|
+ tyinfo[i].domChecks = NULL;
|
|
+ if (tyinfo[i].dobj.dump && tyinfo[i].typtype == TYPTYPE_DOMAIN)
|
|
+ getDomainConstraints(fout, &(tyinfo[i]));
|
|
+
|
|
+ /*
|
|
+ * If it's a base type, make a DumpableObject representing a shell
|
|
+ * definition of the type. We will need to dump that ahead of the I/O
|
|
+ * functions for the type. Similarly, range types need a shell
|
|
+ * definition in case they have a canonicalize function.
|
|
+ *
|
|
+ * Note: the shell type doesn't have a catId. You might think it
|
|
+ * should copy the base type's catId, but then it might capture the
|
|
+ * pg_depend entries for the type, which we don't want.
|
|
+ */
|
|
+ if (tyinfo[i].dobj.dump && (tyinfo[i].typtype == TYPTYPE_BASE ||
|
|
+ tyinfo[i].typtype == TYPTYPE_RANGE))
|
|
+ {
|
|
+ stinfo = (ShellTypeInfo *) pg_malloc(sizeof(ShellTypeInfo));
|
|
+ stinfo->dobj.objType = DO_SHELL_TYPE;
|
|
+ stinfo->dobj.catId = nilCatalogId;
|
|
+ AssignDumpId(&stinfo->dobj);
|
|
+ stinfo->dobj.name = pg_strdup(tyinfo[i].dobj.name);
|
|
+ stinfo->dobj.namespace = tyinfo[i].dobj.namespace;
|
|
+ stinfo->baseType = &(tyinfo[i]);
|
|
+ tyinfo[i].shellType = stinfo;
|
|
+
|
|
+ /*
|
|
+ * Initially mark the shell type as not to be dumped. We'll only
|
|
+ * dump it if the I/O or canonicalize functions need to be dumped;
|
|
+ * this is taken care of while sorting dependencies.
|
|
+ */
|
|
+ stinfo->dobj.dump = false;
|
|
+
|
|
+ /*
|
|
+ * However, if dumping from pre-7.3, there will be no dependency
|
|
+ * info so we have to fake it here. We only need to worry about
|
|
+ * typinput and typoutput since the other functions only exist
|
|
+ * post-7.3.
|
|
+ */
|
|
+ if (fout->remoteVersion < 70300)
|
|
+ {
|
|
+ Oid typinput;
|
|
+ Oid typoutput;
|
|
+ FuncInfo *funcInfo;
|
|
+
|
|
+ typinput = atooid(PQgetvalue(res, i, i_typinput));
|
|
+ typoutput = atooid(PQgetvalue(res, i, i_typoutput));
|
|
+
|
|
+ funcInfo = findFuncByOid(typinput);
|
|
+ if (funcInfo && funcInfo->dobj.dump)
|
|
+ {
|
|
+ /* base type depends on function */
|
|
+ addObjectDependency(&tyinfo[i].dobj,
|
|
+ funcInfo->dobj.dumpId);
|
|
+ /* function depends on shell type */
|
|
+ addObjectDependency(&funcInfo->dobj,
|
|
+ stinfo->dobj.dumpId);
|
|
+ /* mark shell type as to be dumped */
|
|
+ stinfo->dobj.dump = true;
|
|
+ }
|
|
+
|
|
+ funcInfo = findFuncByOid(typoutput);
|
|
+ if (funcInfo && funcInfo->dobj.dump)
|
|
+ {
|
|
+ /* base type depends on function */
|
|
+ addObjectDependency(&tyinfo[i].dobj,
|
|
+ funcInfo->dobj.dumpId);
|
|
+ /* function depends on shell type */
|
|
+ addObjectDependency(&funcInfo->dobj,
|
|
+ stinfo->dobj.dumpId);
|
|
+ /* mark shell type as to be dumped */
|
|
+ stinfo->dobj.dump = true;
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+
|
|
+ if (strlen(tyinfo[i].rolname) == 0)
|
|
+ write_msg(NULL, "WARNING: owner of data type \"%s\" appears to be invalid\n",
|
|
+ tyinfo[i].dobj.name);
|
|
+ }
|
|
+
|
|
+ *numTypes = ntups;
|
|
+
|
|
+ PQclear(res);
|
|
+
|
|
+ destroyPQExpBuffer(query);
|
|
+
|
|
+ return tyinfo;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * getOperators:
|
|
+ * read all operators in the system catalogs and return them in the
|
|
+ * OprInfo* structure
|
|
+ *
|
|
+ * numOprs is set to the number of operators read in
|
|
+ */
|
|
+OprInfo *
|
|
+getOperators(Archive *fout, int *numOprs)
|
|
+{
|
|
+ PGresult *res;
|
|
+ int ntups;
|
|
+ int i;
|
|
+ PQExpBuffer query = createPQExpBuffer();
|
|
+ OprInfo *oprinfo;
|
|
+ int i_tableoid;
|
|
+ int i_oid;
|
|
+ int i_oprname;
|
|
+ int i_oprnamespace;
|
|
+ int i_rolname;
|
|
+ int i_oprkind;
|
|
+ int i_oprcode;
|
|
+
|
|
+ /*
|
|
+ * find all operators, including builtin operators; we filter out
|
|
+ * system-defined operators at dump-out time.
|
|
+ */
|
|
+
|
|
+ /* Make sure we are in proper schema */
|
|
+ selectSourceSchema(fout, "pg_catalog");
|
|
+
|
|
+ if (fout->remoteVersion >= 70300)
|
|
+ {
|
|
+ appendPQExpBuffer(query, "SELECT tableoid, oid, oprname, "
|
|
+ "oprnamespace, "
|
|
+ "(%s oprowner) AS rolname, "
|
|
+ "oprkind, "
|
|
+ "oprcode::oid AS oprcode "
|
|
+ "FROM pg_operator",
|
|
+ username_subquery);
|
|
+ }
|
|
+ else if (fout->remoteVersion >= 70100)
|
|
+ {
|
|
+ appendPQExpBuffer(query, "SELECT tableoid, oid, oprname, "
|
|
+ "0::oid AS oprnamespace, "
|
|
+ "(%s oprowner) AS rolname, "
|
|
+ "oprkind, "
|
|
+ "oprcode::oid AS oprcode "
|
|
+ "FROM pg_operator",
|
|
+ username_subquery);
|
|
+ }
|
|
+ else
|
|
+ {
|
|
+ appendPQExpBuffer(query, "SELECT "
|
|
+ "(SELECT oid FROM pg_class WHERE relname = 'pg_operator') AS tableoid, "
|
|
+ "oid, oprname, "
|
|
+ "0::oid AS oprnamespace, "
|
|
+ "(%s oprowner) AS rolname, "
|
|
+ "oprkind, "
|
|
+ "oprcode::oid AS oprcode "
|
|
+ "FROM pg_operator",
|
|
+ username_subquery);
|
|
+ }
|
|
+
|
|
+ res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
|
|
+
|
|
+ ntups = PQntuples(res);
|
|
+ *numOprs = ntups;
|
|
+
|
|
+ oprinfo = (OprInfo *) pg_malloc(ntups * sizeof(OprInfo));
|
|
+
|
|
+ i_tableoid = PQfnumber(res, "tableoid");
|
|
+ i_oid = PQfnumber(res, "oid");
|
|
+ i_oprname = PQfnumber(res, "oprname");
|
|
+ i_oprnamespace = PQfnumber(res, "oprnamespace");
|
|
+ i_rolname = PQfnumber(res, "rolname");
|
|
+ i_oprkind = PQfnumber(res, "oprkind");
|
|
+ i_oprcode = PQfnumber(res, "oprcode");
|
|
+
|
|
+ for (i = 0; i < ntups; i++)
|
|
+ {
|
|
+ oprinfo[i].dobj.objType = DO_OPERATOR;
|
|
+ oprinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
|
|
+ oprinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
|
|
+ AssignDumpId(&oprinfo[i].dobj);
|
|
+ oprinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_oprname));
|
|
+ oprinfo[i].dobj.namespace =
|
|
+ findNamespace(fout,
|
|
+ atooid(PQgetvalue(res, i, i_oprnamespace)),
|
|
+ oprinfo[i].dobj.catId.oid);
|
|
+ oprinfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_rolname));
|
|
+ oprinfo[i].oprkind = (PQgetvalue(res, i, i_oprkind))[0];
|
|
+ oprinfo[i].oprcode = atooid(PQgetvalue(res, i, i_oprcode));
|
|
+
|
|
+ /* Decide whether we want to dump it */
|
|
+ selectDumpableObject(&(oprinfo[i].dobj));
|
|
+
|
|
+ if (strlen(oprinfo[i].rolname) == 0)
|
|
+ write_msg(NULL, "WARNING: owner of operator \"%s\" appears to be invalid\n",
|
|
+ oprinfo[i].dobj.name);
|
|
+ }
|
|
+
|
|
+ PQclear(res);
|
|
+
|
|
+ destroyPQExpBuffer(query);
|
|
+
|
|
+ return oprinfo;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * getCollations:
|
|
+ * read all collations in the system catalogs and return them in the
|
|
+ * CollInfo* structure
|
|
+ *
|
|
+ * numCollations is set to the number of collations read in
|
|
+ */
|
|
+CollInfo *
|
|
+getCollations(Archive *fout, int *numCollations)
|
|
+{
|
|
+ PGresult *res;
|
|
+ int ntups;
|
|
+ int i;
|
|
+ PQExpBuffer query;
|
|
+ CollInfo *collinfo;
|
|
+ int i_tableoid;
|
|
+ int i_oid;
|
|
+ int i_collname;
|
|
+ int i_collnamespace;
|
|
+ int i_rolname;
|
|
+
|
|
+ /* Collations didn't exist pre-9.1 */
|
|
+ if (fout->remoteVersion < 90100)
|
|
+ {
|
|
+ *numCollations = 0;
|
|
+ return NULL;
|
|
+ }
|
|
+
|
|
+ query = createPQExpBuffer();
|
|
+
|
|
+ /*
|
|
+ * find all collations, including builtin collations; we filter out
|
|
+ * system-defined collations at dump-out time.
|
|
+ */
|
|
+
|
|
+ /* Make sure we are in proper schema */
|
|
+ selectSourceSchema(fout, "pg_catalog");
|
|
+
|
|
+ appendPQExpBuffer(query, "SELECT tableoid, oid, collname, "
|
|
+ "collnamespace, "
|
|
+ "(%s collowner) AS rolname "
|
|
+ "FROM pg_collation",
|
|
+ username_subquery);
|
|
+
|
|
+ res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
|
|
+
|
|
+ ntups = PQntuples(res);
|
|
+ *numCollations = ntups;
|
|
+
|
|
+ collinfo = (CollInfo *) pg_malloc(ntups * sizeof(CollInfo));
|
|
+
|
|
+ i_tableoid = PQfnumber(res, "tableoid");
|
|
+ i_oid = PQfnumber(res, "oid");
|
|
+ i_collname = PQfnumber(res, "collname");
|
|
+ i_collnamespace = PQfnumber(res, "collnamespace");
|
|
+ i_rolname = PQfnumber(res, "rolname");
|
|
+
|
|
+ for (i = 0; i < ntups; i++)
|
|
+ {
|
|
+ collinfo[i].dobj.objType = DO_COLLATION;
|
|
+ collinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
|
|
+ collinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
|
|
+ AssignDumpId(&collinfo[i].dobj);
|
|
+ collinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_collname));
|
|
+ collinfo[i].dobj.namespace =
|
|
+ findNamespace(fout,
|
|
+ atooid(PQgetvalue(res, i, i_collnamespace)),
|
|
+ collinfo[i].dobj.catId.oid);
|
|
+ collinfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_rolname));
|
|
+
|
|
+ /* Decide whether we want to dump it */
|
|
+ selectDumpableObject(&(collinfo[i].dobj));
|
|
+ }
|
|
+
|
|
+ PQclear(res);
|
|
+
|
|
+ destroyPQExpBuffer(query);
|
|
+
|
|
+ return collinfo;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * getConversions:
|
|
+ * read all conversions in the system catalogs and return them in the
|
|
+ * ConvInfo* structure
|
|
+ *
|
|
+ * numConversions is set to the number of conversions read in
|
|
+ */
|
|
+ConvInfo *
|
|
+getConversions(Archive *fout, int *numConversions)
|
|
+{
|
|
+ PGresult *res;
|
|
+ int ntups;
|
|
+ int i;
|
|
+ PQExpBuffer query;
|
|
+ ConvInfo *convinfo;
|
|
+ int i_tableoid;
|
|
+ int i_oid;
|
|
+ int i_conname;
|
|
+ int i_connamespace;
|
|
+ int i_rolname;
|
|
+
|
|
+ /* Conversions didn't exist pre-7.3 */
|
|
+ if (fout->remoteVersion < 70300)
|
|
+ {
|
|
+ *numConversions = 0;
|
|
+ return NULL;
|
|
+ }
|
|
+
|
|
+ query = createPQExpBuffer();
|
|
+
|
|
+ /*
|
|
+ * find all conversions, including builtin conversions; we filter out
|
|
+ * system-defined conversions at dump-out time.
|
|
+ */
|
|
+
|
|
+ /* Make sure we are in proper schema */
|
|
+ selectSourceSchema(fout, "pg_catalog");
|
|
+
|
|
+ appendPQExpBuffer(query, "SELECT tableoid, oid, conname, "
|
|
+ "connamespace, "
|
|
+ "(%s conowner) AS rolname "
|
|
+ "FROM pg_conversion",
|
|
+ username_subquery);
|
|
+
|
|
+ res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
|
|
+
|
|
+ ntups = PQntuples(res);
|
|
+ *numConversions = ntups;
|
|
+
|
|
+ convinfo = (ConvInfo *) pg_malloc(ntups * sizeof(ConvInfo));
|
|
+
|
|
+ i_tableoid = PQfnumber(res, "tableoid");
|
|
+ i_oid = PQfnumber(res, "oid");
|
|
+ i_conname = PQfnumber(res, "conname");
|
|
+ i_connamespace = PQfnumber(res, "connamespace");
|
|
+ i_rolname = PQfnumber(res, "rolname");
|
|
+
|
|
+ for (i = 0; i < ntups; i++)
|
|
+ {
|
|
+ convinfo[i].dobj.objType = DO_CONVERSION;
|
|
+ convinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
|
|
+ convinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
|
|
+ AssignDumpId(&convinfo[i].dobj);
|
|
+ convinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_conname));
|
|
+ convinfo[i].dobj.namespace =
|
|
+ findNamespace(fout,
|
|
+ atooid(PQgetvalue(res, i, i_connamespace)),
|
|
+ convinfo[i].dobj.catId.oid);
|
|
+ convinfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_rolname));
|
|
+
|
|
+ /* Decide whether we want to dump it */
|
|
+ selectDumpableObject(&(convinfo[i].dobj));
|
|
+ }
|
|
+
|
|
+ PQclear(res);
|
|
+
|
|
+ destroyPQExpBuffer(query);
|
|
+
|
|
+ return convinfo;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * getOpclasses:
|
|
+ * read all opclasses in the system catalogs and return them in the
|
|
+ * OpclassInfo* structure
|
|
+ *
|
|
+ * numOpclasses is set to the number of opclasses read in
|
|
+ */
|
|
+OpclassInfo *
|
|
+getOpclasses(Archive *fout, int *numOpclasses)
|
|
+{
|
|
+ PGresult *res;
|
|
+ int ntups;
|
|
+ int i;
|
|
+ PQExpBuffer query = createPQExpBuffer();
|
|
+ OpclassInfo *opcinfo;
|
|
+ int i_tableoid;
|
|
+ int i_oid;
|
|
+ int i_opcname;
|
|
+ int i_opcnamespace;
|
|
+ int i_rolname;
|
|
+
|
|
+ /*
|
|
+ * find all opclasses, including builtin opclasses; we filter out
|
|
+ * system-defined opclasses at dump-out time.
|
|
+ */
|
|
+
|
|
+ /* Make sure we are in proper schema */
|
|
+ selectSourceSchema(fout, "pg_catalog");
|
|
+
|
|
+ if (fout->remoteVersion >= 70300)
|
|
+ {
|
|
+ appendPQExpBuffer(query, "SELECT tableoid, oid, opcname, "
|
|
+ "opcnamespace, "
|
|
+ "(%s opcowner) AS rolname "
|
|
+ "FROM pg_opclass",
|
|
+ username_subquery);
|
|
+ }
|
|
+ else if (fout->remoteVersion >= 70100)
|
|
+ {
|
|
+ appendPQExpBufferStr(query, "SELECT tableoid, oid, opcname, "
|
|
+ "0::oid AS opcnamespace, "
|
|
+ "''::name AS rolname "
|
|
+ "FROM pg_opclass");
|
|
+ }
|
|
+ else
|
|
+ {
|
|
+ appendPQExpBufferStr(query, "SELECT "
|
|
+ "(SELECT oid FROM pg_class WHERE relname = 'pg_opclass') AS tableoid, "
|
|
+ "oid, opcname, "
|
|
+ "0::oid AS opcnamespace, "
|
|
+ "''::name AS rolname "
|
|
+ "FROM pg_opclass");
|
|
+ }
|
|
+
|
|
+ res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
|
|
+
|
|
+ ntups = PQntuples(res);
|
|
+ *numOpclasses = ntups;
|
|
+
|
|
+ opcinfo = (OpclassInfo *) pg_malloc(ntups * sizeof(OpclassInfo));
|
|
+
|
|
+ i_tableoid = PQfnumber(res, "tableoid");
|
|
+ i_oid = PQfnumber(res, "oid");
|
|
+ i_opcname = PQfnumber(res, "opcname");
|
|
+ i_opcnamespace = PQfnumber(res, "opcnamespace");
|
|
+ i_rolname = PQfnumber(res, "rolname");
|
|
+
|
|
+ for (i = 0; i < ntups; i++)
|
|
+ {
|
|
+ opcinfo[i].dobj.objType = DO_OPCLASS;
|
|
+ opcinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
|
|
+ opcinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
|
|
+ AssignDumpId(&opcinfo[i].dobj);
|
|
+ opcinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_opcname));
|
|
+ opcinfo[i].dobj.namespace =
|
|
+ findNamespace(fout,
|
|
+ atooid(PQgetvalue(res, i, i_opcnamespace)),
|
|
+ opcinfo[i].dobj.catId.oid);
|
|
+ opcinfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_rolname));
|
|
+
|
|
+ /* Decide whether we want to dump it */
|
|
+ selectDumpableObject(&(opcinfo[i].dobj));
|
|
+
|
|
+ if (fout->remoteVersion >= 70300)
|
|
+ {
|
|
+ if (strlen(opcinfo[i].rolname) == 0)
|
|
+ write_msg(NULL, "WARNING: owner of operator class \"%s\" appears to be invalid\n",
|
|
+ opcinfo[i].dobj.name);
|
|
+ }
|
|
+ }
|
|
+
|
|
+ PQclear(res);
|
|
+
|
|
+ destroyPQExpBuffer(query);
|
|
+
|
|
+ return opcinfo;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * getOpfamilies:
|
|
+ * read all opfamilies in the system catalogs and return them in the
|
|
+ * OpfamilyInfo* structure
|
|
+ *
|
|
+ * numOpfamilies is set to the number of opfamilies read in
|
|
+ */
|
|
+OpfamilyInfo *
|
|
+getOpfamilies(Archive *fout, int *numOpfamilies)
|
|
+{
|
|
+ PGresult *res;
|
|
+ int ntups;
|
|
+ int i;
|
|
+ PQExpBuffer query;
|
|
+ OpfamilyInfo *opfinfo;
|
|
+ int i_tableoid;
|
|
+ int i_oid;
|
|
+ int i_opfname;
|
|
+ int i_opfnamespace;
|
|
+ int i_rolname;
|
|
+
|
|
+ /* Before 8.3, there is no separate concept of opfamilies */
|
|
+ if (fout->remoteVersion < 80300)
|
|
+ {
|
|
+ *numOpfamilies = 0;
|
|
+ return NULL;
|
|
+ }
|
|
+
|
|
+ query = createPQExpBuffer();
|
|
+
|
|
+ /*
|
|
+ * find all opfamilies, including builtin opfamilies; we filter out
|
|
+ * system-defined opfamilies at dump-out time.
|
|
+ */
|
|
+
|
|
+ /* Make sure we are in proper schema */
|
|
+ selectSourceSchema(fout, "pg_catalog");
|
|
+
|
|
+ appendPQExpBuffer(query, "SELECT tableoid, oid, opfname, "
|
|
+ "opfnamespace, "
|
|
+ "(%s opfowner) AS rolname "
|
|
+ "FROM pg_opfamily",
|
|
+ username_subquery);
|
|
+
|
|
+ res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
|
|
+
|
|
+ ntups = PQntuples(res);
|
|
+ *numOpfamilies = ntups;
|
|
+
|
|
+ opfinfo = (OpfamilyInfo *) pg_malloc(ntups * sizeof(OpfamilyInfo));
|
|
+
|
|
+ i_tableoid = PQfnumber(res, "tableoid");
|
|
+ i_oid = PQfnumber(res, "oid");
|
|
+ i_opfname = PQfnumber(res, "opfname");
|
|
+ i_opfnamespace = PQfnumber(res, "opfnamespace");
|
|
+ i_rolname = PQfnumber(res, "rolname");
|
|
+
|
|
+ for (i = 0; i < ntups; i++)
|
|
+ {
|
|
+ opfinfo[i].dobj.objType = DO_OPFAMILY;
|
|
+ opfinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
|
|
+ opfinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
|
|
+ AssignDumpId(&opfinfo[i].dobj);
|
|
+ opfinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_opfname));
|
|
+ opfinfo[i].dobj.namespace =
|
|
+ findNamespace(fout,
|
|
+ atooid(PQgetvalue(res, i, i_opfnamespace)),
|
|
+ opfinfo[i].dobj.catId.oid);
|
|
+ opfinfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_rolname));
|
|
+
|
|
+ /* Decide whether we want to dump it */
|
|
+ selectDumpableObject(&(opfinfo[i].dobj));
|
|
+
|
|
+ if (fout->remoteVersion >= 70300)
|
|
+ {
|
|
+ if (strlen(opfinfo[i].rolname) == 0)
|
|
+ write_msg(NULL, "WARNING: owner of operator family \"%s\" appears to be invalid\n",
|
|
+ opfinfo[i].dobj.name);
|
|
+ }
|
|
+ }
|
|
+
|
|
+ PQclear(res);
|
|
+
|
|
+ destroyPQExpBuffer(query);
|
|
+
|
|
+ return opfinfo;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * getAggregates:
|
|
+ * read all the user-defined aggregates in the system catalogs and
|
|
+ * return them in the AggInfo* structure
|
|
+ *
|
|
+ * numAggs is set to the number of aggregates read in
|
|
+ */
|
|
+AggInfo *
|
|
+getAggregates(Archive *fout, int *numAggs)
|
|
+{
|
|
+ PGresult *res;
|
|
+ int ntups;
|
|
+ int i;
|
|
+ PQExpBuffer query = createPQExpBuffer();
|
|
+ AggInfo *agginfo;
|
|
+ int i_tableoid;
|
|
+ int i_oid;
|
|
+ int i_aggname;
|
|
+ int i_aggnamespace;
|
|
+ int i_pronargs;
|
|
+ int i_proargtypes;
|
|
+ int i_rolname;
|
|
+ int i_aggacl;
|
|
+
|
|
+ /* Make sure we are in proper schema */
|
|
+ selectSourceSchema(fout, "pg_catalog");
|
|
+
|
|
+ /*
|
|
+ * Find all user-defined aggregates. See comment in getFuncs() for the
|
|
+ * rationale behind the filtering logic.
|
|
+ */
|
|
+
|
|
+ if (fout->remoteVersion >= 80200)
|
|
+ {
|
|
+ appendPQExpBuffer(query, "SELECT tableoid, oid, proname AS aggname, "
|
|
+ "pronamespace AS aggnamespace, "
|
|
+ "pronargs, proargtypes, "
|
|
+ "(%s proowner) AS rolname, "
|
|
+ "proacl AS aggacl "
|
|
+ "FROM pg_proc p "
|
|
+ "WHERE proisagg AND ("
|
|
+ "pronamespace != "
|
|
+ "(SELECT oid FROM pg_namespace "
|
|
+ "WHERE nspname = 'pg_catalog')",
|
|
+ username_subquery);
|
|
+ if (binary_upgrade && fout->remoteVersion >= 90100)
|
|
+ appendPQExpBufferStr(query,
|
|
+ " OR EXISTS(SELECT 1 FROM pg_depend WHERE "
|
|
+ "classid = 'pg_proc'::regclass AND "
|
|
+ "objid = p.oid AND "
|
|
+ "refclassid = 'pg_extension'::regclass AND "
|
|
+ "deptype = 'e')");
|
|
+ appendPQExpBufferChar(query, ')');
|
|
+ }
|
|
+ else if (fout->remoteVersion >= 70300)
|
|
+ {
|
|
+ appendPQExpBuffer(query, "SELECT tableoid, oid, proname AS aggname, "
|
|
+ "pronamespace AS aggnamespace, "
|
|
+ "CASE WHEN proargtypes[0] = 'pg_catalog.\"any\"'::pg_catalog.regtype THEN 0 ELSE 1 END AS pronargs, "
|
|
+ "proargtypes, "
|
|
+ "(%s proowner) AS rolname, "
|
|
+ "proacl AS aggacl "
|
|
+ "FROM pg_proc "
|
|
+ "WHERE proisagg "
|
|
+ "AND pronamespace != "
|
|
+ "(SELECT oid FROM pg_namespace WHERE nspname = 'pg_catalog')",
|
|
+ username_subquery);
|
|
+ }
|
|
+ else if (fout->remoteVersion >= 70100)
|
|
+ {
|
|
+ appendPQExpBuffer(query, "SELECT tableoid, oid, aggname, "
|
|
+ "0::oid AS aggnamespace, "
|
|
+ "CASE WHEN aggbasetype = 0 THEN 0 ELSE 1 END AS pronargs, "
|
|
+ "aggbasetype AS proargtypes, "
|
|
+ "(%s aggowner) AS rolname, "
|
|
+ "NULL AS aggacl "
|
|
+ "FROM pg_aggregate "
|
|
+ "where oid > '%u'::oid",
|
|
+ username_subquery,
|
|
+ g_last_builtin_oid);
|
|
+ }
|
|
+ else
|
|
+ {
|
|
+ appendPQExpBuffer(query, "SELECT "
|
|
+ "(SELECT oid FROM pg_class WHERE relname = 'pg_aggregate') AS tableoid, "
|
|
+ "oid, aggname, "
|
|
+ "0::oid AS aggnamespace, "
|
|
+ "CASE WHEN aggbasetype = 0 THEN 0 ELSE 1 END AS pronargs, "
|
|
+ "aggbasetype AS proargtypes, "
|
|
+ "(%s aggowner) AS rolname, "
|
|
+ "NULL AS aggacl "
|
|
+ "FROM pg_aggregate "
|
|
+ "where oid > '%u'::oid",
|
|
+ username_subquery,
|
|
+ g_last_builtin_oid);
|
|
+ }
|
|
+
|
|
+ res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
|
|
+
|
|
+ ntups = PQntuples(res);
|
|
+ *numAggs = ntups;
|
|
+
|
|
+ agginfo = (AggInfo *) pg_malloc(ntups * sizeof(AggInfo));
|
|
+
|
|
+ i_tableoid = PQfnumber(res, "tableoid");
|
|
+ i_oid = PQfnumber(res, "oid");
|
|
+ i_aggname = PQfnumber(res, "aggname");
|
|
+ i_aggnamespace = PQfnumber(res, "aggnamespace");
|
|
+ i_pronargs = PQfnumber(res, "pronargs");
|
|
+ i_proargtypes = PQfnumber(res, "proargtypes");
|
|
+ i_rolname = PQfnumber(res, "rolname");
|
|
+ i_aggacl = PQfnumber(res, "aggacl");
|
|
+
|
|
+ for (i = 0; i < ntups; i++)
|
|
+ {
|
|
+ agginfo[i].aggfn.dobj.objType = DO_AGG;
|
|
+ agginfo[i].aggfn.dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
|
|
+ agginfo[i].aggfn.dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
|
|
+ AssignDumpId(&agginfo[i].aggfn.dobj);
|
|
+ agginfo[i].aggfn.dobj.name = pg_strdup(PQgetvalue(res, i, i_aggname));
|
|
+ agginfo[i].aggfn.dobj.namespace =
|
|
+ findNamespace(fout,
|
|
+ atooid(PQgetvalue(res, i, i_aggnamespace)),
|
|
+ agginfo[i].aggfn.dobj.catId.oid);
|
|
+ agginfo[i].aggfn.rolname = pg_strdup(PQgetvalue(res, i, i_rolname));
|
|
+ if (strlen(agginfo[i].aggfn.rolname) == 0)
|
|
+ write_msg(NULL, "WARNING: owner of aggregate function \"%s\" appears to be invalid\n",
|
|
+ agginfo[i].aggfn.dobj.name);
|
|
+ agginfo[i].aggfn.lang = InvalidOid; /* not currently interesting */
|
|
+ agginfo[i].aggfn.prorettype = InvalidOid; /* not saved */
|
|
+ agginfo[i].aggfn.proacl = pg_strdup(PQgetvalue(res, i, i_aggacl));
|
|
+ agginfo[i].aggfn.nargs = atoi(PQgetvalue(res, i, i_pronargs));
|
|
+ if (agginfo[i].aggfn.nargs == 0)
|
|
+ agginfo[i].aggfn.argtypes = NULL;
|
|
+ else
|
|
+ {
|
|
+ agginfo[i].aggfn.argtypes = (Oid *) pg_malloc(agginfo[i].aggfn.nargs * sizeof(Oid));
|
|
+ if (fout->remoteVersion >= 70300)
|
|
+ parseOidArray(PQgetvalue(res, i, i_proargtypes),
|
|
+ agginfo[i].aggfn.argtypes,
|
|
+ agginfo[i].aggfn.nargs);
|
|
+ else
|
|
+ /* it's just aggbasetype */
|
|
+ agginfo[i].aggfn.argtypes[0] = atooid(PQgetvalue(res, i, i_proargtypes));
|
|
+ }
|
|
+
|
|
+ /* Decide whether we want to dump it */
|
|
+ selectDumpableObject(&(agginfo[i].aggfn.dobj));
|
|
+ }
|
|
+
|
|
+ PQclear(res);
|
|
+
|
|
+ destroyPQExpBuffer(query);
|
|
+
|
|
+ return agginfo;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * getFuncs:
|
|
+ * read all the user-defined functions in the system catalogs and
|
|
+ * return them in the FuncInfo* structure
|
|
+ *
|
|
+ * numFuncs is set to the number of functions read in
|
|
+ */
|
|
+FuncInfo *
|
|
+getFuncs(Archive *fout, int *numFuncs)
|
|
+{
|
|
+ PGresult *res;
|
|
+ int ntups;
|
|
+ int i;
|
|
+ PQExpBuffer query = createPQExpBuffer();
|
|
+ FuncInfo *finfo;
|
|
+ int i_tableoid;
|
|
+ int i_oid;
|
|
+ int i_proname;
|
|
+ int i_pronamespace;
|
|
+ int i_rolname;
|
|
+ int i_prolang;
|
|
+ int i_pronargs;
|
|
+ int i_proargtypes;
|
|
+ int i_prorettype;
|
|
+ int i_proacl;
|
|
+
|
|
+ /* Make sure we are in proper schema */
|
|
+ selectSourceSchema(fout, "pg_catalog");
|
|
+
|
|
+ /*
|
|
+ * Find all user-defined functions. Normally we can exclude functions in
|
|
+ * pg_catalog, which is worth doing since there are several thousand of
|
|
+ * 'em. However, there are some extensions that create functions in
|
|
+ * pg_catalog. In normal dumps we can still ignore those --- but in
|
|
+ * binary-upgrade mode, we must dump the member objects of the extension,
|
|
+ * so be sure to fetch any such functions.
|
|
+ *
|
|
+ * Also, in 9.2 and up, exclude functions that are internally dependent on
|
|
+ * something else, since presumably those will be created as a result of
|
|
+ * creating the something else. This currently only acts to suppress
|
|
+ * constructor functions for range types. Note that this is OK only
|
|
+ * because the constructors don't have any dependencies the range type
|
|
+ * doesn't have; otherwise we might not get creation ordering correct.
|
|
+ */
|
|
+
|
|
+ if (fout->remoteVersion >= 70300)
|
|
+ {
|
|
+ appendPQExpBuffer(query,
|
|
+ "SELECT tableoid, oid, proname, prolang, "
|
|
+ "pronargs, proargtypes, prorettype, proacl, "
|
|
+ "pronamespace, "
|
|
+ "(%s proowner) AS rolname "
|
|
+ "FROM pg_proc p "
|
|
+ "WHERE NOT proisagg AND ("
|
|
+ "pronamespace != "
|
|
+ "(SELECT oid FROM pg_namespace "
|
|
+ "WHERE nspname = 'pg_catalog')",
|
|
+ username_subquery);
|
|
+ if (fout->remoteVersion >= 90200)
|
|
+ appendPQExpBufferStr(query,
|
|
+ "\n AND NOT EXISTS (SELECT 1 FROM pg_depend "
|
|
+ "WHERE classid = 'pg_proc'::regclass AND "
|
|
+ "objid = p.oid AND deptype = 'i')");
|
|
+ if (binary_upgrade && fout->remoteVersion >= 90100)
|
|
+ appendPQExpBufferStr(query,
|
|
+ "\n OR EXISTS(SELECT 1 FROM pg_depend WHERE "
|
|
+ "classid = 'pg_proc'::regclass AND "
|
|
+ "objid = p.oid AND "
|
|
+ "refclassid = 'pg_extension'::regclass AND "
|
|
+ "deptype = 'e')");
|
|
+ appendPQExpBufferChar(query, ')');
|
|
+ }
|
|
+ else if (fout->remoteVersion >= 70100)
|
|
+ {
|
|
+ appendPQExpBuffer(query,
|
|
+ "SELECT tableoid, oid, proname, prolang, "
|
|
+ "pronargs, proargtypes, prorettype, "
|
|
+ "NULL AS proacl, "
|
|
+ "0::oid AS pronamespace, "
|
|
+ "(%s proowner) AS rolname "
|
|
+ "FROM pg_proc "
|
|
+ "WHERE pg_proc.oid > '%u'::oid",
|
|
+ username_subquery,
|
|
+ g_last_builtin_oid);
|
|
+ }
|
|
+ else
|
|
+ {
|
|
+ appendPQExpBuffer(query,
|
|
+ "SELECT "
|
|
+ "(SELECT oid FROM pg_class "
|
|
+ " WHERE relname = 'pg_proc') AS tableoid, "
|
|
+ "oid, proname, prolang, "
|
|
+ "pronargs, proargtypes, prorettype, "
|
|
+ "NULL AS proacl, "
|
|
+ "0::oid AS pronamespace, "
|
|
+ "(%s proowner) AS rolname "
|
|
+ "FROM pg_proc "
|
|
+ "where pg_proc.oid > '%u'::oid",
|
|
+ username_subquery,
|
|
+ g_last_builtin_oid);
|
|
+ }
|
|
+
|
|
+ res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
|
|
+
|
|
+ ntups = PQntuples(res);
|
|
+
|
|
+ *numFuncs = ntups;
|
|
+
|
|
+ finfo = (FuncInfo *) pg_malloc0(ntups * sizeof(FuncInfo));
|
|
+
|
|
+ i_tableoid = PQfnumber(res, "tableoid");
|
|
+ i_oid = PQfnumber(res, "oid");
|
|
+ i_proname = PQfnumber(res, "proname");
|
|
+ i_pronamespace = PQfnumber(res, "pronamespace");
|
|
+ i_rolname = PQfnumber(res, "rolname");
|
|
+ i_prolang = PQfnumber(res, "prolang");
|
|
+ i_pronargs = PQfnumber(res, "pronargs");
|
|
+ i_proargtypes = PQfnumber(res, "proargtypes");
|
|
+ i_prorettype = PQfnumber(res, "prorettype");
|
|
+ i_proacl = PQfnumber(res, "proacl");
|
|
+
|
|
+ for (i = 0; i < ntups; i++)
|
|
+ {
|
|
+ finfo[i].dobj.objType = DO_FUNC;
|
|
+ finfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
|
|
+ finfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
|
|
+ AssignDumpId(&finfo[i].dobj);
|
|
+ finfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_proname));
|
|
+ finfo[i].dobj.namespace =
|
|
+ findNamespace(fout,
|
|
+ atooid(PQgetvalue(res, i, i_pronamespace)),
|
|
+ finfo[i].dobj.catId.oid);
|
|
+ finfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_rolname));
|
|
+ finfo[i].lang = atooid(PQgetvalue(res, i, i_prolang));
|
|
+ finfo[i].prorettype = atooid(PQgetvalue(res, i, i_prorettype));
|
|
+ finfo[i].proacl = pg_strdup(PQgetvalue(res, i, i_proacl));
|
|
+ finfo[i].nargs = atoi(PQgetvalue(res, i, i_pronargs));
|
|
+ if (finfo[i].nargs == 0)
|
|
+ finfo[i].argtypes = NULL;
|
|
+ else
|
|
+ {
|
|
+ finfo[i].argtypes = (Oid *) pg_malloc(finfo[i].nargs * sizeof(Oid));
|
|
+ parseOidArray(PQgetvalue(res, i, i_proargtypes),
|
|
+ finfo[i].argtypes, finfo[i].nargs);
|
|
+ }
|
|
+
|
|
+ /* Decide whether we want to dump it */
|
|
+ selectDumpableObject(&(finfo[i].dobj));
|
|
+
|
|
+ if (strlen(finfo[i].rolname) == 0)
|
|
+ write_msg(NULL,
|
|
+ "WARNING: owner of function \"%s\" appears to be invalid\n",
|
|
+ finfo[i].dobj.name);
|
|
+ }
|
|
+
|
|
+ PQclear(res);
|
|
+
|
|
+ destroyPQExpBuffer(query);
|
|
+
|
|
+ return finfo;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * getTables
|
|
+ * read all the user-defined tables (no indexes, no catalogs)
|
|
+ * in the system catalogs return them in the TableInfo* structure
|
|
+ *
|
|
+ * numTables is set to the number of tables read in
|
|
+ */
|
|
+TableInfo *
|
|
+getTables(Archive *fout, int *numTables)
|
|
+{
|
|
+ PGresult *res;
|
|
+ int ntups;
|
|
+ int i;
|
|
+ PQExpBuffer query = createPQExpBuffer();
|
|
+ TableInfo *tblinfo;
|
|
+ int i_reltableoid;
|
|
+ int i_reloid;
|
|
+ int i_relname;
|
|
+ int i_relnamespace;
|
|
+ int i_relkind;
|
|
+ int i_relacl;
|
|
+ int i_rolname;
|
|
+ int i_relchecks;
|
|
+ int i_relhastriggers;
|
|
+ int i_relhasindex;
|
|
+ int i_relhasrules;
|
|
+ int i_relhasoids;
|
|
+ int i_relfrozenxid;
|
|
+ int i_relminmxid;
|
|
+ int i_toastoid;
|
|
+ int i_toastfrozenxid;
|
|
+ int i_toastminmxid;
|
|
+ int i_relpersistence;
|
|
+ int i_relispopulated;
|
|
+ int i_relreplident;
|
|
+ int i_owning_tab;
|
|
+ int i_owning_col;
|
|
+ int i_reltablespace;
|
|
+ int i_reloptions;
|
|
+ int i_checkoption;
|
|
+ int i_toastreloptions;
|
|
+ int i_reloftype;
|
|
+ int i_relpages;
|
|
+
|
|
+ /* Make sure we are in proper schema */
|
|
+ selectSourceSchema(fout, "pg_catalog");
|
|
+
|
|
+ /*
|
|
+ * Find all the tables and table-like objects.
|
|
+ *
|
|
+ * We include system catalogs, so that we can work if a user table is
|
|
+ * defined to inherit from a system catalog (pretty weird, but...)
|
|
+ *
|
|
+ * We ignore relations that are not ordinary tables, sequences, views,
|
|
+ * materialized views, composite types, or foreign tables.
|
|
+ *
|
|
+ * Composite-type table entries won't be dumped as such, but we have to
|
|
+ * make a DumpableObject for them so that we can track dependencies of the
|
|
+ * composite type (pg_depend entries for columns of the composite type
|
|
+ * link to the pg_class entry not the pg_type entry).
|
|
+ *
|
|
+ * Note: in this phase we should collect only a minimal amount of
|
|
+ * information about each table, basically just enough to decide if it is
|
|
+ * interesting. We must fetch all tables in this phase because otherwise
|
|
+ * we cannot correctly identify inherited columns, owned sequences, etc.
|
|
+ */
|
|
+
|
|
+ if (fout->remoteVersion >= 90400)
|
|
+ {
|
|
+ /*
|
|
+ * Left join to pick up dependency info linking sequences to their
|
|
+ * owning column, if any (note this dependency is AUTO as of 8.2)
|
|
+ */
|
|
+ appendPQExpBuffer(query,
|
|
+ "SELECT c.tableoid, c.oid, c.relname, "
|
|
+ "c.relacl, c.relkind, c.relnamespace, "
|
|
+ "(%s c.relowner) AS rolname, "
|
|
+ "c.relchecks, c.relhastriggers, "
|
|
+ "c.relhasindex, c.relhasrules, c.relhasoids, "
|
|
+ "c.relfrozenxid, c.relminmxid, tc.oid AS toid, "
|
|
+ "tc.relfrozenxid AS tfrozenxid, "
|
|
+ "tc.relminmxid AS tminmxid, "
|
|
+ "c.relpersistence, c.relispopulated, "
|
|
+ "c.relreplident, c.relpages, "
|
|
+ "CASE WHEN c.reloftype <> 0 THEN c.reloftype::pg_catalog.regtype ELSE NULL END AS reloftype, "
|
|
+ "d.refobjid AS owning_tab, "
|
|
+ "d.refobjsubid AS owning_col, "
|
|
+ "(SELECT spcname FROM pg_tablespace t WHERE t.oid = c.reltablespace) AS reltablespace, "
|
|
+ "array_to_string(array_remove(array_remove(c.reloptions,'check_option=local'),'check_option=cascaded'), ', ') AS reloptions, "
|
|
+ "CASE WHEN 'check_option=local' = ANY (c.reloptions) THEN 'LOCAL'::text "
|
|
+ "WHEN 'check_option=cascaded' = ANY (c.reloptions) THEN 'CASCADED'::text ELSE NULL END AS checkoption, "
|
|
+ "array_to_string(array(SELECT 'toast.' || x FROM unnest(tc.reloptions) x), ', ') AS toast_reloptions "
|
|
+ "FROM pg_class c "
|
|
+ "LEFT JOIN pg_depend d ON "
|
|
+ "(c.relkind = '%c' AND "
|
|
+ "d.classid = c.tableoid AND d.objid = c.oid AND "
|
|
+ "d.objsubid = 0 AND "
|
|
+ "d.refclassid = c.tableoid AND d.deptype = 'a') "
|
|
+ "LEFT JOIN pg_class tc ON (c.reltoastrelid = tc.oid) "
|
|
+ "WHERE c.relkind in ('%c', '%c', '%c', '%c', '%c', '%c') "
|
|
+ "ORDER BY c.oid",
|
|
+ username_subquery,
|
|
+ RELKIND_SEQUENCE,
|
|
+ RELKIND_RELATION, RELKIND_SEQUENCE,
|
|
+ RELKIND_VIEW, RELKIND_COMPOSITE_TYPE,
|
|
+ RELKIND_MATVIEW, RELKIND_FOREIGN_TABLE);
|
|
+ }
|
|
+ else if (fout->remoteVersion >= 90300)
|
|
+ {
|
|
+ /*
|
|
+ * Left join to pick up dependency info linking sequences to their
|
|
+ * owning column, if any (note this dependency is AUTO as of 8.2)
|
|
+ */
|
|
+ appendPQExpBuffer(query,
|
|
+ "SELECT c.tableoid, c.oid, c.relname, "
|
|
+ "c.relacl, c.relkind, c.relnamespace, "
|
|
+ "(%s c.relowner) AS rolname, "
|
|
+ "c.relchecks, c.relhastriggers, "
|
|
+ "c.relhasindex, c.relhasrules, c.relhasoids, "
|
|
+ "c.relfrozenxid, c.relminmxid, tc.oid AS toid, "
|
|
+ "tc.relfrozenxid AS tfrozenxid, "
|
|
+ "tc.relminmxid AS tminmxid, "
|
|
+ "c.relpersistence, c.relispopulated, "
|
|
+ "'d' AS relreplident, c.relpages, "
|
|
+ "CASE WHEN c.reloftype <> 0 THEN c.reloftype::pg_catalog.regtype ELSE NULL END AS reloftype, "
|
|
+ "d.refobjid AS owning_tab, "
|
|
+ "d.refobjsubid AS owning_col, "
|
|
+ "(SELECT spcname FROM pg_tablespace t WHERE t.oid = c.reltablespace) AS reltablespace, "
|
|
+ "array_to_string(array_remove(array_remove(c.reloptions,'check_option=local'),'check_option=cascaded'), ', ') AS reloptions, "
|
|
+ "CASE WHEN 'check_option=local' = ANY (c.reloptions) THEN 'LOCAL'::text "
|
|
+ "WHEN 'check_option=cascaded' = ANY (c.reloptions) THEN 'CASCADED'::text ELSE NULL END AS checkoption, "
|
|
+ "array_to_string(array(SELECT 'toast.' || x FROM unnest(tc.reloptions) x), ', ') AS toast_reloptions "
|
|
+ "FROM pg_class c "
|
|
+ "LEFT JOIN pg_depend d ON "
|
|
+ "(c.relkind = '%c' AND "
|
|
+ "d.classid = c.tableoid AND d.objid = c.oid AND "
|
|
+ "d.objsubid = 0 AND "
|
|
+ "d.refclassid = c.tableoid AND d.deptype = 'a') "
|
|
+ "LEFT JOIN pg_class tc ON (c.reltoastrelid = tc.oid) "
|
|
+ "WHERE c.relkind in ('%c', '%c', '%c', '%c', '%c', '%c') "
|
|
+ "ORDER BY c.oid",
|
|
+ username_subquery,
|
|
+ RELKIND_SEQUENCE,
|
|
+ RELKIND_RELATION, RELKIND_SEQUENCE,
|
|
+ RELKIND_VIEW, RELKIND_COMPOSITE_TYPE,
|
|
+ RELKIND_MATVIEW, RELKIND_FOREIGN_TABLE);
|
|
+ }
|
|
+ else if (fout->remoteVersion >= 90100)
|
|
+ {
|
|
+ /*
|
|
+ * Left join to pick up dependency info linking sequences to their
|
|
+ * owning column, if any (note this dependency is AUTO as of 8.2)
|
|
+ */
|
|
+ appendPQExpBuffer(query,
|
|
+ "SELECT c.tableoid, c.oid, c.relname, "
|
|
+ "c.relacl, c.relkind, c.relnamespace, "
|
|
+ "(%s c.relowner) AS rolname, "
|
|
+ "c.relchecks, c.relhastriggers, "
|
|
+ "c.relhasindex, c.relhasrules, c.relhasoids, "
|
|
+ "c.relfrozenxid, 0 AS relminmxid, tc.oid AS toid, "
|
|
+ "tc.relfrozenxid AS tfrozenxid, "
|
|
+ "0 AS tminmxid, "
|
|
+ "c.relpersistence, 't' as relispopulated, "
|
|
+ "'d' AS relreplident, c.relpages, "
|
|
+ "CASE WHEN c.reloftype <> 0 THEN c.reloftype::pg_catalog.regtype ELSE NULL END AS reloftype, "
|
|
+ "d.refobjid AS owning_tab, "
|
|
+ "d.refobjsubid AS owning_col, "
|
|
+ "(SELECT spcname FROM pg_tablespace t WHERE t.oid = c.reltablespace) AS reltablespace, "
|
|
+ "array_to_string(c.reloptions, ', ') AS reloptions, "
|
|
+ "array_to_string(array(SELECT 'toast.' || x FROM unnest(tc.reloptions) x), ', ') AS toast_reloptions "
|
|
+ "FROM pg_class c "
|
|
+ "LEFT JOIN pg_depend d ON "
|
|
+ "(c.relkind = '%c' AND "
|
|
+ "d.classid = c.tableoid AND d.objid = c.oid AND "
|
|
+ "d.objsubid = 0 AND "
|
|
+ "d.refclassid = c.tableoid AND d.deptype = 'a') "
|
|
+ "LEFT JOIN pg_class tc ON (c.reltoastrelid = tc.oid) "
|
|
+ "WHERE c.relkind in ('%c', '%c', '%c', '%c', '%c', '%c') "
|
|
+ "ORDER BY c.oid",
|
|
+ username_subquery,
|
|
+ RELKIND_SEQUENCE,
|
|
+ RELKIND_RELATION, RELKIND_SEQUENCE,
|
|
+ RELKIND_VIEW, RELKIND_COMPOSITE_TYPE,
|
|
+ RELKIND_MATVIEW, RELKIND_FOREIGN_TABLE);
|
|
+ }
|
|
+ else if (fout->remoteVersion >= 90000)
|
|
+ {
|
|
+ /*
|
|
+ * Left join to pick up dependency info linking sequences to their
|
|
+ * owning column, if any (note this dependency is AUTO as of 8.2)
|
|
+ */
|
|
+ appendPQExpBuffer(query,
|
|
+ "SELECT c.tableoid, c.oid, c.relname, "
|
|
+ "c.relacl, c.relkind, c.relnamespace, "
|
|
+ "(%s c.relowner) AS rolname, "
|
|
+ "c.relchecks, c.relhastriggers, "
|
|
+ "c.relhasindex, c.relhasrules, c.relhasoids, "
|
|
+ "c.relfrozenxid, 0 AS relminmxid, tc.oid AS toid, "
|
|
+ "tc.relfrozenxid AS tfrozenxid, "
|
|
+ "0 AS tminmxid, "
|
|
+ "'p' AS relpersistence, 't' as relispopulated, "
|
|
+ "'d' AS relreplident, c.relpages, "
|
|
+ "CASE WHEN c.reloftype <> 0 THEN c.reloftype::pg_catalog.regtype ELSE NULL END AS reloftype, "
|
|
+ "d.refobjid AS owning_tab, "
|
|
+ "d.refobjsubid AS owning_col, "
|
|
+ "(SELECT spcname FROM pg_tablespace t WHERE t.oid = c.reltablespace) AS reltablespace, "
|
|
+ "array_to_string(c.reloptions, ', ') AS reloptions, "
|
|
+ "array_to_string(array(SELECT 'toast.' || x FROM unnest(tc.reloptions) x), ', ') AS toast_reloptions "
|
|
+ "FROM pg_class c "
|
|
+ "LEFT JOIN pg_depend d ON "
|
|
+ "(c.relkind = '%c' AND "
|
|
+ "d.classid = c.tableoid AND d.objid = c.oid AND "
|
|
+ "d.objsubid = 0 AND "
|
|
+ "d.refclassid = c.tableoid AND d.deptype = 'a') "
|
|
+ "LEFT JOIN pg_class tc ON (c.reltoastrelid = tc.oid) "
|
|
+ "WHERE c.relkind in ('%c', '%c', '%c', '%c') "
|
|
+ "ORDER BY c.oid",
|
|
+ username_subquery,
|
|
+ RELKIND_SEQUENCE,
|
|
+ RELKIND_RELATION, RELKIND_SEQUENCE,
|
|
+ RELKIND_VIEW, RELKIND_COMPOSITE_TYPE);
|
|
+ }
|
|
+ else if (fout->remoteVersion >= 80400)
|
|
+ {
|
|
+ /*
|
|
+ * Left join to pick up dependency info linking sequences to their
|
|
+ * owning column, if any (note this dependency is AUTO as of 8.2)
|
|
+ */
|
|
+ appendPQExpBuffer(query,
|
|
+ "SELECT c.tableoid, c.oid, c.relname, "
|
|
+ "c.relacl, c.relkind, c.relnamespace, "
|
|
+ "(%s c.relowner) AS rolname, "
|
|
+ "c.relchecks, c.relhastriggers, "
|
|
+ "c.relhasindex, c.relhasrules, c.relhasoids, "
|
|
+ "c.relfrozenxid, 0 AS relminmxid, tc.oid AS toid, "
|
|
+ "tc.relfrozenxid AS tfrozenxid, "
|
|
+ "0 AS tminmxid, "
|
|
+ "'p' AS relpersistence, 't' as relispopulated, "
|
|
+ "'d' AS relreplident, c.relpages, "
|
|
+ "NULL AS reloftype, "
|
|
+ "d.refobjid AS owning_tab, "
|
|
+ "d.refobjsubid AS owning_col, "
|
|
+ "(SELECT spcname FROM pg_tablespace t WHERE t.oid = c.reltablespace) AS reltablespace, "
|
|
+ "array_to_string(c.reloptions, ', ') AS reloptions, "
|
|
+ "array_to_string(array(SELECT 'toast.' || x FROM unnest(tc.reloptions) x), ', ') AS toast_reloptions "
|
|
+ "FROM pg_class c "
|
|
+ "LEFT JOIN pg_depend d ON "
|
|
+ "(c.relkind = '%c' AND "
|
|
+ "d.classid = c.tableoid AND d.objid = c.oid AND "
|
|
+ "d.objsubid = 0 AND "
|
|
+ "d.refclassid = c.tableoid AND d.deptype = 'a') "
|
|
+ "LEFT JOIN pg_class tc ON (c.reltoastrelid = tc.oid) "
|
|
+ "WHERE c.relkind in ('%c', '%c', '%c', '%c') "
|
|
+ "ORDER BY c.oid",
|
|
+ username_subquery,
|
|
+ RELKIND_SEQUENCE,
|
|
+ RELKIND_RELATION, RELKIND_SEQUENCE,
|
|
+ RELKIND_VIEW, RELKIND_COMPOSITE_TYPE);
|
|
+ }
|
|
+ else if (fout->remoteVersion >= 80200)
|
|
+ {
|
|
+ /*
|
|
+ * Left join to pick up dependency info linking sequences to their
|
|
+ * owning column, if any (note this dependency is AUTO as of 8.2)
|
|
+ */
|
|
+ appendPQExpBuffer(query,
|
|
+ "SELECT c.tableoid, c.oid, c.relname, "
|
|
+ "c.relacl, c.relkind, c.relnamespace, "
|
|
+ "(%s c.relowner) AS rolname, "
|
|
+ "c.relchecks, (c.reltriggers <> 0) AS relhastriggers, "
|
|
+ "c.relhasindex, c.relhasrules, c.relhasoids, "
|
|
+ "c.relfrozenxid, 0 AS relminmxid, tc.oid AS toid, "
|
|
+ "tc.relfrozenxid AS tfrozenxid, "
|
|
+ "0 AS tminmxid, "
|
|
+ "'p' AS relpersistence, 't' as relispopulated, "
|
|
+ "'d' AS relreplident, c.relpages, "
|
|
+ "NULL AS reloftype, "
|
|
+ "d.refobjid AS owning_tab, "
|
|
+ "d.refobjsubid AS owning_col, "
|
|
+ "(SELECT spcname FROM pg_tablespace t WHERE t.oid = c.reltablespace) AS reltablespace, "
|
|
+ "array_to_string(c.reloptions, ', ') AS reloptions, "
|
|
+ "NULL AS toast_reloptions "
|
|
+ "FROM pg_class c "
|
|
+ "LEFT JOIN pg_depend d ON "
|
|
+ "(c.relkind = '%c' AND "
|
|
+ "d.classid = c.tableoid AND d.objid = c.oid AND "
|
|
+ "d.objsubid = 0 AND "
|
|
+ "d.refclassid = c.tableoid AND d.deptype = 'a') "
|
|
+ "LEFT JOIN pg_class tc ON (c.reltoastrelid = tc.oid) "
|
|
+ "WHERE c.relkind in ('%c', '%c', '%c', '%c') "
|
|
+ "ORDER BY c.oid",
|
|
+ username_subquery,
|
|
+ RELKIND_SEQUENCE,
|
|
+ RELKIND_RELATION, RELKIND_SEQUENCE,
|
|
+ RELKIND_VIEW, RELKIND_COMPOSITE_TYPE);
|
|
+ }
|
|
+ else if (fout->remoteVersion >= 80000)
|
|
+ {
|
|
+ /*
|
|
+ * Left join to pick up dependency info linking sequences to their
|
|
+ * owning column, if any
|
|
+ */
|
|
+ appendPQExpBuffer(query,
|
|
+ "SELECT c.tableoid, c.oid, relname, "
|
|
+ "relacl, relkind, relnamespace, "
|
|
+ "(%s relowner) AS rolname, "
|
|
+ "relchecks, (reltriggers <> 0) AS relhastriggers, "
|
|
+ "relhasindex, relhasrules, relhasoids, "
|
|
+ "0 AS relfrozenxid, 0 AS relminmxid,"
|
|
+ "0 AS toid, "
|
|
+ "0 AS tfrozenxid, 0 AS tminmxid,"
|
|
+ "'p' AS relpersistence, 't' as relispopulated, "
|
|
+ "'d' AS relreplident, relpages, "
|
|
+ "NULL AS reloftype, "
|
|
+ "d.refobjid AS owning_tab, "
|
|
+ "d.refobjsubid AS owning_col, "
|
|
+ "(SELECT spcname FROM pg_tablespace t WHERE t.oid = c.reltablespace) AS reltablespace, "
|
|
+ "NULL AS reloptions, "
|
|
+ "NULL AS toast_reloptions "
|
|
+ "FROM pg_class c "
|
|
+ "LEFT JOIN pg_depend d ON "
|
|
+ "(c.relkind = '%c' AND "
|
|
+ "d.classid = c.tableoid AND d.objid = c.oid AND "
|
|
+ "d.objsubid = 0 AND "
|
|
+ "d.refclassid = c.tableoid AND d.deptype = 'i') "
|
|
+ "WHERE relkind in ('%c', '%c', '%c', '%c') "
|
|
+ "ORDER BY c.oid",
|
|
+ username_subquery,
|
|
+ RELKIND_SEQUENCE,
|
|
+ RELKIND_RELATION, RELKIND_SEQUENCE,
|
|
+ RELKIND_VIEW, RELKIND_COMPOSITE_TYPE);
|
|
+ }
|
|
+ else if (fout->remoteVersion >= 70300)
|
|
+ {
|
|
+ /*
|
|
+ * Left join to pick up dependency info linking sequences to their
|
|
+ * owning column, if any
|
|
+ */
|
|
+ appendPQExpBuffer(query,
|
|
+ "SELECT c.tableoid, c.oid, relname, "
|
|
+ "relacl, relkind, relnamespace, "
|
|
+ "(%s relowner) AS rolname, "
|
|
+ "relchecks, (reltriggers <> 0) AS relhastriggers, "
|
|
+ "relhasindex, relhasrules, relhasoids, "
|
|
+ "0 AS relfrozenxid, 0 AS relminmxid,"
|
|
+ "0 AS toid, "
|
|
+ "0 AS tfrozenxid, 0 AS tminmxid,"
|
|
+ "'p' AS relpersistence, 't' as relispopulated, "
|
|
+ "'d' AS relreplident, relpages, "
|
|
+ "NULL AS reloftype, "
|
|
+ "d.refobjid AS owning_tab, "
|
|
+ "d.refobjsubid AS owning_col, "
|
|
+ "NULL AS reltablespace, "
|
|
+ "NULL AS reloptions, "
|
|
+ "NULL AS toast_reloptions "
|
|
+ "FROM pg_class c "
|
|
+ "LEFT JOIN pg_depend d ON "
|
|
+ "(c.relkind = '%c' AND "
|
|
+ "d.classid = c.tableoid AND d.objid = c.oid AND "
|
|
+ "d.objsubid = 0 AND "
|
|
+ "d.refclassid = c.tableoid AND d.deptype = 'i') "
|
|
+ "WHERE relkind IN ('%c', '%c', '%c', '%c') "
|
|
+ "ORDER BY c.oid",
|
|
+ username_subquery,
|
|
+ RELKIND_SEQUENCE,
|
|
+ RELKIND_RELATION, RELKIND_SEQUENCE,
|
|
+ RELKIND_VIEW, RELKIND_COMPOSITE_TYPE);
|
|
+ }
|
|
+ else if (fout->remoteVersion >= 70200)
|
|
+ {
|
|
+ appendPQExpBuffer(query,
|
|
+ "SELECT tableoid, oid, relname, relacl, relkind, "
|
|
+ "0::oid AS relnamespace, "
|
|
+ "(%s relowner) AS rolname, "
|
|
+ "relchecks, (reltriggers <> 0) AS relhastriggers, "
|
|
+ "relhasindex, relhasrules, relhasoids, "
|
|
+ "0 AS relfrozenxid, 0 AS relminmxid,"
|
|
+ "0 AS toid, "
|
|
+ "0 AS tfrozenxid, 0 AS tminmxid,"
|
|
+ "'p' AS relpersistence, 't' as relispopulated, "
|
|
+ "'d' AS relreplident, relpages, "
|
|
+ "NULL AS reloftype, "
|
|
+ "NULL::oid AS owning_tab, "
|
|
+ "NULL::int4 AS owning_col, "
|
|
+ "NULL AS reltablespace, "
|
|
+ "NULL AS reloptions, "
|
|
+ "NULL AS toast_reloptions "
|
|
+ "FROM pg_class "
|
|
+ "WHERE relkind IN ('%c', '%c', '%c') "
|
|
+ "ORDER BY oid",
|
|
+ username_subquery,
|
|
+ RELKIND_RELATION, RELKIND_SEQUENCE, RELKIND_VIEW);
|
|
+ }
|
|
+ else if (fout->remoteVersion >= 70100)
|
|
+ {
|
|
+ /* all tables have oids in 7.1 */
|
|
+ appendPQExpBuffer(query,
|
|
+ "SELECT tableoid, oid, relname, relacl, relkind, "
|
|
+ "0::oid AS relnamespace, "
|
|
+ "(%s relowner) AS rolname, "
|
|
+ "relchecks, (reltriggers <> 0) AS relhastriggers, "
|
|
+ "relhasindex, relhasrules, "
|
|
+ "'t'::bool AS relhasoids, "
|
|
+ "0 AS relfrozenxid, 0 AS relminmxid,"
|
|
+ "0 AS toid, "
|
|
+ "0 AS tfrozenxid, 0 AS tminmxid,"
|
|
+ "'p' AS relpersistence, 't' as relispopulated, "
|
|
+ "'d' AS relreplident, relpages, "
|
|
+ "NULL AS reloftype, "
|
|
+ "NULL::oid AS owning_tab, "
|
|
+ "NULL::int4 AS owning_col, "
|
|
+ "NULL AS reltablespace, "
|
|
+ "NULL AS reloptions, "
|
|
+ "NULL AS toast_reloptions "
|
|
+ "FROM pg_class "
|
|
+ "WHERE relkind IN ('%c', '%c', '%c') "
|
|
+ "ORDER BY oid",
|
|
+ username_subquery,
|
|
+ RELKIND_RELATION, RELKIND_SEQUENCE, RELKIND_VIEW);
|
|
+ }
|
|
+ else
|
|
+ {
|
|
+ /*
|
|
+ * Before 7.1, view relkind was not set to 'v', so we must check if we
|
|
+ * have a view by looking for a rule in pg_rewrite.
|
|
+ */
|
|
+ appendPQExpBuffer(query,
|
|
+ "SELECT "
|
|
+ "(SELECT oid FROM pg_class WHERE relname = 'pg_class') AS tableoid, "
|
|
+ "oid, relname, relacl, "
|
|
+ "CASE WHEN relhasrules and relkind = 'r' "
|
|
+ " and EXISTS(SELECT rulename FROM pg_rewrite r WHERE "
|
|
+ " r.ev_class = c.oid AND r.ev_type = '1') "
|
|
+ "THEN '%c'::\"char\" "
|
|
+ "ELSE relkind END AS relkind,"
|
|
+ "0::oid AS relnamespace, "
|
|
+ "(%s relowner) AS rolname, "
|
|
+ "relchecks, (reltriggers <> 0) AS relhastriggers, "
|
|
+ "relhasindex, relhasrules, "
|
|
+ "'t'::bool AS relhasoids, "
|
|
+ "0 AS relfrozenxid, 0 AS relminmxid,"
|
|
+ "0 AS toid, "
|
|
+ "0 AS tfrozenxid, 0 AS tminmxid,"
|
|
+ "'p' AS relpersistence, 't' as relispopulated, "
|
|
+ "'d' AS relreplident, 0 AS relpages, "
|
|
+ "NULL AS reloftype, "
|
|
+ "NULL::oid AS owning_tab, "
|
|
+ "NULL::int4 AS owning_col, "
|
|
+ "NULL AS reltablespace, "
|
|
+ "NULL AS reloptions, "
|
|
+ "NULL AS toast_reloptions "
|
|
+ "FROM pg_class c "
|
|
+ "WHERE relkind IN ('%c', '%c') "
|
|
+ "ORDER BY oid",
|
|
+ RELKIND_VIEW,
|
|
+ username_subquery,
|
|
+ RELKIND_RELATION, RELKIND_SEQUENCE);
|
|
+ }
|
|
+
|
|
+ res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
|
|
+
|
|
+ ntups = PQntuples(res);
|
|
+
|
|
+ *numTables = ntups;
|
|
+
|
|
+	/*
+	 * Extract data from result and lock dumpable tables. We do the locking
+	 * before anything else, to minimize the window wherein a table could
+	 * disappear under us.
+	 *
+	 * Note that we have to save info about all tables here, even when dumping
+	 * only one, because we don't yet know which tables might be inheritance
+	 * ancestors of the target table.
+	 */
+ tblinfo = (TableInfo *) pg_malloc0(ntups * sizeof(TableInfo));
|
|
+
|
|
+ i_reltableoid = PQfnumber(res, "tableoid");
|
|
+ i_reloid = PQfnumber(res, "oid");
|
|
+ i_relname = PQfnumber(res, "relname");
|
|
+ i_relnamespace = PQfnumber(res, "relnamespace");
|
|
+ i_relacl = PQfnumber(res, "relacl");
|
|
+ i_relkind = PQfnumber(res, "relkind");
|
|
+ i_rolname = PQfnumber(res, "rolname");
|
|
+ i_relchecks = PQfnumber(res, "relchecks");
|
|
+ i_relhastriggers = PQfnumber(res, "relhastriggers");
|
|
+ i_relhasindex = PQfnumber(res, "relhasindex");
|
|
+ i_relhasrules = PQfnumber(res, "relhasrules");
|
|
+ i_relhasoids = PQfnumber(res, "relhasoids");
|
|
+ i_relfrozenxid = PQfnumber(res, "relfrozenxid");
|
|
+ i_relminmxid = PQfnumber(res, "relminmxid");
|
|
+ i_toastoid = PQfnumber(res, "toid");
|
|
+ i_toastfrozenxid = PQfnumber(res, "tfrozenxid");
|
|
+ i_toastminmxid = PQfnumber(res, "tminmxid");
|
|
+ i_relpersistence = PQfnumber(res, "relpersistence");
|
|
+ i_relispopulated = PQfnumber(res, "relispopulated");
|
|
+ i_relreplident = PQfnumber(res, "relreplident");
|
|
+ i_relpages = PQfnumber(res, "relpages");
|
|
+ i_owning_tab = PQfnumber(res, "owning_tab");
|
|
+ i_owning_col = PQfnumber(res, "owning_col");
|
|
+ i_reltablespace = PQfnumber(res, "reltablespace");
|
|
+ i_reloptions = PQfnumber(res, "reloptions");
|
|
+ i_checkoption = PQfnumber(res, "checkoption");
|
|
+ i_toastreloptions = PQfnumber(res, "toast_reloptions");
|
|
+ i_reloftype = PQfnumber(res, "reloftype");
|
|
+
|
|
+ if (lockWaitTimeout && fout->remoteVersion >= 70300)
|
|
+ {
|
|
+ /*
|
|
+ * Arrange to fail instead of waiting forever for a table lock.
|
|
+ *
|
|
+ * NB: this coding assumes that the only queries issued within the
|
|
+ * following loop are LOCK TABLEs; else the timeout may be undesirably
|
|
+ * applied to other things too.
|
|
+ */
|
|
+ resetPQExpBuffer(query);
|
|
+ appendPQExpBufferStr(query, "SET statement_timeout = ");
|
|
+ appendStringLiteralConn(query, lockWaitTimeout, GetConnection(fout));
|
|
+ ExecuteSqlStatement(fout, query->data);
|
|
+ }
|
|
+
|
|
+ for (i = 0; i < ntups; i++)
|
|
+ {
|
|
+ tblinfo[i].dobj.objType = DO_TABLE;
|
|
+ tblinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_reltableoid));
|
|
+ tblinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_reloid));
|
|
+ AssignDumpId(&tblinfo[i].dobj);
|
|
+ tblinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_relname));
|
|
+ tblinfo[i].dobj.namespace =
|
|
+ findNamespace(fout,
|
|
+ atooid(PQgetvalue(res, i, i_relnamespace)),
|
|
+ tblinfo[i].dobj.catId.oid);
|
|
+ tblinfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_rolname));
|
|
+ tblinfo[i].relacl = pg_strdup(PQgetvalue(res, i, i_relacl));
|
|
+ tblinfo[i].relkind = *(PQgetvalue(res, i, i_relkind));
|
|
+ tblinfo[i].relpersistence = *(PQgetvalue(res, i, i_relpersistence));
|
|
+ tblinfo[i].hasindex = (strcmp(PQgetvalue(res, i, i_relhasindex), "t") == 0);
|
|
+ tblinfo[i].hasrules = (strcmp(PQgetvalue(res, i, i_relhasrules), "t") == 0);
|
|
+ tblinfo[i].hastriggers = (strcmp(PQgetvalue(res, i, i_relhastriggers), "t") == 0);
|
|
+ tblinfo[i].hasoids = (strcmp(PQgetvalue(res, i, i_relhasoids), "t") == 0);
|
|
+ tblinfo[i].relispopulated = (strcmp(PQgetvalue(res, i, i_relispopulated), "t") == 0);
|
|
+ tblinfo[i].relreplident = *(PQgetvalue(res, i, i_relreplident));
|
|
+ tblinfo[i].relpages = atoi(PQgetvalue(res, i, i_relpages));
|
|
+ tblinfo[i].frozenxid = atooid(PQgetvalue(res, i, i_relfrozenxid));
|
|
+ tblinfo[i].minmxid = atooid(PQgetvalue(res, i, i_relminmxid));
|
|
+ tblinfo[i].toast_oid = atooid(PQgetvalue(res, i, i_toastoid));
|
|
+ tblinfo[i].toast_frozenxid = atooid(PQgetvalue(res, i, i_toastfrozenxid));
|
|
+ tblinfo[i].toast_minmxid = atooid(PQgetvalue(res, i, i_toastminmxid));
|
|
+ if (PQgetisnull(res, i, i_reloftype))
|
|
+ tblinfo[i].reloftype = NULL;
|
|
+ else
|
|
+ tblinfo[i].reloftype = pg_strdup(PQgetvalue(res, i, i_reloftype));
|
|
+ tblinfo[i].ncheck = atoi(PQgetvalue(res, i, i_relchecks));
|
|
+ if (PQgetisnull(res, i, i_owning_tab))
|
|
+ {
|
|
+ tblinfo[i].owning_tab = InvalidOid;
|
|
+ tblinfo[i].owning_col = 0;
|
|
+ }
|
|
+ else
|
|
+ {
|
|
+ tblinfo[i].owning_tab = atooid(PQgetvalue(res, i, i_owning_tab));
|
|
+ tblinfo[i].owning_col = atoi(PQgetvalue(res, i, i_owning_col));
|
|
+ }
|
|
+ tblinfo[i].reltablespace = pg_strdup(PQgetvalue(res, i, i_reltablespace));
|
|
+ tblinfo[i].reloptions = pg_strdup(PQgetvalue(res, i, i_reloptions));
|
|
+ if (i_checkoption == -1 || PQgetisnull(res, i, i_checkoption))
|
|
+ tblinfo[i].checkoption = NULL;
|
|
+ else
|
|
+ tblinfo[i].checkoption = pg_strdup(PQgetvalue(res, i, i_checkoption));
|
|
+ tblinfo[i].toast_reloptions = pg_strdup(PQgetvalue(res, i, i_toastreloptions));
|
|
+
|
|
+ /* other fields were zeroed above */
|
|
+
|
|
+ /*
|
|
+ * Decide whether we want to dump this table.
|
|
+ */
|
|
+ if (tblinfo[i].relkind == RELKIND_COMPOSITE_TYPE)
|
|
+ tblinfo[i].dobj.dump = false;
|
|
+ else
|
|
+ selectDumpableTable(&tblinfo[i]);
|
|
+ tblinfo[i].interesting = tblinfo[i].dobj.dump;
|
|
+
|
|
+ tblinfo[i].postponed_def = false; /* might get set during sort */
|
|
+
|
|
+ /*
|
|
+ * Read-lock target tables to make sure they aren't DROPPED or altered
|
|
+ * in schema before we get around to dumping them.
|
|
+ *
|
|
+ * Note that we don't explicitly lock parents of the target tables; we
|
|
+ * assume our lock on the child is enough to prevent schema
|
|
+ * alterations to parent tables.
|
|
+ *
|
|
+ * NOTE: it'd be kinda nice to lock other relations too, not only
|
|
+ * plain tables, but the backend doesn't presently allow that.
|
|
+ */
|
|
+ if (tblinfo[i].dobj.dump && tblinfo[i].relkind == RELKIND_RELATION)
|
|
+ {
|
|
+ resetPQExpBuffer(query);
|
|
+ appendPQExpBuffer(query,
|
|
+ "LOCK TABLE %s IN ACCESS SHARE MODE",
|
|
+ fmtQualifiedId(fout->remoteVersion,
|
|
+ tblinfo[i].dobj.namespace->dobj.name,
|
|
+ tblinfo[i].dobj.name));
|
|
+ ExecuteSqlStatement(fout, query->data);
|
|
+ }
|
|
+
|
|
+ /* Emit notice if join for owner failed */
|
|
+ if (strlen(tblinfo[i].rolname) == 0)
|
|
+ write_msg(NULL, "WARNING: owner of table \"%s\" appears to be invalid\n",
|
|
+ tblinfo[i].dobj.name);
|
|
+ }
|
|
+
|
|
+ if (lockWaitTimeout && fout->remoteVersion >= 70300)
|
|
+ {
|
|
+ ExecuteSqlStatement(fout, "SET statement_timeout = 0");
|
|
+ }
|
|
+
|
|
+ PQclear(res);
|
|
+
|
|
+ destroyPQExpBuffer(query);
|
|
+
|
|
+ return tblinfo;
|
|
+}
|
|
+
+/*
+ * getOwnedSeqs
+ *	  identify owned sequences and mark them as dumpable if their owning
+ *	  table is being dumped
+ *
+ * We used to do this in getTables(), but it's better to do it after the
+ * index used by findTableByOid() has been set up.
+ */
+void
|
|
+getOwnedSeqs(Archive *fout, TableInfo tblinfo[], int numTables)
|
|
+{
|
|
+ int i;
|
|
+
|
|
+ /*
|
|
+ * Force sequences that are "owned" by table columns to be dumped whenever
|
|
+ * their owning table is being dumped.
|
|
+ */
|
|
+ for (i = 0; i < numTables; i++)
|
|
+ {
|
|
+ TableInfo *seqinfo = &tblinfo[i];
|
|
+ TableInfo *owning_tab;
|
|
+
|
|
+ if (!OidIsValid(seqinfo->owning_tab))
|
|
+ continue; /* not an owned sequence */
|
|
+ if (seqinfo->dobj.dump)
|
|
+ continue; /* no need to search */
|
|
+ owning_tab = findTableByOid(seqinfo->owning_tab);
|
|
+ if (owning_tab && owning_tab->dobj.dump)
|
|
+ {
|
|
+ seqinfo->interesting = true;
|
|
+ seqinfo->dobj.dump = true;
|
|
+ }
|
|
+ }
|
|
+}
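+
+/*
+ * Illustrative sketch only (not used by the dump code itself): on 8.2 and
+ * later, the AUTO dependency that getTables()/getOwnedSeqs() rely on can be
+ * inspected from psql with a query along these lines:
+ *
+ *	SELECT d.objid::regclass    AS owned_sequence,
+ *	       d.refobjid::regclass AS owning_table,
+ *	       d.refobjsubid        AS owning_column
+ *	FROM pg_depend d
+ *	     JOIN pg_class c ON c.oid = d.objid
+ *	WHERE c.relkind = 'S' AND d.deptype = 'a'
+ *	  AND d.classid = 'pg_class'::regclass
+ *	  AND d.refclassid = 'pg_class'::regclass;
+ */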
+
+/*
+ * getInherits
+ *	  read all the inheritance information from the system catalogs and
+ *	  return it in the InhInfo* structure
+ *
+ * numInherits is set to the number of pairs read in
+ */
+InhInfo *
|
|
+getInherits(Archive *fout, int *numInherits)
|
|
+{
|
|
+ PGresult *res;
|
|
+ int ntups;
|
|
+ int i;
|
|
+ PQExpBuffer query = createPQExpBuffer();
|
|
+ InhInfo *inhinfo;
|
|
+
|
|
+ int i_inhrelid;
|
|
+ int i_inhparent;
|
|
+
|
|
+ /* Make sure we are in proper schema */
|
|
+ selectSourceSchema(fout, "pg_catalog");
|
|
+
|
|
+ /* find all the inheritance information */
|
|
+
|
|
+ appendPQExpBufferStr(query, "SELECT inhrelid, inhparent FROM pg_inherits");
|
|
+
|
|
+ res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
|
|
+
|
|
+ ntups = PQntuples(res);
|
|
+
|
|
+ *numInherits = ntups;
|
|
+
|
|
+ inhinfo = (InhInfo *) pg_malloc(ntups * sizeof(InhInfo));
|
|
+
|
|
+ i_inhrelid = PQfnumber(res, "inhrelid");
|
|
+ i_inhparent = PQfnumber(res, "inhparent");
|
|
+
|
|
+ for (i = 0; i < ntups; i++)
|
|
+ {
|
|
+ inhinfo[i].inhrelid = atooid(PQgetvalue(res, i, i_inhrelid));
|
|
+ inhinfo[i].inhparent = atooid(PQgetvalue(res, i, i_inhparent));
|
|
+ }
|
|
+
|
|
+ PQclear(res);
|
|
+
|
|
+ destroyPQExpBuffer(query);
|
|
+
|
|
+ return inhinfo;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * getIndexes
|
|
+ * get information about every index on a dumpable table
|
|
+ *
|
|
+ * Note: index data is not returned directly to the caller, but it
|
|
+ * does get entered into the DumpableObject tables.
|
|
+ */
|
|
+void
|
|
+getIndexes(Archive *fout, TableInfo tblinfo[], int numTables)
|
|
+{
|
|
+ int i,
|
|
+ j;
|
|
+ PQExpBuffer query = createPQExpBuffer();
|
|
+ PGresult *res;
|
|
+ IndxInfo *indxinfo;
|
|
+ ConstraintInfo *constrinfo;
|
|
+ int i_tableoid,
|
|
+ i_oid,
|
|
+ i_indexname,
|
|
+ i_indexdef,
|
|
+ i_indnkeys,
|
|
+ i_indkey,
|
|
+ i_indisclustered,
|
|
+ i_indisreplident,
|
|
+ i_contype,
|
|
+ i_conname,
|
|
+ i_condeferrable,
|
|
+ i_condeferred,
|
|
+ i_contableoid,
|
|
+ i_conoid,
|
|
+ i_condef,
|
|
+ i_tablespace,
|
|
+ i_options,
|
|
+ i_relpages;
|
|
+ int ntups;
|
|
+
|
|
+ for (i = 0; i < numTables; i++)
|
|
+ {
|
|
+ TableInfo *tbinfo = &tblinfo[i];
|
|
+
|
|
+ /* Only plain tables and materialized views have indexes. */
|
|
+ if (tbinfo->relkind != RELKIND_RELATION &&
|
|
+ tbinfo->relkind != RELKIND_MATVIEW)
|
|
+ continue;
|
|
+ if (!tbinfo->hasindex)
|
|
+ continue;
|
|
+
|
|
+ /* Ignore indexes of tables not to be dumped */
|
|
+ if (!tbinfo->dobj.dump)
|
|
+ continue;
|
|
+
|
|
+ if (g_verbose)
|
|
+ write_msg(NULL, "reading indexes for table \"%s\"\n",
|
|
+ tbinfo->dobj.name);
|
|
+
|
|
+ /* Make sure we are in proper schema so indexdef is right */
|
|
+ selectSourceSchema(fout, tbinfo->dobj.namespace->dobj.name);
|
|
+
|
|
+		/*
+		 * The point of the messy-looking outer join is to find a constraint
+		 * that is related by an internal dependency link to the index. If we
+		 * find one, create a CONSTRAINT entry linked to the INDEX entry. We
+		 * assume an index won't have more than one internal dependency.
+		 *
+		 * As of 9.0 we don't need to look at pg_depend but can check for a
+		 * match to pg_constraint.conindid. The check on conrelid is
+		 * redundant but useful because that column is indexed while conindid
+		 * is not.
+		 */
+ resetPQExpBuffer(query);
|
|
+ if (fout->remoteVersion >= 90400)
|
|
+ {
|
|
+ /*
|
|
+ * the test on indisready is necessary in 9.2, and harmless in
|
|
+ * earlier/later versions
|
|
+ */
|
|
+ appendPQExpBuffer(query,
|
|
+ "SELECT t.tableoid, t.oid, "
|
|
+ "t.relname AS indexname, "
|
|
+ "pg_catalog.pg_get_indexdef(i.indexrelid) AS indexdef, "
|
|
+ "t.relnatts AS indnkeys, "
|
|
+ "i.indkey, i.indisclustered, "
|
|
+ "i.indisreplident, t.relpages, "
|
|
+ "c.contype, c.conname, "
|
|
+ "c.condeferrable, c.condeferred, "
|
|
+ "c.tableoid AS contableoid, "
|
|
+ "c.oid AS conoid, "
|
|
+ "pg_catalog.pg_get_constraintdef(c.oid, false) AS condef, "
|
|
+ "(SELECT spcname FROM pg_catalog.pg_tablespace s WHERE s.oid = t.reltablespace) AS tablespace, "
|
|
+ "array_to_string(t.reloptions, ', ') AS options "
|
|
+ "FROM pg_catalog.pg_index i "
|
|
+ "JOIN pg_catalog.pg_class t ON (t.oid = i.indexrelid) "
|
|
+ "LEFT JOIN pg_catalog.pg_constraint c "
|
|
+ "ON (i.indrelid = c.conrelid AND "
|
|
+ "i.indexrelid = c.conindid AND "
|
|
+ "c.contype IN ('p','u','x')) "
|
|
+ "WHERE i.indrelid = '%u'::pg_catalog.oid "
|
|
+ "AND i.indisvalid AND i.indisready "
|
|
+ "ORDER BY indexname",
|
|
+ tbinfo->dobj.catId.oid);
|
|
+ }
|
|
+ else if (fout->remoteVersion >= 90000)
|
|
+ {
|
|
+ /*
|
|
+ * the test on indisready is necessary in 9.2, and harmless in
|
|
+ * earlier/later versions
|
|
+ */
|
|
+ appendPQExpBuffer(query,
|
|
+ "SELECT t.tableoid, t.oid, "
|
|
+ "t.relname AS indexname, "
|
|
+ "pg_catalog.pg_get_indexdef(i.indexrelid) AS indexdef, "
|
|
+ "t.relnatts AS indnkeys, "
|
|
+ "i.indkey, i.indisclustered, "
|
|
+ "false AS indisreplident, t.relpages, "
|
|
+ "c.contype, c.conname, "
|
|
+ "c.condeferrable, c.condeferred, "
|
|
+ "c.tableoid AS contableoid, "
|
|
+ "c.oid AS conoid, "
|
|
+ "pg_catalog.pg_get_constraintdef(c.oid, false) AS condef, "
|
|
+ "(SELECT spcname FROM pg_catalog.pg_tablespace s WHERE s.oid = t.reltablespace) AS tablespace, "
|
|
+ "array_to_string(t.reloptions, ', ') AS options "
|
|
+ "FROM pg_catalog.pg_index i "
|
|
+ "JOIN pg_catalog.pg_class t ON (t.oid = i.indexrelid) "
|
|
+ "LEFT JOIN pg_catalog.pg_constraint c "
|
|
+ "ON (i.indrelid = c.conrelid AND "
|
|
+ "i.indexrelid = c.conindid AND "
|
|
+ "c.contype IN ('p','u','x')) "
|
|
+ "WHERE i.indrelid = '%u'::pg_catalog.oid "
|
|
+ "AND i.indisvalid AND i.indisready "
|
|
+ "ORDER BY indexname",
|
|
+ tbinfo->dobj.catId.oid);
|
|
+ }
|
|
+ else if (fout->remoteVersion >= 80200)
|
|
+ {
|
|
+ appendPQExpBuffer(query,
|
|
+ "SELECT t.tableoid, t.oid, "
|
|
+ "t.relname AS indexname, "
|
|
+ "pg_catalog.pg_get_indexdef(i.indexrelid) AS indexdef, "
|
|
+ "t.relnatts AS indnkeys, "
|
|
+ "i.indkey, i.indisclustered, "
|
|
+ "false AS indisreplident, t.relpages, "
|
|
+ "c.contype, c.conname, "
|
|
+ "c.condeferrable, c.condeferred, "
|
|
+ "c.tableoid AS contableoid, "
|
|
+ "c.oid AS conoid, "
|
|
+ "null AS condef, "
|
|
+ "(SELECT spcname FROM pg_catalog.pg_tablespace s WHERE s.oid = t.reltablespace) AS tablespace, "
|
|
+ "array_to_string(t.reloptions, ', ') AS options "
|
|
+ "FROM pg_catalog.pg_index i "
|
|
+ "JOIN pg_catalog.pg_class t ON (t.oid = i.indexrelid) "
|
|
+ "LEFT JOIN pg_catalog.pg_depend d "
|
|
+ "ON (d.classid = t.tableoid "
|
|
+ "AND d.objid = t.oid "
|
|
+ "AND d.deptype = 'i') "
|
|
+ "LEFT JOIN pg_catalog.pg_constraint c "
|
|
+ "ON (d.refclassid = c.tableoid "
|
|
+ "AND d.refobjid = c.oid) "
|
|
+ "WHERE i.indrelid = '%u'::pg_catalog.oid "
|
|
+ "AND i.indisvalid "
|
|
+ "ORDER BY indexname",
|
|
+ tbinfo->dobj.catId.oid);
|
|
+ }
|
|
+ else if (fout->remoteVersion >= 80000)
|
|
+ {
|
|
+ appendPQExpBuffer(query,
|
|
+ "SELECT t.tableoid, t.oid, "
|
|
+ "t.relname AS indexname, "
|
|
+ "pg_catalog.pg_get_indexdef(i.indexrelid) AS indexdef, "
|
|
+ "t.relnatts AS indnkeys, "
|
|
+ "i.indkey, i.indisclustered, "
|
|
+ "false AS indisreplident, t.relpages, "
|
|
+ "c.contype, c.conname, "
|
|
+ "c.condeferrable, c.condeferred, "
|
|
+ "c.tableoid AS contableoid, "
|
|
+ "c.oid AS conoid, "
|
|
+ "null AS condef, "
|
|
+ "(SELECT spcname FROM pg_catalog.pg_tablespace s WHERE s.oid = t.reltablespace) AS tablespace, "
|
|
+ "null AS options "
|
|
+ "FROM pg_catalog.pg_index i "
|
|
+ "JOIN pg_catalog.pg_class t ON (t.oid = i.indexrelid) "
|
|
+ "LEFT JOIN pg_catalog.pg_depend d "
|
|
+ "ON (d.classid = t.tableoid "
|
|
+ "AND d.objid = t.oid "
|
|
+ "AND d.deptype = 'i') "
|
|
+ "LEFT JOIN pg_catalog.pg_constraint c "
|
|
+ "ON (d.refclassid = c.tableoid "
|
|
+ "AND d.refobjid = c.oid) "
|
|
+ "WHERE i.indrelid = '%u'::pg_catalog.oid "
|
|
+ "ORDER BY indexname",
|
|
+ tbinfo->dobj.catId.oid);
|
|
+ }
|
|
+ else if (fout->remoteVersion >= 70300)
|
|
+ {
|
|
+ appendPQExpBuffer(query,
|
|
+ "SELECT t.tableoid, t.oid, "
|
|
+ "t.relname AS indexname, "
|
|
+ "pg_catalog.pg_get_indexdef(i.indexrelid) AS indexdef, "
|
|
+ "t.relnatts AS indnkeys, "
|
|
+ "i.indkey, i.indisclustered, "
|
|
+ "false AS indisreplident, t.relpages, "
|
|
+ "c.contype, c.conname, "
|
|
+ "c.condeferrable, c.condeferred, "
|
|
+ "c.tableoid AS contableoid, "
|
|
+ "c.oid AS conoid, "
|
|
+ "null AS condef, "
|
|
+ "NULL AS tablespace, "
|
|
+ "null AS options "
|
|
+ "FROM pg_catalog.pg_index i "
|
|
+ "JOIN pg_catalog.pg_class t ON (t.oid = i.indexrelid) "
|
|
+ "LEFT JOIN pg_catalog.pg_depend d "
|
|
+ "ON (d.classid = t.tableoid "
|
|
+ "AND d.objid = t.oid "
|
|
+ "AND d.deptype = 'i') "
|
|
+ "LEFT JOIN pg_catalog.pg_constraint c "
|
|
+ "ON (d.refclassid = c.tableoid "
|
|
+ "AND d.refobjid = c.oid) "
|
|
+ "WHERE i.indrelid = '%u'::pg_catalog.oid "
|
|
+ "ORDER BY indexname",
|
|
+ tbinfo->dobj.catId.oid);
|
|
+ }
|
|
+ else if (fout->remoteVersion >= 70100)
|
|
+ {
|
|
+ appendPQExpBuffer(query,
|
|
+ "SELECT t.tableoid, t.oid, "
|
|
+ "t.relname AS indexname, "
|
|
+ "pg_get_indexdef(i.indexrelid) AS indexdef, "
|
|
+ "t.relnatts AS indnkeys, "
|
|
+ "i.indkey, false AS indisclustered, "
|
|
+ "false AS indisreplident, t.relpages, "
|
|
+ "CASE WHEN i.indisprimary THEN 'p'::char "
|
|
+ "ELSE '0'::char END AS contype, "
|
|
+ "t.relname AS conname, "
|
|
+ "false AS condeferrable, "
|
|
+ "false AS condeferred, "
|
|
+ "0::oid AS contableoid, "
|
|
+ "t.oid AS conoid, "
|
|
+ "null AS condef, "
|
|
+ "NULL AS tablespace, "
|
|
+ "null AS options "
|
|
+ "FROM pg_index i, pg_class t "
|
|
+ "WHERE t.oid = i.indexrelid "
|
|
+ "AND i.indrelid = '%u'::oid "
|
|
+ "ORDER BY indexname",
|
|
+ tbinfo->dobj.catId.oid);
|
|
+ }
|
|
+ else
|
|
+ {
|
|
+ appendPQExpBuffer(query,
|
|
+ "SELECT "
|
|
+ "(SELECT oid FROM pg_class WHERE relname = 'pg_class') AS tableoid, "
|
|
+ "t.oid, "
|
|
+ "t.relname AS indexname, "
|
|
+ "pg_get_indexdef(i.indexrelid) AS indexdef, "
|
|
+ "t.relnatts AS indnkeys, "
|
|
+ "i.indkey, false AS indisclustered, "
|
|
+ "false AS indisreplident, t.relpages, "
|
|
+ "CASE WHEN i.indisprimary THEN 'p'::char "
|
|
+ "ELSE '0'::char END AS contype, "
|
|
+ "t.relname AS conname, "
|
|
+ "false AS condeferrable, "
|
|
+ "false AS condeferred, "
|
|
+ "0::oid AS contableoid, "
|
|
+ "t.oid AS conoid, "
|
|
+ "null AS condef, "
|
|
+ "NULL AS tablespace, "
|
|
+ "null AS options "
|
|
+ "FROM pg_index i, pg_class t "
|
|
+ "WHERE t.oid = i.indexrelid "
|
|
+ "AND i.indrelid = '%u'::oid "
|
|
+ "ORDER BY indexname",
|
|
+ tbinfo->dobj.catId.oid);
|
|
+ }
|
|
+
|
|
+ res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
|
|
+
|
|
+ ntups = PQntuples(res);
|
|
+
|
|
+ i_tableoid = PQfnumber(res, "tableoid");
|
|
+ i_oid = PQfnumber(res, "oid");
|
|
+ i_indexname = PQfnumber(res, "indexname");
|
|
+ i_indexdef = PQfnumber(res, "indexdef");
|
|
+ i_indnkeys = PQfnumber(res, "indnkeys");
|
|
+ i_indkey = PQfnumber(res, "indkey");
|
|
+ i_indisclustered = PQfnumber(res, "indisclustered");
|
|
+ i_indisreplident = PQfnumber(res, "indisreplident");
|
|
+ i_relpages = PQfnumber(res, "relpages");
|
|
+ i_contype = PQfnumber(res, "contype");
|
|
+ i_conname = PQfnumber(res, "conname");
|
|
+ i_condeferrable = PQfnumber(res, "condeferrable");
|
|
+ i_condeferred = PQfnumber(res, "condeferred");
|
|
+ i_contableoid = PQfnumber(res, "contableoid");
|
|
+ i_conoid = PQfnumber(res, "conoid");
|
|
+ i_condef = PQfnumber(res, "condef");
|
|
+ i_tablespace = PQfnumber(res, "tablespace");
|
|
+ i_options = PQfnumber(res, "options");
|
|
+
|
|
+ indxinfo = (IndxInfo *) pg_malloc(ntups * sizeof(IndxInfo));
|
|
+ constrinfo = (ConstraintInfo *) pg_malloc(ntups * sizeof(ConstraintInfo));
|
|
+
|
|
+ for (j = 0; j < ntups; j++)
|
|
+ {
|
|
+ char contype;
|
|
+
|
|
+ indxinfo[j].dobj.objType = DO_INDEX;
|
|
+ indxinfo[j].dobj.catId.tableoid = atooid(PQgetvalue(res, j, i_tableoid));
|
|
+ indxinfo[j].dobj.catId.oid = atooid(PQgetvalue(res, j, i_oid));
|
|
+ AssignDumpId(&indxinfo[j].dobj);
|
|
+ indxinfo[j].dobj.name = pg_strdup(PQgetvalue(res, j, i_indexname));
|
|
+ indxinfo[j].dobj.namespace = tbinfo->dobj.namespace;
|
|
+ indxinfo[j].indextable = tbinfo;
|
|
+ indxinfo[j].indexdef = pg_strdup(PQgetvalue(res, j, i_indexdef));
|
|
+ indxinfo[j].indnkeys = atoi(PQgetvalue(res, j, i_indnkeys));
|
|
+ indxinfo[j].tablespace = pg_strdup(PQgetvalue(res, j, i_tablespace));
|
|
+ indxinfo[j].options = pg_strdup(PQgetvalue(res, j, i_options));
|
|
+
|
|
+ /*
|
|
+ * In pre-7.4 releases, indkeys may contain more entries than
|
|
+ * indnkeys says (since indnkeys will be 1 for a functional
|
|
+ * index). We don't actually care about this case since we don't
|
|
+ * examine indkeys except for indexes associated with PRIMARY and
|
|
+ * UNIQUE constraints, which are never functional indexes. But we
|
|
+ * have to allocate enough space to keep parseOidArray from
|
|
+ * complaining.
|
|
+ */
|
|
+ indxinfo[j].indkeys = (Oid *) pg_malloc(INDEX_MAX_KEYS * sizeof(Oid));
|
|
+ parseOidArray(PQgetvalue(res, j, i_indkey),
|
|
+ indxinfo[j].indkeys, INDEX_MAX_KEYS);
|
|
+ indxinfo[j].indisclustered = (PQgetvalue(res, j, i_indisclustered)[0] == 't');
|
|
+ indxinfo[j].indisreplident = (PQgetvalue(res, j, i_indisreplident)[0] == 't');
|
|
+ indxinfo[j].relpages = atoi(PQgetvalue(res, j, i_relpages));
|
|
+ contype = *(PQgetvalue(res, j, i_contype));
|
|
+
|
|
+ if (contype == 'p' || contype == 'u' || contype == 'x')
|
|
+ {
|
|
+ /*
|
|
+ * If we found a constraint matching the index, create an
|
|
+ * entry for it.
|
|
+ *
|
|
+ * In a pre-7.3 database, we take this path iff the index was
|
|
+ * marked indisprimary.
|
|
+ */
|
|
+ constrinfo[j].dobj.objType = DO_CONSTRAINT;
|
|
+ constrinfo[j].dobj.catId.tableoid = atooid(PQgetvalue(res, j, i_contableoid));
|
|
+ constrinfo[j].dobj.catId.oid = atooid(PQgetvalue(res, j, i_conoid));
|
|
+ AssignDumpId(&constrinfo[j].dobj);
|
|
+ constrinfo[j].dobj.name = pg_strdup(PQgetvalue(res, j, i_conname));
|
|
+ constrinfo[j].dobj.namespace = tbinfo->dobj.namespace;
|
|
+ constrinfo[j].contable = tbinfo;
|
|
+ constrinfo[j].condomain = NULL;
|
|
+ constrinfo[j].contype = contype;
|
|
+ if (contype == 'x')
|
|
+ constrinfo[j].condef = pg_strdup(PQgetvalue(res, j, i_condef));
|
|
+ else
|
|
+ constrinfo[j].condef = NULL;
|
|
+ constrinfo[j].confrelid = InvalidOid;
|
|
+ constrinfo[j].conindex = indxinfo[j].dobj.dumpId;
|
|
+ constrinfo[j].condeferrable = *(PQgetvalue(res, j, i_condeferrable)) == 't';
|
|
+ constrinfo[j].condeferred = *(PQgetvalue(res, j, i_condeferred)) == 't';
|
|
+ constrinfo[j].conislocal = true;
|
|
+ constrinfo[j].separate = true;
|
|
+
|
|
+ indxinfo[j].indexconstraint = constrinfo[j].dobj.dumpId;
|
|
+
|
|
+ /* If pre-7.3 DB, better make sure table comes first */
|
|
+ addObjectDependency(&constrinfo[j].dobj,
|
|
+ tbinfo->dobj.dumpId);
|
|
+ }
|
|
+ else
|
|
+ {
|
|
+ /* Plain secondary index */
|
|
+ indxinfo[j].indexconstraint = 0;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ PQclear(res);
|
|
+ }
|
|
+
|
|
+ destroyPQExpBuffer(query);
|
|
+}
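+
+/*
+ * Illustrative sketch only (not part of the dump logic): on 9.0 and later,
+ * the index/constraint pairing that getIndexes() reads above can be listed
+ * directly through pg_constraint.conindid, e.g.
+ *
+ *	SELECT c.conrelid::regclass AS table_name,
+ *	       c.conname, c.contype,
+ *	       c.conindid::regclass AS index_name
+ *	FROM pg_constraint c
+ *	WHERE c.contype IN ('p', 'u', 'x');
+ */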
+
|
|
+/*
|
|
+ * getConstraints
|
|
+ *
|
|
+ * Get info about constraints on dumpable tables.
|
|
+ *
|
|
+ * Currently handles foreign keys only.
|
|
+ * Unique and primary key constraints are handled with indexes,
|
|
+ * while check constraints are processed in getTableAttrs().
|
|
+ */
|
|
+void
|
|
+getConstraints(Archive *fout, TableInfo tblinfo[], int numTables)
|
|
+{
|
|
+ int i,
|
|
+ j;
|
|
+ ConstraintInfo *constrinfo;
|
|
+ PQExpBuffer query;
|
|
+ PGresult *res;
|
|
+ int i_contableoid,
|
|
+ i_conoid,
|
|
+ i_conname,
|
|
+ i_confrelid,
|
|
+ i_condef;
|
|
+ int ntups;
|
|
+
|
|
+ /* pg_constraint was created in 7.3, so nothing to do if older */
|
|
+ if (fout->remoteVersion < 70300)
|
|
+ return;
|
|
+
|
|
+ query = createPQExpBuffer();
|
|
+
|
|
+ for (i = 0; i < numTables; i++)
|
|
+ {
|
|
+ TableInfo *tbinfo = &tblinfo[i];
|
|
+
|
|
+ if (!tbinfo->hastriggers || !tbinfo->dobj.dump)
|
|
+ continue;
|
|
+
|
|
+ if (g_verbose)
|
|
+ write_msg(NULL, "reading foreign key constraints for table \"%s\"\n",
|
|
+ tbinfo->dobj.name);
|
|
+
|
|
+ /*
|
|
+ * select table schema to ensure constraint expr is qualified if
|
|
+ * needed
|
|
+ */
|
|
+ selectSourceSchema(fout, tbinfo->dobj.namespace->dobj.name);
|
|
+
|
|
+ resetPQExpBuffer(query);
|
|
+ appendPQExpBuffer(query,
|
|
+ "SELECT tableoid, oid, conname, confrelid, "
|
|
+ "pg_catalog.pg_get_constraintdef(oid) AS condef "
|
|
+ "FROM pg_catalog.pg_constraint "
|
|
+ "WHERE conrelid = '%u'::pg_catalog.oid "
|
|
+ "AND contype = 'f'",
|
|
+ tbinfo->dobj.catId.oid);
|
|
+ res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
|
|
+
|
|
+ ntups = PQntuples(res);
|
|
+
|
|
+ i_contableoid = PQfnumber(res, "tableoid");
|
|
+ i_conoid = PQfnumber(res, "oid");
|
|
+ i_conname = PQfnumber(res, "conname");
|
|
+ i_confrelid = PQfnumber(res, "confrelid");
|
|
+ i_condef = PQfnumber(res, "condef");
|
|
+
|
|
+ constrinfo = (ConstraintInfo *) pg_malloc(ntups * sizeof(ConstraintInfo));
|
|
+
|
|
+ for (j = 0; j < ntups; j++)
|
|
+ {
|
|
+ constrinfo[j].dobj.objType = DO_FK_CONSTRAINT;
|
|
+ constrinfo[j].dobj.catId.tableoid = atooid(PQgetvalue(res, j, i_contableoid));
|
|
+ constrinfo[j].dobj.catId.oid = atooid(PQgetvalue(res, j, i_conoid));
|
|
+ AssignDumpId(&constrinfo[j].dobj);
|
|
+ constrinfo[j].dobj.name = pg_strdup(PQgetvalue(res, j, i_conname));
|
|
+ constrinfo[j].dobj.namespace = tbinfo->dobj.namespace;
|
|
+ constrinfo[j].contable = tbinfo;
|
|
+ constrinfo[j].condomain = NULL;
|
|
+ constrinfo[j].contype = 'f';
|
|
+ constrinfo[j].condef = pg_strdup(PQgetvalue(res, j, i_condef));
|
|
+ constrinfo[j].confrelid = atooid(PQgetvalue(res, j, i_confrelid));
|
|
+ constrinfo[j].conindex = 0;
|
|
+ constrinfo[j].condeferrable = false;
|
|
+ constrinfo[j].condeferred = false;
|
|
+ constrinfo[j].conislocal = true;
|
|
+ constrinfo[j].separate = true;
|
|
+ }
|
|
+
|
|
+ PQclear(res);
|
|
+ }
|
|
+
|
|
+ destroyPQExpBuffer(query);
|
|
+}
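+
+/*
+ * Illustrative sketch only: the per-table foreign-key lookup above boils down
+ * to a query of this shape ("mytable" is a placeholder name), which is easy
+ * to try from psql:
+ *
+ *	SELECT conname, pg_catalog.pg_get_constraintdef(oid) AS condef
+ *	FROM pg_catalog.pg_constraint
+ *	WHERE conrelid = 'mytable'::regclass AND contype = 'f';
+ */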
+
|
|
+/*
|
|
+ * getDomainConstraints
|
|
+ *
|
|
+ * Get info about constraints on a domain.
|
|
+ */
|
|
+static void
|
|
+getDomainConstraints(Archive *fout, TypeInfo *tyinfo)
|
|
+{
|
|
+ int i;
|
|
+ ConstraintInfo *constrinfo;
|
|
+ PQExpBuffer query;
|
|
+ PGresult *res;
|
|
+ int i_tableoid,
|
|
+ i_oid,
|
|
+ i_conname,
|
|
+ i_consrc;
|
|
+ int ntups;
|
|
+
|
|
+ /* pg_constraint was created in 7.3, so nothing to do if older */
|
|
+ if (fout->remoteVersion < 70300)
|
|
+ return;
|
|
+
|
|
+ /*
|
|
+ * select appropriate schema to ensure names in constraint are properly
|
|
+ * qualified
|
|
+ */
|
|
+ selectSourceSchema(fout, tyinfo->dobj.namespace->dobj.name);
|
|
+
|
|
+ query = createPQExpBuffer();
|
|
+
|
|
+ if (fout->remoteVersion >= 90100)
|
|
+ appendPQExpBuffer(query, "SELECT tableoid, oid, conname, "
|
|
+ "pg_catalog.pg_get_constraintdef(oid) AS consrc, "
|
|
+ "convalidated "
|
|
+ "FROM pg_catalog.pg_constraint "
|
|
+ "WHERE contypid = '%u'::pg_catalog.oid "
|
|
+ "ORDER BY conname",
|
|
+ tyinfo->dobj.catId.oid);
|
|
+
|
|
+ else if (fout->remoteVersion >= 70400)
|
|
+ appendPQExpBuffer(query, "SELECT tableoid, oid, conname, "
|
|
+ "pg_catalog.pg_get_constraintdef(oid) AS consrc, "
|
|
+ "true as convalidated "
|
|
+ "FROM pg_catalog.pg_constraint "
|
|
+ "WHERE contypid = '%u'::pg_catalog.oid "
|
|
+ "ORDER BY conname",
|
|
+ tyinfo->dobj.catId.oid);
|
|
+ else
|
|
+ appendPQExpBuffer(query, "SELECT tableoid, oid, conname, "
|
|
+ "'CHECK (' || consrc || ')' AS consrc, "
|
|
+ "true as convalidated "
|
|
+ "FROM pg_catalog.pg_constraint "
|
|
+ "WHERE contypid = '%u'::pg_catalog.oid "
|
|
+ "ORDER BY conname",
|
|
+ tyinfo->dobj.catId.oid);
|
|
+
|
|
+ res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
|
|
+
|
|
+ ntups = PQntuples(res);
|
|
+
|
|
+ i_tableoid = PQfnumber(res, "tableoid");
|
|
+ i_oid = PQfnumber(res, "oid");
|
|
+ i_conname = PQfnumber(res, "conname");
|
|
+ i_consrc = PQfnumber(res, "consrc");
|
|
+
|
|
+ constrinfo = (ConstraintInfo *) pg_malloc(ntups * sizeof(ConstraintInfo));
|
|
+
|
|
+ tyinfo->nDomChecks = ntups;
|
|
+ tyinfo->domChecks = constrinfo;
|
|
+
|
|
+ for (i = 0; i < ntups; i++)
|
|
+ {
|
|
+ bool validated = PQgetvalue(res, i, 4)[0] == 't';
|
|
+
|
|
+ constrinfo[i].dobj.objType = DO_CONSTRAINT;
|
|
+ constrinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
|
|
+ constrinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
|
|
+ AssignDumpId(&constrinfo[i].dobj);
|
|
+ constrinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_conname));
|
|
+ constrinfo[i].dobj.namespace = tyinfo->dobj.namespace;
|
|
+ constrinfo[i].contable = NULL;
|
|
+ constrinfo[i].condomain = tyinfo;
|
|
+ constrinfo[i].contype = 'c';
|
|
+ constrinfo[i].condef = pg_strdup(PQgetvalue(res, i, i_consrc));
|
|
+ constrinfo[i].confrelid = InvalidOid;
|
|
+ constrinfo[i].conindex = 0;
|
|
+ constrinfo[i].condeferrable = false;
|
|
+ constrinfo[i].condeferred = false;
|
|
+ constrinfo[i].conislocal = true;
|
|
+
|
|
+ constrinfo[i].separate = !validated;
|
|
+
|
|
+ /*
|
|
+ * Make the domain depend on the constraint, ensuring it won't be
|
|
+ * output till any constraint dependencies are OK. If the constraint
|
|
+ * has not been validated, it's going to be dumped after the domain
|
|
+ * anyway, so this doesn't matter.
|
|
+ */
|
|
+ if (validated)
|
|
+ addObjectDependency(&tyinfo->dobj,
|
|
+ constrinfo[i].dobj.dumpId);
|
|
+ }
|
|
+
|
|
+ PQclear(res);
|
|
+
|
|
+ destroyPQExpBuffer(query);
|
|
+}
|
|
+
|
|
+/*
|
|
+ * getRules
|
|
+ * get basic information about every rule in the system
|
|
+ *
|
|
+ * numRules is set to the number of rules read in
|
|
+ */
|
|
+RuleInfo *
|
|
+getRules(Archive *fout, int *numRules)
|
|
+{
|
|
+ PGresult *res;
|
|
+ int ntups;
|
|
+ int i;
|
|
+ PQExpBuffer query = createPQExpBuffer();
|
|
+ RuleInfo *ruleinfo;
|
|
+ int i_tableoid;
|
|
+ int i_oid;
|
|
+ int i_rulename;
|
|
+ int i_ruletable;
|
|
+ int i_ev_type;
|
|
+ int i_is_instead;
|
|
+ int i_ev_enabled;
|
|
+
|
|
+ /* Make sure we are in proper schema */
|
|
+ selectSourceSchema(fout, "pg_catalog");
|
|
+
|
|
+ if (fout->remoteVersion >= 80300)
|
|
+ {
|
|
+ appendPQExpBufferStr(query, "SELECT "
|
|
+ "tableoid, oid, rulename, "
|
|
+ "ev_class AS ruletable, ev_type, is_instead, "
|
|
+ "ev_enabled "
|
|
+ "FROM pg_rewrite "
|
|
+ "ORDER BY oid");
|
|
+ }
|
|
+ else if (fout->remoteVersion >= 70100)
|
|
+ {
|
|
+ appendPQExpBufferStr(query, "SELECT "
|
|
+ "tableoid, oid, rulename, "
|
|
+ "ev_class AS ruletable, ev_type, is_instead, "
|
|
+ "'O'::char AS ev_enabled "
|
|
+ "FROM pg_rewrite "
|
|
+ "ORDER BY oid");
|
|
+ }
|
|
+ else
|
|
+ {
|
|
+ appendPQExpBufferStr(query, "SELECT "
|
|
+ "(SELECT oid FROM pg_class WHERE relname = 'pg_rewrite') AS tableoid, "
|
|
+ "oid, rulename, "
|
|
+ "ev_class AS ruletable, ev_type, is_instead, "
|
|
+ "'O'::char AS ev_enabled "
|
|
+ "FROM pg_rewrite "
|
|
+ "ORDER BY oid");
|
|
+ }
|
|
+
|
|
+ res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
|
|
+
|
|
+ ntups = PQntuples(res);
|
|
+
|
|
+ *numRules = ntups;
|
|
+
|
|
+ ruleinfo = (RuleInfo *) pg_malloc(ntups * sizeof(RuleInfo));
|
|
+
|
|
+ i_tableoid = PQfnumber(res, "tableoid");
|
|
+ i_oid = PQfnumber(res, "oid");
|
|
+ i_rulename = PQfnumber(res, "rulename");
|
|
+ i_ruletable = PQfnumber(res, "ruletable");
|
|
+ i_ev_type = PQfnumber(res, "ev_type");
|
|
+ i_is_instead = PQfnumber(res, "is_instead");
|
|
+ i_ev_enabled = PQfnumber(res, "ev_enabled");
|
|
+
|
|
+ for (i = 0; i < ntups; i++)
|
|
+ {
|
|
+ Oid ruletableoid;
|
|
+
|
|
+ ruleinfo[i].dobj.objType = DO_RULE;
|
|
+ ruleinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
|
|
+ ruleinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
|
|
+ AssignDumpId(&ruleinfo[i].dobj);
|
|
+ ruleinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_rulename));
|
|
+ ruletableoid = atooid(PQgetvalue(res, i, i_ruletable));
|
|
+ ruleinfo[i].ruletable = findTableByOid(ruletableoid);
|
|
+ if (ruleinfo[i].ruletable == NULL)
|
|
+ exit_horribly(NULL, "failed sanity check, parent table OID %u of pg_rewrite entry OID %u not found\n",
|
|
+ ruletableoid, ruleinfo[i].dobj.catId.oid);
|
|
+ ruleinfo[i].dobj.namespace = ruleinfo[i].ruletable->dobj.namespace;
|
|
+ ruleinfo[i].dobj.dump = ruleinfo[i].ruletable->dobj.dump;
|
|
+ ruleinfo[i].ev_type = *(PQgetvalue(res, i, i_ev_type));
|
|
+ ruleinfo[i].is_instead = *(PQgetvalue(res, i, i_is_instead)) == 't';
|
|
+ ruleinfo[i].ev_enabled = *(PQgetvalue(res, i, i_ev_enabled));
|
|
+ if (ruleinfo[i].ruletable)
|
|
+ {
|
|
+ /*
|
|
+ * If the table is a view or materialized view, force its ON
|
|
+ * SELECT rule to be sorted before the view itself --- this
|
|
+ * ensures that any dependencies for the rule affect the table's
|
|
+ * positioning. Other rules are forced to appear after their
|
|
+ * table.
|
|
+ */
|
|
+ if ((ruleinfo[i].ruletable->relkind == RELKIND_VIEW ||
|
|
+ ruleinfo[i].ruletable->relkind == RELKIND_MATVIEW) &&
|
|
+ ruleinfo[i].ev_type == '1' && ruleinfo[i].is_instead)
|
|
+ {
|
|
+ addObjectDependency(&ruleinfo[i].ruletable->dobj,
|
|
+ ruleinfo[i].dobj.dumpId);
|
|
+ /* We'll merge the rule into CREATE VIEW, if possible */
|
|
+ ruleinfo[i].separate = false;
|
|
+ }
|
|
+ else
|
|
+ {
|
|
+ addObjectDependency(&ruleinfo[i].dobj,
|
|
+ ruleinfo[i].ruletable->dobj.dumpId);
|
|
+ ruleinfo[i].separate = true;
|
|
+ }
|
|
+ }
|
|
+ else
|
|
+ ruleinfo[i].separate = true;
|
|
+
|
|
+ /*
|
|
+ * If we're forced to break a dependency loop by dumping a view as a
|
|
+ * table and separate _RETURN rule, we'll move the view's reloptions
|
|
+ * to the rule. (This is necessary because tables and views have
|
|
+ * different valid reloptions, so we can't apply the options until the
|
|
+ * backend knows it's a view.) Otherwise the rule's reloptions stay
|
|
+ * NULL.
|
|
+ */
|
|
+ ruleinfo[i].reloptions = NULL;
|
|
+ }
|
|
+
|
|
+ PQclear(res);
|
|
+
|
|
+ destroyPQExpBuffer(query);
|
|
+
|
|
+ return ruleinfo;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * getTriggers
|
|
+ * get information about every trigger on a dumpable table
|
|
+ *
|
|
+ * Note: trigger data is not returned directly to the caller, but it
|
|
+ * does get entered into the DumpableObject tables.
|
|
+ */
|
|
+void
|
|
+getTriggers(Archive *fout, TableInfo tblinfo[], int numTables)
|
|
+{
|
|
+ int i,
|
|
+ j;
|
|
+ PQExpBuffer query = createPQExpBuffer();
|
|
+ PGresult *res;
|
|
+ TriggerInfo *tginfo;
|
|
+ int i_tableoid,
|
|
+ i_oid,
|
|
+ i_tgname,
|
|
+ i_tgfname,
|
|
+ i_tgtype,
|
|
+ i_tgnargs,
|
|
+ i_tgargs,
|
|
+ i_tgisconstraint,
|
|
+ i_tgconstrname,
|
|
+ i_tgconstrrelid,
|
|
+ i_tgconstrrelname,
|
|
+ i_tgenabled,
|
|
+ i_tgdeferrable,
|
|
+ i_tginitdeferred,
|
|
+ i_tgdef;
|
|
+ int ntups;
|
|
+
|
|
+ for (i = 0; i < numTables; i++)
|
|
+ {
|
|
+ TableInfo *tbinfo = &tblinfo[i];
|
|
+
|
|
+ if (!tbinfo->hastriggers || !tbinfo->dobj.dump)
|
|
+ continue;
|
|
+
|
|
+ if (g_verbose)
|
|
+ write_msg(NULL, "reading triggers for table \"%s\"\n",
|
|
+ tbinfo->dobj.name);
|
|
+
|
|
+ /*
|
|
+ * select table schema to ensure regproc name is qualified if needed
|
|
+ */
|
|
+ selectSourceSchema(fout, tbinfo->dobj.namespace->dobj.name);
|
|
+
|
|
+ resetPQExpBuffer(query);
|
|
+ if (fout->remoteVersion >= 90000)
|
|
+ {
|
|
+ /*
|
|
+ * NB: think not to use pretty=true in pg_get_triggerdef. It
|
|
+ * could result in non-forward-compatible dumps of WHEN clauses
|
|
+ * due to under-parenthesization.
|
|
+ */
|
|
+ appendPQExpBuffer(query,
|
|
+ "SELECT tgname, "
|
|
+ "tgfoid::pg_catalog.regproc AS tgfname, "
|
|
+ "pg_catalog.pg_get_triggerdef(oid, false) AS tgdef, "
|
|
+ "tgenabled, tableoid, oid "
|
|
+ "FROM pg_catalog.pg_trigger t "
|
|
+ "WHERE tgrelid = '%u'::pg_catalog.oid "
|
|
+ "AND NOT tgisinternal",
|
|
+ tbinfo->dobj.catId.oid);
|
|
+ }
|
|
+ else if (fout->remoteVersion >= 80300)
|
|
+ {
|
|
+ /*
|
|
+ * We ignore triggers that are tied to a foreign-key constraint
|
|
+ */
|
|
+ appendPQExpBuffer(query,
|
|
+ "SELECT tgname, "
|
|
+ "tgfoid::pg_catalog.regproc AS tgfname, "
|
|
+ "tgtype, tgnargs, tgargs, tgenabled, "
|
|
+ "tgisconstraint, tgconstrname, tgdeferrable, "
|
|
+ "tgconstrrelid, tginitdeferred, tableoid, oid, "
|
|
+ "tgconstrrelid::pg_catalog.regclass AS tgconstrrelname "
|
|
+ "FROM pg_catalog.pg_trigger t "
|
|
+ "WHERE tgrelid = '%u'::pg_catalog.oid "
|
|
+ "AND tgconstraint = 0",
|
|
+ tbinfo->dobj.catId.oid);
|
|
+ }
|
|
+ else if (fout->remoteVersion >= 70300)
|
|
+ {
|
|
+ /*
|
|
+ * We ignore triggers that are tied to a foreign-key constraint,
|
|
+ * but in these versions we have to grovel through pg_constraint
|
|
+ * to find out
|
|
+ */
|
|
+ appendPQExpBuffer(query,
|
|
+ "SELECT tgname, "
|
|
+ "tgfoid::pg_catalog.regproc AS tgfname, "
|
|
+ "tgtype, tgnargs, tgargs, tgenabled, "
|
|
+ "tgisconstraint, tgconstrname, tgdeferrable, "
|
|
+ "tgconstrrelid, tginitdeferred, tableoid, oid, "
|
|
+ "tgconstrrelid::pg_catalog.regclass AS tgconstrrelname "
|
|
+ "FROM pg_catalog.pg_trigger t "
|
|
+ "WHERE tgrelid = '%u'::pg_catalog.oid "
|
|
+ "AND (NOT tgisconstraint "
|
|
+ " OR NOT EXISTS"
|
|
+ " (SELECT 1 FROM pg_catalog.pg_depend d "
|
|
+ " JOIN pg_catalog.pg_constraint c ON (d.refclassid = c.tableoid AND d.refobjid = c.oid) "
|
|
+ " WHERE d.classid = t.tableoid AND d.objid = t.oid AND d.deptype = 'i' AND c.contype = 'f'))",
|
|
+ tbinfo->dobj.catId.oid);
|
|
+ }
|
|
+ else if (fout->remoteVersion >= 70100)
|
|
+ {
|
|
+ appendPQExpBuffer(query,
|
|
+ "SELECT tgname, tgfoid::regproc AS tgfname, "
|
|
+ "tgtype, tgnargs, tgargs, tgenabled, "
|
|
+ "tgisconstraint, tgconstrname, tgdeferrable, "
|
|
+ "tgconstrrelid, tginitdeferred, tableoid, oid, "
|
|
+ "(SELECT relname FROM pg_class WHERE oid = tgconstrrelid) "
|
|
+ " AS tgconstrrelname "
|
|
+ "FROM pg_trigger "
|
|
+ "WHERE tgrelid = '%u'::oid",
|
|
+ tbinfo->dobj.catId.oid);
|
|
+ }
|
|
+ else
|
|
+ {
|
|
+ appendPQExpBuffer(query,
|
|
+ "SELECT tgname, tgfoid::regproc AS tgfname, "
|
|
+ "tgtype, tgnargs, tgargs, tgenabled, "
|
|
+ "tgisconstraint, tgconstrname, tgdeferrable, "
|
|
+ "tgconstrrelid, tginitdeferred, "
|
|
+ "(SELECT oid FROM pg_class WHERE relname = 'pg_trigger') AS tableoid, "
|
|
+ "oid, "
|
|
+ "(SELECT relname FROM pg_class WHERE oid = tgconstrrelid) "
|
|
+ " AS tgconstrrelname "
|
|
+ "FROM pg_trigger "
|
|
+ "WHERE tgrelid = '%u'::oid",
|
|
+ tbinfo->dobj.catId.oid);
|
|
+ }
|
|
+ res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
|
|
+
|
|
+ ntups = PQntuples(res);
|
|
+
|
|
+ i_tableoid = PQfnumber(res, "tableoid");
|
|
+ i_oid = PQfnumber(res, "oid");
|
|
+ i_tgname = PQfnumber(res, "tgname");
|
|
+ i_tgfname = PQfnumber(res, "tgfname");
|
|
+ i_tgtype = PQfnumber(res, "tgtype");
|
|
+ i_tgnargs = PQfnumber(res, "tgnargs");
|
|
+ i_tgargs = PQfnumber(res, "tgargs");
|
|
+ i_tgisconstraint = PQfnumber(res, "tgisconstraint");
|
|
+ i_tgconstrname = PQfnumber(res, "tgconstrname");
|
|
+ i_tgconstrrelid = PQfnumber(res, "tgconstrrelid");
|
|
+ i_tgconstrrelname = PQfnumber(res, "tgconstrrelname");
|
|
+ i_tgenabled = PQfnumber(res, "tgenabled");
|
|
+ i_tgdeferrable = PQfnumber(res, "tgdeferrable");
|
|
+ i_tginitdeferred = PQfnumber(res, "tginitdeferred");
|
|
+ i_tgdef = PQfnumber(res, "tgdef");
|
|
+
|
|
+ tginfo = (TriggerInfo *) pg_malloc(ntups * sizeof(TriggerInfo));
|
|
+
|
|
+ for (j = 0; j < ntups; j++)
|
|
+ {
|
|
+ tginfo[j].dobj.objType = DO_TRIGGER;
|
|
+ tginfo[j].dobj.catId.tableoid = atooid(PQgetvalue(res, j, i_tableoid));
|
|
+ tginfo[j].dobj.catId.oid = atooid(PQgetvalue(res, j, i_oid));
|
|
+ AssignDumpId(&tginfo[j].dobj);
|
|
+ tginfo[j].dobj.name = pg_strdup(PQgetvalue(res, j, i_tgname));
|
|
+ tginfo[j].dobj.namespace = tbinfo->dobj.namespace;
|
|
+ tginfo[j].tgtable = tbinfo;
|
|
+ tginfo[j].tgenabled = *(PQgetvalue(res, j, i_tgenabled));
|
|
+ if (i_tgdef >= 0)
|
|
+ {
|
|
+ tginfo[j].tgdef = pg_strdup(PQgetvalue(res, j, i_tgdef));
|
|
+
|
|
+ /* remaining fields are not valid if we have tgdef */
|
|
+ tginfo[j].tgfname = NULL;
|
|
+ tginfo[j].tgtype = 0;
|
|
+ tginfo[j].tgnargs = 0;
|
|
+ tginfo[j].tgargs = NULL;
|
|
+ tginfo[j].tgisconstraint = false;
|
|
+ tginfo[j].tgdeferrable = false;
|
|
+ tginfo[j].tginitdeferred = false;
|
|
+ tginfo[j].tgconstrname = NULL;
|
|
+ tginfo[j].tgconstrrelid = InvalidOid;
|
|
+ tginfo[j].tgconstrrelname = NULL;
|
|
+ }
|
|
+ else
|
|
+ {
|
|
+ tginfo[j].tgdef = NULL;
|
|
+
|
|
+ tginfo[j].tgfname = pg_strdup(PQgetvalue(res, j, i_tgfname));
|
|
+ tginfo[j].tgtype = atoi(PQgetvalue(res, j, i_tgtype));
|
|
+ tginfo[j].tgnargs = atoi(PQgetvalue(res, j, i_tgnargs));
|
|
+ tginfo[j].tgargs = pg_strdup(PQgetvalue(res, j, i_tgargs));
|
|
+ tginfo[j].tgisconstraint = *(PQgetvalue(res, j, i_tgisconstraint)) == 't';
|
|
+ tginfo[j].tgdeferrable = *(PQgetvalue(res, j, i_tgdeferrable)) == 't';
|
|
+ tginfo[j].tginitdeferred = *(PQgetvalue(res, j, i_tginitdeferred)) == 't';
|
|
+
|
|
+ if (tginfo[j].tgisconstraint)
|
|
+ {
|
|
+ tginfo[j].tgconstrname = pg_strdup(PQgetvalue(res, j, i_tgconstrname));
|
|
+ tginfo[j].tgconstrrelid = atooid(PQgetvalue(res, j, i_tgconstrrelid));
|
|
+ if (OidIsValid(tginfo[j].tgconstrrelid))
|
|
+ {
|
|
+ if (PQgetisnull(res, j, i_tgconstrrelname))
|
|
+ exit_horribly(NULL, "query produced null referenced table name for foreign key trigger \"%s\" on table \"%s\" (OID of table: %u)\n",
|
|
+ tginfo[j].dobj.name,
|
|
+ tbinfo->dobj.name,
|
|
+ tginfo[j].tgconstrrelid);
|
|
+ tginfo[j].tgconstrrelname = pg_strdup(PQgetvalue(res, j, i_tgconstrrelname));
|
|
+ }
|
|
+ else
|
|
+ tginfo[j].tgconstrrelname = NULL;
|
|
+ }
|
|
+ else
|
|
+ {
|
|
+ tginfo[j].tgconstrname = NULL;
|
|
+ tginfo[j].tgconstrrelid = InvalidOid;
|
|
+ tginfo[j].tgconstrrelname = NULL;
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+
|
|
+ PQclear(res);
|
|
+ }
|
|
+
|
|
+ destroyPQExpBuffer(query);
|
|
+}
|
|
+
+/*
+ * getEventTriggers
+ * get information about event triggers
+ */
+EventTriggerInfo *
+getEventTriggers(Archive *fout, int *numEventTriggers)
+{
+ int i;
+ PQExpBuffer query;
+ PGresult *res;
+ EventTriggerInfo *evtinfo;
+ int i_tableoid,
+ i_oid,
+ i_evtname,
+ i_evtevent,
+ i_evtowner,
+ i_evttags,
+ i_evtfname,
+ i_evtenabled;
+ int ntups;
+
+ /* Before 9.3, there are no event triggers */
+ if (fout->remoteVersion < 90300)
+ {
+ *numEventTriggers = 0;
+ return NULL;
+ }
+
+ query = createPQExpBuffer();
+
+ /* Make sure we are in proper schema */
+ selectSourceSchema(fout, "pg_catalog");
+
+ appendPQExpBuffer(query,
+ "SELECT e.tableoid, e.oid, evtname, evtenabled, "
+ "evtevent, (%s evtowner) AS evtowner, "
+ "array_to_string(array("
+ "select quote_literal(x) "
+ " from unnest(evttags) as t(x)), ', ') as evttags, "
+ "e.evtfoid::regproc as evtfname "
+ "FROM pg_event_trigger e "
+ "ORDER BY e.oid",
+ username_subquery);
+
+ res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
+
+ ntups = PQntuples(res);
+
+ *numEventTriggers = ntups;
+
+ evtinfo = (EventTriggerInfo *) pg_malloc(ntups * sizeof(EventTriggerInfo));
+
+ i_tableoid = PQfnumber(res, "tableoid");
+ i_oid = PQfnumber(res, "oid");
+ i_evtname = PQfnumber(res, "evtname");
+ i_evtevent = PQfnumber(res, "evtevent");
+ i_evtowner = PQfnumber(res, "evtowner");
+ i_evttags = PQfnumber(res, "evttags");
+ i_evtfname = PQfnumber(res, "evtfname");
+ i_evtenabled = PQfnumber(res, "evtenabled");
+
+ for (i = 0; i < ntups; i++)
+ {
+ evtinfo[i].dobj.objType = DO_EVENT_TRIGGER;
+ evtinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
+ evtinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
+ AssignDumpId(&evtinfo[i].dobj);
+ evtinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_evtname));
+ evtinfo[i].evtname = pg_strdup(PQgetvalue(res, i, i_evtname));
+ evtinfo[i].evtevent = pg_strdup(PQgetvalue(res, i, i_evtevent));
+ evtinfo[i].evtowner = pg_strdup(PQgetvalue(res, i, i_evtowner));
+ evtinfo[i].evttags = pg_strdup(PQgetvalue(res, i, i_evttags));
+ evtinfo[i].evtfname = pg_strdup(PQgetvalue(res, i, i_evtfname));
+ evtinfo[i].evtenabled = *(PQgetvalue(res, i, i_evtenabled));
+ }
+
+ PQclear(res);
+
+ destroyPQExpBuffer(query);
+
+ return evtinfo;
+}
+
+/*
+ * getProcLangs
+ * get basic information about every procedural language in the system
+ *
+ * numProcLangs is set to the number of langs read in
+ *
+ * NB: this must run after getFuncs() because we assume we can do
+ * findFuncByOid().
+ */
+ProcLangInfo *
+getProcLangs(Archive *fout, int *numProcLangs)
+{
+ PGresult *res;
+ int ntups;
+ int i;
+ PQExpBuffer query = createPQExpBuffer();
+ ProcLangInfo *planginfo;
+ int i_tableoid;
+ int i_oid;
+ int i_lanname;
+ int i_lanpltrusted;
+ int i_lanplcallfoid;
+ int i_laninline;
+ int i_lanvalidator;
+ int i_lanacl;
+ int i_lanowner;
+
+ /* Make sure we are in proper schema */
+ selectSourceSchema(fout, "pg_catalog");
+
+ if (fout->remoteVersion >= 90000)
+ {
+ /* pg_language has a laninline column */
+ appendPQExpBuffer(query, "SELECT tableoid, oid, "
+ "lanname, lanpltrusted, lanplcallfoid, "
+ "laninline, lanvalidator, lanacl, "
+ "(%s lanowner) AS lanowner "
+ "FROM pg_language "
+ "WHERE lanispl "
+ "ORDER BY oid",
+ username_subquery);
+ }
+ else if (fout->remoteVersion >= 80300)
+ {
+ /* pg_language has a lanowner column */
+ appendPQExpBuffer(query, "SELECT tableoid, oid, "
+ "lanname, lanpltrusted, lanplcallfoid, "
+ "0 AS laninline, lanvalidator, lanacl, "
+ "(%s lanowner) AS lanowner "
+ "FROM pg_language "
+ "WHERE lanispl "
+ "ORDER BY oid",
+ username_subquery);
+ }
+ else if (fout->remoteVersion >= 80100)
+ {
+ /* Languages are owned by the bootstrap superuser, OID 10 */
+ appendPQExpBuffer(query, "SELECT tableoid, oid, "
+ "lanname, lanpltrusted, lanplcallfoid, "
+ "0 AS laninline, lanvalidator, lanacl, "
+ "(%s '10') AS lanowner "
+ "FROM pg_language "
+ "WHERE lanispl "
+ "ORDER BY oid",
+ username_subquery);
+ }
+ else if (fout->remoteVersion >= 70400)
+ {
+ /* Languages are owned by the bootstrap superuser, sysid 1 */
+ appendPQExpBuffer(query, "SELECT tableoid, oid, "
+ "lanname, lanpltrusted, lanplcallfoid, "
+ "0 AS laninline, lanvalidator, lanacl, "
+ "(%s '1') AS lanowner "
+ "FROM pg_language "
+ "WHERE lanispl "
+ "ORDER BY oid",
+ username_subquery);
+ }
+ else if (fout->remoteVersion >= 70300)
+ {
+ /* No clear notion of an owner at all before 7.4 ... */
+ appendPQExpBuffer(query, "SELECT tableoid, oid, "
+ "lanname, lanpltrusted, lanplcallfoid, "
+ "0 AS laninline, lanvalidator, lanacl, "
+ "NULL AS lanowner "
+ "FROM pg_language "
+ "WHERE lanispl "
+ "ORDER BY oid");
+ }
+ else if (fout->remoteVersion >= 70100)
+ {
+ appendPQExpBuffer(query, "SELECT tableoid, oid, "
+ "lanname, lanpltrusted, lanplcallfoid, "
+ "0 AS laninline, 0 AS lanvalidator, NULL AS lanacl, "
+ "NULL AS lanowner "
+ "FROM pg_language "
+ "WHERE lanispl "
+ "ORDER BY oid");
+ }
+ else
+ {
+ appendPQExpBuffer(query, "SELECT "
+ "(SELECT oid FROM pg_class WHERE relname = 'pg_language') AS tableoid, "
+ "oid, "
+ "lanname, lanpltrusted, lanplcallfoid, "
+ "0 AS laninline, 0 AS lanvalidator, NULL AS lanacl, "
+ "NULL AS lanowner "
+ "FROM pg_language "
+ "WHERE lanispl "
+ "ORDER BY oid");
+ }
+
+ res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
+
+ ntups = PQntuples(res);
+
+ *numProcLangs = ntups;
+
+ planginfo = (ProcLangInfo *) pg_malloc(ntups * sizeof(ProcLangInfo));
+
+ i_tableoid = PQfnumber(res, "tableoid");
+ i_oid = PQfnumber(res, "oid");
+ i_lanname = PQfnumber(res, "lanname");
+ i_lanpltrusted = PQfnumber(res, "lanpltrusted");
+ i_lanplcallfoid = PQfnumber(res, "lanplcallfoid");
+ i_laninline = PQfnumber(res, "laninline");
+ i_lanvalidator = PQfnumber(res, "lanvalidator");
+ i_lanacl = PQfnumber(res, "lanacl");
+ i_lanowner = PQfnumber(res, "lanowner");
+
+ for (i = 0; i < ntups; i++)
+ {
+ planginfo[i].dobj.objType = DO_PROCLANG;
+ planginfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
+ planginfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
+ AssignDumpId(&planginfo[i].dobj);
+
+ planginfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_lanname));
+ planginfo[i].lanpltrusted = *(PQgetvalue(res, i, i_lanpltrusted)) == 't';
+ planginfo[i].lanplcallfoid = atooid(PQgetvalue(res, i, i_lanplcallfoid));
+ planginfo[i].laninline = atooid(PQgetvalue(res, i, i_laninline));
+ planginfo[i].lanvalidator = atooid(PQgetvalue(res, i, i_lanvalidator));
+ planginfo[i].lanacl = pg_strdup(PQgetvalue(res, i, i_lanacl));
+ planginfo[i].lanowner = pg_strdup(PQgetvalue(res, i, i_lanowner));
+
+ if (fout->remoteVersion < 70300)
+ {
+ /*
+ * We need to make a dependency to ensure the function will be
+ * dumped first. (In 7.3 and later the regular dependency
+ * mechanism will handle this for us.)
+ */
+ FuncInfo *funcInfo = findFuncByOid(planginfo[i].lanplcallfoid);
+
+ if (funcInfo)
+ addObjectDependency(&planginfo[i].dobj,
+ funcInfo->dobj.dumpId);
+ }
+ }
+
+ PQclear(res);
+
+ destroyPQExpBuffer(query);
+
+ return planginfo;
+}
+
+/*
+ * getCasts
+ * get basic information about every cast in the system
+ *
+ * numCasts is set to the number of casts read in
+ */
+CastInfo *
+getCasts(Archive *fout, int *numCasts)
+{
+ PGresult *res;
+ int ntups;
+ int i;
+ PQExpBuffer query = createPQExpBuffer();
+ CastInfo *castinfo;
+ int i_tableoid;
+ int i_oid;
+ int i_castsource;
+ int i_casttarget;
+ int i_castfunc;
+ int i_castcontext;
+ int i_castmethod;
+
+ /* Make sure we are in proper schema */
+ selectSourceSchema(fout, "pg_catalog");
+
+ if (fout->remoteVersion >= 80400)
+ {
+ appendPQExpBufferStr(query, "SELECT tableoid, oid, "
+ "castsource, casttarget, castfunc, castcontext, "
+ "castmethod "
+ "FROM pg_cast ORDER BY 3,4");
+ }
+ else if (fout->remoteVersion >= 70300)
+ {
+ appendPQExpBufferStr(query, "SELECT tableoid, oid, "
+ "castsource, casttarget, castfunc, castcontext, "
+ "CASE WHEN castfunc = 0 THEN 'b' ELSE 'f' END AS castmethod "
+ "FROM pg_cast ORDER BY 3,4");
+ }
+ else
+ {
+ appendPQExpBufferStr(query, "SELECT 0 AS tableoid, p.oid, "
+ "t1.oid AS castsource, t2.oid AS casttarget, "
+ "p.oid AS castfunc, 'e' AS castcontext, "
+ "'f' AS castmethod "
+ "FROM pg_type t1, pg_type t2, pg_proc p "
+ "WHERE p.pronargs = 1 AND "
+ "p.proargtypes[0] = t1.oid AND "
+ "p.prorettype = t2.oid AND p.proname = t2.typname "
+ "ORDER BY 3,4");
+ }
+
+ res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
+
+ ntups = PQntuples(res);
+
+ *numCasts = ntups;
+
+ castinfo = (CastInfo *) pg_malloc(ntups * sizeof(CastInfo));
+
+ i_tableoid = PQfnumber(res, "tableoid");
+ i_oid = PQfnumber(res, "oid");
+ i_castsource = PQfnumber(res, "castsource");
+ i_casttarget = PQfnumber(res, "casttarget");
+ i_castfunc = PQfnumber(res, "castfunc");
+ i_castcontext = PQfnumber(res, "castcontext");
+ i_castmethod = PQfnumber(res, "castmethod");
+
+ for (i = 0; i < ntups; i++)
+ {
+ PQExpBufferData namebuf;
+ TypeInfo *sTypeInfo;
+ TypeInfo *tTypeInfo;
+
+ castinfo[i].dobj.objType = DO_CAST;
+ castinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
+ castinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
+ AssignDumpId(&castinfo[i].dobj);
+ castinfo[i].castsource = atooid(PQgetvalue(res, i, i_castsource));
+ castinfo[i].casttarget = atooid(PQgetvalue(res, i, i_casttarget));
+ castinfo[i].castfunc = atooid(PQgetvalue(res, i, i_castfunc));
+ castinfo[i].castcontext = *(PQgetvalue(res, i, i_castcontext));
+ castinfo[i].castmethod = *(PQgetvalue(res, i, i_castmethod));
+
+ /*
+ * Try to name cast as concatenation of typnames. This is only used
+ * for purposes of sorting. If we fail to find either type, the name
+ * will be an empty string.
+ */
+ initPQExpBuffer(&namebuf);
+ sTypeInfo = findTypeByOid(castinfo[i].castsource);
+ tTypeInfo = findTypeByOid(castinfo[i].casttarget);
+ if (sTypeInfo && tTypeInfo)
+ appendPQExpBuffer(&namebuf, "%s %s",
+ sTypeInfo->dobj.name, tTypeInfo->dobj.name);
+ castinfo[i].dobj.name = namebuf.data;
+
+ if (fout->remoteVersion < 70300 &&
+ OidIsValid(castinfo[i].castfunc))
+ {
+ /*
+ * We need to make a dependency to ensure the function will be
+ * dumped first. (In 7.3 and later the regular dependency
+ * mechanism handles this for us.)
+ */
+ FuncInfo *funcInfo;
+
+ funcInfo = findFuncByOid(castinfo[i].castfunc);
+ if (funcInfo)
+ addObjectDependency(&castinfo[i].dobj,
+ funcInfo->dobj.dumpId);
+ }
+
+ /* Decide whether we want to dump it */
+ selectDumpableCast(&(castinfo[i]));
+ }
+
+ PQclear(res);
+
+ destroyPQExpBuffer(query);
+
+ return castinfo;
+}
+
|
|
+/*
|
|
+ * getTableAttrs -
|
|
+ * for each interesting table, read info about its attributes
|
|
+ * (names, types, default values, CHECK constraints, etc)
|
|
+ *
|
|
+ * This is implemented in a very inefficient way right now, looping
|
|
+ * through the tblinfo and doing a join per table to find the attrs and their
|
|
+ * types. However, because we want type names and so forth to be named
|
|
+ * relative to the schema of each table, we couldn't do it in just one
|
|
+ * query. (Maybe one query per schema?)
|
|
+ *
|
|
+ * modifies tblinfo
|
|
+ */
|
|
+void
|
|
+getTableAttrs(Archive *fout, TableInfo *tblinfo, int numTables)
|
|
+{
|
|
+ int i,
|
|
+ j;
|
|
+ PQExpBuffer q = createPQExpBuffer();
|
|
+ int i_attnum;
|
|
+ int i_attname;
|
|
+ int i_atttypname;
|
|
+ int i_atttypmod;
|
|
+ int i_attstattarget;
|
|
+ int i_attstorage;
|
|
+ int i_typstorage;
|
|
+ int i_attnotnull;
|
|
+ int i_atthasdef;
|
|
+ int i_attisdropped;
|
|
+ int i_attlen;
|
|
+ int i_attalign;
|
|
+ int i_attislocal;
|
|
+ int i_attoptions;
|
|
+ int i_attcollation;
|
|
+ int i_attfdwoptions;
|
|
+ PGresult *res;
|
|
+ int ntups;
|
|
+ bool hasdefaults;
|
|
+
|
|
+ for (i = 0; i < numTables; i++)
|
|
+ {
|
|
+ TableInfo *tbinfo = &tblinfo[i];
|
|
+
|
|
+ /* Don't bother to collect info for sequences */
|
|
+ if (tbinfo->relkind == RELKIND_SEQUENCE)
|
|
+ continue;
|
|
+
|
|
+ /* Don't bother with uninteresting tables, either */
|
|
+ if (!tbinfo->interesting)
|
|
+ continue;
|
|
+
|
|
+ /*
|
|
+ * Make sure we are in proper schema for this table; this allows
|
|
+ * correct retrieval of formatted type names and default exprs
|
|
+ */
|
|
+ selectSourceSchema(fout, tbinfo->dobj.namespace->dobj.name);
|
|
+
|
|
+ /* find all the user attributes and their types */
|
|
+
|
|
+ /*
|
|
+ * we must read the attribute names in attribute number order! because
|
|
+ * we will use the attnum to index into the attnames array later. We
|
|
+ * actually ask to order by "attrelid, attnum" because (at least up to
|
|
+ * 7.3) the planner is not smart enough to realize it needn't re-sort
|
|
+ * the output of an indexscan on pg_attribute_relid_attnum_index.
|
|
+ */
|
|
+ if (g_verbose)
|
|
+ write_msg(NULL, "finding the columns and types of table \"%s\"\n",
|
|
+ tbinfo->dobj.name);
|
|
+
|
|
+ resetPQExpBuffer(q);
|
|
+
|
|
+ if (fout->remoteVersion >= 90200)
|
|
+ {
|
|
+ /*
|
|
+ * attfdwoptions is new in 9.2.
|
|
+ */
|
|
+ appendPQExpBuffer(q, "SELECT a.attnum, a.attname, a.atttypmod, "
|
|
+ "a.attstattarget, a.attstorage, t.typstorage, "
|
|
+ "a.attnotnull, a.atthasdef, a.attisdropped, "
|
|
+ "a.attlen, a.attalign, a.attislocal, "
|
|
+ "pg_catalog.format_type(t.oid,a.atttypmod) AS atttypname, "
|
|
+ "array_to_string(a.attoptions, ', ') AS attoptions, "
|
|
+ "CASE WHEN a.attcollation <> t.typcollation "
|
|
+ "THEN a.attcollation ELSE 0 END AS attcollation, "
|
|
+ "pg_catalog.array_to_string(ARRAY("
|
|
+ "SELECT pg_catalog.quote_ident(option_name) || "
|
|
+ "' ' || pg_catalog.quote_literal(option_value) "
|
|
+ "FROM pg_catalog.pg_options_to_table(attfdwoptions) "
|
|
+ "ORDER BY option_name"
|
|
+ "), E',\n ') AS attfdwoptions "
|
|
+ "FROM pg_catalog.pg_attribute a LEFT JOIN pg_catalog.pg_type t "
|
|
+ "ON a.atttypid = t.oid "
|
|
+ "WHERE a.attrelid = '%u'::pg_catalog.oid "
|
|
+ "AND a.attnum > 0::pg_catalog.int2 "
|
|
+ "ORDER BY a.attrelid, a.attnum",
|
|
+ tbinfo->dobj.catId.oid);
|
|
+ }
|
|
+ else if (fout->remoteVersion >= 90100)
|
|
+ {
|
|
+ /*
|
|
+ * attcollation is new in 9.1. Since we only want to dump COLLATE
|
|
+ * clauses for attributes whose collation is different from their
|
|
+ * type's default, we use a CASE here to suppress uninteresting
|
|
+ * attcollations cheaply.
|
|
+ */
|
|
+ appendPQExpBuffer(q, "SELECT a.attnum, a.attname, a.atttypmod, "
|
|
+ "a.attstattarget, a.attstorage, t.typstorage, "
|
|
+ "a.attnotnull, a.atthasdef, a.attisdropped, "
|
|
+ "a.attlen, a.attalign, a.attislocal, "
|
|
+ "pg_catalog.format_type(t.oid,a.atttypmod) AS atttypname, "
|
|
+ "array_to_string(a.attoptions, ', ') AS attoptions, "
|
|
+ "CASE WHEN a.attcollation <> t.typcollation "
|
|
+ "THEN a.attcollation ELSE 0 END AS attcollation, "
|
|
+ "NULL AS attfdwoptions "
|
|
+ "FROM pg_catalog.pg_attribute a LEFT JOIN pg_catalog.pg_type t "
|
|
+ "ON a.atttypid = t.oid "
|
|
+ "WHERE a.attrelid = '%u'::pg_catalog.oid "
|
|
+ "AND a.attnum > 0::pg_catalog.int2 "
|
|
+ "ORDER BY a.attrelid, a.attnum",
|
|
+ tbinfo->dobj.catId.oid);
|
|
+ }
|
|
+ else if (fout->remoteVersion >= 90000)
|
|
+ {
|
|
+ /* attoptions is new in 9.0 */
|
|
+ appendPQExpBuffer(q, "SELECT a.attnum, a.attname, a.atttypmod, "
|
|
+ "a.attstattarget, a.attstorage, t.typstorage, "
|
|
+ "a.attnotnull, a.atthasdef, a.attisdropped, "
|
|
+ "a.attlen, a.attalign, a.attislocal, "
|
|
+ "pg_catalog.format_type(t.oid,a.atttypmod) AS atttypname, "
|
|
+ "array_to_string(a.attoptions, ', ') AS attoptions, "
|
|
+ "0 AS attcollation, "
|
|
+ "NULL AS attfdwoptions "
|
|
+ "FROM pg_catalog.pg_attribute a LEFT JOIN pg_catalog.pg_type t "
|
|
+ "ON a.atttypid = t.oid "
|
|
+ "WHERE a.attrelid = '%u'::pg_catalog.oid "
|
|
+ "AND a.attnum > 0::pg_catalog.int2 "
|
|
+ "ORDER BY a.attrelid, a.attnum",
|
|
+ tbinfo->dobj.catId.oid);
|
|
+ }
|
|
+ else if (fout->remoteVersion >= 70300)
|
|
+ {
|
|
+ /* need left join here to not fail on dropped columns ... */
|
|
+ appendPQExpBuffer(q, "SELECT a.attnum, a.attname, a.atttypmod, "
|
|
+ "a.attstattarget, a.attstorage, t.typstorage, "
|
|
+ "a.attnotnull, a.atthasdef, a.attisdropped, "
|
|
+ "a.attlen, a.attalign, a.attislocal, "
|
|
+ "pg_catalog.format_type(t.oid,a.atttypmod) AS atttypname, "
|
|
+ "'' AS attoptions, 0 AS attcollation, "
|
|
+ "NULL AS attfdwoptions "
|
|
+ "FROM pg_catalog.pg_attribute a LEFT JOIN pg_catalog.pg_type t "
|
|
+ "ON a.atttypid = t.oid "
|
|
+ "WHERE a.attrelid = '%u'::pg_catalog.oid "
|
|
+ "AND a.attnum > 0::pg_catalog.int2 "
|
|
+ "ORDER BY a.attrelid, a.attnum",
|
|
+ tbinfo->dobj.catId.oid);
|
|
+ }
|
|
+ else if (fout->remoteVersion >= 70100)
|
|
+ {
|
|
+ /*
|
|
+ * attstattarget doesn't exist in 7.1. It does exist in 7.2, but
|
|
+ * we don't dump it because we can't tell whether it's been
|
|
+ * explicitly set or was just a default.
|
|
+ *
|
|
+ * attislocal doesn't exist before 7.3, either; in older databases
|
|
+ * we assume it's TRUE, else we'd fail to dump non-inherited atts.
|
|
+ */
|
|
+ appendPQExpBuffer(q, "SELECT a.attnum, a.attname, a.atttypmod, "
|
|
+ "-1 AS attstattarget, a.attstorage, "
|
|
+ "t.typstorage, a.attnotnull, a.atthasdef, "
|
|
+ "false AS attisdropped, a.attlen, "
|
|
+ "a.attalign, true AS attislocal, "
|
|
+ "format_type(t.oid,a.atttypmod) AS atttypname, "
|
|
+ "'' AS attoptions, 0 AS attcollation, "
|
|
+ "NULL AS attfdwoptions "
|
|
+ "FROM pg_attribute a LEFT JOIN pg_type t "
|
|
+ "ON a.atttypid = t.oid "
|
|
+ "WHERE a.attrelid = '%u'::oid "
|
|
+ "AND a.attnum > 0::int2 "
|
|
+ "ORDER BY a.attrelid, a.attnum",
|
|
+ tbinfo->dobj.catId.oid);
|
|
+ }
|
|
+ else
|
|
+ {
|
|
+ /* format_type not available before 7.1 */
|
|
+ appendPQExpBuffer(q, "SELECT attnum, attname, atttypmod, "
|
|
+ "-1 AS attstattarget, "
|
|
+ "attstorage, attstorage AS typstorage, "
|
|
+ "attnotnull, atthasdef, false AS attisdropped, "
|
|
+ "attlen, attalign, "
|
|
+ "true AS attislocal, "
|
|
+ "(SELECT typname FROM pg_type WHERE oid = atttypid) AS atttypname, "
|
|
+ "'' AS attoptions, 0 AS attcollation, "
|
|
+ "NULL AS attfdwoptions "
|
|
+ "FROM pg_attribute a "
|
|
+ "WHERE attrelid = '%u'::oid "
|
|
+ "AND attnum > 0::int2 "
|
|
+ "ORDER BY attrelid, attnum",
|
|
+ tbinfo->dobj.catId.oid);
|
|
+ }
|
|
+
|
|
+ res = ExecuteSqlQuery(fout, q->data, PGRES_TUPLES_OK);
|
|
+
|
|
+ ntups = PQntuples(res);
|
|
+
|
|
+ i_attnum = PQfnumber(res, "attnum");
|
|
+ i_attname = PQfnumber(res, "attname");
|
|
+ i_atttypname = PQfnumber(res, "atttypname");
|
|
+ i_atttypmod = PQfnumber(res, "atttypmod");
|
|
+ i_attstattarget = PQfnumber(res, "attstattarget");
|
|
+ i_attstorage = PQfnumber(res, "attstorage");
|
|
+ i_typstorage = PQfnumber(res, "typstorage");
|
|
+ i_attnotnull = PQfnumber(res, "attnotnull");
|
|
+ i_atthasdef = PQfnumber(res, "atthasdef");
|
|
+ i_attisdropped = PQfnumber(res, "attisdropped");
|
|
+ i_attlen = PQfnumber(res, "attlen");
|
|
+ i_attalign = PQfnumber(res, "attalign");
|
|
+ i_attislocal = PQfnumber(res, "attislocal");
|
|
+ i_attoptions = PQfnumber(res, "attoptions");
|
|
+ i_attcollation = PQfnumber(res, "attcollation");
|
|
+ i_attfdwoptions = PQfnumber(res, "attfdwoptions");
|
|
+
|
|
+ tbinfo->numatts = ntups;
|
|
+ tbinfo->attnames = (char **) pg_malloc(ntups * sizeof(char *));
|
|
+ tbinfo->atttypnames = (char **) pg_malloc(ntups * sizeof(char *));
|
|
+ tbinfo->atttypmod = (int *) pg_malloc(ntups * sizeof(int));
|
|
+ tbinfo->attstattarget = (int *) pg_malloc(ntups * sizeof(int));
|
|
+ tbinfo->attstorage = (char *) pg_malloc(ntups * sizeof(char));
|
|
+ tbinfo->typstorage = (char *) pg_malloc(ntups * sizeof(char));
|
|
+ tbinfo->attisdropped = (bool *) pg_malloc(ntups * sizeof(bool));
|
|
+ tbinfo->attlen = (int *) pg_malloc(ntups * sizeof(int));
|
|
+ tbinfo->attalign = (char *) pg_malloc(ntups * sizeof(char));
|
|
+ tbinfo->attislocal = (bool *) pg_malloc(ntups * sizeof(bool));
|
|
+ tbinfo->attoptions = (char **) pg_malloc(ntups * sizeof(char *));
|
|
+ tbinfo->attcollation = (Oid *) pg_malloc(ntups * sizeof(Oid));
|
|
+ tbinfo->attfdwoptions = (char **) pg_malloc(ntups * sizeof(char *));
|
|
+ tbinfo->notnull = (bool *) pg_malloc(ntups * sizeof(bool));
|
|
+ tbinfo->inhNotNull = (bool *) pg_malloc(ntups * sizeof(bool));
|
|
+ tbinfo->attrdefs = (AttrDefInfo **) pg_malloc(ntups * sizeof(AttrDefInfo *));
|
|
+ hasdefaults = false;
|
|
+
|
|
+ for (j = 0; j < ntups; j++)
|
|
+ {
|
|
+ if (j + 1 != atoi(PQgetvalue(res, j, i_attnum)))
|
|
+ exit_horribly(NULL,
|
|
+ "invalid column numbering in table \"%s\"\n",
|
|
+ tbinfo->dobj.name);
|
|
+ tbinfo->attnames[j] = pg_strdup(PQgetvalue(res, j, i_attname));
|
|
+ tbinfo->atttypnames[j] = pg_strdup(PQgetvalue(res, j, i_atttypname));
|
|
+ tbinfo->atttypmod[j] = atoi(PQgetvalue(res, j, i_atttypmod));
|
|
+ tbinfo->attstattarget[j] = atoi(PQgetvalue(res, j, i_attstattarget));
|
|
+ tbinfo->attstorage[j] = *(PQgetvalue(res, j, i_attstorage));
|
|
+ tbinfo->typstorage[j] = *(PQgetvalue(res, j, i_typstorage));
|
|
+ tbinfo->attisdropped[j] = (PQgetvalue(res, j, i_attisdropped)[0] == 't');
|
|
+ tbinfo->attlen[j] = atoi(PQgetvalue(res, j, i_attlen));
|
|
+ tbinfo->attalign[j] = *(PQgetvalue(res, j, i_attalign));
|
|
+ tbinfo->attislocal[j] = (PQgetvalue(res, j, i_attislocal)[0] == 't');
|
|
+ tbinfo->notnull[j] = (PQgetvalue(res, j, i_attnotnull)[0] == 't');
|
|
+ tbinfo->attoptions[j] = pg_strdup(PQgetvalue(res, j, i_attoptions));
|
|
+ tbinfo->attcollation[j] = atooid(PQgetvalue(res, j, i_attcollation));
|
|
+ tbinfo->attfdwoptions[j] = pg_strdup(PQgetvalue(res, j, i_attfdwoptions));
|
|
+ tbinfo->attrdefs[j] = NULL; /* fix below */
|
|
+ if (PQgetvalue(res, j, i_atthasdef)[0] == 't')
|
|
+ hasdefaults = true;
|
|
+ /* these flags will be set in flagInhAttrs() */
|
|
+ tbinfo->inhNotNull[j] = false;
|
|
+ }
|
|
+
|
|
+ PQclear(res);
|
|
+
|
|
+ /*
|
|
+ * Get info about column defaults
|
|
+ */
|
|
+ if (hasdefaults)
|
|
+ {
|
|
+ AttrDefInfo *attrdefs;
|
|
+ int numDefaults;
|
|
+
|
|
+ if (g_verbose)
|
|
+ write_msg(NULL, "finding default expressions of table \"%s\"\n",
|
|
+ tbinfo->dobj.name);
|
|
+
|
|
+ resetPQExpBuffer(q);
|
|
+ if (fout->remoteVersion >= 70300)
|
|
+ {
|
|
+ appendPQExpBuffer(q, "SELECT tableoid, oid, adnum, "
|
|
+ "pg_catalog.pg_get_expr(adbin, adrelid) AS adsrc "
|
|
+ "FROM pg_catalog.pg_attrdef "
|
|
+ "WHERE adrelid = '%u'::pg_catalog.oid",
|
|
+ tbinfo->dobj.catId.oid);
|
|
+ }
|
|
+ else if (fout->remoteVersion >= 70200)
|
|
+ {
|
|
+ /* 7.2 did not have OIDs in pg_attrdef */
|
|
+ appendPQExpBuffer(q, "SELECT tableoid, 0 AS oid, adnum, "
|
|
+ "pg_get_expr(adbin, adrelid) AS adsrc "
|
|
+ "FROM pg_attrdef "
|
|
+ "WHERE adrelid = '%u'::oid",
|
|
+ tbinfo->dobj.catId.oid);
|
|
+ }
|
|
+ else if (fout->remoteVersion >= 70100)
|
|
+ {
|
|
+ /* no pg_get_expr, so must rely on adsrc */
|
|
+ appendPQExpBuffer(q, "SELECT tableoid, oid, adnum, adsrc "
|
|
+ "FROM pg_attrdef "
|
|
+ "WHERE adrelid = '%u'::oid",
|
|
+ tbinfo->dobj.catId.oid);
|
|
+ }
|
|
+ else
|
|
+ {
|
|
+ /* no pg_get_expr, no tableoid either */
|
|
+ appendPQExpBuffer(q, "SELECT "
|
|
+ "(SELECT oid FROM pg_class WHERE relname = 'pg_attrdef') AS tableoid, "
|
|
+ "oid, adnum, adsrc "
|
|
+ "FROM pg_attrdef "
|
|
+ "WHERE adrelid = '%u'::oid",
|
|
+ tbinfo->dobj.catId.oid);
|
|
+ }
|
|
+ res = ExecuteSqlQuery(fout, q->data, PGRES_TUPLES_OK);
|
|
+
|
|
+ numDefaults = PQntuples(res);
|
|
+ attrdefs = (AttrDefInfo *) pg_malloc(numDefaults * sizeof(AttrDefInfo));
|
|
+
|
|
+ for (j = 0; j < numDefaults; j++)
|
|
+ {
|
|
+ int adnum;
|
|
+
|
|
+ adnum = atoi(PQgetvalue(res, j, 2));
|
|
+
|
|
+ if (adnum <= 0 || adnum > ntups)
|
|
+ exit_horribly(NULL,
|
|
+ "invalid adnum value %d for table \"%s\"\n",
|
|
+ adnum, tbinfo->dobj.name);
|
|
+
|
|
+ /*
|
|
+ * dropped columns shouldn't have defaults, but just in case,
|
|
+ * ignore 'em
|
|
+ */
|
|
+ if (tbinfo->attisdropped[adnum - 1])
|
|
+ continue;
|
|
+
|
|
+ attrdefs[j].dobj.objType = DO_ATTRDEF;
|
|
+ attrdefs[j].dobj.catId.tableoid = atooid(PQgetvalue(res, j, 0));
|
|
+ attrdefs[j].dobj.catId.oid = atooid(PQgetvalue(res, j, 1));
|
|
+ AssignDumpId(&attrdefs[j].dobj);
|
|
+ attrdefs[j].adtable = tbinfo;
|
|
+ attrdefs[j].adnum = adnum;
|
|
+ attrdefs[j].adef_expr = pg_strdup(PQgetvalue(res, j, 3));
|
|
+
|
|
+ attrdefs[j].dobj.name = pg_strdup(tbinfo->dobj.name);
|
|
+ attrdefs[j].dobj.namespace = tbinfo->dobj.namespace;
|
|
+
|
|
+ attrdefs[j].dobj.dump = tbinfo->dobj.dump;
|
|
+
|
|
+ /*
|
|
+ * Defaults on a VIEW must always be dumped as separate ALTER
|
|
+ * TABLE commands. Defaults on regular tables are dumped as
|
|
+ * part of the CREATE TABLE if possible, which it won't be if
|
|
+ * the column is not going to be emitted explicitly.
|
|
+ */
|
|
+ if (tbinfo->relkind == RELKIND_VIEW)
|
|
+ {
|
|
+ attrdefs[j].separate = true;
|
|
+ /* needed in case pre-7.3 DB: */
|
|
+ addObjectDependency(&attrdefs[j].dobj,
|
|
+ tbinfo->dobj.dumpId);
|
|
+ }
|
|
+ else if (!shouldPrintColumn(tbinfo, adnum - 1))
|
|
+ {
|
|
+ /* column will be suppressed, print default separately */
|
|
+ attrdefs[j].separate = true;
|
|
+ /* needed in case pre-7.3 DB: */
|
|
+ addObjectDependency(&attrdefs[j].dobj,
|
|
+ tbinfo->dobj.dumpId);
|
|
+ }
|
|
+ else
|
|
+ {
|
|
+ attrdefs[j].separate = false;
|
|
+
|
|
+ /*
|
|
+ * Mark the default as needing to appear before the table,
|
|
+ * so that any dependencies it has must be emitted before
|
|
+ * the CREATE TABLE. If this is not possible, we'll
|
|
+ * change to "separate" mode while sorting dependencies.
|
|
+ */
|
|
+ addObjectDependency(&tbinfo->dobj,
|
|
+ attrdefs[j].dobj.dumpId);
|
|
+ }
|
|
+
|
|
+ tbinfo->attrdefs[adnum - 1] = &attrdefs[j];
|
|
+ }
|
|
+ PQclear(res);
|
|
+ }
|
|
+
|
|
+ /*
|
|
+ * Get info about table CHECK constraints
|
|
+ */
|
|
+ if (tbinfo->ncheck > 0)
|
|
+ {
|
|
+ ConstraintInfo *constrs;
|
|
+ int numConstrs;
|
|
+
|
|
+ if (g_verbose)
|
|
+ write_msg(NULL, "finding check constraints for table \"%s\"\n",
|
|
+ tbinfo->dobj.name);
|
|
+
|
|
+ resetPQExpBuffer(q);
|
|
+ if (fout->remoteVersion >= 90200)
|
|
+ {
|
|
+ /*
|
|
+ * convalidated is new in 9.2 (actually, it is there in 9.1,
|
|
+ * but it wasn't ever false for check constraints until 9.2).
|
|
+ */
|
|
+ appendPQExpBuffer(q, "SELECT tableoid, oid, conname, "
|
|
+ "pg_catalog.pg_get_constraintdef(oid) AS consrc, "
|
|
+ "conislocal, convalidated "
|
|
+ "FROM pg_catalog.pg_constraint "
|
|
+ "WHERE conrelid = '%u'::pg_catalog.oid "
|
|
+ " AND contype = 'c' "
|
|
+ "ORDER BY conname",
|
|
+ tbinfo->dobj.catId.oid);
|
|
+ }
|
|
+ else if (fout->remoteVersion >= 80400)
|
|
+ {
|
|
+ /* conislocal is new in 8.4 */
|
|
+ appendPQExpBuffer(q, "SELECT tableoid, oid, conname, "
|
|
+ "pg_catalog.pg_get_constraintdef(oid) AS consrc, "
|
|
+ "conislocal, true AS convalidated "
|
|
+ "FROM pg_catalog.pg_constraint "
|
|
+ "WHERE conrelid = '%u'::pg_catalog.oid "
|
|
+ " AND contype = 'c' "
|
|
+ "ORDER BY conname",
|
|
+ tbinfo->dobj.catId.oid);
|
|
+ }
|
|
+ else if (fout->remoteVersion >= 70400)
|
|
+ {
|
|
+ appendPQExpBuffer(q, "SELECT tableoid, oid, conname, "
|
|
+ "pg_catalog.pg_get_constraintdef(oid) AS consrc, "
|
|
+ "true AS conislocal, true AS convalidated "
|
|
+ "FROM pg_catalog.pg_constraint "
|
|
+ "WHERE conrelid = '%u'::pg_catalog.oid "
|
|
+ " AND contype = 'c' "
|
|
+ "ORDER BY conname",
|
|
+ tbinfo->dobj.catId.oid);
|
|
+ }
|
|
+ else if (fout->remoteVersion >= 70300)
|
|
+ {
|
|
+ /* no pg_get_constraintdef, must use consrc */
|
|
+ appendPQExpBuffer(q, "SELECT tableoid, oid, conname, "
|
|
+ "'CHECK (' || consrc || ')' AS consrc, "
|
|
+ "true AS conislocal, true AS convalidated "
|
|
+ "FROM pg_catalog.pg_constraint "
|
|
+ "WHERE conrelid = '%u'::pg_catalog.oid "
|
|
+ " AND contype = 'c' "
|
|
+ "ORDER BY conname",
|
|
+ tbinfo->dobj.catId.oid);
|
|
+ }
|
|
+ else if (fout->remoteVersion >= 70200)
|
|
+ {
|
|
+ /* 7.2 did not have OIDs in pg_relcheck */
|
|
+ appendPQExpBuffer(q, "SELECT tableoid, 0 AS oid, "
|
|
+ "rcname AS conname, "
|
|
+ "'CHECK (' || rcsrc || ')' AS consrc, "
|
|
+ "true AS conislocal, true AS convalidated "
|
|
+ "FROM pg_relcheck "
|
|
+ "WHERE rcrelid = '%u'::oid "
|
|
+ "ORDER BY rcname",
|
|
+ tbinfo->dobj.catId.oid);
|
|
+ }
|
|
+ else if (fout->remoteVersion >= 70100)
|
|
+ {
|
|
+ appendPQExpBuffer(q, "SELECT tableoid, oid, "
|
|
+ "rcname AS conname, "
|
|
+ "'CHECK (' || rcsrc || ')' AS consrc, "
|
|
+ "true AS conislocal, true AS convalidated "
|
|
+ "FROM pg_relcheck "
|
|
+ "WHERE rcrelid = '%u'::oid "
|
|
+ "ORDER BY rcname",
|
|
+ tbinfo->dobj.catId.oid);
|
|
+ }
|
|
+ else
|
|
+ {
|
|
+ /* no tableoid in 7.0 */
|
|
+ appendPQExpBuffer(q, "SELECT "
|
|
+ "(SELECT oid FROM pg_class WHERE relname = 'pg_relcheck') AS tableoid, "
|
|
+ "oid, rcname AS conname, "
|
|
+ "'CHECK (' || rcsrc || ')' AS consrc, "
|
|
+ "true AS conislocal, true AS convalidated "
|
|
+ "FROM pg_relcheck "
|
|
+ "WHERE rcrelid = '%u'::oid "
|
|
+ "ORDER BY rcname",
|
|
+ tbinfo->dobj.catId.oid);
|
|
+ }
|
|
+ res = ExecuteSqlQuery(fout, q->data, PGRES_TUPLES_OK);
|
|
+
|
|
+ numConstrs = PQntuples(res);
|
|
+ if (numConstrs != tbinfo->ncheck)
|
|
+ {
|
|
+ write_msg(NULL, ngettext("expected %d check constraint on table \"%s\" but found %d\n",
|
|
+ "expected %d check constraints on table \"%s\" but found %d\n",
|
|
+ tbinfo->ncheck),
|
|
+ tbinfo->ncheck, tbinfo->dobj.name, numConstrs);
|
|
+ write_msg(NULL, "(The system catalogs might be corrupted.)\n");
|
|
+ exit_nicely(1);
|
|
+ }
|
|
+
|
|
+ constrs = (ConstraintInfo *) pg_malloc(numConstrs * sizeof(ConstraintInfo));
|
|
+ tbinfo->checkexprs = constrs;
|
|
+
|
|
+ for (j = 0; j < numConstrs; j++)
|
|
+ {
|
|
+ bool validated = PQgetvalue(res, j, 5)[0] == 't';
|
|
+
|
|
+ constrs[j].dobj.objType = DO_CONSTRAINT;
|
|
+ constrs[j].dobj.catId.tableoid = atooid(PQgetvalue(res, j, 0));
|
|
+ constrs[j].dobj.catId.oid = atooid(PQgetvalue(res, j, 1));
|
|
+ AssignDumpId(&constrs[j].dobj);
|
|
+ constrs[j].dobj.name = pg_strdup(PQgetvalue(res, j, 2));
|
|
+ constrs[j].dobj.namespace = tbinfo->dobj.namespace;
|
|
+ constrs[j].contable = tbinfo;
|
|
+ constrs[j].condomain = NULL;
|
|
+ constrs[j].contype = 'c';
|
|
+ constrs[j].condef = pg_strdup(PQgetvalue(res, j, 3));
|
|
+ constrs[j].confrelid = InvalidOid;
|
|
+ constrs[j].conindex = 0;
|
|
+ constrs[j].condeferrable = false;
|
|
+ constrs[j].condeferred = false;
|
|
+ constrs[j].conislocal = (PQgetvalue(res, j, 4)[0] == 't');
|
|
+
|
|
+ /*
|
|
+ * An unvalidated constraint needs to be dumped separately, so
|
|
+ * that potentially-violating existing data is loaded before
|
|
+ * the constraint.
|
|
+ */
|
|
+ constrs[j].separate = !validated;
|
|
+
|
|
+ constrs[j].dobj.dump = tbinfo->dobj.dump;
|
|
+
|
|
+ /*
|
|
+ * Mark the constraint as needing to appear before the table
|
|
+ * --- this is so that any other dependencies of the
|
|
+ * constraint will be emitted before we try to create the
|
|
+ * table. If the constraint is to be dumped separately, it
|
|
+ * will be dumped after data is loaded anyway, so don't do it.
|
|
+ * (There's an automatic dependency in the opposite direction
|
|
+ * anyway, so don't need to add one manually here.)
|
|
+ */
|
|
+ if (!constrs[j].separate)
|
|
+ addObjectDependency(&tbinfo->dobj,
|
|
+ constrs[j].dobj.dumpId);
|
|
+
|
|
+ /*
|
|
+ * If the constraint is inherited, this will be detected later
|
|
+ * (in pre-8.4 databases). We also detect later if the
|
|
+ * constraint must be split out from the table definition.
|
|
+ */
|
|
+ }
|
|
+ PQclear(res);
|
|
+ }
|
|
+ }
|
|
+
|
|
+ destroyPQExpBuffer(q);
|
|
+}
+
+/*
+ * Test whether a column should be printed as part of table's CREATE TABLE.
+ * Column number is zero-based.
+ *
+ * Normally this is always true, but it's false for dropped columns, as well
+ * as those that were inherited without any local definition. (If we print
+ * such a column it will mistakenly get pg_attribute.attislocal set to true.)
+ * However, in binary_upgrade mode, we must print all such columns anyway and
+ * fix the attislocal/attisdropped state later, so as to keep control of the
+ * physical column order.
+ *
+ * This function exists because there are scattered nonobvious places that
+ * must be kept in sync with this decision.
+ */
+bool
+shouldPrintColumn(TableInfo *tbinfo, int colno)
+{
+ if (binary_upgrade)
+ return true;
+ return (tbinfo->attislocal[colno] && !tbinfo->attisdropped[colno]);
+}
+
|
|
+
|
|
+/*
|
|
+ * getTSParsers:
|
|
+ * read all text search parsers in the system catalogs and return them
|
|
+ * in the TSParserInfo* structure
|
|
+ *
|
|
+ * numTSParsers is set to the number of parsers read in
|
|
+ */
|
|
+TSParserInfo *
|
|
+getTSParsers(Archive *fout, int *numTSParsers)
|
|
+{
|
|
+ PGresult *res;
|
|
+ int ntups;
|
|
+ int i;
|
|
+ PQExpBuffer query;
|
|
+ TSParserInfo *prsinfo;
|
|
+ int i_tableoid;
|
|
+ int i_oid;
|
|
+ int i_prsname;
|
|
+ int i_prsnamespace;
|
|
+ int i_prsstart;
|
|
+ int i_prstoken;
|
|
+ int i_prsend;
|
|
+ int i_prsheadline;
|
|
+ int i_prslextype;
|
|
+
|
|
+ /* Before 8.3, there is no built-in text search support */
|
|
+ if (fout->remoteVersion < 80300)
|
|
+ {
|
|
+ *numTSParsers = 0;
|
|
+ return NULL;
|
|
+ }
|
|
+
|
|
+ query = createPQExpBuffer();
|
|
+
|
|
+ /*
|
|
+ * find all text search objects, including builtin ones; we filter out
|
|
+ * system-defined objects at dump-out time.
|
|
+ */
|
|
+
|
|
+ /* Make sure we are in proper schema */
|
|
+ selectSourceSchema(fout, "pg_catalog");
|
|
+
|
|
+ appendPQExpBufferStr(query, "SELECT tableoid, oid, prsname, prsnamespace, "
|
|
+ "prsstart::oid, prstoken::oid, "
|
|
+ "prsend::oid, prsheadline::oid, prslextype::oid "
|
|
+ "FROM pg_ts_parser");
|
|
+
|
|
+ res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
|
|
+
|
|
+ ntups = PQntuples(res);
|
|
+ *numTSParsers = ntups;
|
|
+
|
|
+ prsinfo = (TSParserInfo *) pg_malloc(ntups * sizeof(TSParserInfo));
|
|
+
|
|
+ i_tableoid = PQfnumber(res, "tableoid");
|
|
+ i_oid = PQfnumber(res, "oid");
|
|
+ i_prsname = PQfnumber(res, "prsname");
|
|
+ i_prsnamespace = PQfnumber(res, "prsnamespace");
|
|
+ i_prsstart = PQfnumber(res, "prsstart");
|
|
+ i_prstoken = PQfnumber(res, "prstoken");
|
|
+ i_prsend = PQfnumber(res, "prsend");
|
|
+ i_prsheadline = PQfnumber(res, "prsheadline");
|
|
+ i_prslextype = PQfnumber(res, "prslextype");
|
|
+
|
|
+ for (i = 0; i < ntups; i++)
|
|
+ {
|
|
+ prsinfo[i].dobj.objType = DO_TSPARSER;
|
|
+ prsinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
|
|
+ prsinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
|
|
+ AssignDumpId(&prsinfo[i].dobj);
|
|
+ prsinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_prsname));
|
|
+ prsinfo[i].dobj.namespace =
|
|
+ findNamespace(fout,
|
|
+ atooid(PQgetvalue(res, i, i_prsnamespace)),
|
|
+ prsinfo[i].dobj.catId.oid);
|
|
+ prsinfo[i].prsstart = atooid(PQgetvalue(res, i, i_prsstart));
|
|
+ prsinfo[i].prstoken = atooid(PQgetvalue(res, i, i_prstoken));
|
|
+ prsinfo[i].prsend = atooid(PQgetvalue(res, i, i_prsend));
|
|
+ prsinfo[i].prsheadline = atooid(PQgetvalue(res, i, i_prsheadline));
|
|
+ prsinfo[i].prslextype = atooid(PQgetvalue(res, i, i_prslextype));
|
|
+
|
|
+ /* Decide whether we want to dump it */
|
|
+ selectDumpableObject(&(prsinfo[i].dobj));
|
|
+ }
|
|
+
|
|
+ PQclear(res);
|
|
+
|
|
+ destroyPQExpBuffer(query);
|
|
+
|
|
+ return prsinfo;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * getTSDictionaries:
|
|
+ * read all text search dictionaries in the system catalogs and return them
|
|
+ * in the TSDictInfo* structure
|
|
+ *
|
|
+ * numTSDicts is set to the number of dictionaries read in
|
|
+ */
|
|
+TSDictInfo *
|
|
+getTSDictionaries(Archive *fout, int *numTSDicts)
|
|
+{
|
|
+ PGresult *res;
|
|
+ int ntups;
|
|
+ int i;
|
|
+ PQExpBuffer query;
|
|
+ TSDictInfo *dictinfo;
|
|
+ int i_tableoid;
|
|
+ int i_oid;
|
|
+ int i_dictname;
|
|
+ int i_dictnamespace;
|
|
+ int i_rolname;
|
|
+ int i_dicttemplate;
|
|
+ int i_dictinitoption;
|
|
+
|
|
+ /* Before 8.3, there is no built-in text search support */
|
|
+ if (fout->remoteVersion < 80300)
|
|
+ {
|
|
+ *numTSDicts = 0;
|
|
+ return NULL;
|
|
+ }
|
|
+
|
|
+ query = createPQExpBuffer();
|
|
+
|
|
+ /* Make sure we are in proper schema */
|
|
+ selectSourceSchema(fout, "pg_catalog");
|
|
+
|
|
+ appendPQExpBuffer(query, "SELECT tableoid, oid, dictname, "
|
|
+ "dictnamespace, (%s dictowner) AS rolname, "
|
|
+ "dicttemplate, dictinitoption "
|
|
+ "FROM pg_ts_dict",
|
|
+ username_subquery);
|
|
+
|
|
+ res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
|
|
+
|
|
+ ntups = PQntuples(res);
|
|
+ *numTSDicts = ntups;
|
|
+
|
|
+ dictinfo = (TSDictInfo *) pg_malloc(ntups * sizeof(TSDictInfo));
|
|
+
|
|
+ i_tableoid = PQfnumber(res, "tableoid");
|
|
+ i_oid = PQfnumber(res, "oid");
|
|
+ i_dictname = PQfnumber(res, "dictname");
|
|
+ i_dictnamespace = PQfnumber(res, "dictnamespace");
|
|
+ i_rolname = PQfnumber(res, "rolname");
|
|
+ i_dictinitoption = PQfnumber(res, "dictinitoption");
|
|
+ i_dicttemplate = PQfnumber(res, "dicttemplate");
|
|
+
|
|
+ for (i = 0; i < ntups; i++)
|
|
+ {
|
|
+ dictinfo[i].dobj.objType = DO_TSDICT;
|
|
+ dictinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
|
|
+ dictinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
|
|
+ AssignDumpId(&dictinfo[i].dobj);
|
|
+ dictinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_dictname));
|
|
+ dictinfo[i].dobj.namespace =
|
|
+ findNamespace(fout,
|
|
+ atooid(PQgetvalue(res, i, i_dictnamespace)),
|
|
+ dictinfo[i].dobj.catId.oid);
|
|
+ dictinfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_rolname));
|
|
+ dictinfo[i].dicttemplate = atooid(PQgetvalue(res, i, i_dicttemplate));
|
|
+ if (PQgetisnull(res, i, i_dictinitoption))
|
|
+ dictinfo[i].dictinitoption = NULL;
|
|
+ else
|
|
+ dictinfo[i].dictinitoption = pg_strdup(PQgetvalue(res, i, i_dictinitoption));
|
|
+
|
|
+ /* Decide whether we want to dump it */
|
|
+ selectDumpableObject(&(dictinfo[i].dobj));
|
|
+ }
|
|
+
|
|
+ PQclear(res);
|
|
+
|
|
+ destroyPQExpBuffer(query);
|
|
+
|
|
+ return dictinfo;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * getTSTemplates:
|
|
+ * read all text search templates in the system catalogs and return them
|
|
+ * in the TSTemplateInfo* structure
|
|
+ *
|
|
+ * numTSTemplates is set to the number of templates read in
|
|
+ */
|
|
+TSTemplateInfo *
|
|
+getTSTemplates(Archive *fout, int *numTSTemplates)
|
|
+{
|
|
+ PGresult *res;
|
|
+ int ntups;
|
|
+ int i;
|
|
+ PQExpBuffer query;
|
|
+ TSTemplateInfo *tmplinfo;
|
|
+ int i_tableoid;
|
|
+ int i_oid;
|
|
+ int i_tmplname;
|
|
+ int i_tmplnamespace;
|
|
+ int i_tmplinit;
|
|
+ int i_tmpllexize;
|
|
+
|
|
+ /* Before 8.3, there is no built-in text search support */
|
|
+ if (fout->remoteVersion < 80300)
|
|
+ {
|
|
+ *numTSTemplates = 0;
|
|
+ return NULL;
|
|
+ }
|
|
+
|
|
+ query = createPQExpBuffer();
|
|
+
|
|
+ /* Make sure we are in proper schema */
|
|
+ selectSourceSchema(fout, "pg_catalog");
|
|
+
|
|
+ appendPQExpBufferStr(query, "SELECT tableoid, oid, tmplname, "
|
|
+ "tmplnamespace, tmplinit::oid, tmpllexize::oid "
|
|
+ "FROM pg_ts_template");
|
|
+
|
|
+ res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
|
|
+
|
|
+ ntups = PQntuples(res);
|
|
+ *numTSTemplates = ntups;
|
|
+
|
|
+ tmplinfo = (TSTemplateInfo *) pg_malloc(ntups * sizeof(TSTemplateInfo));
|
|
+
|
|
+ i_tableoid = PQfnumber(res, "tableoid");
|
|
+ i_oid = PQfnumber(res, "oid");
|
|
+ i_tmplname = PQfnumber(res, "tmplname");
|
|
+ i_tmplnamespace = PQfnumber(res, "tmplnamespace");
|
|
+ i_tmplinit = PQfnumber(res, "tmplinit");
|
|
+ i_tmpllexize = PQfnumber(res, "tmpllexize");
|
|
+
|
|
+ for (i = 0; i < ntups; i++)
|
|
+ {
|
|
+ tmplinfo[i].dobj.objType = DO_TSTEMPLATE;
|
|
+ tmplinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
|
|
+ tmplinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
|
|
+ AssignDumpId(&tmplinfo[i].dobj);
|
|
+ tmplinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_tmplname));
|
|
+ tmplinfo[i].dobj.namespace =
|
|
+ findNamespace(fout,
|
|
+ atooid(PQgetvalue(res, i, i_tmplnamespace)),
|
|
+ tmplinfo[i].dobj.catId.oid);
|
|
+ tmplinfo[i].tmplinit = atooid(PQgetvalue(res, i, i_tmplinit));
|
|
+ tmplinfo[i].tmpllexize = atooid(PQgetvalue(res, i, i_tmpllexize));
|
|
+
|
|
+ /* Decide whether we want to dump it */
|
|
+ selectDumpableObject(&(tmplinfo[i].dobj));
|
|
+ }
|
|
+
|
|
+ PQclear(res);
|
|
+
|
|
+ destroyPQExpBuffer(query);
|
|
+
|
|
+ return tmplinfo;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * getTSConfigurations:
|
|
+ * read all text search configurations in the system catalogs and return
|
|
+ * them in the TSConfigInfo* structure
|
|
+ *
|
|
+ * numTSConfigs is set to the number of configurations read in
|
|
+ */
|
|
+TSConfigInfo *
|
|
+getTSConfigurations(Archive *fout, int *numTSConfigs)
|
|
+{
|
|
+ PGresult *res;
|
|
+ int ntups;
|
|
+ int i;
|
|
+ PQExpBuffer query;
|
|
+ TSConfigInfo *cfginfo;
|
|
+ int i_tableoid;
|
|
+ int i_oid;
|
|
+ int i_cfgname;
|
|
+ int i_cfgnamespace;
|
|
+ int i_rolname;
|
|
+ int i_cfgparser;
|
|
+
|
|
+ /* Before 8.3, there is no built-in text search support */
|
|
+ if (fout->remoteVersion < 80300)
|
|
+ {
|
|
+ *numTSConfigs = 0;
|
|
+ return NULL;
|
|
+ }
|
|
+
|
|
+ query = createPQExpBuffer();
|
|
+
|
|
+ /* Make sure we are in proper schema */
|
|
+ selectSourceSchema(fout, "pg_catalog");
|
|
+
|
|
+ appendPQExpBuffer(query, "SELECT tableoid, oid, cfgname, "
|
|
+ "cfgnamespace, (%s cfgowner) AS rolname, cfgparser "
|
|
+ "FROM pg_ts_config",
|
|
+ username_subquery);
|
|
+
|
|
+ res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
|
|
+
|
|
+ ntups = PQntuples(res);
|
|
+ *numTSConfigs = ntups;
|
|
+
|
|
+ cfginfo = (TSConfigInfo *) pg_malloc(ntups * sizeof(TSConfigInfo));
|
|
+
|
|
+ i_tableoid = PQfnumber(res, "tableoid");
|
|
+ i_oid = PQfnumber(res, "oid");
|
|
+ i_cfgname = PQfnumber(res, "cfgname");
|
|
+ i_cfgnamespace = PQfnumber(res, "cfgnamespace");
|
|
+ i_rolname = PQfnumber(res, "rolname");
|
|
+ i_cfgparser = PQfnumber(res, "cfgparser");
|
|
+
|
|
+ for (i = 0; i < ntups; i++)
|
|
+ {
|
|
+ cfginfo[i].dobj.objType = DO_TSCONFIG;
|
|
+ cfginfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
|
|
+ cfginfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
|
|
+ AssignDumpId(&cfginfo[i].dobj);
|
|
+ cfginfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_cfgname));
|
|
+ cfginfo[i].dobj.namespace =
|
|
+ findNamespace(fout,
|
|
+ atooid(PQgetvalue(res, i, i_cfgnamespace)),
|
|
+ cfginfo[i].dobj.catId.oid);
|
|
+ cfginfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_rolname));
|
|
+ cfginfo[i].cfgparser = atooid(PQgetvalue(res, i, i_cfgparser));
|
|
+
|
|
+ /* Decide whether we want to dump it */
|
|
+ selectDumpableObject(&(cfginfo[i].dobj));
|
|
+ }
|
|
+
|
|
+ PQclear(res);
|
|
+
|
|
+ destroyPQExpBuffer(query);
|
|
+
|
|
+ return cfginfo;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * getForeignDataWrappers:
|
|
+ * read all foreign-data wrappers in the system catalogs and return
|
|
+ * them in the FdwInfo* structure
|
|
+ *
|
|
+ * numForeignDataWrappers is set to the number of fdws read in
|
|
+ */
|
|
+FdwInfo *
|
|
+getForeignDataWrappers(Archive *fout, int *numForeignDataWrappers)
|
|
+{
|
|
+ PGresult *res;
|
|
+ int ntups;
|
|
+ int i;
|
|
+ PQExpBuffer query;
|
|
+ FdwInfo *fdwinfo;
|
|
+ int i_tableoid;
|
|
+ int i_oid;
|
|
+ int i_fdwname;
|
|
+ int i_rolname;
|
|
+ int i_fdwhandler;
|
|
+ int i_fdwvalidator;
|
|
+ int i_fdwacl;
|
|
+ int i_fdwoptions;
|
|
+
|
|
+ /* Before 8.4, there are no foreign-data wrappers */
|
|
+ if (fout->remoteVersion < 80400)
|
|
+ {
|
|
+ *numForeignDataWrappers = 0;
|
|
+ return NULL;
|
|
+ }
|
|
+
|
|
+ query = createPQExpBuffer();
|
|
+
|
|
+ /* Make sure we are in proper schema */
|
|
+ selectSourceSchema(fout, "pg_catalog");
|
|
+
|
|
+ if (fout->remoteVersion >= 90100)
|
|
+ {
|
|
+ appendPQExpBuffer(query, "SELECT tableoid, oid, fdwname, "
|
|
+ "(%s fdwowner) AS rolname, "
|
|
+ "fdwhandler::pg_catalog.regproc, "
|
|
+ "fdwvalidator::pg_catalog.regproc, fdwacl, "
|
|
+ "array_to_string(ARRAY("
|
|
+ "SELECT quote_ident(option_name) || ' ' || "
|
|
+ "quote_literal(option_value) "
|
|
+ "FROM pg_options_to_table(fdwoptions) "
|
|
+ "ORDER BY option_name"
|
|
+ "), E',\n ') AS fdwoptions "
|
|
+ "FROM pg_foreign_data_wrapper",
|
|
+ username_subquery);
|
|
+ }
|
|
+ else
|
|
+ {
|
|
+ appendPQExpBuffer(query, "SELECT tableoid, oid, fdwname, "
|
|
+ "(%s fdwowner) AS rolname, "
|
|
+ "'-' AS fdwhandler, "
|
|
+ "fdwvalidator::pg_catalog.regproc, fdwacl, "
|
|
+ "array_to_string(ARRAY("
|
|
+ "SELECT quote_ident(option_name) || ' ' || "
|
|
+ "quote_literal(option_value) "
|
|
+ "FROM pg_options_to_table(fdwoptions) "
|
|
+ "ORDER BY option_name"
|
|
+ "), E',\n ') AS fdwoptions "
|
|
+ "FROM pg_foreign_data_wrapper",
|
|
+ username_subquery);
|
|
+ }
|
|
+
|
|
+ res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
|
|
+
|
|
+ ntups = PQntuples(res);
|
|
+ *numForeignDataWrappers = ntups;
|
|
+
|
|
+ fdwinfo = (FdwInfo *) pg_malloc(ntups * sizeof(FdwInfo));
|
|
+
|
|
+ i_tableoid = PQfnumber(res, "tableoid");
|
|
+ i_oid = PQfnumber(res, "oid");
|
|
+ i_fdwname = PQfnumber(res, "fdwname");
|
|
+ i_rolname = PQfnumber(res, "rolname");
|
|
+ i_fdwhandler = PQfnumber(res, "fdwhandler");
|
|
+ i_fdwvalidator = PQfnumber(res, "fdwvalidator");
|
|
+ i_fdwacl = PQfnumber(res, "fdwacl");
|
|
+ i_fdwoptions = PQfnumber(res, "fdwoptions");
|
|
+
|
|
+ for (i = 0; i < ntups; i++)
|
|
+ {
|
|
+ fdwinfo[i].dobj.objType = DO_FDW;
|
|
+ fdwinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
|
|
+ fdwinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
|
|
+ AssignDumpId(&fdwinfo[i].dobj);
|
|
+ fdwinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_fdwname));
|
|
+ fdwinfo[i].dobj.namespace = NULL;
|
|
+ fdwinfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_rolname));
|
|
+ fdwinfo[i].fdwhandler = pg_strdup(PQgetvalue(res, i, i_fdwhandler));
|
|
+ fdwinfo[i].fdwvalidator = pg_strdup(PQgetvalue(res, i, i_fdwvalidator));
|
|
+ fdwinfo[i].fdwoptions = pg_strdup(PQgetvalue(res, i, i_fdwoptions));
|
|
+ fdwinfo[i].fdwacl = pg_strdup(PQgetvalue(res, i, i_fdwacl));
|
|
+
|
|
+ /* Decide whether we want to dump it */
|
|
+ selectDumpableObject(&(fdwinfo[i].dobj));
|
|
+ }
|
|
+
|
|
+ PQclear(res);
|
|
+
|
|
+ destroyPQExpBuffer(query);
|
|
+
|
|
+ return fdwinfo;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * getForeignServers:
|
|
+ * read all foreign servers in the system catalogs and return
|
|
+ * them in the ForeignServerInfo * structure
|
|
+ *
|
|
+ * numForeignServers is set to the number of servers read in
|
|
+ */
|
|
+ForeignServerInfo *
|
|
+getForeignServers(Archive *fout, int *numForeignServers)
|
|
+{
|
|
+ PGresult *res;
|
|
+ int ntups;
|
|
+ int i;
|
|
+ PQExpBuffer query;
|
|
+ ForeignServerInfo *srvinfo;
|
|
+ int i_tableoid;
|
|
+ int i_oid;
|
|
+ int i_srvname;
|
|
+ int i_rolname;
|
|
+ int i_srvfdw;
|
|
+ int i_srvtype;
|
|
+ int i_srvversion;
|
|
+ int i_srvacl;
|
|
+ int i_srvoptions;
|
|
+
|
|
+ /* Before 8.4, there are no foreign servers */
|
|
+ if (fout->remoteVersion < 80400)
|
|
+ {
|
|
+ *numForeignServers = 0;
|
|
+ return NULL;
|
|
+ }
|
|
+
|
|
+ query = createPQExpBuffer();
|
|
+
|
|
+ /* Make sure we are in proper schema */
|
|
+ selectSourceSchema(fout, "pg_catalog");
|
|
+
|
|
+ appendPQExpBuffer(query, "SELECT tableoid, oid, srvname, "
|
|
+ "(%s srvowner) AS rolname, "
|
|
+ "srvfdw, srvtype, srvversion, srvacl,"
|
|
+ "array_to_string(ARRAY("
|
|
+ "SELECT quote_ident(option_name) || ' ' || "
|
|
+ "quote_literal(option_value) "
|
|
+ "FROM pg_options_to_table(srvoptions) "
|
|
+ "ORDER BY option_name"
|
|
+ "), E',\n ') AS srvoptions "
|
|
+ "FROM pg_foreign_server",
|
|
+ username_subquery);
|
|
+
|
|
+ res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
|
|
+
|
|
+ ntups = PQntuples(res);
|
|
+ *numForeignServers = ntups;
|
|
+
|
|
+ srvinfo = (ForeignServerInfo *) pg_malloc(ntups * sizeof(ForeignServerInfo));
|
|
+
|
|
+ i_tableoid = PQfnumber(res, "tableoid");
|
|
+ i_oid = PQfnumber(res, "oid");
|
|
+ i_srvname = PQfnumber(res, "srvname");
|
|
+ i_rolname = PQfnumber(res, "rolname");
|
|
+ i_srvfdw = PQfnumber(res, "srvfdw");
|
|
+ i_srvtype = PQfnumber(res, "srvtype");
|
|
+ i_srvversion = PQfnumber(res, "srvversion");
|
|
+ i_srvacl = PQfnumber(res, "srvacl");
|
|
+ i_srvoptions = PQfnumber(res, "srvoptions");
|
|
+
|
|
+ for (i = 0; i < ntups; i++)
|
|
+ {
|
|
+ srvinfo[i].dobj.objType = DO_FOREIGN_SERVER;
|
|
+ srvinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
|
|
+ srvinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
|
|
+ AssignDumpId(&srvinfo[i].dobj);
|
|
+ srvinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_srvname));
|
|
+ srvinfo[i].dobj.namespace = NULL;
|
|
+ srvinfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_rolname));
|
|
+ srvinfo[i].srvfdw = atooid(PQgetvalue(res, i, i_srvfdw));
|
|
+ srvinfo[i].srvtype = pg_strdup(PQgetvalue(res, i, i_srvtype));
|
|
+ srvinfo[i].srvversion = pg_strdup(PQgetvalue(res, i, i_srvversion));
|
|
+ srvinfo[i].srvoptions = pg_strdup(PQgetvalue(res, i, i_srvoptions));
|
|
+ srvinfo[i].srvacl = pg_strdup(PQgetvalue(res, i, i_srvacl));
|
|
+
|
|
+ /* Decide whether we want to dump it */
|
|
+ selectDumpableObject(&(srvinfo[i].dobj));
|
|
+ }
|
|
+
|
|
+ PQclear(res);
|
|
+
|
|
+ destroyPQExpBuffer(query);
|
|
+
|
|
+ return srvinfo;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * getDefaultACLs:
|
|
+ * read all default ACL information in the system catalogs and return
|
|
+ * them in the DefaultACLInfo structure
|
|
+ *
|
|
+ * numDefaultACLs is set to the number of ACLs read in
|
|
+ */
|
|
+DefaultACLInfo *
|
|
+getDefaultACLs(Archive *fout, int *numDefaultACLs)
|
|
+{
|
|
+ DefaultACLInfo *daclinfo;
|
|
+ PQExpBuffer query;
|
|
+ PGresult *res;
|
|
+ int i_oid;
|
|
+ int i_tableoid;
|
|
+ int i_defaclrole;
|
|
+ int i_defaclnamespace;
|
|
+ int i_defaclobjtype;
|
|
+ int i_defaclacl;
|
|
+ int i,
|
|
+ ntups;
|
|
+
|
|
+ if (fout->remoteVersion < 90000)
|
|
+ {
|
|
+ *numDefaultACLs = 0;
|
|
+ return NULL;
|
|
+ }
|
|
+
|
|
+ query = createPQExpBuffer();
|
|
+
|
|
+ /* Make sure we are in proper schema */
|
|
+ selectSourceSchema(fout, "pg_catalog");
|
|
+
|
|
+ appendPQExpBuffer(query, "SELECT oid, tableoid, "
|
|
+ "(%s defaclrole) AS defaclrole, "
|
|
+ "defaclnamespace, "
|
|
+ "defaclobjtype, "
|
|
+ "defaclacl "
|
|
+ "FROM pg_default_acl",
|
|
+ username_subquery);
|
|
+
|
|
+ res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
|
|
+
|
|
+ ntups = PQntuples(res);
|
|
+ *numDefaultACLs = ntups;
|
|
+
|
|
+ daclinfo = (DefaultACLInfo *) pg_malloc(ntups * sizeof(DefaultACLInfo));
|
|
+
|
|
+ i_oid = PQfnumber(res, "oid");
|
|
+ i_tableoid = PQfnumber(res, "tableoid");
|
|
+ i_defaclrole = PQfnumber(res, "defaclrole");
|
|
+ i_defaclnamespace = PQfnumber(res, "defaclnamespace");
|
|
+ i_defaclobjtype = PQfnumber(res, "defaclobjtype");
|
|
+ i_defaclacl = PQfnumber(res, "defaclacl");
|
|
+
|
|
+ for (i = 0; i < ntups; i++)
|
|
+ {
|
|
+ Oid nspid = atooid(PQgetvalue(res, i, i_defaclnamespace));
|
|
+
|
|
+ daclinfo[i].dobj.objType = DO_DEFAULT_ACL;
|
|
+ daclinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
|
|
+ daclinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
|
|
+ AssignDumpId(&daclinfo[i].dobj);
|
|
+ /* cheesy ... is it worth coming up with a better object name? */
|
|
+ daclinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_defaclobjtype));
|
|
+
|
|
+ if (nspid != InvalidOid)
|
|
+ daclinfo[i].dobj.namespace = findNamespace(fout, nspid,
|
|
+ daclinfo[i].dobj.catId.oid);
|
|
+ else
|
|
+ daclinfo[i].dobj.namespace = NULL;
|
|
+
|
|
+ daclinfo[i].defaclrole = pg_strdup(PQgetvalue(res, i, i_defaclrole));
|
|
+ daclinfo[i].defaclobjtype = *(PQgetvalue(res, i, i_defaclobjtype));
|
|
+ daclinfo[i].defaclacl = pg_strdup(PQgetvalue(res, i, i_defaclacl));
|
|
+
|
|
+ /* Decide whether we want to dump it */
|
|
+ selectDumpableDefaultACL(&(daclinfo[i]));
|
|
+ }
|
|
+
|
|
+ PQclear(res);
|
|
+
|
|
+ destroyPQExpBuffer(query);
|
|
+
|
|
+ return daclinfo;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * dumpComment --
|
|
+ *
|
|
+ * This routine is used to dump any comments associated with the
|
|
+ * object handed to this routine. The routine takes a constant character
|
|
+ * string for the target part of the comment-creation command, plus
|
|
+ * the namespace and owner of the object (for labeling the ArchiveEntry),
|
|
+ * plus catalog ID and subid which are the lookup key for pg_description,
|
|
+ * plus the dump ID for the object (for setting a dependency).
|
|
+ * If a matching pg_description entry is found, it is dumped.
|
|
+ *
|
|
+ * Note: although this routine takes a dumpId for dependency purposes,
|
|
+ * that purpose is just to mark the dependency in the emitted dump file
|
|
+ * for possible future use by pg_restore. We do NOT use it for determining
|
|
+ * ordering of the comment in the dump file, because this routine is called
|
|
+ * after dependency sorting occurs. This routine should be called just after
|
|
+ * calling ArchiveEntry() for the specified object.
|
|
+ */
|
|
+static void
|
|
+dumpComment(Archive *fout, const char *target,
|
|
+ const char *namespace, const char *owner,
|
|
+ CatalogId catalogId, int subid, DumpId dumpId)
|
|
+{
|
|
+ CommentItem *comments;
|
|
+ int ncomments;
|
|
+
|
|
+ /* Comments are schema not data ... except blob comments are data */
|
|
+ if (strncmp(target, "LARGE OBJECT ", 13) != 0)
|
|
+ {
|
|
+ if (dataOnly)
|
|
+ return;
|
|
+ }
|
|
+ else
|
|
+ {
|
|
+ if (schemaOnly)
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ /* Search for comments associated with catalogId, using table */
|
|
+ ncomments = findComments(fout, catalogId.tableoid, catalogId.oid,
|
|
+ &comments);
|
|
+
|
|
+ /* Is there one matching the subid? */
|
|
+ while (ncomments > 0)
|
|
+ {
|
|
+ if (comments->objsubid == subid)
|
|
+ break;
|
|
+ comments++;
|
|
+ ncomments--;
|
|
+ }
|
|
+
|
|
+ /* If a comment exists, build COMMENT ON statement */
|
|
+ if (ncomments > 0)
|
|
+ {
|
|
+ PQExpBuffer query = createPQExpBuffer();
|
|
+
|
|
+ appendPQExpBuffer(query, "COMMENT ON %s IS ", target);
|
|
+ appendStringLiteralAH(query, comments->descr, fout);
|
|
+ appendPQExpBufferStr(query, ";\n");
|
|
+
|
|
+ /*
|
|
+ * We mark comments as SECTION_NONE because they really belong in the
|
|
+ * same section as their parent, whether that is pre-data or
|
|
+ * post-data.
|
|
+ */
|
|
+ ArchiveEntry(fout, nilCatalogId, createDumpId(),
|
|
+ target, namespace, NULL, owner,
|
|
+ false, "COMMENT", SECTION_NONE,
|
|
+ query->data, "", NULL,
|
|
+ &(dumpId), 1,
|
|
+ NULL, NULL);
|
|
+
|
|
+ destroyPQExpBuffer(query);
|
|
+ }
|
|
+}
|
|
+
|
|
+/*
|
|
+ * dumpTableComment --
|
|
+ *
|
|
+ * As above, but dump comments for both the specified table (or view)
|
|
+ * and its columns.
|
|
+ */
|
|
+static void
|
|
+dumpTableComment(Archive *fout, TableInfo *tbinfo,
|
|
+ const char *reltypename)
|
|
+{
|
|
+ CommentItem *comments;
|
|
+ int ncomments;
|
|
+ PQExpBuffer query;
|
|
+ PQExpBuffer target;
|
|
+
|
|
+ /* Comments are SCHEMA not data */
|
|
+ if (dataOnly)
|
|
+ return;
|
|
+
|
|
+ /* Search for comments associated with relation, using table */
|
|
+ ncomments = findComments(fout,
|
|
+ tbinfo->dobj.catId.tableoid,
|
|
+ tbinfo->dobj.catId.oid,
|
|
+ &comments);
|
|
+
|
|
+ /* If comments exist, build COMMENT ON statements */
|
|
+ if (ncomments <= 0)
|
|
+ return;
|
|
+
|
|
+ query = createPQExpBuffer();
|
|
+ target = createPQExpBuffer();
|
|
+
|
|
+ while (ncomments > 0)
|
|
+ {
|
|
+ const char *descr = comments->descr;
|
|
+ int objsubid = comments->objsubid;
|
|
+
|
|
+ if (objsubid == 0)
|
|
+ {
|
|
+ resetPQExpBuffer(target);
|
|
+ appendPQExpBuffer(target, "%s %s", reltypename,
|
|
+ fmtId(tbinfo->dobj.name));
|
|
+
|
|
+ resetPQExpBuffer(query);
|
|
+ appendPQExpBuffer(query, "COMMENT ON %s IS ", target->data);
|
|
+ appendStringLiteralAH(query, descr, fout);
|
|
+ appendPQExpBufferStr(query, ";\n");
|
|
+
|
|
+ ArchiveEntry(fout, nilCatalogId, createDumpId(),
|
|
+ target->data,
|
|
+ tbinfo->dobj.namespace->dobj.name,
|
|
+ NULL, tbinfo->rolname,
|
|
+ false, "COMMENT", SECTION_NONE,
|
|
+ query->data, "", NULL,
|
|
+ &(tbinfo->dobj.dumpId), 1,
|
|
+ NULL, NULL);
|
|
+ }
|
|
+ else if (objsubid > 0 && objsubid <= tbinfo->numatts)
|
|
+ {
|
|
+ resetPQExpBuffer(target);
|
|
+ appendPQExpBuffer(target, "COLUMN %s.",
|
|
+ fmtId(tbinfo->dobj.name));
|
|
+ appendPQExpBufferStr(target, fmtId(tbinfo->attnames[objsubid - 1]));
|
|
+
|
|
+ resetPQExpBuffer(query);
|
|
+ appendPQExpBuffer(query, "COMMENT ON %s IS ", target->data);
|
|
+ appendStringLiteralAH(query, descr, fout);
|
|
+ appendPQExpBufferStr(query, ";\n");
|
|
+
|
|
+ ArchiveEntry(fout, nilCatalogId, createDumpId(),
|
|
+ target->data,
|
|
+ tbinfo->dobj.namespace->dobj.name,
|
|
+ NULL, tbinfo->rolname,
|
|
+ false, "COMMENT", SECTION_NONE,
|
|
+ query->data, "", NULL,
|
|
+ &(tbinfo->dobj.dumpId), 1,
|
|
+ NULL, NULL);
|
|
+ }
|
|
+
|
|
+ comments++;
|
|
+ ncomments--;
|
|
+ }
|
|
+
|
|
+ destroyPQExpBuffer(query);
|
|
+ destroyPQExpBuffer(target);
|
|
+}
|
|
+
|
|
+/*
+ * findComments --
+ *
+ * Find the comment(s), if any, associated with the given object. All the
+ * objsubid values associated with the given classoid/objoid are found with
+ * one search.
+ */
+static int
|
|
+findComments(Archive *fout, Oid classoid, Oid objoid,
|
|
+ CommentItem **items)
|
|
+{
|
|
+ /* static storage for table of comments */
|
|
+ static CommentItem *comments = NULL;
|
|
+ static int ncomments = -1;
|
|
+
|
|
+ CommentItem *middle = NULL;
|
|
+ CommentItem *low;
|
|
+ CommentItem *high;
|
|
+ int nmatch;
|
|
+
|
|
+ /* Get comments if we didn't already */
|
|
+ if (ncomments < 0)
|
|
+ ncomments = collectComments(fout, &comments);
|
|
+
|
|
+ /*
|
|
+ * Pre-7.2, pg_description does not contain classoid, so collectComments
|
|
+ * just stores a zero. If there's a collision on object OID, well, you
|
|
+ * get duplicate comments.
|
|
+ */
|
|
+ if (fout->remoteVersion < 70200)
|
|
+ classoid = 0;
|
|
+
|
|
+ /*
|
|
+ * Do binary search to find some item matching the object.
|
|
+ */
|
|
+ low = &comments[0];
|
|
+ high = &comments[ncomments - 1];
|
|
+ while (low <= high)
|
|
+ {
|
|
+ middle = low + (high - low) / 2;
|
|
+
|
|
+ if (classoid < middle->classoid)
|
|
+ high = middle - 1;
|
|
+ else if (classoid > middle->classoid)
|
|
+ low = middle + 1;
|
|
+ else if (objoid < middle->objoid)
|
|
+ high = middle - 1;
|
|
+ else if (objoid > middle->objoid)
|
|
+ low = middle + 1;
|
|
+ else
|
|
+ break; /* found a match */
|
|
+ }
|
|
+
|
|
+ if (low > high) /* no matches */
|
|
+ {
|
|
+ *items = NULL;
|
|
+ return 0;
|
|
+ }
|
|
+
|
|
+ /*
|
|
+ * Now determine how many items match the object. The search loop
|
|
+ * invariant still holds: only items between low and high inclusive could
|
|
+ * match.
|
|
+ */
|
|
+ nmatch = 1;
|
|
+ while (middle > low)
|
|
+ {
|
|
+ if (classoid != middle[-1].classoid ||
|
|
+ objoid != middle[-1].objoid)
|
|
+ break;
|
|
+ middle--;
|
|
+ nmatch++;
|
|
+ }
|
|
+
|
|
+ *items = middle;
|
|
+
|
|
+ middle += nmatch;
|
|
+ while (middle <= high)
|
|
+ {
|
|
+ if (classoid != middle->classoid ||
|
|
+ objoid != middle->objoid)
|
|
+ break;
|
|
+ middle++;
|
|
+ nmatch++;
|
|
+ }
|
|
+
|
|
+ return nmatch;
|
|
+}
|
|
+
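The routine above binary-searches the sorted comments table for any matching entry and then widens the hit to cover the whole run of equal (classoid, objoid) keys. Below is a minimal self-contained sketch of that same technique; the Item struct and find_items() are illustrative names only and are not part of pg_dump or of this patch.

/* Sketch: binary search for one match, then widen to the full run of equal keys. */
#include <stdio.h>

typedef struct
{
	unsigned int classoid;
	unsigned int objoid;
	const char *descr;
} Item;

static int
find_items(Item *table, int ntable, unsigned int classoid, unsigned int objoid,
		   Item **items)
{
	Item	   *low;
	Item	   *high;
	Item	   *middle = NULL;
	int			nmatch;

	if (ntable <= 0)
	{
		*items = NULL;
		return 0;
	}
	low = &table[0];
	high = &table[ntable - 1];
	while (low <= high)
	{
		middle = low + (high - low) / 2;
		if (classoid < middle->classoid)
			high = middle - 1;
		else if (classoid > middle->classoid)
			low = middle + 1;
		else if (objoid < middle->objoid)
			high = middle - 1;
		else if (objoid > middle->objoid)
			low = middle + 1;
		else
			break;				/* found a match */
	}
	if (low > high)				/* no matches */
	{
		*items = NULL;
		return 0;
	}
	/* widen left to the first matching entry */
	nmatch = 1;
	while (middle > low &&
		   classoid == middle[-1].classoid && objoid == middle[-1].objoid)
	{
		middle--;
		nmatch++;
	}
	*items = middle;
	/* then count the remaining matches to the right */
	middle += nmatch;
	while (middle <= high &&
		   classoid == middle->classoid && objoid == middle->objoid)
	{
		middle++;
		nmatch++;
	}
	return nmatch;
}

int
main(void)
{
	/* already sorted by (classoid, objoid), as ORDER BY would deliver it */
	Item		table[] = {
		{1259, 100, "table one"},
		{1259, 200, "column a"},
		{1259, 200, "column b"},
		{2615, 300, "schema"}
	};
	Item	   *found;
	int			n = find_items(table, 4, 1259, 200, &found);

	printf("%d matches, first: %s\n", n, found->descr);	/* prints 2 matches */
	return 0;
}

Returning a pointer into the sorted table plus a match count avoids copying; the caller simply walks the run, exactly as dumpComment() and dumpTableComment() do above.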
+/*
+ * collectComments --
+ *
+ * Construct a table of all comments available for database objects.
+ * We used to do per-object queries for the comments, but it's much faster
+ * to pull them all over at once, and on most databases the memory cost
+ * isn't high.
+ *
+ * The table is sorted by classoid/objid/objsubid for speed in lookup.
+ */
+static int
|
|
+collectComments(Archive *fout, CommentItem **items)
|
|
+{
|
|
+ PGresult *res;
|
|
+ PQExpBuffer query;
|
|
+ int i_description;
|
|
+ int i_classoid;
|
|
+ int i_objoid;
|
|
+ int i_objsubid;
|
|
+ int ntups;
|
|
+ int i;
|
|
+ CommentItem *comments;
|
|
+
|
|
+ /*
|
|
+ * Note we do NOT change source schema here; preserve the caller's
|
|
+ * setting, instead.
|
|
+ */
|
|
+
|
|
+ query = createPQExpBuffer();
|
|
+
|
|
+ if (fout->remoteVersion >= 70300)
|
|
+ {
|
|
+ appendPQExpBufferStr(query, "SELECT description, classoid, objoid, objsubid "
|
|
+ "FROM pg_catalog.pg_description "
|
|
+ "ORDER BY classoid, objoid, objsubid");
|
|
+ }
|
|
+ else if (fout->remoteVersion >= 70200)
|
|
+ {
|
|
+ appendPQExpBufferStr(query, "SELECT description, classoid, objoid, objsubid "
|
|
+ "FROM pg_description "
|
|
+ "ORDER BY classoid, objoid, objsubid");
|
|
+ }
|
|
+ else
|
|
+ {
|
|
+ /* Note: this will fail to find attribute comments in pre-7.2... */
|
|
+ appendPQExpBufferStr(query, "SELECT description, 0 AS classoid, objoid, 0 AS objsubid "
|
|
+ "FROM pg_description "
|
|
+ "ORDER BY objoid");
|
|
+ }
|
|
+
|
|
+ res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
|
|
+
|
|
+ /* Construct lookup table containing OIDs in numeric form */
|
|
+
|
|
+ i_description = PQfnumber(res, "description");
|
|
+ i_classoid = PQfnumber(res, "classoid");
|
|
+ i_objoid = PQfnumber(res, "objoid");
|
|
+ i_objsubid = PQfnumber(res, "objsubid");
|
|
+
|
|
+ ntups = PQntuples(res);
|
|
+
|
|
+ comments = (CommentItem *) pg_malloc(ntups * sizeof(CommentItem));
|
|
+
|
|
+ for (i = 0; i < ntups; i++)
|
|
+ {
|
|
+ comments[i].descr = PQgetvalue(res, i, i_description);
|
|
+ comments[i].classoid = atooid(PQgetvalue(res, i, i_classoid));
|
|
+ comments[i].objoid = atooid(PQgetvalue(res, i, i_objoid));
|
|
+ comments[i].objsubid = atoi(PQgetvalue(res, i, i_objsubid));
|
|
+ }
|
|
+
|
|
+ /* Do NOT free the PGresult since we are keeping pointers into it */
|
|
+ destroyPQExpBuffer(query);
|
|
+
|
|
+ *items = comments;
|
|
+ return ntups;
|
|
+}
|
|
+
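collectComments() above relies on fetching every pg_description row in a single query and keeping the result ordered by (classoid, objoid, objsubid). A small stand-alone sketch of that collect-once, keep-sorted pattern follows, with illustrative Row and row_cmp names that are not part of pg_dump or of this patch; here the ordering is produced client-side with qsort rather than by the server's ORDER BY.

/* Sketch: collect all rows once, keep them sorted on the full three-part key. */
#include <stdlib.h>
#include <stdio.h>

typedef struct
{
	unsigned int classoid;
	unsigned int objoid;
	int			objsubid;
	const char *descr;
} Row;

static int
row_cmp(const void *a, const void *b)
{
	const Row  *ra = (const Row *) a;
	const Row  *rb = (const Row *) b;

	if (ra->classoid != rb->classoid)
		return (ra->classoid < rb->classoid) ? -1 : 1;
	if (ra->objoid != rb->objoid)
		return (ra->objoid < rb->objoid) ? -1 : 1;
	return ra->objsubid - rb->objsubid;
}

int
main(void)
{
	/* rows as they might arrive from the server, in arbitrary order */
	Row			rows[] = {
		{2615, 300, 0, "schema comment"},
		{1259, 200, 2, "column b"},
		{1259, 200, 0, "table comment"},
		{1259, 200, 1, "column a"}
	};
	int			nrows = (int) (sizeof(rows) / sizeof(rows[0]));
	int			i;

	/* one sort up front replaces a per-object query for every later lookup */
	qsort(rows, nrows, sizeof(Row), row_cmp);

	for (i = 0; i < nrows; i++)
		printf("%u/%u/%d: %s\n",
			   rows[i].classoid, rows[i].objoid, rows[i].objsubid,
			   rows[i].descr);
	return 0;
}

Either way the ordering is produced, the invariant the binary search depends on is the same: the table is sorted on the complete (classoid, objoid, objsubid) key.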
+/*
+ * dumpDumpableObject
+ *
+ * This routine and its subsidiaries are responsible for creating
+ * ArchiveEntries (TOC objects) for each object to be dumped.
+ */
+static void
|
|
+dumpDumpableObject(Archive *fout, DumpableObject *dobj)
|
|
+{
|
|
+ switch (dobj->objType)
|
|
+ {
|
|
+ case DO_NAMESPACE:
|
|
+ dumpNamespace(fout, (NamespaceInfo *) dobj);
|
|
+ break;
|
|
+ case DO_EXTENSION:
|
|
+ dumpExtension(fout, (ExtensionInfo *) dobj);
|
|
+ break;
|
|
+ case DO_TYPE:
|
|
+ dumpType(fout, (TypeInfo *) dobj);
|
|
+ break;
|
|
+ case DO_SHELL_TYPE:
|
|
+ dumpShellType(fout, (ShellTypeInfo *) dobj);
|
|
+ break;
|
|
+ case DO_FUNC:
|
|
+ dumpFunc(fout, (FuncInfo *) dobj);
|
|
+ break;
|
|
+ case DO_AGG:
|
|
+ dumpAgg(fout, (AggInfo *) dobj);
|
|
+ break;
|
|
+ case DO_OPERATOR:
|
|
+ dumpOpr(fout, (OprInfo *) dobj);
|
|
+ break;
|
|
+ case DO_OPCLASS:
|
|
+ dumpOpclass(fout, (OpclassInfo *) dobj);
|
|
+ break;
|
|
+ case DO_OPFAMILY:
|
|
+ dumpOpfamily(fout, (OpfamilyInfo *) dobj);
|
|
+ break;
|
|
+ case DO_COLLATION:
|
|
+ dumpCollation(fout, (CollInfo *) dobj);
|
|
+ break;
|
|
+ case DO_CONVERSION:
|
|
+ dumpConversion(fout, (ConvInfo *) dobj);
|
|
+ break;
|
|
+ case DO_TABLE:
|
|
+ dumpTable(fout, (TableInfo *) dobj);
|
|
+ break;
|
|
+ case DO_ATTRDEF:
|
|
+ dumpAttrDef(fout, (AttrDefInfo *) dobj);
|
|
+ break;
|
|
+ case DO_INDEX:
|
|
+ dumpIndex(fout, (IndxInfo *) dobj);
|
|
+ break;
|
|
+ case DO_REFRESH_MATVIEW:
|
|
+ refreshMatViewData(fout, (TableDataInfo *) dobj);
|
|
+ break;
|
|
+ case DO_RULE:
|
|
+ dumpRule(fout, (RuleInfo *) dobj);
|
|
+ break;
|
|
+ case DO_TRIGGER:
|
|
+ dumpTrigger(fout, (TriggerInfo *) dobj);
|
|
+ break;
|
|
+ case DO_EVENT_TRIGGER:
|
|
+ dumpEventTrigger(fout, (EventTriggerInfo *) dobj);
|
|
+ break;
|
|
+ case DO_CONSTRAINT:
|
|
+ dumpConstraint(fout, (ConstraintInfo *) dobj);
|
|
+ break;
|
|
+ case DO_FK_CONSTRAINT:
|
|
+ dumpConstraint(fout, (ConstraintInfo *) dobj);
|
|
+ break;
|
|
+ case DO_PROCLANG:
|
|
+ dumpProcLang(fout, (ProcLangInfo *) dobj);
|
|
+ break;
|
|
+ case DO_CAST:
|
|
+ dumpCast(fout, (CastInfo *) dobj);
|
|
+ break;
|
|
+ case DO_TABLE_DATA:
|
|
+ if (((TableDataInfo *) dobj)->tdtable->relkind == RELKIND_SEQUENCE)
|
|
+ dumpSequenceData(fout, (TableDataInfo *) dobj);
|
|
+ else
|
|
+ dumpTableData(fout, (TableDataInfo *) dobj);
|
|
+ break;
|
|
+ case DO_DUMMY_TYPE:
|
|
+ /* table rowtypes and array types are never dumped separately */
|
|
+ break;
|
|
+ case DO_TSPARSER:
|
|
+ dumpTSParser(fout, (TSParserInfo *) dobj);
|
|
+ break;
|
|
+ case DO_TSDICT:
|
|
+ dumpTSDictionary(fout, (TSDictInfo *) dobj);
|
|
+ break;
|
|
+ case DO_TSTEMPLATE:
|
|
+ dumpTSTemplate(fout, (TSTemplateInfo *) dobj);
|
|
+ break;
|
|
+ case DO_TSCONFIG:
|
|
+ dumpTSConfig(fout, (TSConfigInfo *) dobj);
|
|
+ break;
|
|
+ case DO_FDW:
|
|
+ dumpForeignDataWrapper(fout, (FdwInfo *) dobj);
|
|
+ break;
|
|
+ case DO_FOREIGN_SERVER:
|
|
+ dumpForeignServer(fout, (ForeignServerInfo *) dobj);
|
|
+ break;
|
|
+ case DO_DEFAULT_ACL:
|
|
+ dumpDefaultACL(fout, (DefaultACLInfo *) dobj);
|
|
+ break;
|
|
+ case DO_BLOB:
|
|
+ dumpBlob(fout, (BlobInfo *) dobj);
|
|
+ break;
|
|
+ case DO_BLOB_DATA:
|
|
+ ArchiveEntry(fout, dobj->catId, dobj->dumpId,
|
|
+ dobj->name, NULL, NULL, "",
|
|
+ false, "BLOBS", SECTION_DATA,
|
|
+ "", "", NULL,
|
|
+ NULL, 0,
|
|
+ dumpBlobs, NULL);
|
|
+ break;
|
|
+ case DO_PRE_DATA_BOUNDARY:
|
|
+ case DO_POST_DATA_BOUNDARY:
|
|
+ /* never dumped, nothing to do */
|
|
+ break;
|
|
+ }
|
|
+}
|
|
+
|
|
+/*
+ * dumpNamespace
+ * writes out to fout the queries to recreate a user-defined namespace
+ */
+static void
|
|
+dumpNamespace(Archive *fout, NamespaceInfo *nspinfo)
|
|
+{
|
|
+ PQExpBuffer q;
|
|
+ PQExpBuffer delq;
|
|
+ PQExpBuffer labelq;
|
|
+ char *qnspname;
|
|
+
|
|
+ /* Skip if not to be dumped */
|
|
+ if (!nspinfo->dobj.dump || dataOnly)
|
|
+ return;
|
|
+
|
|
+ /* don't dump dummy namespace from pre-7.3 source */
|
|
+ if (strlen(nspinfo->dobj.name) == 0)
|
|
+ return;
|
|
+
|
|
+ q = createPQExpBuffer();
|
|
+ delq = createPQExpBuffer();
|
|
+ labelq = createPQExpBuffer();
|
|
+
|
|
+ qnspname = pg_strdup(fmtId(nspinfo->dobj.name));
|
|
+
|
|
+ appendPQExpBuffer(delq, "DROP SCHEMA %s;\n", qnspname);
|
|
+
|
|
+ appendPQExpBuffer(q, "CREATE SCHEMA %s;\n", qnspname);
|
|
+
|
|
+ appendPQExpBuffer(labelq, "SCHEMA %s", qnspname);
|
|
+
|
|
+ if (binary_upgrade)
|
|
+ binary_upgrade_extension_member(q, &nspinfo->dobj, labelq->data);
|
|
+
|
|
+ ArchiveEntry(fout, nspinfo->dobj.catId, nspinfo->dobj.dumpId,
|
|
+ nspinfo->dobj.name,
|
|
+ NULL, NULL,
|
|
+ nspinfo->rolname,
|
|
+ false, "SCHEMA", SECTION_PRE_DATA,
|
|
+ q->data, delq->data, NULL,
|
|
+ NULL, 0,
|
|
+ NULL, NULL);
|
|
+
|
|
+ /* Dump Schema Comments and Security Labels */
|
|
+ dumpComment(fout, labelq->data,
|
|
+ NULL, nspinfo->rolname,
|
|
+ nspinfo->dobj.catId, 0, nspinfo->dobj.dumpId);
|
|
+ dumpSecLabel(fout, labelq->data,
|
|
+ NULL, nspinfo->rolname,
|
|
+ nspinfo->dobj.catId, 0, nspinfo->dobj.dumpId);
|
|
+
|
|
+ dumpACL(fout, nspinfo->dobj.catId, nspinfo->dobj.dumpId, "SCHEMA",
|
|
+ qnspname, NULL, nspinfo->dobj.name, NULL,
|
|
+ nspinfo->rolname, nspinfo->nspacl);
|
|
+
|
|
+ free(qnspname);
|
|
+
|
|
+ destroyPQExpBuffer(q);
|
|
+ destroyPQExpBuffer(delq);
|
|
+ destroyPQExpBuffer(labelq);
|
|
+}
|
|
+
|
|
+/*
+ * dumpExtension
+ * writes out to fout the queries to recreate an extension
+ */
+static void
|
|
+dumpExtension(Archive *fout, ExtensionInfo *extinfo)
|
|
+{
|
|
+ PQExpBuffer q;
|
|
+ PQExpBuffer delq;
|
|
+ PQExpBuffer labelq;
|
|
+ char *qextname;
|
|
+
|
|
+ /* Skip if not to be dumped */
|
|
+ if (!extinfo->dobj.dump || dataOnly)
|
|
+ return;
|
|
+
|
|
+ q = createPQExpBuffer();
|
|
+ delq = createPQExpBuffer();
|
|
+ labelq = createPQExpBuffer();
|
|
+
|
|
+ qextname = pg_strdup(fmtId(extinfo->dobj.name));
|
|
+
|
|
+ appendPQExpBuffer(delq, "DROP EXTENSION %s;\n", qextname);
|
|
+
|
|
+ if (!binary_upgrade)
|
|
+ {
|
|
+ /*
|
|
+ * In a regular dump, we use IF NOT EXISTS so that there isn't a
|
|
+ * problem if the extension already exists in the target database;
|
|
+ * this is essential for installed-by-default extensions such as
|
|
+ * plpgsql.
|
|
+ *
|
|
+ * In binary-upgrade mode, that doesn't work well, so instead we skip
|
|
+ * built-in extensions based on their OIDs; see
|
|
+ * selectDumpableExtension.
|
|
+ */
|
|
+ appendPQExpBuffer(q, "CREATE EXTENSION IF NOT EXISTS %s WITH SCHEMA %s;\n",
|
|
+ qextname, fmtId(extinfo->namespace));
|
|
+ }
|
|
+ else
|
|
+ {
|
|
+ int i;
|
|
+ int n;
|
|
+
|
|
+ appendPQExpBufferStr(q, "-- For binary upgrade, create an empty extension and insert objects into it\n");
|
|
+
|
|
+ /*
|
|
+ * We unconditionally create the extension, so we must drop it if it
|
|
+ * exists. This could happen if the user deleted 'plpgsql' and then
|
|
+ * readded it, causing its oid to be greater than FirstNormalObjectId.
|
|
+ * The FirstNormalObjectId test was kept to avoid repeatedly dropping
|
|
+ * and recreating extensions like 'plpgsql'.
|
|
+ */
|
|
+ appendPQExpBuffer(q, "DROP EXTENSION IF EXISTS %s;\n", qextname);
|
|
+
|
|
+ appendPQExpBufferStr(q,
|
|
+ "SELECT binary_upgrade.create_empty_extension(");
|
|
+ appendStringLiteralAH(q, extinfo->dobj.name, fout);
|
|
+ appendPQExpBufferStr(q, ", ");
|
|
+ appendStringLiteralAH(q, extinfo->namespace, fout);
|
|
+ appendPQExpBufferStr(q, ", ");
|
|
+ appendPQExpBuffer(q, "%s, ", extinfo->relocatable ? "true" : "false");
|
|
+ appendStringLiteralAH(q, extinfo->extversion, fout);
|
|
+ appendPQExpBufferStr(q, ", ");
|
|
+
|
|
+ /*
|
|
+ * Note that we're pushing extconfig (an OID array) back into
|
|
+ * pg_extension exactly as-is. This is OK because pg_class OIDs are
|
|
+ * preserved in binary upgrade.
|
|
+ */
|
|
+ if (strlen(extinfo->extconfig) > 2)
|
|
+ appendStringLiteralAH(q, extinfo->extconfig, fout);
|
|
+ else
|
|
+ appendPQExpBufferStr(q, "NULL");
|
|
+ appendPQExpBufferStr(q, ", ");
|
|
+ if (strlen(extinfo->extcondition) > 2)
|
|
+ appendStringLiteralAH(q, extinfo->extcondition, fout);
|
|
+ else
|
|
+ appendPQExpBufferStr(q, "NULL");
|
|
+ appendPQExpBufferStr(q, ", ");
|
|
+ appendPQExpBufferStr(q, "ARRAY[");
|
|
+ n = 0;
|
|
+ for (i = 0; i < extinfo->dobj.nDeps; i++)
|
|
+ {
|
|
+ DumpableObject *extobj;
|
|
+
|
|
+ extobj = findObjectByDumpId(extinfo->dobj.dependencies[i]);
|
|
+ if (extobj && extobj->objType == DO_EXTENSION)
|
|
+ {
|
|
+ if (n++ > 0)
|
|
+ appendPQExpBufferChar(q, ',');
|
|
+ appendStringLiteralAH(q, extobj->name, fout);
|
|
+ }
|
|
+ }
|
|
+ appendPQExpBufferStr(q, "]::pg_catalog.text[]");
|
|
+ appendPQExpBufferStr(q, ");\n");
|
|
+ }
|
|
+
|
|
+ appendPQExpBuffer(labelq, "EXTENSION %s", qextname);
|
|
+
|
|
+ ArchiveEntry(fout, extinfo->dobj.catId, extinfo->dobj.dumpId,
|
|
+ extinfo->dobj.name,
|
|
+ NULL, NULL,
|
|
+ "",
|
|
+ false, "EXTENSION", SECTION_PRE_DATA,
|
|
+ q->data, delq->data, NULL,
|
|
+ NULL, 0,
|
|
+ NULL, NULL);
|
|
+
|
|
+ /* Dump Extension Comments and Security Labels */
|
|
+ dumpComment(fout, labelq->data,
|
|
+ NULL, "",
|
|
+ extinfo->dobj.catId, 0, extinfo->dobj.dumpId);
|
|
+ dumpSecLabel(fout, labelq->data,
|
|
+ NULL, "",
|
|
+ extinfo->dobj.catId, 0, extinfo->dobj.dumpId);
|
|
+
|
|
+ free(qextname);
|
|
+
|
|
+ destroyPQExpBuffer(q);
|
|
+ destroyPQExpBuffer(delq);
|
|
+ destroyPQExpBuffer(labelq);
|
|
+}
|
|
+
|
|
+/*
+ * dumpType
+ * writes out to fout the queries to recreate a user-defined type
+ */
+static void
|
|
+dumpType(Archive *fout, TypeInfo *tyinfo)
|
|
+{
|
|
+ /* Skip if not to be dumped */
|
|
+ if (!tyinfo->dobj.dump || dataOnly)
|
|
+ return;
|
|
+
|
|
+ /* Dump out in proper style */
|
|
+ if (tyinfo->typtype == TYPTYPE_BASE)
|
|
+ dumpBaseType(fout, tyinfo);
|
|
+ else if (tyinfo->typtype == TYPTYPE_DOMAIN)
|
|
+ dumpDomain(fout, tyinfo);
|
|
+ else if (tyinfo->typtype == TYPTYPE_COMPOSITE)
|
|
+ dumpCompositeType(fout, tyinfo);
|
|
+ else if (tyinfo->typtype == TYPTYPE_ENUM)
|
|
+ dumpEnumType(fout, tyinfo);
|
|
+ else if (tyinfo->typtype == TYPTYPE_RANGE)
|
|
+ dumpRangeType(fout, tyinfo);
|
|
+ else if (tyinfo->typtype == TYPTYPE_PSEUDO && !tyinfo->isDefined)
|
|
+ dumpUndefinedType(fout, tyinfo);
|
|
+ else
|
|
+ write_msg(NULL, "WARNING: typtype of data type \"%s\" appears to be invalid\n",
|
|
+ tyinfo->dobj.name);
|
|
+}
|
|
+
|
|
+/*
+ * dumpEnumType
+ * writes out to fout the queries to recreate a user-defined enum type
+ */
+static void
|
|
+dumpEnumType(Archive *fout, TypeInfo *tyinfo)
|
|
+{
|
|
+ PQExpBuffer q = createPQExpBuffer();
|
|
+ PQExpBuffer delq = createPQExpBuffer();
|
|
+ PQExpBuffer labelq = createPQExpBuffer();
|
|
+ PQExpBuffer query = createPQExpBuffer();
|
|
+ PGresult *res;
|
|
+ int num,
|
|
+ i;
|
|
+ Oid enum_oid;
|
|
+ char *qtypname;
|
|
+ char *label;
|
|
+
|
|
+ /* Set proper schema search path */
|
|
+ selectSourceSchema(fout, "pg_catalog");
|
|
+
|
|
+ if (fout->remoteVersion >= 90100)
|
|
+ appendPQExpBuffer(query, "SELECT oid, enumlabel "
|
|
+ "FROM pg_catalog.pg_enum "
|
|
+ "WHERE enumtypid = '%u'"
|
|
+ "ORDER BY enumsortorder",
|
|
+ tyinfo->dobj.catId.oid);
|
|
+ else
|
|
+ appendPQExpBuffer(query, "SELECT oid, enumlabel "
|
|
+ "FROM pg_catalog.pg_enum "
|
|
+ "WHERE enumtypid = '%u'"
|
|
+ "ORDER BY oid",
|
|
+ tyinfo->dobj.catId.oid);
|
|
+
|
|
+ res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
|
|
+
|
|
+ num = PQntuples(res);
|
|
+
|
|
+ qtypname = pg_strdup(fmtId(tyinfo->dobj.name));
|
|
+
|
|
+ /*
|
|
+ * DROP must be fully qualified in case same name appears in pg_catalog.
|
|
+ * CASCADE shouldn't be required here as for normal types since the I/O
|
|
+ * functions are generic and do not get dropped.
|
|
+ */
|
|
+ appendPQExpBuffer(delq, "DROP TYPE %s.",
|
|
+ fmtId(tyinfo->dobj.namespace->dobj.name));
|
|
+ appendPQExpBuffer(delq, "%s;\n",
|
|
+ qtypname);
|
|
+
|
|
+ if (binary_upgrade)
|
|
+ binary_upgrade_set_type_oids_by_type_oid(fout, q,
|
|
+ tyinfo->dobj.catId.oid);
|
|
+
|
|
+ appendPQExpBuffer(q, "CREATE TYPE %s AS ENUM (",
|
|
+ qtypname);
|
|
+
|
|
+ if (!binary_upgrade)
|
|
+ {
|
|
+ /* Labels with server-assigned oids */
|
|
+ for (i = 0; i < num; i++)
|
|
+ {
|
|
+ label = PQgetvalue(res, i, PQfnumber(res, "enumlabel"));
|
|
+ if (i > 0)
|
|
+ appendPQExpBufferChar(q, ',');
|
|
+ appendPQExpBufferStr(q, "\n ");
|
|
+ appendStringLiteralAH(q, label, fout);
|
|
+ }
|
|
+ }
|
|
+
|
|
+ appendPQExpBufferStr(q, "\n);\n");
|
|
+
|
|
+ if (binary_upgrade)
|
|
+ {
|
|
+ /* Labels with dump-assigned (preserved) oids */
|
|
+ for (i = 0; i < num; i++)
|
|
+ {
|
|
+ enum_oid = atooid(PQgetvalue(res, i, PQfnumber(res, "oid")));
|
|
+ label = PQgetvalue(res, i, PQfnumber(res, "enumlabel"));
|
|
+
|
|
+ if (i == 0)
|
|
+ appendPQExpBufferStr(q, "\n-- For binary upgrade, must preserve pg_enum oids\n");
|
|
+ appendPQExpBuffer(q,
|
|
+ "SELECT binary_upgrade.set_next_pg_enum_oid('%u'::pg_catalog.oid);\n",
|
|
+ enum_oid);
|
|
+ appendPQExpBuffer(q, "ALTER TYPE %s.",
|
|
+ fmtId(tyinfo->dobj.namespace->dobj.name));
|
|
+ appendPQExpBuffer(q, "%s ADD VALUE ",
|
|
+ qtypname);
|
|
+ appendStringLiteralAH(q, label, fout);
|
|
+ appendPQExpBufferStr(q, ";\n\n");
|
|
+ }
|
|
+ }
|
|
+
|
|
+ appendPQExpBuffer(labelq, "TYPE %s", qtypname);
|
|
+
|
|
+ if (binary_upgrade)
|
|
+ binary_upgrade_extension_member(q, &tyinfo->dobj, labelq->data);
|
|
+
|
|
+ ArchiveEntry(fout, tyinfo->dobj.catId, tyinfo->dobj.dumpId,
|
|
+ tyinfo->dobj.name,
|
|
+ tyinfo->dobj.namespace->dobj.name,
|
|
+ NULL,
|
|
+ tyinfo->rolname, false,
|
|
+ "TYPE", SECTION_PRE_DATA,
|
|
+ q->data, delq->data, NULL,
|
|
+ NULL, 0,
|
|
+ NULL, NULL);
|
|
+
|
|
+ /* Dump Type Comments and Security Labels */
|
|
+ dumpComment(fout, labelq->data,
|
|
+ tyinfo->dobj.namespace->dobj.name, tyinfo->rolname,
|
|
+ tyinfo->dobj.catId, 0, tyinfo->dobj.dumpId);
|
|
+ dumpSecLabel(fout, labelq->data,
|
|
+ tyinfo->dobj.namespace->dobj.name, tyinfo->rolname,
|
|
+ tyinfo->dobj.catId, 0, tyinfo->dobj.dumpId);
|
|
+
|
|
+ dumpACL(fout, tyinfo->dobj.catId, tyinfo->dobj.dumpId, "TYPE",
|
|
+ qtypname, NULL, tyinfo->dobj.name,
|
|
+ tyinfo->dobj.namespace->dobj.name,
|
|
+ tyinfo->rolname, tyinfo->typacl);
|
|
+
|
|
+ PQclear(res);
|
|
+ destroyPQExpBuffer(q);
|
|
+ destroyPQExpBuffer(delq);
|
|
+ destroyPQExpBuffer(labelq);
|
|
+ destroyPQExpBuffer(query);
|
|
+}
|
|
+
|
|
+/*
+ * dumpRangeType
+ * writes out to fout the queries to recreate a user-defined range type
+ */
+static void
|
|
+dumpRangeType(Archive *fout, TypeInfo *tyinfo)
|
|
+{
|
|
+ PQExpBuffer q = createPQExpBuffer();
|
|
+ PQExpBuffer delq = createPQExpBuffer();
|
|
+ PQExpBuffer labelq = createPQExpBuffer();
|
|
+ PQExpBuffer query = createPQExpBuffer();
|
|
+ PGresult *res;
|
|
+ Oid collationOid;
|
|
+ char *qtypname;
|
|
+ char *procname;
|
|
+
|
|
+ /*
|
|
+ * select appropriate schema to ensure names in CREATE are properly
|
|
+ * qualified
|
|
+ */
|
|
+ selectSourceSchema(fout, tyinfo->dobj.namespace->dobj.name);
|
|
+
|
|
+ appendPQExpBuffer(query,
|
|
+ "SELECT pg_catalog.format_type(rngsubtype, NULL) AS rngsubtype, "
|
|
+ "opc.opcname AS opcname, "
|
|
+ "(SELECT nspname FROM pg_catalog.pg_namespace nsp "
|
|
+ " WHERE nsp.oid = opc.opcnamespace) AS opcnsp, "
|
|
+ "opc.opcdefault, "
|
|
+ "CASE WHEN rngcollation = st.typcollation THEN 0 "
|
|
+ " ELSE rngcollation END AS collation, "
|
|
+ "rngcanonical, rngsubdiff "
|
|
+ "FROM pg_catalog.pg_range r, pg_catalog.pg_type st, "
|
|
+ " pg_catalog.pg_opclass opc "
|
|
+ "WHERE st.oid = rngsubtype AND opc.oid = rngsubopc AND "
|
|
+ "rngtypid = '%u'",
|
|
+ tyinfo->dobj.catId.oid);
|
|
+
|
|
+ res = ExecuteSqlQueryForSingleRow(fout, query->data);
|
|
+
|
|
+ qtypname = pg_strdup(fmtId(tyinfo->dobj.name));
|
|
+
|
|
+ /*
|
|
+ * DROP must be fully qualified in case same name appears in pg_catalog.
|
|
+ * CASCADE shouldn't be required here as for normal types since the I/O
|
|
+ * functions are generic and do not get dropped.
|
|
+ */
|
|
+ appendPQExpBuffer(delq, "DROP TYPE %s.",
|
|
+ fmtId(tyinfo->dobj.namespace->dobj.name));
|
|
+ appendPQExpBuffer(delq, "%s;\n",
|
|
+ qtypname);
|
|
+
|
|
+ if (binary_upgrade)
|
|
+ binary_upgrade_set_type_oids_by_type_oid(fout,
|
|
+ q, tyinfo->dobj.catId.oid);
|
|
+
|
|
+ appendPQExpBuffer(q, "CREATE TYPE %s AS RANGE (",
|
|
+ qtypname);
|
|
+
|
|
+ appendPQExpBuffer(q, "\n subtype = %s",
|
|
+ PQgetvalue(res, 0, PQfnumber(res, "rngsubtype")));
|
|
+
|
|
+ /* print subtype_opclass only if not default for subtype */
|
|
+ if (PQgetvalue(res, 0, PQfnumber(res, "opcdefault"))[0] != 't')
|
|
+ {
|
|
+ char *opcname = PQgetvalue(res, 0, PQfnumber(res, "opcname"));
|
|
+ char *nspname = PQgetvalue(res, 0, PQfnumber(res, "opcnsp"));
|
|
+
|
|
+ /* always schema-qualify, don't try to be smart */
|
|
+ appendPQExpBuffer(q, ",\n subtype_opclass = %s.",
|
|
+ fmtId(nspname));
|
|
+ appendPQExpBufferStr(q, fmtId(opcname));
|
|
+ }
|
|
+
|
|
+ collationOid = atooid(PQgetvalue(res, 0, PQfnumber(res, "collation")));
|
|
+ if (OidIsValid(collationOid))
|
|
+ {
|
|
+ CollInfo *coll = findCollationByOid(collationOid);
|
|
+
|
|
+ if (coll)
|
|
+ {
|
|
+ /* always schema-qualify, don't try to be smart */
|
|
+ appendPQExpBuffer(q, ",\n collation = %s.",
|
|
+ fmtId(coll->dobj.namespace->dobj.name));
|
|
+ appendPQExpBufferStr(q, fmtId(coll->dobj.name));
|
|
+ }
|
|
+ }
|
|
+
|
|
+ procname = PQgetvalue(res, 0, PQfnumber(res, "rngcanonical"));
|
|
+ if (strcmp(procname, "-") != 0)
|
|
+ appendPQExpBuffer(q, ",\n canonical = %s", procname);
|
|
+
|
|
+ procname = PQgetvalue(res, 0, PQfnumber(res, "rngsubdiff"));
|
|
+ if (strcmp(procname, "-") != 0)
|
|
+ appendPQExpBuffer(q, ",\n subtype_diff = %s", procname);
|
|
+
|
|
+ appendPQExpBufferStr(q, "\n);\n");
|
|
+
|
|
+ appendPQExpBuffer(labelq, "TYPE %s", qtypname);
|
|
+
|
|
+ if (binary_upgrade)
|
|
+ binary_upgrade_extension_member(q, &tyinfo->dobj, labelq->data);
|
|
+
|
|
+ ArchiveEntry(fout, tyinfo->dobj.catId, tyinfo->dobj.dumpId,
|
|
+ tyinfo->dobj.name,
|
|
+ tyinfo->dobj.namespace->dobj.name,
|
|
+ NULL,
|
|
+ tyinfo->rolname, false,
|
|
+ "TYPE", SECTION_PRE_DATA,
|
|
+ q->data, delq->data, NULL,
|
|
+ NULL, 0,
|
|
+ NULL, NULL);
|
|
+
|
|
+ /* Dump Type Comments and Security Labels */
|
|
+ dumpComment(fout, labelq->data,
|
|
+ tyinfo->dobj.namespace->dobj.name, tyinfo->rolname,
|
|
+ tyinfo->dobj.catId, 0, tyinfo->dobj.dumpId);
|
|
+ dumpSecLabel(fout, labelq->data,
|
|
+ tyinfo->dobj.namespace->dobj.name, tyinfo->rolname,
|
|
+ tyinfo->dobj.catId, 0, tyinfo->dobj.dumpId);
|
|
+
|
|
+ dumpACL(fout, tyinfo->dobj.catId, tyinfo->dobj.dumpId, "TYPE",
|
|
+ qtypname, NULL, tyinfo->dobj.name,
|
|
+ tyinfo->dobj.namespace->dobj.name,
|
|
+ tyinfo->rolname, tyinfo->typacl);
|
|
+
|
|
+ PQclear(res);
|
|
+ destroyPQExpBuffer(q);
|
|
+ destroyPQExpBuffer(delq);
|
|
+ destroyPQExpBuffer(labelq);
|
|
+ destroyPQExpBuffer(query);
|
|
+}
|
|
+
|
|
+/*
+ * dumpUndefinedType
+ * writes out to fout the queries to recreate a !typisdefined type
+ *
+ * This is a shell type, but we use different terminology to distinguish
+ * this case from where we have to emit a shell type definition to break
+ * circular dependencies. An undefined type shouldn't ever have anything
+ * depending on it.
+ */
+static void
|
|
+dumpUndefinedType(Archive *fout, TypeInfo *tyinfo)
|
|
+{
|
|
+ PQExpBuffer q = createPQExpBuffer();
|
|
+ PQExpBuffer delq = createPQExpBuffer();
|
|
+ PQExpBuffer labelq = createPQExpBuffer();
|
|
+ char *qtypname;
|
|
+
|
|
+ qtypname = pg_strdup(fmtId(tyinfo->dobj.name));
|
|
+
|
|
+ /*
|
|
+ * DROP must be fully qualified in case same name appears in pg_catalog.
|
|
+ */
|
|
+ appendPQExpBuffer(delq, "DROP TYPE %s.",
|
|
+ fmtId(tyinfo->dobj.namespace->dobj.name));
|
|
+ appendPQExpBuffer(delq, "%s;\n",
|
|
+ qtypname);
|
|
+
|
|
+ if (binary_upgrade)
|
|
+ binary_upgrade_set_type_oids_by_type_oid(fout,
|
|
+ q, tyinfo->dobj.catId.oid);
|
|
+
|
|
+ appendPQExpBuffer(q, "CREATE TYPE %s;\n",
|
|
+ qtypname);
|
|
+
|
|
+ appendPQExpBuffer(labelq, "TYPE %s", qtypname);
|
|
+
|
|
+ if (binary_upgrade)
|
|
+ binary_upgrade_extension_member(q, &tyinfo->dobj, labelq->data);
|
|
+
|
|
+ ArchiveEntry(fout, tyinfo->dobj.catId, tyinfo->dobj.dumpId,
|
|
+ tyinfo->dobj.name,
|
|
+ tyinfo->dobj.namespace->dobj.name,
|
|
+ NULL,
|
|
+ tyinfo->rolname, false,
|
|
+ "TYPE", SECTION_PRE_DATA,
|
|
+ q->data, delq->data, NULL,
|
|
+ NULL, 0,
|
|
+ NULL, NULL);
|
|
+
|
|
+ /* Dump Type Comments and Security Labels */
|
|
+ dumpComment(fout, labelq->data,
|
|
+ tyinfo->dobj.namespace->dobj.name, tyinfo->rolname,
|
|
+ tyinfo->dobj.catId, 0, tyinfo->dobj.dumpId);
|
|
+ dumpSecLabel(fout, labelq->data,
|
|
+ tyinfo->dobj.namespace->dobj.name, tyinfo->rolname,
|
|
+ tyinfo->dobj.catId, 0, tyinfo->dobj.dumpId);
|
|
+
|
|
+ dumpACL(fout, tyinfo->dobj.catId, tyinfo->dobj.dumpId, "TYPE",
|
|
+ qtypname, NULL, tyinfo->dobj.name,
|
|
+ tyinfo->dobj.namespace->dobj.name,
|
|
+ tyinfo->rolname, tyinfo->typacl);
|
|
+
|
|
+ destroyPQExpBuffer(q);
|
|
+ destroyPQExpBuffer(delq);
|
|
+ destroyPQExpBuffer(labelq);
|
|
+}
|
|
+
|
|
+/*
+ * dumpBaseType
+ * writes out to fout the queries to recreate a user-defined base type
+ */
+static void
|
|
+dumpBaseType(Archive *fout, TypeInfo *tyinfo)
|
|
+{
|
|
+ PQExpBuffer q = createPQExpBuffer();
|
|
+ PQExpBuffer delq = createPQExpBuffer();
|
|
+ PQExpBuffer labelq = createPQExpBuffer();
|
|
+ PQExpBuffer query = createPQExpBuffer();
|
|
+ PGresult *res;
|
|
+ char *qtypname;
|
|
+ char *typlen;
|
|
+ char *typinput;
|
|
+ char *typoutput;
|
|
+ char *typreceive;
|
|
+ char *typsend;
|
|
+ char *typmodin;
|
|
+ char *typmodout;
|
|
+ char *typanalyze;
|
|
+ Oid typreceiveoid;
|
|
+ Oid typsendoid;
|
|
+ Oid typmodinoid;
|
|
+ Oid typmodoutoid;
|
|
+ Oid typanalyzeoid;
|
|
+ char *typcategory;
|
|
+ char *typispreferred;
|
|
+ char *typdelim;
|
|
+ char *typbyval;
|
|
+ char *typalign;
|
|
+ char *typstorage;
|
|
+ char *typcollatable;
|
|
+ char *typdefault;
|
|
+ bool typdefault_is_literal = false;
|
|
+
|
|
+ /* Set proper schema search path so regproc references list correctly */
|
|
+ selectSourceSchema(fout, tyinfo->dobj.namespace->dobj.name);
|
|
+
|
|
+ /* Fetch type-specific details */
|
|
+ if (fout->remoteVersion >= 90100)
|
|
+ {
|
|
+ appendPQExpBuffer(query, "SELECT typlen, "
|
|
+ "typinput, typoutput, typreceive, typsend, "
|
|
+ "typmodin, typmodout, typanalyze, "
|
|
+ "typreceive::pg_catalog.oid AS typreceiveoid, "
|
|
+ "typsend::pg_catalog.oid AS typsendoid, "
|
|
+ "typmodin::pg_catalog.oid AS typmodinoid, "
|
|
+ "typmodout::pg_catalog.oid AS typmodoutoid, "
|
|
+ "typanalyze::pg_catalog.oid AS typanalyzeoid, "
|
|
+ "typcategory, typispreferred, "
|
|
+ "typdelim, typbyval, typalign, typstorage, "
|
|
+ "(typcollation <> 0) AS typcollatable, "
|
|
+ "pg_catalog.pg_get_expr(typdefaultbin, 0) AS typdefaultbin, typdefault "
|
|
+ "FROM pg_catalog.pg_type "
|
|
+ "WHERE oid = '%u'::pg_catalog.oid",
|
|
+ tyinfo->dobj.catId.oid);
|
|
+ }
|
|
+ else if (fout->remoteVersion >= 80400)
|
|
+ {
|
|
+ appendPQExpBuffer(query, "SELECT typlen, "
|
|
+ "typinput, typoutput, typreceive, typsend, "
|
|
+ "typmodin, typmodout, typanalyze, "
|
|
+ "typreceive::pg_catalog.oid AS typreceiveoid, "
|
|
+ "typsend::pg_catalog.oid AS typsendoid, "
|
|
+ "typmodin::pg_catalog.oid AS typmodinoid, "
|
|
+ "typmodout::pg_catalog.oid AS typmodoutoid, "
|
|
+ "typanalyze::pg_catalog.oid AS typanalyzeoid, "
|
|
+ "typcategory, typispreferred, "
|
|
+ "typdelim, typbyval, typalign, typstorage, "
|
|
+ "false AS typcollatable, "
|
|
+ "pg_catalog.pg_get_expr(typdefaultbin, 0) AS typdefaultbin, typdefault "
|
|
+ "FROM pg_catalog.pg_type "
|
|
+ "WHERE oid = '%u'::pg_catalog.oid",
|
|
+ tyinfo->dobj.catId.oid);
|
|
+ }
|
|
+ else if (fout->remoteVersion >= 80300)
|
|
+ {
|
|
+ /* Before 8.4, pg_get_expr does not allow 0 for its second arg */
|
|
+ appendPQExpBuffer(query, "SELECT typlen, "
|
|
+ "typinput, typoutput, typreceive, typsend, "
|
|
+ "typmodin, typmodout, typanalyze, "
|
|
+ "typreceive::pg_catalog.oid AS typreceiveoid, "
|
|
+ "typsend::pg_catalog.oid AS typsendoid, "
|
|
+ "typmodin::pg_catalog.oid AS typmodinoid, "
|
|
+ "typmodout::pg_catalog.oid AS typmodoutoid, "
|
|
+ "typanalyze::pg_catalog.oid AS typanalyzeoid, "
|
|
+ "'U' AS typcategory, false AS typispreferred, "
|
|
+ "typdelim, typbyval, typalign, typstorage, "
|
|
+ "false AS typcollatable, "
|
|
+ "pg_catalog.pg_get_expr(typdefaultbin, 'pg_catalog.pg_type'::pg_catalog.regclass) AS typdefaultbin, typdefault "
|
|
+ "FROM pg_catalog.pg_type "
|
|
+ "WHERE oid = '%u'::pg_catalog.oid",
|
|
+ tyinfo->dobj.catId.oid);
|
|
+ }
|
|
+ else if (fout->remoteVersion >= 80000)
|
|
+ {
|
|
+ appendPQExpBuffer(query, "SELECT typlen, "
|
|
+ "typinput, typoutput, typreceive, typsend, "
|
|
+ "'-' AS typmodin, '-' AS typmodout, "
|
|
+ "typanalyze, "
|
|
+ "typreceive::pg_catalog.oid AS typreceiveoid, "
|
|
+ "typsend::pg_catalog.oid AS typsendoid, "
|
|
+ "0 AS typmodinoid, 0 AS typmodoutoid, "
|
|
+ "typanalyze::pg_catalog.oid AS typanalyzeoid, "
|
|
+ "'U' AS typcategory, false AS typispreferred, "
|
|
+ "typdelim, typbyval, typalign, typstorage, "
|
|
+ "false AS typcollatable, "
|
|
+ "pg_catalog.pg_get_expr(typdefaultbin, 'pg_catalog.pg_type'::pg_catalog.regclass) AS typdefaultbin, typdefault "
|
|
+ "FROM pg_catalog.pg_type "
|
|
+ "WHERE oid = '%u'::pg_catalog.oid",
|
|
+ tyinfo->dobj.catId.oid);
|
|
+ }
|
|
+ else if (fout->remoteVersion >= 70400)
|
|
+ {
|
|
+ appendPQExpBuffer(query, "SELECT typlen, "
|
|
+ "typinput, typoutput, typreceive, typsend, "
|
|
+ "'-' AS typmodin, '-' AS typmodout, "
|
|
+ "'-' AS typanalyze, "
|
|
+ "typreceive::pg_catalog.oid AS typreceiveoid, "
|
|
+ "typsend::pg_catalog.oid AS typsendoid, "
|
|
+ "0 AS typmodinoid, 0 AS typmodoutoid, "
|
|
+ "0 AS typanalyzeoid, "
|
|
+ "'U' AS typcategory, false AS typispreferred, "
|
|
+ "typdelim, typbyval, typalign, typstorage, "
|
|
+ "false AS typcollatable, "
|
|
+ "pg_catalog.pg_get_expr(typdefaultbin, 'pg_catalog.pg_type'::pg_catalog.regclass) AS typdefaultbin, typdefault "
|
|
+ "FROM pg_catalog.pg_type "
|
|
+ "WHERE oid = '%u'::pg_catalog.oid",
|
|
+ tyinfo->dobj.catId.oid);
|
|
+ }
|
|
+ else if (fout->remoteVersion >= 70300)
|
|
+ {
|
|
+ appendPQExpBuffer(query, "SELECT typlen, "
|
|
+ "typinput, typoutput, "
|
|
+ "'-' AS typreceive, '-' AS typsend, "
|
|
+ "'-' AS typmodin, '-' AS typmodout, "
|
|
+ "'-' AS typanalyze, "
|
|
+ "0 AS typreceiveoid, 0 AS typsendoid, "
|
|
+ "0 AS typmodinoid, 0 AS typmodoutoid, "
|
|
+ "0 AS typanalyzeoid, "
|
|
+ "'U' AS typcategory, false AS typispreferred, "
|
|
+ "typdelim, typbyval, typalign, typstorage, "
|
|
+ "false AS typcollatable, "
|
|
+ "pg_catalog.pg_get_expr(typdefaultbin, 'pg_catalog.pg_type'::pg_catalog.regclass) AS typdefaultbin, typdefault "
|
|
+ "FROM pg_catalog.pg_type "
|
|
+ "WHERE oid = '%u'::pg_catalog.oid",
|
|
+ tyinfo->dobj.catId.oid);
|
|
+ }
|
|
+ else if (fout->remoteVersion >= 70200)
|
|
+ {
|
|
+ /*
|
|
+ * Note: although pre-7.3 catalogs contain typreceive and typsend,
|
|
+ * ignore them because they are not right.
|
|
+ */
|
|
+ appendPQExpBuffer(query, "SELECT typlen, "
|
|
+ "typinput, typoutput, "
|
|
+ "'-' AS typreceive, '-' AS typsend, "
|
|
+ "'-' AS typmodin, '-' AS typmodout, "
|
|
+ "'-' AS typanalyze, "
|
|
+ "0 AS typreceiveoid, 0 AS typsendoid, "
|
|
+ "0 AS typmodinoid, 0 AS typmodoutoid, "
|
|
+ "0 AS typanalyzeoid, "
|
|
+ "'U' AS typcategory, false AS typispreferred, "
|
|
+ "typdelim, typbyval, typalign, typstorage, "
|
|
+ "false AS typcollatable, "
|
|
+ "NULL AS typdefaultbin, typdefault "
|
|
+ "FROM pg_type "
|
|
+ "WHERE oid = '%u'::oid",
|
|
+ tyinfo->dobj.catId.oid);
|
|
+ }
|
|
+ else if (fout->remoteVersion >= 70100)
|
|
+ {
|
|
+ /*
|
|
+ * Ignore pre-7.2 typdefault; the field exists but has an unusable
|
|
+ * representation.
|
|
+ */
|
|
+ appendPQExpBuffer(query, "SELECT typlen, "
|
|
+ "typinput, typoutput, "
|
|
+ "'-' AS typreceive, '-' AS typsend, "
|
|
+ "'-' AS typmodin, '-' AS typmodout, "
|
|
+ "'-' AS typanalyze, "
|
|
+ "0 AS typreceiveoid, 0 AS typsendoid, "
|
|
+ "0 AS typmodinoid, 0 AS typmodoutoid, "
|
|
+ "0 AS typanalyzeoid, "
|
|
+ "'U' AS typcategory, false AS typispreferred, "
|
|
+ "typdelim, typbyval, typalign, typstorage, "
|
|
+ "false AS typcollatable, "
|
|
+ "NULL AS typdefaultbin, NULL AS typdefault "
|
|
+ "FROM pg_type "
|
|
+ "WHERE oid = '%u'::oid",
|
|
+ tyinfo->dobj.catId.oid);
|
|
+ }
|
|
+ else
|
|
+ {
|
|
+ appendPQExpBuffer(query, "SELECT typlen, "
|
|
+ "typinput, typoutput, "
|
|
+ "'-' AS typreceive, '-' AS typsend, "
|
|
+ "'-' AS typmodin, '-' AS typmodout, "
|
|
+ "'-' AS typanalyze, "
|
|
+ "0 AS typreceiveoid, 0 AS typsendoid, "
|
|
+ "0 AS typmodinoid, 0 AS typmodoutoid, "
|
|
+ "0 AS typanalyzeoid, "
|
|
+ "'U' AS typcategory, false AS typispreferred, "
|
|
+ "typdelim, typbyval, typalign, "
|
|
+ "'p'::char AS typstorage, "
|
|
+ "false AS typcollatable, "
|
|
+ "NULL AS typdefaultbin, NULL AS typdefault "
|
|
+ "FROM pg_type "
|
|
+ "WHERE oid = '%u'::oid",
|
|
+ tyinfo->dobj.catId.oid);
|
|
+ }
|
|
+
|
|
+ res = ExecuteSqlQueryForSingleRow(fout, query->data);
|
|
+
|
|
+ typlen = PQgetvalue(res, 0, PQfnumber(res, "typlen"));
|
|
+ typinput = PQgetvalue(res, 0, PQfnumber(res, "typinput"));
|
|
+ typoutput = PQgetvalue(res, 0, PQfnumber(res, "typoutput"));
|
|
+ typreceive = PQgetvalue(res, 0, PQfnumber(res, "typreceive"));
|
|
+ typsend = PQgetvalue(res, 0, PQfnumber(res, "typsend"));
|
|
+ typmodin = PQgetvalue(res, 0, PQfnumber(res, "typmodin"));
|
|
+ typmodout = PQgetvalue(res, 0, PQfnumber(res, "typmodout"));
|
|
+ typanalyze = PQgetvalue(res, 0, PQfnumber(res, "typanalyze"));
|
|
+ typreceiveoid = atooid(PQgetvalue(res, 0, PQfnumber(res, "typreceiveoid")));
|
|
+ typsendoid = atooid(PQgetvalue(res, 0, PQfnumber(res, "typsendoid")));
|
|
+ typmodinoid = atooid(PQgetvalue(res, 0, PQfnumber(res, "typmodinoid")));
|
|
+ typmodoutoid = atooid(PQgetvalue(res, 0, PQfnumber(res, "typmodoutoid")));
|
|
+ typanalyzeoid = atooid(PQgetvalue(res, 0, PQfnumber(res, "typanalyzeoid")));
|
|
+ typcategory = PQgetvalue(res, 0, PQfnumber(res, "typcategory"));
|
|
+ typispreferred = PQgetvalue(res, 0, PQfnumber(res, "typispreferred"));
|
|
+ typdelim = PQgetvalue(res, 0, PQfnumber(res, "typdelim"));
|
|
+ typbyval = PQgetvalue(res, 0, PQfnumber(res, "typbyval"));
|
|
+ typalign = PQgetvalue(res, 0, PQfnumber(res, "typalign"));
|
|
+ typstorage = PQgetvalue(res, 0, PQfnumber(res, "typstorage"));
|
|
+ typcollatable = PQgetvalue(res, 0, PQfnumber(res, "typcollatable"));
|
|
+ if (!PQgetisnull(res, 0, PQfnumber(res, "typdefaultbin")))
|
|
+ typdefault = PQgetvalue(res, 0, PQfnumber(res, "typdefaultbin"));
|
|
+ else if (!PQgetisnull(res, 0, PQfnumber(res, "typdefault")))
|
|
+ {
|
|
+ typdefault = PQgetvalue(res, 0, PQfnumber(res, "typdefault"));
|
|
+ typdefault_is_literal = true; /* it needs quotes */
|
|
+ }
|
|
+ else
|
|
+ typdefault = NULL;
|
|
+
|
|
+ qtypname = pg_strdup(fmtId(tyinfo->dobj.name));
|
|
+
|
|
+ /*
|
|
+ * DROP must be fully qualified in case same name appears in pg_catalog.
|
|
+ * The reason we include CASCADE is that the circular dependency between
|
|
+ * the type and its I/O functions makes it impossible to drop the type any
|
|
+ * other way.
|
|
+ */
|
|
+ appendPQExpBuffer(delq, "DROP TYPE %s.",
|
|
+ fmtId(tyinfo->dobj.namespace->dobj.name));
|
|
+ appendPQExpBuffer(delq, "%s CASCADE;\n",
|
|
+ qtypname);
|
|
+
|
|
+ /* We might already have a shell type, but setting pg_type_oid is harmless */
|
|
+ if (binary_upgrade)
|
|
+ binary_upgrade_set_type_oids_by_type_oid(fout, q,
|
|
+ tyinfo->dobj.catId.oid);
|
|
+
|
|
+ appendPQExpBuffer(q,
|
|
+ "CREATE TYPE %s (\n"
|
|
+ " INTERNALLENGTH = %s",
|
|
+ qtypname,
|
|
+ (strcmp(typlen, "-1") == 0) ? "variable" : typlen);
|
|
+
|
|
+ if (fout->remoteVersion >= 70300)
|
|
+ {
|
|
+ /* regproc result is correctly quoted as of 7.3 */
|
|
+ appendPQExpBuffer(q, ",\n INPUT = %s", typinput);
|
|
+ appendPQExpBuffer(q, ",\n OUTPUT = %s", typoutput);
|
|
+ if (OidIsValid(typreceiveoid))
|
|
+ appendPQExpBuffer(q, ",\n RECEIVE = %s", typreceive);
|
|
+ if (OidIsValid(typsendoid))
|
|
+ appendPQExpBuffer(q, ",\n SEND = %s", typsend);
|
|
+ if (OidIsValid(typmodinoid))
|
|
+ appendPQExpBuffer(q, ",\n TYPMOD_IN = %s", typmodin);
|
|
+ if (OidIsValid(typmodoutoid))
|
|
+ appendPQExpBuffer(q, ",\n TYPMOD_OUT = %s", typmodout);
|
|
+ if (OidIsValid(typanalyzeoid))
|
|
+ appendPQExpBuffer(q, ",\n ANALYZE = %s", typanalyze);
|
|
+ }
|
|
+ else
|
|
+ {
|
|
+ /* regproc delivers an unquoted name before 7.3 */
|
|
+ /* cannot combine these because fmtId uses static result area */
|
|
+ appendPQExpBuffer(q, ",\n INPUT = %s", fmtId(typinput));
|
|
+ appendPQExpBuffer(q, ",\n OUTPUT = %s", fmtId(typoutput));
|
|
+ /* receive/send/typmodin/typmodout/analyze need not be printed */
|
|
+ }
|
|
+
|
|
+ if (strcmp(typcollatable, "t") == 0)
|
|
+ appendPQExpBufferStr(q, ",\n COLLATABLE = true");
|
|
+
|
|
+ if (typdefault != NULL)
|
|
+ {
|
|
+ appendPQExpBufferStr(q, ",\n DEFAULT = ");
|
|
+ if (typdefault_is_literal)
|
|
+ appendStringLiteralAH(q, typdefault, fout);
|
|
+ else
|
|
+ appendPQExpBufferStr(q, typdefault);
|
|
+ }
|
|
+
|
|
+ if (OidIsValid(tyinfo->typelem))
|
|
+ {
|
|
+ char *elemType;
|
|
+
|
|
+ /* reselect schema in case changed by function dump */
|
|
+ selectSourceSchema(fout, tyinfo->dobj.namespace->dobj.name);
|
|
+ elemType = getFormattedTypeName(fout, tyinfo->typelem, zeroAsOpaque);
|
|
+ appendPQExpBuffer(q, ",\n ELEMENT = %s", elemType);
|
|
+ free(elemType);
|
|
+ }
|
|
+
|
|
+ if (strcmp(typcategory, "U") != 0)
|
|
+ {
|
|
+ appendPQExpBufferStr(q, ",\n CATEGORY = ");
|
|
+ appendStringLiteralAH(q, typcategory, fout);
|
|
+ }
|
|
+
|
|
+ if (strcmp(typispreferred, "t") == 0)
|
|
+ appendPQExpBufferStr(q, ",\n PREFERRED = true");
|
|
+
|
|
+ if (typdelim && strcmp(typdelim, ",") != 0)
|
|
+ {
|
|
+ appendPQExpBufferStr(q, ",\n DELIMITER = ");
|
|
+ appendStringLiteralAH(q, typdelim, fout);
|
|
+ }
|
|
+
|
|
+ if (strcmp(typalign, "c") == 0)
|
|
+ appendPQExpBufferStr(q, ",\n ALIGNMENT = char");
|
|
+ else if (strcmp(typalign, "s") == 0)
|
|
+ appendPQExpBufferStr(q, ",\n ALIGNMENT = int2");
|
|
+ else if (strcmp(typalign, "i") == 0)
|
|
+ appendPQExpBufferStr(q, ",\n ALIGNMENT = int4");
|
|
+ else if (strcmp(typalign, "d") == 0)
|
|
+ appendPQExpBufferStr(q, ",\n ALIGNMENT = double");
|
|
+
|
|
+ if (strcmp(typstorage, "p") == 0)
|
|
+ appendPQExpBufferStr(q, ",\n STORAGE = plain");
|
|
+ else if (strcmp(typstorage, "e") == 0)
|
|
+ appendPQExpBufferStr(q, ",\n STORAGE = external");
|
|
+ else if (strcmp(typstorage, "x") == 0)
|
|
+ appendPQExpBufferStr(q, ",\n STORAGE = extended");
|
|
+ else if (strcmp(typstorage, "m") == 0)
|
|
+ appendPQExpBufferStr(q, ",\n STORAGE = main");
|
|
+
|
|
+ if (strcmp(typbyval, "t") == 0)
|
|
+ appendPQExpBufferStr(q, ",\n PASSEDBYVALUE");
|
|
+
|
|
+ appendPQExpBufferStr(q, "\n);\n");
|
|
+
|
|
+ appendPQExpBuffer(labelq, "TYPE %s", qtypname);
|
|
+
|
|
+ if (binary_upgrade)
|
|
+ binary_upgrade_extension_member(q, &tyinfo->dobj, labelq->data);
|
|
+
|
|
+ ArchiveEntry(fout, tyinfo->dobj.catId, tyinfo->dobj.dumpId,
|
|
+ tyinfo->dobj.name,
|
|
+ tyinfo->dobj.namespace->dobj.name,
|
|
+ NULL,
|
|
+ tyinfo->rolname, false,
|
|
+ "TYPE", SECTION_PRE_DATA,
|
|
+ q->data, delq->data, NULL,
|
|
+ NULL, 0,
|
|
+ NULL, NULL);
|
|
+
|
|
+ /* Dump Type Comments and Security Labels */
|
|
+ dumpComment(fout, labelq->data,
|
|
+ tyinfo->dobj.namespace->dobj.name, tyinfo->rolname,
|
|
+ tyinfo->dobj.catId, 0, tyinfo->dobj.dumpId);
|
|
+ dumpSecLabel(fout, labelq->data,
|
|
+ tyinfo->dobj.namespace->dobj.name, tyinfo->rolname,
|
|
+ tyinfo->dobj.catId, 0, tyinfo->dobj.dumpId);
|
|
+
|
|
+ dumpACL(fout, tyinfo->dobj.catId, tyinfo->dobj.dumpId, "TYPE",
|
|
+ qtypname, NULL, tyinfo->dobj.name,
|
|
+ tyinfo->dobj.namespace->dobj.name,
|
|
+ tyinfo->rolname, tyinfo->typacl);
|
|
+
|
|
+ PQclear(res);
|
|
+ destroyPQExpBuffer(q);
|
|
+ destroyPQExpBuffer(delq);
|
|
+ destroyPQExpBuffer(labelq);
|
|
+ destroyPQExpBuffer(query);
|
|
+}
|
|
+
|
|
+/*
+ * dumpDomain
+ * writes out to fout the queries to recreate a user-defined domain
+ */
+static void
|
|
+dumpDomain(Archive *fout, TypeInfo *tyinfo)
|
|
+{
|
|
+ PQExpBuffer q = createPQExpBuffer();
|
|
+ PQExpBuffer delq = createPQExpBuffer();
|
|
+ PQExpBuffer labelq = createPQExpBuffer();
|
|
+ PQExpBuffer query = createPQExpBuffer();
|
|
+ PGresult *res;
|
|
+ int i;
|
|
+ char *qtypname;
|
|
+ char *typnotnull;
|
|
+ char *typdefn;
|
|
+ char *typdefault;
|
|
+ Oid typcollation;
|
|
+ bool typdefault_is_literal = false;
|
|
+
|
|
+ /* Set proper schema search path so type references list correctly */
|
|
+ selectSourceSchema(fout, tyinfo->dobj.namespace->dobj.name);
|
|
+
|
|
+ /* Fetch domain specific details */
|
|
+ if (fout->remoteVersion >= 90100)
|
|
+ {
|
|
+ /* typcollation is new in 9.1 */
|
|
+ appendPQExpBuffer(query, "SELECT t.typnotnull, "
|
|
+ "pg_catalog.format_type(t.typbasetype, t.typtypmod) AS typdefn, "
|
|
+ "pg_catalog.pg_get_expr(t.typdefaultbin, 'pg_catalog.pg_type'::pg_catalog.regclass) AS typdefaultbin, "
|
|
+ "t.typdefault, "
|
|
+ "CASE WHEN t.typcollation <> u.typcollation "
|
|
+ "THEN t.typcollation ELSE 0 END AS typcollation "
|
|
+ "FROM pg_catalog.pg_type t "
|
|
+ "LEFT JOIN pg_catalog.pg_type u ON (t.typbasetype = u.oid) "
|
|
+ "WHERE t.oid = '%u'::pg_catalog.oid",
|
|
+ tyinfo->dobj.catId.oid);
|
|
+ }
|
|
+ else
|
|
+ {
|
|
+ /* We assume here that remoteVersion must be at least 70300 */
|
|
+ appendPQExpBuffer(query, "SELECT typnotnull, "
|
|
+ "pg_catalog.format_type(typbasetype, typtypmod) AS typdefn, "
|
|
+ "pg_catalog.pg_get_expr(typdefaultbin, 'pg_catalog.pg_type'::pg_catalog.regclass) AS typdefaultbin, "
|
|
+ "typdefault, 0 AS typcollation "
|
|
+ "FROM pg_catalog.pg_type "
|
|
+ "WHERE oid = '%u'::pg_catalog.oid",
|
|
+ tyinfo->dobj.catId.oid);
|
|
+ }
|
|
+
|
|
+ res = ExecuteSqlQueryForSingleRow(fout, query->data);
|
|
+
|
|
+ typnotnull = PQgetvalue(res, 0, PQfnumber(res, "typnotnull"));
|
|
+ typdefn = PQgetvalue(res, 0, PQfnumber(res, "typdefn"));
|
|
+ if (!PQgetisnull(res, 0, PQfnumber(res, "typdefaultbin")))
|
|
+ typdefault = PQgetvalue(res, 0, PQfnumber(res, "typdefaultbin"));
|
|
+ else if (!PQgetisnull(res, 0, PQfnumber(res, "typdefault")))
|
|
+ {
|
|
+ typdefault = PQgetvalue(res, 0, PQfnumber(res, "typdefault"));
|
|
+ typdefault_is_literal = true; /* it needs quotes */
|
|
+ }
|
|
+ else
|
|
+ typdefault = NULL;
|
|
+ typcollation = atooid(PQgetvalue(res, 0, PQfnumber(res, "typcollation")));
|
|
+
|
|
+ if (binary_upgrade)
|
|
+ binary_upgrade_set_type_oids_by_type_oid(fout, q,
|
|
+ tyinfo->dobj.catId.oid);
|
|
+
|
|
+ qtypname = pg_strdup(fmtId(tyinfo->dobj.name));
|
|
+
|
|
+ appendPQExpBuffer(q,
|
|
+ "CREATE DOMAIN %s AS %s",
|
|
+ qtypname,
|
|
+ typdefn);
|
|
+
|
|
+ /* Print collation only if different from base type's collation */
|
|
+ if (OidIsValid(typcollation))
|
|
+ {
|
|
+ CollInfo *coll;
|
|
+
|
|
+ coll = findCollationByOid(typcollation);
|
|
+ if (coll)
|
|
+ {
|
|
+ /* always schema-qualify, don't try to be smart */
|
|
+ appendPQExpBuffer(q, " COLLATE %s.",
|
|
+ fmtId(coll->dobj.namespace->dobj.name));
|
|
+ appendPQExpBufferStr(q, fmtId(coll->dobj.name));
|
|
+ }
|
|
+ }
|
|
+
|
|
+ if (typnotnull[0] == 't')
|
|
+ appendPQExpBufferStr(q, " NOT NULL");
|
|
+
|
|
+ if (typdefault != NULL)
|
|
+ {
|
|
+ appendPQExpBufferStr(q, " DEFAULT ");
|
|
+ if (typdefault_is_literal)
|
|
+ appendStringLiteralAH(q, typdefault, fout);
|
|
+ else
|
|
+ appendPQExpBufferStr(q, typdefault);
|
|
+ }
|
|
+
|
|
+ PQclear(res);
|
|
+
|
|
+ /*
|
|
+ * Add any CHECK constraints for the domain
|
|
+ */
|
|
+ for (i = 0; i < tyinfo->nDomChecks; i++)
|
|
+ {
|
|
+ ConstraintInfo *domcheck = &(tyinfo->domChecks[i]);
|
|
+
|
|
+ if (!domcheck->separate)
|
|
+ appendPQExpBuffer(q, "\n\tCONSTRAINT %s %s",
|
|
+ fmtId(domcheck->dobj.name), domcheck->condef);
|
|
+ }
|
|
+
|
|
+ appendPQExpBufferStr(q, ";\n");
|
|
+
|
|
+ /*
|
|
+ * DROP must be fully qualified in case same name appears in pg_catalog
|
|
+ */
|
|
+ appendPQExpBuffer(delq, "DROP DOMAIN %s.",
|
|
+ fmtId(tyinfo->dobj.namespace->dobj.name));
|
|
+ appendPQExpBuffer(delq, "%s;\n",
|
|
+ qtypname);
|
|
+
|
|
+ appendPQExpBuffer(labelq, "DOMAIN %s", qtypname);
|
|
+
|
|
+ if (binary_upgrade)
|
|
+ binary_upgrade_extension_member(q, &tyinfo->dobj, labelq->data);
|
|
+
|
|
+ ArchiveEntry(fout, tyinfo->dobj.catId, tyinfo->dobj.dumpId,
|
|
+ tyinfo->dobj.name,
|
|
+ tyinfo->dobj.namespace->dobj.name,
|
|
+ NULL,
|
|
+ tyinfo->rolname, false,
|
|
+ "DOMAIN", SECTION_PRE_DATA,
|
|
+ q->data, delq->data, NULL,
|
|
+ NULL, 0,
|
|
+ NULL, NULL);
|
|
+
|
|
+ /* Dump Domain Comments and Security Labels */
|
|
+ dumpComment(fout, labelq->data,
|
|
+ tyinfo->dobj.namespace->dobj.name, tyinfo->rolname,
|
|
+ tyinfo->dobj.catId, 0, tyinfo->dobj.dumpId);
|
|
+ dumpSecLabel(fout, labelq->data,
|
|
+ tyinfo->dobj.namespace->dobj.name, tyinfo->rolname,
|
|
+ tyinfo->dobj.catId, 0, tyinfo->dobj.dumpId);
|
|
+
|
|
+ dumpACL(fout, tyinfo->dobj.catId, tyinfo->dobj.dumpId, "TYPE",
|
|
+ qtypname, NULL, tyinfo->dobj.name,
|
|
+ tyinfo->dobj.namespace->dobj.name,
|
|
+ tyinfo->rolname, tyinfo->typacl);
|
|
+
|
|
+ destroyPQExpBuffer(q);
|
|
+ destroyPQExpBuffer(delq);
|
|
+ destroyPQExpBuffer(labelq);
|
|
+ destroyPQExpBuffer(query);
|
|
+}
|
|
+
|
|
+/*
+ * dumpCompositeType
+ * writes out to fout the queries to recreate a user-defined stand-alone
+ * composite type
+ */
+static void
|
|
+dumpCompositeType(Archive *fout, TypeInfo *tyinfo)
|
|
+{
|
|
+ PQExpBuffer q = createPQExpBuffer();
|
|
+ PQExpBuffer dropped = createPQExpBuffer();
|
|
+ PQExpBuffer delq = createPQExpBuffer();
|
|
+ PQExpBuffer labelq = createPQExpBuffer();
|
|
+ PQExpBuffer query = createPQExpBuffer();
|
|
+ PGresult *res;
|
|
+ char *qtypname;
|
|
+ int ntups;
|
|
+ int i_attname;
|
|
+ int i_atttypdefn;
|
|
+ int i_attlen;
|
|
+ int i_attalign;
|
|
+ int i_attisdropped;
|
|
+ int i_attcollation;
|
|
+ int i;
|
|
+ int actual_atts;
|
|
+
|
|
+ /* Set proper schema search path so type references list correctly */
|
|
+ selectSourceSchema(fout, tyinfo->dobj.namespace->dobj.name);
|
|
+
|
|
+ /* Fetch type specific details */
|
|
+ if (fout->remoteVersion >= 90100)
|
|
+ {
|
|
+ /*
|
|
+ * attcollation is new in 9.1. Since we only want to dump COLLATE
|
|
+ * clauses for attributes whose collation is different from their
|
|
+ * type's default, we use a CASE here to suppress uninteresting
|
|
+ * attcollations cheaply. atttypid will be 0 for dropped columns;
|
|
+ * collation does not matter for those.
|
|
+ */
|
|
+ appendPQExpBuffer(query, "SELECT a.attname, "
|
|
+ "pg_catalog.format_type(a.atttypid, a.atttypmod) AS atttypdefn, "
|
|
+ "a.attlen, a.attalign, a.attisdropped, "
|
|
+ "CASE WHEN a.attcollation <> at.typcollation "
|
|
+ "THEN a.attcollation ELSE 0 END AS attcollation "
|
|
+ "FROM pg_catalog.pg_type ct "
|
|
+ "JOIN pg_catalog.pg_attribute a ON a.attrelid = ct.typrelid "
|
|
+ "LEFT JOIN pg_catalog.pg_type at ON at.oid = a.atttypid "
|
|
+ "WHERE ct.oid = '%u'::pg_catalog.oid "
|
|
+ "ORDER BY a.attnum ",
|
|
+ tyinfo->dobj.catId.oid);
|
|
+ }
|
|
+ else
|
|
+ {
|
|
+ /*
|
|
+ * We assume here that remoteVersion must be at least 70300. Since
|
|
+ * ALTER TYPE could not drop columns until 9.1, attisdropped should
|
|
+ * always be false.
|
|
+ */
|
|
+ appendPQExpBuffer(query, "SELECT a.attname, "
|
|
+ "pg_catalog.format_type(a.atttypid, a.atttypmod) AS atttypdefn, "
|
|
+ "a.attlen, a.attalign, a.attisdropped, "
|
|
+ "0 AS attcollation "
|
|
+ "FROM pg_catalog.pg_type ct, pg_catalog.pg_attribute a "
|
|
+ "WHERE ct.oid = '%u'::pg_catalog.oid "
|
|
+ "AND a.attrelid = ct.typrelid "
|
|
+ "ORDER BY a.attnum ",
|
|
+ tyinfo->dobj.catId.oid);
|
|
+ }
|
|
+
|
|
+ res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
|
|
+
|
|
+ ntups = PQntuples(res);
|
|
+
|
|
+ i_attname = PQfnumber(res, "attname");
|
|
+ i_atttypdefn = PQfnumber(res, "atttypdefn");
|
|
+ i_attlen = PQfnumber(res, "attlen");
|
|
+ i_attalign = PQfnumber(res, "attalign");
|
|
+ i_attisdropped = PQfnumber(res, "attisdropped");
|
|
+ i_attcollation = PQfnumber(res, "attcollation");
|
|
+
|
|
+ if (binary_upgrade)
|
|
+ {
|
|
+ binary_upgrade_set_type_oids_by_type_oid(fout, q,
|
|
+ tyinfo->dobj.catId.oid);
|
|
+ binary_upgrade_set_pg_class_oids(fout, q, tyinfo->typrelid, false);
|
|
+ }
|
|
+
|
|
+ qtypname = pg_strdup(fmtId(tyinfo->dobj.name));
|
|
+
|
|
+ appendPQExpBuffer(q, "CREATE TYPE %s AS (",
|
|
+ qtypname);
|
|
+
|
|
+ actual_atts = 0;
|
|
+ for (i = 0; i < ntups; i++)
|
|
+ {
|
|
+ char *attname;
|
|
+ char *atttypdefn;
|
|
+ char *attlen;
|
|
+ char *attalign;
|
|
+ bool attisdropped;
|
|
+ Oid attcollation;
|
|
+
|
|
+ attname = PQgetvalue(res, i, i_attname);
|
|
+ atttypdefn = PQgetvalue(res, i, i_atttypdefn);
|
|
+ attlen = PQgetvalue(res, i, i_attlen);
|
|
+ attalign = PQgetvalue(res, i, i_attalign);
|
|
+ attisdropped = (PQgetvalue(res, i, i_attisdropped)[0] == 't');
|
|
+ attcollation = atooid(PQgetvalue(res, i, i_attcollation));
|
|
+
|
|
+ if (attisdropped && !binary_upgrade)
|
|
+ continue;
|
|
+
|
|
+ /* Format properly if not first attr */
|
|
+ if (actual_atts++ > 0)
|
|
+ appendPQExpBufferChar(q, ',');
|
|
+ appendPQExpBufferStr(q, "\n\t");
|
|
+
|
|
+ if (!attisdropped)
|
|
+ {
|
|
+ appendPQExpBuffer(q, "%s %s", fmtId(attname), atttypdefn);
|
|
+
|
|
+ /* Add collation if not default for the column type */
|
|
+ if (OidIsValid(attcollation))
|
|
+ {
|
|
+ CollInfo *coll;
|
|
+
|
|
+ coll = findCollationByOid(attcollation);
|
|
+ if (coll)
|
|
+ {
|
|
+ /* always schema-qualify, don't try to be smart */
|
|
+ appendPQExpBuffer(q, " COLLATE %s.",
|
|
+ fmtId(coll->dobj.namespace->dobj.name));
|
|
+ appendPQExpBufferStr(q, fmtId(coll->dobj.name));
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+ else
|
|
+ {
|
|
+ /*
|
|
+ * This is a dropped attribute and we're in binary_upgrade mode.
|
|
+ * Insert a placeholder for it in the CREATE TYPE command, and set
|
|
+ * length and alignment with direct UPDATE to the catalogs
|
|
+ * afterwards. See similar code in dumpTableSchema().
|
|
+ */
|
|
+ appendPQExpBuffer(q, "%s INTEGER /* dummy */", fmtId(attname));
|
|
+
|
|
+ /* stash separately for insertion after the CREATE TYPE */
|
|
+ appendPQExpBufferStr(dropped,
|
|
+ "\n-- For binary upgrade, recreate dropped column.\n");
|
|
+ appendPQExpBuffer(dropped, "UPDATE pg_catalog.pg_attribute\n"
|
|
+ "SET attlen = %s, "
|
|
+ "attalign = '%s', attbyval = false\n"
|
|
+ "WHERE attname = ", attlen, attalign);
|
|
+ appendStringLiteralAH(dropped, attname, fout);
|
|
+ appendPQExpBufferStr(dropped, "\n AND attrelid = ");
|
|
+ appendStringLiteralAH(dropped, qtypname, fout);
|
|
+ appendPQExpBufferStr(dropped, "::pg_catalog.regclass;\n");
|
|
+
|
|
+ appendPQExpBuffer(dropped, "ALTER TYPE %s ",
|
|
+ qtypname);
|
|
+ appendPQExpBuffer(dropped, "DROP ATTRIBUTE %s;\n",
|
|
+ fmtId(attname));
|
|
+ }
|
|
+ }
|
|
+ appendPQExpBufferStr(q, "\n);\n");
|
|
+ appendPQExpBufferStr(q, dropped->data);
|
|
+
|
|
+ /*
|
|
+ * DROP must be fully qualified in case same name appears in pg_catalog
|
|
+ */
|
|
+ appendPQExpBuffer(delq, "DROP TYPE %s.",
|
|
+ fmtId(tyinfo->dobj.namespace->dobj.name));
|
|
+ appendPQExpBuffer(delq, "%s;\n",
|
|
+ qtypname);
|
|
+
|
|
+ appendPQExpBuffer(labelq, "TYPE %s", qtypname);
|
|
+
|
|
+ if (binary_upgrade)
|
|
+ binary_upgrade_extension_member(q, &tyinfo->dobj, labelq->data);
|
|
+
|
|
+ ArchiveEntry(fout, tyinfo->dobj.catId, tyinfo->dobj.dumpId,
|
|
+ tyinfo->dobj.name,
|
|
+ tyinfo->dobj.namespace->dobj.name,
|
|
+ NULL,
|
|
+ tyinfo->rolname, false,
|
|
+ "TYPE", SECTION_PRE_DATA,
|
|
+ q->data, delq->data, NULL,
|
|
+ NULL, 0,
|
|
+ NULL, NULL);
|
|
+
|
|
+
|
|
+ /* Dump Type Comments and Security Labels */
|
|
+ dumpComment(fout, labelq->data,
|
|
+ tyinfo->dobj.namespace->dobj.name, tyinfo->rolname,
|
|
+ tyinfo->dobj.catId, 0, tyinfo->dobj.dumpId);
|
|
+ dumpSecLabel(fout, labelq->data,
|
|
+ tyinfo->dobj.namespace->dobj.name, tyinfo->rolname,
|
|
+ tyinfo->dobj.catId, 0, tyinfo->dobj.dumpId);
|
|
+
|
|
+ dumpACL(fout, tyinfo->dobj.catId, tyinfo->dobj.dumpId, "TYPE",
|
|
+ qtypname, NULL, tyinfo->dobj.name,
|
|
+ tyinfo->dobj.namespace->dobj.name,
|
|
+ tyinfo->rolname, tyinfo->typacl);
|
|
+
|
|
+ PQclear(res);
|
|
+ destroyPQExpBuffer(q);
|
|
+ destroyPQExpBuffer(dropped);
|
|
+ destroyPQExpBuffer(delq);
|
|
+ destroyPQExpBuffer(labelq);
|
|
+ destroyPQExpBuffer(query);
|
|
+
|
|
+ /* Dump any per-column comments */
|
|
+ dumpCompositeTypeColComments(fout, tyinfo);
|
|
+}
|
|
+
|
|
+/*
+ * dumpCompositeTypeColComments
+ * writes out to fout the queries to recreate comments on the columns of
+ * a user-defined stand-alone composite type
+ */
+static void
|
|
+dumpCompositeTypeColComments(Archive *fout, TypeInfo *tyinfo)
|
|
+{
|
|
+ CommentItem *comments;
|
|
+ int ncomments;
|
|
+ PGresult *res;
|
|
+ PQExpBuffer query;
|
|
+ PQExpBuffer target;
|
|
+ Oid pgClassOid;
|
|
+ int i;
|
|
+ int ntups;
|
|
+ int i_attname;
|
|
+ int i_attnum;
|
|
+
|
|
+ query = createPQExpBuffer();
|
|
+
|
|
+ /* We assume here that remoteVersion must be at least 70300 */
|
|
+ appendPQExpBuffer(query,
|
|
+ "SELECT c.tableoid, a.attname, a.attnum "
|
|
+ "FROM pg_catalog.pg_class c, pg_catalog.pg_attribute a "
|
|
+ "WHERE c.oid = '%u' AND c.oid = a.attrelid "
|
|
+ " AND NOT a.attisdropped "
|
|
+ "ORDER BY a.attnum ",
|
|
+ tyinfo->typrelid);
|
|
+
|
|
+ /* Fetch column attnames */
|
|
+ res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
|
|
+
|
|
+ ntups = PQntuples(res);
|
|
+ if (ntups < 1)
|
|
+ {
|
|
+ PQclear(res);
|
|
+ destroyPQExpBuffer(query);
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ pgClassOid = atooid(PQgetvalue(res, 0, PQfnumber(res, "tableoid")));
|
|
+
|
|
+ /* Search for comments associated with type's pg_class OID */
|
|
+ ncomments = findComments(fout,
|
|
+ pgClassOid,
|
|
+ tyinfo->typrelid,
|
|
+ &comments);
|
|
+
|
|
+ /* If no comments exist, we're done */
|
|
+ if (ncomments <= 0)
|
|
+ {
|
|
+ PQclear(res);
|
|
+ destroyPQExpBuffer(query);
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ /* Build COMMENT ON statements */
|
|
+ target = createPQExpBuffer();
|
|
+
|
|
+ i_attnum = PQfnumber(res, "attnum");
|
|
+ i_attname = PQfnumber(res, "attname");
|
|
+ while (ncomments > 0)
|
|
+ {
|
|
+ const char *attname;
|
|
+
|
|
+ attname = NULL;
|
|
+ for (i = 0; i < ntups; i++)
|
|
+ {
|
|
+ if (atoi(PQgetvalue(res, i, i_attnum)) == comments->objsubid)
|
|
+ {
|
|
+ attname = PQgetvalue(res, i, i_attname);
|
|
+ break;
|
|
+ }
|
|
+ }
|
|
+ if (attname) /* just in case we don't find it */
|
|
+ {
|
|
+ const char *descr = comments->descr;
|
|
+
|
|
+ resetPQExpBuffer(target);
|
|
+ appendPQExpBuffer(target, "COLUMN %s.",
|
|
+ fmtId(tyinfo->dobj.name));
|
|
+ appendPQExpBufferStr(target, fmtId(attname));
|
|
+
|
|
+ resetPQExpBuffer(query);
|
|
+ appendPQExpBuffer(query, "COMMENT ON %s IS ", target->data);
|
|
+ appendStringLiteralAH(query, descr, fout);
|
|
+ appendPQExpBufferStr(query, ";\n");
|
|
+
|
|
+ ArchiveEntry(fout, nilCatalogId, createDumpId(),
|
|
+ target->data,
|
|
+ tyinfo->dobj.namespace->dobj.name,
|
|
+ NULL, tyinfo->rolname,
|
|
+ false, "COMMENT", SECTION_NONE,
|
|
+ query->data, "", NULL,
|
|
+ &(tyinfo->dobj.dumpId), 1,
|
|
+ NULL, NULL);
|
|
+ }
|
|
+
|
|
+ comments++;
|
|
+ ncomments--;
|
|
+ }
|
|
+
|
|
+ PQclear(res);
|
|
+ destroyPQExpBuffer(query);
|
|
+ destroyPQExpBuffer(target);
|
|
+}
|
|
+
+/*
+ * dumpShellType
+ * writes out to fout the queries to create a shell type
+ *
+ * We dump a shell definition in advance of the I/O functions for the type.
+ */
+static void
+dumpShellType(Archive *fout, ShellTypeInfo *stinfo)
+{
+ PQExpBuffer q;
+
+ /* Skip if not to be dumped */
+ if (!stinfo->dobj.dump || dataOnly)
+ return;
+
+ q = createPQExpBuffer();
+
+ /*
+ * Note the lack of a DROP command for the shell type; any required DROP
+ * is driven off the base type entry, instead. This interacts with
+ * _printTocEntry()'s use of the presence of a DROP command to decide
+ * whether an entry needs an ALTER OWNER command. We don't want to alter
+ * the shell type's owner immediately on creation; that should happen only
+ * after it's filled in, otherwise the backend complains.
+ */
+
+ if (binary_upgrade)
+ binary_upgrade_set_type_oids_by_type_oid(fout, q,
+ stinfo->baseType->dobj.catId.oid);
+
+ appendPQExpBuffer(q, "CREATE TYPE %s;\n",
+ fmtId(stinfo->dobj.name));
+
+ ArchiveEntry(fout, stinfo->dobj.catId, stinfo->dobj.dumpId,
+ stinfo->dobj.name,
+ stinfo->dobj.namespace->dobj.name,
+ NULL,
+ stinfo->baseType->rolname, false,
+ "SHELL TYPE", SECTION_PRE_DATA,
+ q->data, "", NULL,
+ NULL, 0,
+ NULL, NULL);
+
+ destroyPQExpBuffer(q);
+}
+
+/*
+ * Determine whether we want to dump definitions for procedural languages.
+ * Since the languages themselves don't have schemas, we can't rely on
+ * the normal schema-based selection mechanism. We choose to dump them
+ * whenever neither --schema nor --table was given. (Before 8.1, we used
+ * the dump flag of the PL's call handler function, but in 8.1 this will
+ * probably always be false since call handlers are created in pg_catalog.)
+ *
+ * For some backwards compatibility with the older behavior, we forcibly
+ * dump a PL if its handler function (and validator if any) are in a
+ * dumpable namespace. That case is not checked here.
+ *
+ * Also, if the PL belongs to an extension, we do not use this heuristic.
+ * That case isn't checked here either.
+ */
+static bool
+shouldDumpProcLangs(void)
+{
+ if (!include_everything)
+ return false;
+ /* And they're schema not data */
+ if (dataOnly)
+ return false;
+ return true;
+}
+
+/*
+ * dumpProcLang
+ * writes out to fout the queries to recreate a user-defined
+ * procedural language
+ */
+static void
+dumpProcLang(Archive *fout, ProcLangInfo *plang)
+{
+ PQExpBuffer defqry;
+ PQExpBuffer delqry;
+ PQExpBuffer labelq;
+ bool useParams;
+ char *qlanname;
+ char *lanschema;
+ FuncInfo *funcInfo;
+ FuncInfo *inlineInfo = NULL;
+ FuncInfo *validatorInfo = NULL;
+
+ /* Skip if not to be dumped */
+ if (!plang->dobj.dump || dataOnly)
+ return;
+
+ /*
+ * Try to find the support function(s). It is not an error if we don't
+ * find them --- if the functions are in the pg_catalog schema, as is
+ * standard in 8.1 and up, then we won't have loaded them. (In this case
+ * we will emit a parameterless CREATE LANGUAGE command, which will
+ * require PL template knowledge in the backend to reload.)
+ */
+
+ funcInfo = findFuncByOid(plang->lanplcallfoid);
+ if (funcInfo != NULL && !funcInfo->dobj.dump)
+ funcInfo = NULL; /* treat not-dumped same as not-found */
+
+ if (OidIsValid(plang->laninline))
+ {
+ inlineInfo = findFuncByOid(plang->laninline);
+ if (inlineInfo != NULL && !inlineInfo->dobj.dump)
+ inlineInfo = NULL;
+ }
+
+ if (OidIsValid(plang->lanvalidator))
+ {
+ validatorInfo = findFuncByOid(plang->lanvalidator);
+ if (validatorInfo != NULL && !validatorInfo->dobj.dump)
+ validatorInfo = NULL;
+ }
+
+ /*
+ * If the functions are dumpable then emit a traditional CREATE LANGUAGE
+ * with parameters. Otherwise, dump only if shouldDumpProcLangs() says to
+ * dump it.
+ *
+ * However, for a language that belongs to an extension, we must not use
+ * the shouldDumpProcLangs heuristic, but just dump the language iff we're
+ * told to (via dobj.dump). Generally the support functions will belong
+ * to the same extension and so have the same dump flags ... if they
+ * don't, this might not work terribly nicely.
+ */
+ useParams = (funcInfo != NULL &&
+ (inlineInfo != NULL || !OidIsValid(plang->laninline)) &&
+ (validatorInfo != NULL || !OidIsValid(plang->lanvalidator)));
+
+ if (!plang->dobj.ext_member)
+ {
+ if (!useParams && !shouldDumpProcLangs())
+ return;
+ }
+
+ defqry = createPQExpBuffer();
+ delqry = createPQExpBuffer();
+ labelq = createPQExpBuffer();
+
+ qlanname = pg_strdup(fmtId(plang->dobj.name));
+
+ /*
+ * If dumping a HANDLER clause, treat the language as being in the handler
+ * function's schema; this avoids cluttering the HANDLER clause. Otherwise
+ * it doesn't really have a schema.
+ */
+ if (useParams)
+ lanschema = funcInfo->dobj.namespace->dobj.name;
+ else
+ lanschema = NULL;
+
+ appendPQExpBuffer(delqry, "DROP PROCEDURAL LANGUAGE %s;\n",
+ qlanname);
+
+ if (useParams)
+ {
+ appendPQExpBuffer(defqry, "CREATE %sPROCEDURAL LANGUAGE %s",
+ plang->lanpltrusted ? "TRUSTED " : "",
+ qlanname);
+ appendPQExpBuffer(defqry, " HANDLER %s",
+ fmtId(funcInfo->dobj.name));
+ if (OidIsValid(plang->laninline))
+ {
+ appendPQExpBufferStr(defqry, " INLINE ");
+ /* Cope with possibility that inline is in different schema */
+ if (inlineInfo->dobj.namespace != funcInfo->dobj.namespace)
+ appendPQExpBuffer(defqry, "%s.",
+ fmtId(inlineInfo->dobj.namespace->dobj.name));
+ appendPQExpBufferStr(defqry, fmtId(inlineInfo->dobj.name));
+ }
+ if (OidIsValid(plang->lanvalidator))
+ {
+ appendPQExpBufferStr(defqry, " VALIDATOR ");
+ /* Cope with possibility that validator is in different schema */
+ if (validatorInfo->dobj.namespace != funcInfo->dobj.namespace)
+ appendPQExpBuffer(defqry, "%s.",
+ fmtId(validatorInfo->dobj.namespace->dobj.name));
+ appendPQExpBufferStr(defqry, fmtId(validatorInfo->dobj.name));
+ }
+ }
+ else
+ {
+ /*
+ * If not dumping parameters, then use CREATE OR REPLACE so that the
+ * command will not fail if the language is preinstalled in the target
+ * database. We restrict the use of REPLACE to this case so as to
+ * eliminate the risk of replacing a language with incompatible
+ * parameter settings: this command will only succeed at all if there
+ * is a pg_pltemplate entry, and if there is one, the existing entry
+ * must match it too.
+ */
+ appendPQExpBuffer(defqry, "CREATE OR REPLACE PROCEDURAL LANGUAGE %s",
+ qlanname);
+ }
+ appendPQExpBufferStr(defqry, ";\n");
+
+ appendPQExpBuffer(labelq, "LANGUAGE %s", qlanname);
+
+ if (binary_upgrade)
+ binary_upgrade_extension_member(defqry, &plang->dobj, labelq->data);
+
+ ArchiveEntry(fout, plang->dobj.catId, plang->dobj.dumpId,
+ plang->dobj.name,
+ lanschema, NULL, plang->lanowner,
+ false, "PROCEDURAL LANGUAGE", SECTION_PRE_DATA,
+ defqry->data, delqry->data, NULL,
+ NULL, 0,
+ NULL, NULL);
+
+ /* Dump Proc Lang Comments and Security Labels */
+ dumpComment(fout, labelq->data,
+ NULL, "",
+ plang->dobj.catId, 0, plang->dobj.dumpId);
+ dumpSecLabel(fout, labelq->data,
+ NULL, "",
+ plang->dobj.catId, 0, plang->dobj.dumpId);
+
+ if (plang->lanpltrusted)
+ dumpACL(fout, plang->dobj.catId, plang->dobj.dumpId, "LANGUAGE",
+ qlanname, NULL, plang->dobj.name,
+ lanschema,
+ plang->lanowner, plang->lanacl);
+
+ free(qlanname);
+
+ destroyPQExpBuffer(defqry);
+ destroyPQExpBuffer(delqry);
+ destroyPQExpBuffer(labelq);
+}
+
|
|
+/*
|
|
+ * format_function_arguments: generate function name and argument list
|
|
+ *
|
|
+ * This is used when we can rely on pg_get_function_arguments to format
|
|
+ * the argument list. Note, however, that pg_get_function_arguments
|
|
+ * does not special-case zero-argument aggregates.
|
|
+ */
|
|
+static char *
|
|
+format_function_arguments(FuncInfo *finfo, char *funcargs, bool is_agg)
|
|
+{
|
|
+ PQExpBufferData fn;
|
|
+
|
|
+ initPQExpBuffer(&fn);
|
|
+ appendPQExpBufferStr(&fn, fmtId(finfo->dobj.name));
|
|
+ if (is_agg && finfo->nargs == 0)
|
|
+ appendPQExpBufferStr(&fn, "(*)");
|
|
+ else
|
|
+ appendPQExpBuffer(&fn, "(%s)", funcargs);
|
|
+ return fn.data;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * format_function_arguments_old: generate function name and argument list
|
|
+ *
|
|
+ * The argument type names are qualified if needed. The function name
|
|
+ * is never qualified.
|
|
+ *
|
|
+ * This is used only with pre-8.4 servers, so we aren't expecting to see
|
|
+ * VARIADIC or TABLE arguments, nor are there any defaults for arguments.
|
|
+ *
|
|
+ * Any or all of allargtypes, argmodes, argnames may be NULL.
|
|
+ */
|
|
+static char *
|
|
+format_function_arguments_old(Archive *fout,
|
|
+ FuncInfo *finfo, int nallargs,
|
|
+ char **allargtypes,
|
|
+ char **argmodes,
|
|
+ char **argnames)
|
|
+{
|
|
+ PQExpBufferData fn;
|
|
+ int j;
|
|
+
|
|
+ initPQExpBuffer(&fn);
|
|
+ appendPQExpBuffer(&fn, "%s(", fmtId(finfo->dobj.name));
|
|
+ for (j = 0; j < nallargs; j++)
|
|
+ {
|
|
+ Oid typid;
|
|
+ char *typname;
|
|
+ const char *argmode;
|
|
+ const char *argname;
|
|
+
|
|
+ typid = allargtypes ? atooid(allargtypes[j]) : finfo->argtypes[j];
|
|
+ typname = getFormattedTypeName(fout, typid, zeroAsOpaque);
|
|
+
|
|
+ if (argmodes)
|
|
+ {
|
|
+ switch (argmodes[j][0])
|
|
+ {
|
|
+ case PROARGMODE_IN:
|
|
+ argmode = "";
|
|
+ break;
|
|
+ case PROARGMODE_OUT:
|
|
+ argmode = "OUT ";
|
|
+ break;
|
|
+ case PROARGMODE_INOUT:
|
|
+ argmode = "INOUT ";
|
|
+ break;
|
|
+ default:
|
|
+ write_msg(NULL, "WARNING: bogus value in proargmodes array\n");
|
|
+ argmode = "";
|
|
+ break;
|
|
+ }
|
|
+ }
|
|
+ else
|
|
+ argmode = "";
|
|
+
|
|
+ argname = argnames ? argnames[j] : (char *) NULL;
|
|
+ if (argname && argname[0] == '\0')
|
|
+ argname = NULL;
|
|
+
|
|
+ appendPQExpBuffer(&fn, "%s%s%s%s%s",
|
|
+ (j > 0) ? ", " : "",
|
|
+ argmode,
|
|
+ argname ? fmtId(argname) : "",
|
|
+ argname ? " " : "",
|
|
+ typname);
|
|
+ free(typname);
|
|
+ }
|
|
+ appendPQExpBufferChar(&fn, ')');
|
|
+ return fn.data;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * format_function_signature: generate function name and argument list
|
|
+ *
|
|
+ * This is like format_function_arguments_old except that only a minimal
|
|
+ * list of input argument types is generated; this is sufficient to
|
|
+ * reference the function, but not to define it.
|
|
+ *
|
|
+ * If honor_quotes is false then the function name is never quoted.
|
|
+ * This is appropriate for use in TOC tags, but not in SQL commands.
|
|
+ */
|
|
+static char *
|
|
+format_function_signature(Archive *fout, FuncInfo *finfo, bool honor_quotes)
|
|
+{
|
|
+ PQExpBufferData fn;
|
|
+ int j;
|
|
+
|
|
+ initPQExpBuffer(&fn);
|
|
+ if (honor_quotes)
|
|
+ appendPQExpBuffer(&fn, "%s(", fmtId(finfo->dobj.name));
|
|
+ else
|
|
+ appendPQExpBuffer(&fn, "%s(", finfo->dobj.name);
|
|
+ for (j = 0; j < finfo->nargs; j++)
|
|
+ {
|
|
+ char *typname;
|
|
+
|
|
+ if (j > 0)
|
|
+ appendPQExpBufferStr(&fn, ", ");
|
|
+
|
|
+ typname = getFormattedTypeName(fout, finfo->argtypes[j],
|
|
+ zeroAsOpaque);
|
|
+ appendPQExpBufferStr(&fn, typname);
|
|
+ free(typname);
|
|
+ }
|
|
+ appendPQExpBufferChar(&fn, ')');
|
|
+ return fn.data;
|
|
+}
|
|
+
|
|
+
|
|
+/*
|
|
+ * dumpFunc:
|
|
+ * dump out one function
|
|
+ */
|
|
+static void
|
|
+dumpFunc(Archive *fout, FuncInfo *finfo)
|
|
+{
|
|
+ PQExpBuffer query;
|
|
+ PQExpBuffer q;
|
|
+ PQExpBuffer delqry;
|
|
+ PQExpBuffer labelq;
|
|
+ PQExpBuffer asPart;
|
|
+ PGresult *res;
|
|
+ char *funcsig; /* identity signature */
|
|
+ char *funcfullsig = NULL; /* full signature */
|
|
+ char *funcsig_tag;
|
|
+ char *proretset;
|
|
+ char *prosrc;
|
|
+ char *probin;
|
|
+ char *funcargs;
|
|
+ char *funciargs;
|
|
+ char *funcresult;
|
|
+ char *proallargtypes;
|
|
+ char *proargmodes;
|
|
+ char *proargnames;
|
|
+ char *proiswindow;
|
|
+ char *provolatile;
|
|
+ char *proisstrict;
|
|
+ char *prosecdef;
|
|
+ char *proleakproof;
|
|
+ char *proconfig;
|
|
+ char *procost;
|
|
+ char *prorows;
|
|
+ char *lanname;
|
|
+ char *rettypename;
|
|
+ int nallargs;
|
|
+ char **allargtypes = NULL;
|
|
+ char **argmodes = NULL;
|
|
+ char **argnames = NULL;
|
|
+ char **configitems = NULL;
|
|
+ int nconfigitems = 0;
|
|
+ int i;
|
|
+
|
|
+ /* Skip if not to be dumped */
|
|
+ if (!finfo->dobj.dump || dataOnly)
|
|
+ return;
|
|
+
|
|
+ query = createPQExpBuffer();
|
|
+ q = createPQExpBuffer();
|
|
+ delqry = createPQExpBuffer();
|
|
+ labelq = createPQExpBuffer();
|
|
+ asPart = createPQExpBuffer();
|
|
+
|
|
+ /* Set proper schema search path so type references list correctly */
|
|
+ selectSourceSchema(fout, finfo->dobj.namespace->dobj.name);
|
|
+
|
|
+ /* Fetch function-specific details */
|
|
+ if (fout->remoteVersion >= 90200)
|
|
+ {
|
|
+ /*
|
|
+ * proleakproof was added at v9.2
|
|
+ */
|
|
+ appendPQExpBuffer(query,
|
|
+ "SELECT proretset, prosrc, probin, "
|
|
+ "pg_catalog.pg_get_function_arguments(oid) AS funcargs, "
|
|
+ "pg_catalog.pg_get_function_identity_arguments(oid) AS funciargs, "
|
|
+ "pg_catalog.pg_get_function_result(oid) AS funcresult, "
|
|
+ "proiswindow, provolatile, proisstrict, prosecdef, "
|
|
+ "proleakproof, proconfig, procost, prorows, "
|
|
+ "(SELECT lanname FROM pg_catalog.pg_language WHERE oid = prolang) AS lanname "
|
|
+ "FROM pg_catalog.pg_proc "
|
|
+ "WHERE oid = '%u'::pg_catalog.oid",
|
|
+ finfo->dobj.catId.oid);
|
|
+ }
|
|
+ else if (fout->remoteVersion >= 80400)
|
|
+ {
|
|
+ /*
|
|
+ * In 8.4 and up we rely on pg_get_function_arguments and
|
|
+ * pg_get_function_result instead of examining proallargtypes etc.
|
|
+ */
|
|
+ appendPQExpBuffer(query,
|
|
+ "SELECT proretset, prosrc, probin, "
|
|
+ "pg_catalog.pg_get_function_arguments(oid) AS funcargs, "
|
|
+ "pg_catalog.pg_get_function_identity_arguments(oid) AS funciargs, "
|
|
+ "pg_catalog.pg_get_function_result(oid) AS funcresult, "
|
|
+ "proiswindow, provolatile, proisstrict, prosecdef, "
|
|
+ "false AS proleakproof, "
|
|
+ " proconfig, procost, prorows, "
|
|
+ "(SELECT lanname FROM pg_catalog.pg_language WHERE oid = prolang) AS lanname "
|
|
+ "FROM pg_catalog.pg_proc "
|
|
+ "WHERE oid = '%u'::pg_catalog.oid",
|
|
+ finfo->dobj.catId.oid);
|
|
+ }
|
|
+ else if (fout->remoteVersion >= 80300)
|
|
+ {
|
|
+ appendPQExpBuffer(query,
|
|
+ "SELECT proretset, prosrc, probin, "
|
|
+ "proallargtypes, proargmodes, proargnames, "
|
|
+ "false AS proiswindow, "
|
|
+ "provolatile, proisstrict, prosecdef, "
|
|
+ "false AS proleakproof, "
|
|
+ "proconfig, procost, prorows, "
|
|
+ "(SELECT lanname FROM pg_catalog.pg_language WHERE oid = prolang) AS lanname "
|
|
+ "FROM pg_catalog.pg_proc "
|
|
+ "WHERE oid = '%u'::pg_catalog.oid",
|
|
+ finfo->dobj.catId.oid);
|
|
+ }
|
|
+ else if (fout->remoteVersion >= 80100)
|
|
+ {
|
|
+ appendPQExpBuffer(query,
|
|
+ "SELECT proretset, prosrc, probin, "
|
|
+ "proallargtypes, proargmodes, proargnames, "
|
|
+ "false AS proiswindow, "
|
|
+ "provolatile, proisstrict, prosecdef, "
|
|
+ "false AS proleakproof, "
|
|
+ "null AS proconfig, 0 AS procost, 0 AS prorows, "
|
|
+ "(SELECT lanname FROM pg_catalog.pg_language WHERE oid = prolang) AS lanname "
|
|
+ "FROM pg_catalog.pg_proc "
|
|
+ "WHERE oid = '%u'::pg_catalog.oid",
|
|
+ finfo->dobj.catId.oid);
|
|
+ }
|
|
+ else if (fout->remoteVersion >= 80000)
|
|
+ {
|
|
+ appendPQExpBuffer(query,
|
|
+ "SELECT proretset, prosrc, probin, "
|
|
+ "null AS proallargtypes, "
|
|
+ "null AS proargmodes, "
|
|
+ "proargnames, "
|
|
+ "false AS proiswindow, "
|
|
+ "provolatile, proisstrict, prosecdef, "
|
|
+ "false AS proleakproof, "
|
|
+ "null AS proconfig, 0 AS procost, 0 AS prorows, "
|
|
+ "(SELECT lanname FROM pg_catalog.pg_language WHERE oid = prolang) AS lanname "
|
|
+ "FROM pg_catalog.pg_proc "
|
|
+ "WHERE oid = '%u'::pg_catalog.oid",
|
|
+ finfo->dobj.catId.oid);
|
|
+ }
|
|
+ else if (fout->remoteVersion >= 70300)
|
|
+ {
|
|
+ appendPQExpBuffer(query,
|
|
+ "SELECT proretset, prosrc, probin, "
|
|
+ "null AS proallargtypes, "
|
|
+ "null AS proargmodes, "
|
|
+ "null AS proargnames, "
|
|
+ "false AS proiswindow, "
|
|
+ "provolatile, proisstrict, prosecdef, "
|
|
+ "false AS proleakproof, "
|
|
+ "null AS proconfig, 0 AS procost, 0 AS prorows, "
|
|
+ "(SELECT lanname FROM pg_catalog.pg_language WHERE oid = prolang) AS lanname "
|
|
+ "FROM pg_catalog.pg_proc "
|
|
+ "WHERE oid = '%u'::pg_catalog.oid",
|
|
+ finfo->dobj.catId.oid);
|
|
+ }
|
|
+ else if (fout->remoteVersion >= 70100)
|
|
+ {
|
|
+ appendPQExpBuffer(query,
|
|
+ "SELECT proretset, prosrc, probin, "
|
|
+ "null AS proallargtypes, "
|
|
+ "null AS proargmodes, "
|
|
+ "null AS proargnames, "
|
|
+ "false AS proiswindow, "
|
|
+ "case when proiscachable then 'i' else 'v' end AS provolatile, "
|
|
+ "proisstrict, "
|
|
+ "false AS prosecdef, "
|
|
+ "false AS proleakproof, "
|
|
+ "null AS proconfig, 0 AS procost, 0 AS prorows, "
|
|
+ "(SELECT lanname FROM pg_language WHERE oid = prolang) AS lanname "
|
|
+ "FROM pg_proc "
|
|
+ "WHERE oid = '%u'::oid",
|
|
+ finfo->dobj.catId.oid);
|
|
+ }
|
|
+ else
|
|
+ {
|
|
+ appendPQExpBuffer(query,
|
|
+ "SELECT proretset, prosrc, probin, "
|
|
+ "null AS proallargtypes, "
|
|
+ "null AS proargmodes, "
|
|
+ "null AS proargnames, "
|
|
+ "false AS proiswindow, "
|
|
+ "CASE WHEN proiscachable THEN 'i' ELSE 'v' END AS provolatile, "
|
|
+ "false AS proisstrict, "
|
|
+ "false AS prosecdef, "
|
|
+ "false AS proleakproof, "
|
|
+ "NULL AS proconfig, 0 AS procost, 0 AS prorows, "
|
|
+ "(SELECT lanname FROM pg_language WHERE oid = prolang) AS lanname "
|
|
+ "FROM pg_proc "
|
|
+ "WHERE oid = '%u'::oid",
|
|
+ finfo->dobj.catId.oid);
|
|
+ }
|
|
+
|
|
+ res = ExecuteSqlQueryForSingleRow(fout, query->data);
|
|
+
|
|
+ proretset = PQgetvalue(res, 0, PQfnumber(res, "proretset"));
|
|
+ prosrc = PQgetvalue(res, 0, PQfnumber(res, "prosrc"));
|
|
+ probin = PQgetvalue(res, 0, PQfnumber(res, "probin"));
|
|
+ if (fout->remoteVersion >= 80400)
|
|
+ {
|
|
+ funcargs = PQgetvalue(res, 0, PQfnumber(res, "funcargs"));
|
|
+ funciargs = PQgetvalue(res, 0, PQfnumber(res, "funciargs"));
|
|
+ funcresult = PQgetvalue(res, 0, PQfnumber(res, "funcresult"));
|
|
+ proallargtypes = proargmodes = proargnames = NULL;
|
|
+ }
|
|
+ else
|
|
+ {
|
|
+ proallargtypes = PQgetvalue(res, 0, PQfnumber(res, "proallargtypes"));
|
|
+ proargmodes = PQgetvalue(res, 0, PQfnumber(res, "proargmodes"));
|
|
+ proargnames = PQgetvalue(res, 0, PQfnumber(res, "proargnames"));
|
|
+ funcargs = funciargs = funcresult = NULL;
|
|
+ }
|
|
+ proiswindow = PQgetvalue(res, 0, PQfnumber(res, "proiswindow"));
|
|
+ provolatile = PQgetvalue(res, 0, PQfnumber(res, "provolatile"));
|
|
+ proisstrict = PQgetvalue(res, 0, PQfnumber(res, "proisstrict"));
|
|
+ prosecdef = PQgetvalue(res, 0, PQfnumber(res, "prosecdef"));
|
|
+ proleakproof = PQgetvalue(res, 0, PQfnumber(res, "proleakproof"));
|
|
+ proconfig = PQgetvalue(res, 0, PQfnumber(res, "proconfig"));
|
|
+ procost = PQgetvalue(res, 0, PQfnumber(res, "procost"));
|
|
+ prorows = PQgetvalue(res, 0, PQfnumber(res, "prorows"));
|
|
+ lanname = PQgetvalue(res, 0, PQfnumber(res, "lanname"));
|
|
+
|
|
+ /*
|
|
+ * See backend/commands/functioncmds.c for details of how the 'AS' clause
|
|
+ * is used. In 8.4 and up, an unused probin is NULL (here ""); previous
|
|
+ * versions would set it to "-". There are no known cases in which prosrc
|
|
+ * is unused, so the tests below for "-" are probably useless.
|
|
+ */
|
|
+ if (probin[0] != '\0' && strcmp(probin, "-") != 0)
|
|
+ {
|
|
+ appendPQExpBufferStr(asPart, "AS ");
|
|
+ appendStringLiteralAH(asPart, probin, fout);
|
|
+ if (strcmp(prosrc, "-") != 0)
|
|
+ {
|
|
+ appendPQExpBufferStr(asPart, ", ");
|
|
+
|
|
+ /*
|
|
+ * where we have bin, use dollar quoting if allowed and src
|
|
+ * contains quote or backslash; else use regular quoting.
|
|
+ */
|
|
+ if (disable_dollar_quoting ||
|
|
+ (strchr(prosrc, '\'') == NULL && strchr(prosrc, '\\') == NULL))
|
|
+ appendStringLiteralAH(asPart, prosrc, fout);
|
|
+ else
|
|
+ appendStringLiteralDQ(asPart, prosrc, NULL);
|
|
+ }
|
|
+ }
|
|
+ else
|
|
+ {
|
|
+ if (strcmp(prosrc, "-") != 0)
|
|
+ {
|
|
+ appendPQExpBufferStr(asPart, "AS ");
|
|
+ /* with no bin, dollar quote src unconditionally if allowed */
|
|
+ if (disable_dollar_quoting)
|
|
+ appendStringLiteralAH(asPart, prosrc, fout);
|
|
+ else
|
|
+ appendStringLiteralDQ(asPart, prosrc, NULL);
|
|
+ }
|
|
+ }
|
|
+
|
|
+ nallargs = finfo->nargs; /* unless we learn different from allargs */
|
|
+
|
|
+ if (proallargtypes && *proallargtypes)
|
|
+ {
|
|
+ int nitems = 0;
|
|
+
|
|
+ if (!parsePGArray(proallargtypes, &allargtypes, &nitems) ||
|
|
+ nitems < finfo->nargs)
|
|
+ {
|
|
+ write_msg(NULL, "WARNING: could not parse proallargtypes array\n");
|
|
+ if (allargtypes)
|
|
+ free(allargtypes);
|
|
+ allargtypes = NULL;
|
|
+ }
|
|
+ else
|
|
+ nallargs = nitems;
|
|
+ }
|
|
+
|
|
+ if (proargmodes && *proargmodes)
|
|
+ {
|
|
+ int nitems = 0;
|
|
+
|
|
+ if (!parsePGArray(proargmodes, &argmodes, &nitems) ||
|
|
+ nitems != nallargs)
|
|
+ {
|
|
+ write_msg(NULL, "WARNING: could not parse proargmodes array\n");
|
|
+ if (argmodes)
|
|
+ free(argmodes);
|
|
+ argmodes = NULL;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ if (proargnames && *proargnames)
|
|
+ {
|
|
+ int nitems = 0;
|
|
+
|
|
+ if (!parsePGArray(proargnames, &argnames, &nitems) ||
|
|
+ nitems != nallargs)
|
|
+ {
|
|
+ write_msg(NULL, "WARNING: could not parse proargnames array\n");
|
|
+ if (argnames)
|
|
+ free(argnames);
|
|
+ argnames = NULL;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ if (proconfig && *proconfig)
|
|
+ {
|
|
+ if (!parsePGArray(proconfig, &configitems, &nconfigitems))
|
|
+ {
|
|
+ write_msg(NULL, "WARNING: could not parse proconfig array\n");
|
|
+ if (configitems)
|
|
+ free(configitems);
|
|
+ configitems = NULL;
|
|
+ nconfigitems = 0;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ if (funcargs)
|
|
+ {
|
|
+ /* 8.4 or later; we rely on server-side code for most of the work */
|
|
+ funcfullsig = format_function_arguments(finfo, funcargs, false);
|
|
+ funcsig = format_function_arguments(finfo, funciargs, false);
|
|
+ }
|
|
+ else
|
|
+ /* pre-8.4, do it ourselves */
|
|
+ funcsig = format_function_arguments_old(fout,
|
|
+ finfo, nallargs, allargtypes,
|
|
+ argmodes, argnames);
|
|
+
|
|
+ funcsig_tag = format_function_signature(fout, finfo, false);
|
|
+
|
|
+ /*
|
|
+ * DROP must be fully qualified in case same name appears in pg_catalog
|
|
+ */
|
|
+ appendPQExpBuffer(delqry, "DROP FUNCTION %s.%s;\n",
|
|
+ fmtId(finfo->dobj.namespace->dobj.name),
|
|
+ funcsig);
|
|
+
|
|
+ appendPQExpBuffer(q, "CREATE FUNCTION %s ", funcfullsig ? funcfullsig :
|
|
+ funcsig);
|
|
+ if (funcresult)
|
|
+ appendPQExpBuffer(q, "RETURNS %s", funcresult);
|
|
+ else
|
|
+ {
|
|
+ rettypename = getFormattedTypeName(fout, finfo->prorettype,
|
|
+ zeroAsOpaque);
|
|
+ appendPQExpBuffer(q, "RETURNS %s%s",
|
|
+ (proretset[0] == 't') ? "SETOF " : "",
|
|
+ rettypename);
|
|
+ free(rettypename);
|
|
+ }
|
|
+
|
|
+ appendPQExpBuffer(q, "\n LANGUAGE %s", fmtId(lanname));
|
|
+
|
|
+ if (proiswindow[0] == 't')
|
|
+ appendPQExpBufferStr(q, " WINDOW");
|
|
+
|
|
+ if (provolatile[0] != PROVOLATILE_VOLATILE)
|
|
+ {
|
|
+ if (provolatile[0] == PROVOLATILE_IMMUTABLE)
|
|
+ appendPQExpBufferStr(q, " IMMUTABLE");
|
|
+ else if (provolatile[0] == PROVOLATILE_STABLE)
|
|
+ appendPQExpBufferStr(q, " STABLE");
|
|
+ else if (provolatile[0] != PROVOLATILE_VOLATILE)
|
|
+ exit_horribly(NULL, "unrecognized provolatile value for function \"%s\"\n",
|
|
+ finfo->dobj.name);
|
|
+ }
|
|
+
|
|
+ if (proisstrict[0] == 't')
|
|
+ appendPQExpBufferStr(q, " STRICT");
|
|
+
|
|
+ if (prosecdef[0] == 't')
|
|
+ appendPQExpBufferStr(q, " SECURITY DEFINER");
|
|
+
|
|
+ if (proleakproof[0] == 't')
|
|
+ appendPQExpBufferStr(q, " LEAKPROOF");
|
|
+
|
|
+ /*
|
|
+ * COST and ROWS are emitted only if present and not default, so as not to
|
|
+ * break backwards-compatibility of the dump without need. Keep this code
|
|
+ * in sync with the defaults in functioncmds.c.
|
|
+ */
|
|
+ if (strcmp(procost, "0") != 0)
|
|
+ {
|
|
+ if (strcmp(lanname, "internal") == 0 || strcmp(lanname, "c") == 0)
|
|
+ {
|
|
+ /* default cost is 1 */
|
|
+ if (strcmp(procost, "1") != 0)
|
|
+ appendPQExpBuffer(q, " COST %s", procost);
|
|
+ }
|
|
+ else
|
|
+ {
|
|
+ /* default cost is 100 */
|
|
+ if (strcmp(procost, "100") != 0)
|
|
+ appendPQExpBuffer(q, " COST %s", procost);
|
|
+ }
|
|
+ }
|
|
+ if (proretset[0] == 't' &&
|
|
+ strcmp(prorows, "0") != 0 && strcmp(prorows, "1000") != 0)
|
|
+ appendPQExpBuffer(q, " ROWS %s", prorows);
|
|
+
|
|
+ for (i = 0; i < nconfigitems; i++)
|
|
+ {
|
|
+ /* we feel free to scribble on configitems[] here */
|
|
+ char *configitem = configitems[i];
|
|
+ char *pos;
|
|
+
|
|
+ pos = strchr(configitem, '=');
|
|
+ if (pos == NULL)
|
|
+ continue;
|
|
+ *pos++ = '\0';
|
|
+ appendPQExpBuffer(q, "\n SET %s TO ", fmtId(configitem));
|
|
+
|
|
+ /*
|
|
+ * Some GUC variable names are 'LIST' type and hence must not be
|
|
+ * quoted.
|
|
+ */
|
|
+ if (pg_strcasecmp(configitem, "DateStyle") == 0
|
|
+ || pg_strcasecmp(configitem, "search_path") == 0)
|
|
+ appendPQExpBufferStr(q, pos);
|
|
+ else
|
|
+ appendStringLiteralAH(q, pos, fout);
|
|
+ }
|
|
+
|
|
+ appendPQExpBuffer(q, "\n %s;\n", asPart->data);
|
|
+
|
|
+ appendPQExpBuffer(labelq, "FUNCTION %s", funcsig);
|
|
+
|
|
+ if (binary_upgrade)
|
|
+ binary_upgrade_extension_member(q, &finfo->dobj, labelq->data);
|
|
+
|
|
+ ArchiveEntry(fout, finfo->dobj.catId, finfo->dobj.dumpId,
|
|
+ funcsig_tag,
|
|
+ finfo->dobj.namespace->dobj.name,
|
|
+ NULL,
|
|
+ finfo->rolname, false,
|
|
+ "FUNCTION", SECTION_PRE_DATA,
|
|
+ q->data, delqry->data, NULL,
|
|
+ NULL, 0,
|
|
+ NULL, NULL);
|
|
+
|
|
+ /* Dump Function Comments and Security Labels */
|
|
+ dumpComment(fout, labelq->data,
|
|
+ finfo->dobj.namespace->dobj.name, finfo->rolname,
|
|
+ finfo->dobj.catId, 0, finfo->dobj.dumpId);
|
|
+ dumpSecLabel(fout, labelq->data,
|
|
+ finfo->dobj.namespace->dobj.name, finfo->rolname,
|
|
+ finfo->dobj.catId, 0, finfo->dobj.dumpId);
|
|
+
|
|
+ dumpACL(fout, finfo->dobj.catId, finfo->dobj.dumpId, "FUNCTION",
|
|
+ funcsig, NULL, funcsig_tag,
|
|
+ finfo->dobj.namespace->dobj.name,
|
|
+ finfo->rolname, finfo->proacl);
|
|
+
|
|
+ PQclear(res);
|
|
+
|
|
+ destroyPQExpBuffer(query);
|
|
+ destroyPQExpBuffer(q);
|
|
+ destroyPQExpBuffer(delqry);
|
|
+ destroyPQExpBuffer(labelq);
|
|
+ destroyPQExpBuffer(asPart);
|
|
+ free(funcsig);
|
|
+ if (funcfullsig)
|
|
+ free(funcfullsig);
|
|
+ free(funcsig_tag);
|
|
+ if (allargtypes)
|
|
+ free(allargtypes);
|
|
+ if (argmodes)
|
|
+ free(argmodes);
|
|
+ if (argnames)
|
|
+ free(argnames);
|
|
+ if (configitems)
|
|
+ free(configitems);
|
|
+}
|
|
+
|
|
+
|
|
+/*
|
|
+ * Dump a user-defined cast
|
|
+ */
|
|
+static void
|
|
+dumpCast(Archive *fout, CastInfo *cast)
|
|
+{
|
|
+ PQExpBuffer defqry;
|
|
+ PQExpBuffer delqry;
|
|
+ PQExpBuffer labelq;
|
|
+ FuncInfo *funcInfo = NULL;
|
|
+
|
|
+ /* Skip if not to be dumped */
|
|
+ if (!cast->dobj.dump || dataOnly)
|
|
+ return;
|
|
+
|
|
+ /* Cannot dump if we don't have the cast function's info */
|
|
+ if (OidIsValid(cast->castfunc))
|
|
+ {
|
|
+ funcInfo = findFuncByOid(cast->castfunc);
|
|
+ if (funcInfo == NULL)
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ /*
|
|
+ * Make sure we are in proper schema (needed for getFormattedTypeName).
|
|
+ * Casts don't have a schema of their own, so use pg_catalog.
|
|
+ */
|
|
+ selectSourceSchema(fout, "pg_catalog");
|
|
+
|
|
+ defqry = createPQExpBuffer();
|
|
+ delqry = createPQExpBuffer();
|
|
+ labelq = createPQExpBuffer();
|
|
+
|
|
+ appendPQExpBuffer(delqry, "DROP CAST (%s AS %s);\n",
|
|
+ getFormattedTypeName(fout, cast->castsource, zeroAsNone),
|
|
+ getFormattedTypeName(fout, cast->casttarget, zeroAsNone));
|
|
+
|
|
+ appendPQExpBuffer(defqry, "CREATE CAST (%s AS %s) ",
|
|
+ getFormattedTypeName(fout, cast->castsource, zeroAsNone),
|
|
+ getFormattedTypeName(fout, cast->casttarget, zeroAsNone));
|
|
+
|
|
+ switch (cast->castmethod)
|
|
+ {
|
|
+ case COERCION_METHOD_BINARY:
|
|
+ appendPQExpBufferStr(defqry, "WITHOUT FUNCTION");
|
|
+ break;
|
|
+ case COERCION_METHOD_INOUT:
|
|
+ appendPQExpBufferStr(defqry, "WITH INOUT");
|
|
+ break;
|
|
+ case COERCION_METHOD_FUNCTION:
|
|
+ if (funcInfo)
|
|
+ {
|
|
+ char *fsig = format_function_signature(fout, funcInfo, true);
|
|
+
|
|
+ /*
|
|
+ * Always qualify the function name, in case it is not in
|
|
+ * pg_catalog schema (format_function_signature won't qualify
|
|
+ * it).
|
|
+ */
|
|
+ appendPQExpBuffer(defqry, "WITH FUNCTION %s.%s",
|
|
+ fmtId(funcInfo->dobj.namespace->dobj.name), fsig);
|
|
+ free(fsig);
|
|
+ }
|
|
+ else
|
|
+ write_msg(NULL, "WARNING: bogus value in pg_cast.castfunc or pg_cast.castmethod field\n");
|
|
+ break;
|
|
+ default:
|
|
+ write_msg(NULL, "WARNING: bogus value in pg_cast.castmethod field\n");
|
|
+ }
|
|
+
|
|
+ if (cast->castcontext == 'a')
|
|
+ appendPQExpBufferStr(defqry, " AS ASSIGNMENT");
|
|
+ else if (cast->castcontext == 'i')
|
|
+ appendPQExpBufferStr(defqry, " AS IMPLICIT");
|
|
+ appendPQExpBufferStr(defqry, ";\n");
|
|
+
|
|
+ appendPQExpBuffer(labelq, "CAST (%s AS %s)",
|
|
+ getFormattedTypeName(fout, cast->castsource, zeroAsNone),
|
|
+ getFormattedTypeName(fout, cast->casttarget, zeroAsNone));
|
|
+
|
|
+ if (binary_upgrade)
|
|
+ binary_upgrade_extension_member(defqry, &cast->dobj, labelq->data);
|
|
+
|
|
+ ArchiveEntry(fout, cast->dobj.catId, cast->dobj.dumpId,
|
|
+ labelq->data,
|
|
+ "pg_catalog", NULL, "",
|
|
+ false, "CAST", SECTION_PRE_DATA,
|
|
+ defqry->data, delqry->data, NULL,
|
|
+ NULL, 0,
|
|
+ NULL, NULL);
|
|
+
|
|
+ /* Dump Cast Comments */
|
|
+ dumpComment(fout, labelq->data,
|
|
+ NULL, "",
|
|
+ cast->dobj.catId, 0, cast->dobj.dumpId);
|
|
+
|
|
+ destroyPQExpBuffer(defqry);
|
|
+ destroyPQExpBuffer(delqry);
|
|
+ destroyPQExpBuffer(labelq);
|
|
+}
|
|
+
|
|
+/*
|
|
+ * dumpOpr
|
|
+ * write out a single operator definition
|
|
+ */
|
|
+static void
|
|
+dumpOpr(Archive *fout, OprInfo *oprinfo)
|
|
+{
|
|
+ PQExpBuffer query;
|
|
+ PQExpBuffer q;
|
|
+ PQExpBuffer delq;
|
|
+ PQExpBuffer labelq;
|
|
+ PQExpBuffer oprid;
|
|
+ PQExpBuffer details;
|
|
+ const char *name;
|
|
+ PGresult *res;
|
|
+ int i_oprkind;
|
|
+ int i_oprcode;
|
|
+ int i_oprleft;
|
|
+ int i_oprright;
|
|
+ int i_oprcom;
|
|
+ int i_oprnegate;
|
|
+ int i_oprrest;
|
|
+ int i_oprjoin;
|
|
+ int i_oprcanmerge;
|
|
+ int i_oprcanhash;
|
|
+ char *oprkind;
|
|
+ char *oprcode;
|
|
+ char *oprleft;
|
|
+ char *oprright;
|
|
+ char *oprcom;
|
|
+ char *oprnegate;
|
|
+ char *oprrest;
|
|
+ char *oprjoin;
|
|
+ char *oprcanmerge;
|
|
+ char *oprcanhash;
|
|
+ char *oprregproc;
|
|
+ char *oprref;
|
|
+
|
|
+ /* Skip if not to be dumped */
|
|
+ if (!oprinfo->dobj.dump || dataOnly)
|
|
+ return;
|
|
+
|
|
+ /*
|
|
+ * some operators are invalid because they were the result of user
|
|
+ * defining operators before commutators exist
|
|
+ */
|
|
+ if (!OidIsValid(oprinfo->oprcode))
|
|
+ return;
|
|
+
|
|
+ query = createPQExpBuffer();
|
|
+ q = createPQExpBuffer();
|
|
+ delq = createPQExpBuffer();
|
|
+ labelq = createPQExpBuffer();
|
|
+ oprid = createPQExpBuffer();
|
|
+ details = createPQExpBuffer();
|
|
+
|
|
+ /* Make sure we are in proper schema so regoperator works correctly */
|
|
+ selectSourceSchema(fout, oprinfo->dobj.namespace->dobj.name);
|
|
+
|
|
+ if (fout->remoteVersion >= 80300)
|
|
+ {
|
|
+ appendPQExpBuffer(query, "SELECT oprkind, "
|
|
+ "oprcode::pg_catalog.regprocedure, "
|
|
+ "oprleft::pg_catalog.regtype, "
|
|
+ "oprright::pg_catalog.regtype, "
|
|
+ "oprcom::pg_catalog.regoperator, "
|
|
+ "oprnegate::pg_catalog.regoperator, "
|
|
+ "oprrest::pg_catalog.regprocedure, "
|
|
+ "oprjoin::pg_catalog.regprocedure, "
|
|
+ "oprcanmerge, oprcanhash "
|
|
+ "FROM pg_catalog.pg_operator "
|
|
+ "WHERE oid = '%u'::pg_catalog.oid",
|
|
+ oprinfo->dobj.catId.oid);
|
|
+ }
|
|
+ else if (fout->remoteVersion >= 70300)
|
|
+ {
|
|
+ appendPQExpBuffer(query, "SELECT oprkind, "
|
|
+ "oprcode::pg_catalog.regprocedure, "
|
|
+ "oprleft::pg_catalog.regtype, "
|
|
+ "oprright::pg_catalog.regtype, "
|
|
+ "oprcom::pg_catalog.regoperator, "
|
|
+ "oprnegate::pg_catalog.regoperator, "
|
|
+ "oprrest::pg_catalog.regprocedure, "
|
|
+ "oprjoin::pg_catalog.regprocedure, "
|
|
+ "(oprlsortop != 0) AS oprcanmerge, "
|
|
+ "oprcanhash "
|
|
+ "FROM pg_catalog.pg_operator "
|
|
+ "WHERE oid = '%u'::pg_catalog.oid",
|
|
+ oprinfo->dobj.catId.oid);
|
|
+ }
|
|
+ else if (fout->remoteVersion >= 70100)
|
|
+ {
|
|
+ appendPQExpBuffer(query, "SELECT oprkind, oprcode, "
|
|
+ "CASE WHEN oprleft = 0 THEN '-' "
|
|
+ "ELSE format_type(oprleft, NULL) END AS oprleft, "
|
|
+ "CASE WHEN oprright = 0 THEN '-' "
|
|
+ "ELSE format_type(oprright, NULL) END AS oprright, "
|
|
+ "oprcom, oprnegate, oprrest, oprjoin, "
|
|
+ "(oprlsortop != 0) AS oprcanmerge, "
|
|
+ "oprcanhash "
|
|
+ "FROM pg_operator "
|
|
+ "WHERE oid = '%u'::oid",
|
|
+ oprinfo->dobj.catId.oid);
|
|
+ }
|
|
+ else
|
|
+ {
|
|
+ appendPQExpBuffer(query, "SELECT oprkind, oprcode, "
|
|
+ "CASE WHEN oprleft = 0 THEN '-'::name "
|
|
+ "ELSE (SELECT typname FROM pg_type WHERE oid = oprleft) END AS oprleft, "
|
|
+ "CASE WHEN oprright = 0 THEN '-'::name "
|
|
+ "ELSE (SELECT typname FROM pg_type WHERE oid = oprright) END AS oprright, "
|
|
+ "oprcom, oprnegate, oprrest, oprjoin, "
|
|
+ "(oprlsortop != 0) AS oprcanmerge, "
|
|
+ "oprcanhash "
|
|
+ "FROM pg_operator "
|
|
+ "WHERE oid = '%u'::oid",
|
|
+ oprinfo->dobj.catId.oid);
|
|
+ }
|
|
+
|
|
+ res = ExecuteSqlQueryForSingleRow(fout, query->data);
|
|
+
|
|
+ i_oprkind = PQfnumber(res, "oprkind");
|
|
+ i_oprcode = PQfnumber(res, "oprcode");
|
|
+ i_oprleft = PQfnumber(res, "oprleft");
|
|
+ i_oprright = PQfnumber(res, "oprright");
|
|
+ i_oprcom = PQfnumber(res, "oprcom");
|
|
+ i_oprnegate = PQfnumber(res, "oprnegate");
|
|
+ i_oprrest = PQfnumber(res, "oprrest");
|
|
+ i_oprjoin = PQfnumber(res, "oprjoin");
|
|
+ i_oprcanmerge = PQfnumber(res, "oprcanmerge");
|
|
+ i_oprcanhash = PQfnumber(res, "oprcanhash");
|
|
+
|
|
+ oprkind = PQgetvalue(res, 0, i_oprkind);
|
|
+ oprcode = PQgetvalue(res, 0, i_oprcode);
|
|
+ oprleft = PQgetvalue(res, 0, i_oprleft);
|
|
+ oprright = PQgetvalue(res, 0, i_oprright);
|
|
+ oprcom = PQgetvalue(res, 0, i_oprcom);
|
|
+ oprnegate = PQgetvalue(res, 0, i_oprnegate);
|
|
+ oprrest = PQgetvalue(res, 0, i_oprrest);
|
|
+ oprjoin = PQgetvalue(res, 0, i_oprjoin);
|
|
+ oprcanmerge = PQgetvalue(res, 0, i_oprcanmerge);
|
|
+ oprcanhash = PQgetvalue(res, 0, i_oprcanhash);
|
|
+
|
|
+ oprregproc = convertRegProcReference(fout, oprcode);
|
|
+ if (oprregproc)
|
|
+ {
|
|
+ appendPQExpBuffer(details, " PROCEDURE = %s", oprregproc);
|
|
+ free(oprregproc);
|
|
+ }
|
|
+
|
|
+ appendPQExpBuffer(oprid, "%s (",
|
|
+ oprinfo->dobj.name);
|
|
+
|
|
+ /*
|
|
+ * right unary means there's a left arg and left unary means there's a
|
|
+ * right arg
|
|
+ */
|
|
+ if (strcmp(oprkind, "r") == 0 ||
|
|
+ strcmp(oprkind, "b") == 0)
|
|
+ {
|
|
+ if (fout->remoteVersion >= 70100)
|
|
+ name = oprleft;
|
|
+ else
|
|
+ name = fmtId(oprleft);
|
|
+ appendPQExpBuffer(details, ",\n LEFTARG = %s", name);
|
|
+ appendPQExpBufferStr(oprid, name);
|
|
+ }
|
|
+ else
|
|
+ appendPQExpBufferStr(oprid, "NONE");
|
|
+
|
|
+ if (strcmp(oprkind, "l") == 0 ||
|
|
+ strcmp(oprkind, "b") == 0)
|
|
+ {
|
|
+ if (fout->remoteVersion >= 70100)
|
|
+ name = oprright;
|
|
+ else
|
|
+ name = fmtId(oprright);
|
|
+ appendPQExpBuffer(details, ",\n RIGHTARG = %s", name);
|
|
+ appendPQExpBuffer(oprid, ", %s)", name);
|
|
+ }
|
|
+ else
|
|
+ appendPQExpBufferStr(oprid, ", NONE)");
|
|
+
|
|
+ oprref = convertOperatorReference(fout, oprcom);
|
|
+ if (oprref)
|
|
+ {
|
|
+ appendPQExpBuffer(details, ",\n COMMUTATOR = %s", oprref);
|
|
+ free(oprref);
|
|
+ }
|
|
+
|
|
+ oprref = convertOperatorReference(fout, oprnegate);
|
|
+ if (oprref)
|
|
+ {
|
|
+ appendPQExpBuffer(details, ",\n NEGATOR = %s", oprref);
|
|
+ free(oprref);
|
|
+ }
|
|
+
|
|
+ if (strcmp(oprcanmerge, "t") == 0)
|
|
+ appendPQExpBufferStr(details, ",\n MERGES");
|
|
+
|
|
+ if (strcmp(oprcanhash, "t") == 0)
|
|
+ appendPQExpBufferStr(details, ",\n HASHES");
|
|
+
|
|
+ oprregproc = convertRegProcReference(fout, oprrest);
|
|
+ if (oprregproc)
|
|
+ {
|
|
+ appendPQExpBuffer(details, ",\n RESTRICT = %s", oprregproc);
|
|
+ free(oprregproc);
|
|
+ }
|
|
+
|
|
+ oprregproc = convertRegProcReference(fout, oprjoin);
|
|
+ if (oprregproc)
|
|
+ {
|
|
+ appendPQExpBuffer(details, ",\n JOIN = %s", oprregproc);
|
|
+ free(oprregproc);
|
|
+ }
|
|
+
|
|
+ /*
|
|
+ * DROP must be fully qualified in case same name appears in pg_catalog
|
|
+ */
|
|
+ appendPQExpBuffer(delq, "DROP OPERATOR %s.%s;\n",
|
|
+ fmtId(oprinfo->dobj.namespace->dobj.name),
|
|
+ oprid->data);
|
|
+
|
|
+ appendPQExpBuffer(q, "CREATE OPERATOR %s (\n%s\n);\n",
|
|
+ oprinfo->dobj.name, details->data);
|
|
+
|
|
+ appendPQExpBuffer(labelq, "OPERATOR %s", oprid->data);
|
|
+
|
|
+ if (binary_upgrade)
|
|
+ binary_upgrade_extension_member(q, &oprinfo->dobj, labelq->data);
|
|
+
|
|
+ ArchiveEntry(fout, oprinfo->dobj.catId, oprinfo->dobj.dumpId,
|
|
+ oprinfo->dobj.name,
|
|
+ oprinfo->dobj.namespace->dobj.name,
|
|
+ NULL,
|
|
+ oprinfo->rolname,
|
|
+ false, "OPERATOR", SECTION_PRE_DATA,
|
|
+ q->data, delq->data, NULL,
|
|
+ NULL, 0,
|
|
+ NULL, NULL);
|
|
+
|
|
+ /* Dump Operator Comments */
|
|
+ dumpComment(fout, labelq->data,
|
|
+ oprinfo->dobj.namespace->dobj.name, oprinfo->rolname,
|
|
+ oprinfo->dobj.catId, 0, oprinfo->dobj.dumpId);
|
|
+
|
|
+ PQclear(res);
|
|
+
|
|
+ destroyPQExpBuffer(query);
|
|
+ destroyPQExpBuffer(q);
|
|
+ destroyPQExpBuffer(delq);
|
|
+ destroyPQExpBuffer(labelq);
|
|
+ destroyPQExpBuffer(oprid);
|
|
+ destroyPQExpBuffer(details);
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Convert a function reference obtained from pg_operator
|
|
+ *
|
|
+ * Returns allocated string of what to print, or NULL if function references
|
|
+ * is InvalidOid. Returned string is expected to be free'd by the caller.
|
|
+ *
|
|
+ * In 7.3 the input is a REGPROCEDURE display; we have to strip the
|
|
+ * argument-types part. In prior versions, the input is a REGPROC display.
|
|
+ */
|
|
+static char *
|
|
+convertRegProcReference(Archive *fout, const char *proc)
|
|
+{
|
|
+ /* In all cases "-" means a null reference */
|
|
+ if (strcmp(proc, "-") == 0)
|
|
+ return NULL;
|
|
+
|
|
+ if (fout->remoteVersion >= 70300)
|
|
+ {
|
|
+ char *name;
|
|
+ char *paren;
|
|
+ bool inquote;
|
|
+
|
|
+ name = pg_strdup(proc);
|
|
+ /* find non-double-quoted left paren */
|
|
+ inquote = false;
|
|
+ for (paren = name; *paren; paren++)
|
|
+ {
|
|
+ if (*paren == '(' && !inquote)
|
|
+ {
|
|
+ *paren = '\0';
|
|
+ break;
|
|
+ }
|
|
+ if (*paren == '"')
|
|
+ inquote = !inquote;
|
|
+ }
|
|
+ return name;
|
|
+ }
|
|
+
|
|
+ /* REGPROC before 7.3 does not quote its result */
|
|
+ return pg_strdup(fmtId(proc));
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Convert an operator cross-reference obtained from pg_operator
|
|
+ *
|
|
+ * Returns an allocated string of what to print, or NULL to print nothing.
|
|
+ * Caller is responsible for free'ing result string.
|
|
+ *
|
|
+ * In 7.3 and up the input is a REGOPERATOR display; we have to strip the
|
|
+ * argument-types part, and add OPERATOR() decoration if the name is
|
|
+ * schema-qualified. In older versions, the input is just a numeric OID,
|
|
+ * which we search our operator list for.
|
|
+ */
|
|
+static char *
|
|
+convertOperatorReference(Archive *fout, const char *opr)
|
|
+{
|
|
+ OprInfo *oprInfo;
|
|
+
|
|
+ /* In all cases "0" means a null reference */
|
|
+ if (strcmp(opr, "0") == 0)
|
|
+ return NULL;
|
|
+
|
|
+ if (fout->remoteVersion >= 70300)
|
|
+ {
|
|
+ char *name;
|
|
+ char *oname;
|
|
+ char *ptr;
|
|
+ bool inquote;
|
|
+ bool sawdot;
|
|
+
|
|
+ name = pg_strdup(opr);
|
|
+ /* find non-double-quoted left paren, and check for non-quoted dot */
|
|
+ inquote = false;
|
|
+ sawdot = false;
|
|
+ for (ptr = name; *ptr; ptr++)
|
|
+ {
|
|
+ if (*ptr == '"')
|
|
+ inquote = !inquote;
|
|
+ else if (*ptr == '.' && !inquote)
|
|
+ sawdot = true;
|
|
+ else if (*ptr == '(' && !inquote)
|
|
+ {
|
|
+ *ptr = '\0';
|
|
+ break;
|
|
+ }
|
|
+ }
|
|
+ /* If not schema-qualified, don't need to add OPERATOR() */
|
|
+ if (!sawdot)
|
|
+ return name;
|
|
+ oname = psprintf("OPERATOR(%s)", name);
|
|
+ free(name);
|
|
+ return oname;
|
|
+ }
|
|
+
|
|
+ oprInfo = findOprByOid(atooid(opr));
|
|
+ if (oprInfo == NULL)
|
|
+ {
|
|
+ write_msg(NULL, "WARNING: could not find operator with OID %s\n",
|
|
+ opr);
|
|
+ return NULL;
|
|
+ }
|
|
+ return pg_strdup(oprInfo->dobj.name);
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Convert a function OID obtained from pg_ts_parser or pg_ts_template
|
|
+ *
|
|
+ * It is sufficient to use REGPROC rather than REGPROCEDURE, since the
|
|
+ * argument lists of these functions are predetermined. Note that the
|
|
+ * caller should ensure we are in the proper schema, because the results
|
|
+ * are search path dependent!
|
|
+ */
|
|
+static const char *
|
|
+convertTSFunction(Archive *fout, Oid funcOid)
|
|
+{
|
|
+ char *result;
|
|
+ char query[128];
|
|
+ PGresult *res;
|
|
+
|
|
+ snprintf(query, sizeof(query),
|
|
+ "SELECT '%u'::pg_catalog.regproc", funcOid);
|
|
+ res = ExecuteSqlQueryForSingleRow(fout, query);
|
|
+
|
|
+ result = pg_strdup(PQgetvalue(res, 0, 0));
|
|
+
|
|
+ PQclear(res);
|
|
+
|
|
+ return result;
|
|
+}
|
|
+
|
|
+
|
|
+/*
|
|
+ * dumpOpclass
|
|
+ * write out a single operator class definition
|
|
+ */
|
|
+static void
|
|
+dumpOpclass(Archive *fout, OpclassInfo *opcinfo)
|
|
+{
|
|
+ PQExpBuffer query;
|
|
+ PQExpBuffer q;
|
|
+ PQExpBuffer delq;
|
|
+ PQExpBuffer labelq;
|
|
+ PGresult *res;
|
|
+ int ntups;
|
|
+ int i_opcintype;
|
|
+ int i_opckeytype;
|
|
+ int i_opcdefault;
|
|
+ int i_opcfamily;
|
|
+ int i_opcfamilyname;
|
|
+ int i_opcfamilynsp;
|
|
+ int i_amname;
|
|
+ int i_amopstrategy;
|
|
+ int i_amopreqcheck;
|
|
+ int i_amopopr;
|
|
+ int i_sortfamily;
|
|
+ int i_sortfamilynsp;
|
|
+ int i_amprocnum;
|
|
+ int i_amproc;
|
|
+ int i_amproclefttype;
|
|
+ int i_amprocrighttype;
|
|
+ char *opcintype;
|
|
+ char *opckeytype;
|
|
+ char *opcdefault;
|
|
+ char *opcfamily;
|
|
+ char *opcfamilyname;
|
|
+ char *opcfamilynsp;
|
|
+ char *amname;
|
|
+ char *amopstrategy;
|
|
+ char *amopreqcheck;
|
|
+ char *amopopr;
|
|
+ char *sortfamily;
|
|
+ char *sortfamilynsp;
|
|
+ char *amprocnum;
|
|
+ char *amproc;
|
|
+ char *amproclefttype;
|
|
+ char *amprocrighttype;
|
|
+ bool needComma;
|
|
+ int i;
|
|
+
|
|
+ /* Skip if not to be dumped */
|
|
+ if (!opcinfo->dobj.dump || dataOnly)
|
|
+ return;
|
|
+
|
|
+ /*
|
|
+ * XXX currently we do not implement dumping of operator classes from
|
|
+ * pre-7.3 databases. This could be done but it seems not worth the
|
|
+ * trouble.
|
|
+ */
|
|
+ if (fout->remoteVersion < 70300)
|
|
+ return;
|
|
+
|
|
+ query = createPQExpBuffer();
|
|
+ q = createPQExpBuffer();
|
|
+ delq = createPQExpBuffer();
|
|
+ labelq = createPQExpBuffer();
|
|
+
|
|
+ /* Make sure we are in proper schema so regoperator works correctly */
|
|
+ selectSourceSchema(fout, opcinfo->dobj.namespace->dobj.name);
|
|
+
|
|
+ /* Get additional fields from the pg_opclass row */
|
|
+ if (fout->remoteVersion >= 80300)
|
|
+ {
|
|
+ appendPQExpBuffer(query, "SELECT opcintype::pg_catalog.regtype, "
|
|
+ "opckeytype::pg_catalog.regtype, "
|
|
+ "opcdefault, opcfamily, "
|
|
+ "opfname AS opcfamilyname, "
|
|
+ "nspname AS opcfamilynsp, "
|
|
+ "(SELECT amname FROM pg_catalog.pg_am WHERE oid = opcmethod) AS amname "
|
|
+ "FROM pg_catalog.pg_opclass c "
|
|
+ "LEFT JOIN pg_catalog.pg_opfamily f ON f.oid = opcfamily "
|
|
+ "LEFT JOIN pg_catalog.pg_namespace n ON n.oid = opfnamespace "
|
|
+ "WHERE c.oid = '%u'::pg_catalog.oid",
|
|
+ opcinfo->dobj.catId.oid);
|
|
+ }
|
|
+ else
|
|
+ {
|
|
+ appendPQExpBuffer(query, "SELECT opcintype::pg_catalog.regtype, "
|
|
+ "opckeytype::pg_catalog.regtype, "
|
|
+ "opcdefault, NULL AS opcfamily, "
|
|
+ "NULL AS opcfamilyname, "
|
|
+ "NULL AS opcfamilynsp, "
|
|
+ "(SELECT amname FROM pg_catalog.pg_am WHERE oid = opcamid) AS amname "
|
|
+ "FROM pg_catalog.pg_opclass "
|
|
+ "WHERE oid = '%u'::pg_catalog.oid",
|
|
+ opcinfo->dobj.catId.oid);
|
|
+ }
|
|
+
|
|
+ res = ExecuteSqlQueryForSingleRow(fout, query->data);
|
|
+
|
|
+ i_opcintype = PQfnumber(res, "opcintype");
|
|
+ i_opckeytype = PQfnumber(res, "opckeytype");
|
|
+ i_opcdefault = PQfnumber(res, "opcdefault");
|
|
+ i_opcfamily = PQfnumber(res, "opcfamily");
|
|
+ i_opcfamilyname = PQfnumber(res, "opcfamilyname");
|
|
+ i_opcfamilynsp = PQfnumber(res, "opcfamilynsp");
|
|
+ i_amname = PQfnumber(res, "amname");
|
|
+
|
|
+ opcintype = PQgetvalue(res, 0, i_opcintype);
|
|
+ opckeytype = PQgetvalue(res, 0, i_opckeytype);
|
|
+ opcdefault = PQgetvalue(res, 0, i_opcdefault);
|
|
+ /* opcfamily will still be needed after we PQclear res */
|
|
+ opcfamily = pg_strdup(PQgetvalue(res, 0, i_opcfamily));
|
|
+ opcfamilyname = PQgetvalue(res, 0, i_opcfamilyname);
|
|
+ opcfamilynsp = PQgetvalue(res, 0, i_opcfamilynsp);
|
|
+ /* amname will still be needed after we PQclear res */
|
|
+ amname = pg_strdup(PQgetvalue(res, 0, i_amname));
|
|
+
|
|
+ /*
|
|
+ * DROP must be fully qualified in case same name appears in pg_catalog
|
|
+ */
|
|
+ appendPQExpBuffer(delq, "DROP OPERATOR CLASS %s",
|
|
+ fmtId(opcinfo->dobj.namespace->dobj.name));
|
|
+ appendPQExpBuffer(delq, ".%s",
|
|
+ fmtId(opcinfo->dobj.name));
|
|
+ appendPQExpBuffer(delq, " USING %s;\n",
|
|
+ fmtId(amname));
|
|
+
|
|
+ /* Build the fixed portion of the CREATE command */
|
|
+ appendPQExpBuffer(q, "CREATE OPERATOR CLASS %s\n ",
|
|
+ fmtId(opcinfo->dobj.name));
|
|
+ if (strcmp(opcdefault, "t") == 0)
|
|
+ appendPQExpBufferStr(q, "DEFAULT ");
|
|
+ appendPQExpBuffer(q, "FOR TYPE %s USING %s",
|
|
+ opcintype,
|
|
+ fmtId(amname));
|
|
+ if (strlen(opcfamilyname) > 0 &&
|
|
+ (strcmp(opcfamilyname, opcinfo->dobj.name) != 0 ||
|
|
+ strcmp(opcfamilynsp, opcinfo->dobj.namespace->dobj.name) != 0))
|
|
+ {
|
|
+ appendPQExpBufferStr(q, " FAMILY ");
|
|
+ if (strcmp(opcfamilynsp, opcinfo->dobj.namespace->dobj.name) != 0)
|
|
+ appendPQExpBuffer(q, "%s.", fmtId(opcfamilynsp));
|
|
+ appendPQExpBuffer(q, "%s", fmtId(opcfamilyname));
|
|
+ }
|
|
+ appendPQExpBufferStr(q, " AS\n ");
|
|
+
|
|
+ needComma = false;
|
|
+
|
|
+ if (strcmp(opckeytype, "-") != 0)
|
|
+ {
|
|
+ appendPQExpBuffer(q, "STORAGE %s",
|
|
+ opckeytype);
|
|
+ needComma = true;
|
|
+ }
|
|
+
|
|
+ PQclear(res);
|
|
+
|
|
+ /*
|
|
+ * Now fetch and print the OPERATOR entries (pg_amop rows).
|
|
+ *
|
|
+ * Print only those opfamily members that are tied to the opclass by
|
|
+ * pg_depend entries.
|
|
+ *
|
|
+ * XXX RECHECK is gone as of 8.4, but we'll still print it if dumping an
|
|
+ * older server's opclass in which it is used. This is to avoid
|
|
+ * hard-to-detect breakage if a newer pg_dump is used to dump from an
|
|
+ * older server and then reload into that old version. This can go away
|
|
+ * once 8.3 is so old as to not be of interest to anyone.
|
|
+ */
|
|
+ resetPQExpBuffer(query);
|
|
+
|
|
+ if (fout->remoteVersion >= 90100)
|
|
+ {
|
|
+ appendPQExpBuffer(query, "SELECT amopstrategy, false AS amopreqcheck, "
|
|
+ "amopopr::pg_catalog.regoperator, "
|
|
+ "opfname AS sortfamily, "
|
|
+ "nspname AS sortfamilynsp "
|
|
+ "FROM pg_catalog.pg_amop ao JOIN pg_catalog.pg_depend ON "
|
|
+ "(classid = 'pg_catalog.pg_amop'::pg_catalog.regclass AND objid = ao.oid) "
|
|
+ "LEFT JOIN pg_catalog.pg_opfamily f ON f.oid = amopsortfamily "
|
|
+ "LEFT JOIN pg_catalog.pg_namespace n ON n.oid = opfnamespace "
|
|
+ "WHERE refclassid = 'pg_catalog.pg_opclass'::pg_catalog.regclass "
|
|
+ "AND refobjid = '%u'::pg_catalog.oid "
|
|
+ "AND amopfamily = '%s'::pg_catalog.oid "
|
|
+ "ORDER BY amopstrategy",
|
|
+ opcinfo->dobj.catId.oid,
|
|
+ opcfamily);
|
|
+ }
|
|
+ else if (fout->remoteVersion >= 80400)
|
|
+ {
|
|
+ appendPQExpBuffer(query, "SELECT amopstrategy, false AS amopreqcheck, "
|
|
+ "amopopr::pg_catalog.regoperator, "
|
|
+ "NULL AS sortfamily, "
|
|
+ "NULL AS sortfamilynsp "
|
|
+ "FROM pg_catalog.pg_amop ao, pg_catalog.pg_depend "
|
|
+ "WHERE refclassid = 'pg_catalog.pg_opclass'::pg_catalog.regclass "
|
|
+ "AND refobjid = '%u'::pg_catalog.oid "
|
|
+ "AND classid = 'pg_catalog.pg_amop'::pg_catalog.regclass "
|
|
+ "AND objid = ao.oid "
|
|
+ "ORDER BY amopstrategy",
|
|
+ opcinfo->dobj.catId.oid);
|
|
+ }
|
|
+ else if (fout->remoteVersion >= 80300)
|
|
+ {
|
|
+ appendPQExpBuffer(query, "SELECT amopstrategy, amopreqcheck, "
|
|
+ "amopopr::pg_catalog.regoperator, "
|
|
+ "NULL AS sortfamily, "
|
|
+ "NULL AS sortfamilynsp "
|
|
+ "FROM pg_catalog.pg_amop ao, pg_catalog.pg_depend "
|
|
+ "WHERE refclassid = 'pg_catalog.pg_opclass'::pg_catalog.regclass "
|
|
+ "AND refobjid = '%u'::pg_catalog.oid "
|
|
+ "AND classid = 'pg_catalog.pg_amop'::pg_catalog.regclass "
|
|
+ "AND objid = ao.oid "
|
|
+ "ORDER BY amopstrategy",
|
|
+ opcinfo->dobj.catId.oid);
|
|
+ }
|
|
+ else
|
|
+ {
|
|
+ /*
|
|
+ * Here, we print all entries since there are no opfamilies and hence
|
|
+ * no loose operators to worry about.
|
|
+ */
|
|
+ appendPQExpBuffer(query, "SELECT amopstrategy, amopreqcheck, "
|
|
+ "amopopr::pg_catalog.regoperator, "
|
|
+ "NULL AS sortfamily, "
|
|
+ "NULL AS sortfamilynsp "
|
|
+ "FROM pg_catalog.pg_amop "
|
|
+ "WHERE amopclaid = '%u'::pg_catalog.oid "
|
|
+ "ORDER BY amopstrategy",
|
|
+ opcinfo->dobj.catId.oid);
|
|
+ }
|
|
+
|
|
+ res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
|
|
+
|
|
+ ntups = PQntuples(res);
|
|
+
|
|
+ i_amopstrategy = PQfnumber(res, "amopstrategy");
|
|
+ i_amopreqcheck = PQfnumber(res, "amopreqcheck");
|
|
+ i_amopopr = PQfnumber(res, "amopopr");
|
|
+ i_sortfamily = PQfnumber(res, "sortfamily");
|
|
+ i_sortfamilynsp = PQfnumber(res, "sortfamilynsp");
|
|
+
|
|
+ for (i = 0; i < ntups; i++)
|
|
+ {
|
|
+ amopstrategy = PQgetvalue(res, i, i_amopstrategy);
|
|
+ amopreqcheck = PQgetvalue(res, i, i_amopreqcheck);
|
|
+ amopopr = PQgetvalue(res, i, i_amopopr);
|
|
+ sortfamily = PQgetvalue(res, i, i_sortfamily);
|
|
+ sortfamilynsp = PQgetvalue(res, i, i_sortfamilynsp);
|
|
+
|
|
+ if (needComma)
|
|
+ appendPQExpBufferStr(q, " ,\n ");
|
|
+
|
|
+ appendPQExpBuffer(q, "OPERATOR %s %s",
|
|
+ amopstrategy, amopopr);
|
|
+
|
|
+ if (strlen(sortfamily) > 0)
|
|
+ {
|
|
+ appendPQExpBufferStr(q, " FOR ORDER BY ");
|
|
+ if (strcmp(sortfamilynsp, opcinfo->dobj.namespace->dobj.name) != 0)
|
|
+ appendPQExpBuffer(q, "%s.", fmtId(sortfamilynsp));
|
|
+ appendPQExpBufferStr(q, fmtId(sortfamily));
|
|
+ }
|
|
+
|
|
+ if (strcmp(amopreqcheck, "t") == 0)
|
|
+ appendPQExpBufferStr(q, " RECHECK");
|
|
+
|
|
+ needComma = true;
|
|
+ }
|
|
+
|
|
+ PQclear(res);
|
|
+
|
|
+ /*
|
|
+ * Now fetch and print the FUNCTION entries (pg_amproc rows).
|
|
+ *
|
|
+ * Print only those opfamily members that are tied to the opclass by
|
|
+ * pg_depend entries.
|
|
+ *
|
|
+ * We print the amproclefttype/amprocrighttype even though in most cases
|
|
+ * the backend could deduce the right values, because of the corner case
|
|
+ * of a btree sort support function for a cross-type comparison. That's
|
|
+ * only allowed in 9.2 and later, but for simplicity print them in all
|
|
+ * versions that have the columns.
|
|
+ */
|
|
+ resetPQExpBuffer(query);
|
|
+
|
|
+ if (fout->remoteVersion >= 80300)
|
|
+ {
|
|
+ appendPQExpBuffer(query, "SELECT amprocnum, "
|
|
+ "amproc::pg_catalog.regprocedure, "
|
|
+ "amproclefttype::pg_catalog.regtype, "
|
|
+ "amprocrighttype::pg_catalog.regtype "
|
|
+ "FROM pg_catalog.pg_amproc ap, pg_catalog.pg_depend "
|
|
+ "WHERE refclassid = 'pg_catalog.pg_opclass'::pg_catalog.regclass "
|
|
+ "AND refobjid = '%u'::pg_catalog.oid "
|
|
+ "AND classid = 'pg_catalog.pg_amproc'::pg_catalog.regclass "
|
|
+ "AND objid = ap.oid "
|
|
+ "ORDER BY amprocnum",
|
|
+ opcinfo->dobj.catId.oid);
|
|
+ }
|
|
+ else
|
|
+ {
|
|
+ appendPQExpBuffer(query, "SELECT amprocnum, "
|
|
+ "amproc::pg_catalog.regprocedure, "
|
|
+ "'' AS amproclefttype, "
|
|
+ "'' AS amprocrighttype "
|
|
+ "FROM pg_catalog.pg_amproc "
|
|
+ "WHERE amopclaid = '%u'::pg_catalog.oid "
|
|
+ "ORDER BY amprocnum",
|
|
+ opcinfo->dobj.catId.oid);
|
|
+ }
|
|
+
|
|
+ res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
|
|
+
|
|
+ ntups = PQntuples(res);
|
|
+
|
|
+ i_amprocnum = PQfnumber(res, "amprocnum");
|
|
+ i_amproc = PQfnumber(res, "amproc");
|
|
+ i_amproclefttype = PQfnumber(res, "amproclefttype");
|
|
+ i_amprocrighttype = PQfnumber(res, "amprocrighttype");
|
|
+
|
|
+ for (i = 0; i < ntups; i++)
|
|
+ {
|
|
+ amprocnum = PQgetvalue(res, i, i_amprocnum);
|
|
+ amproc = PQgetvalue(res, i, i_amproc);
|
|
+ amproclefttype = PQgetvalue(res, i, i_amproclefttype);
|
|
+ amprocrighttype = PQgetvalue(res, i, i_amprocrighttype);
|
|
+
|
|
+ if (needComma)
|
|
+ appendPQExpBufferStr(q, " ,\n ");
|
|
+
|
|
+ appendPQExpBuffer(q, "FUNCTION %s", amprocnum);
|
|
+
|
|
+ if (*amproclefttype && *amprocrighttype)
|
|
+ appendPQExpBuffer(q, " (%s, %s)", amproclefttype, amprocrighttype);
|
|
+
|
|
+ appendPQExpBuffer(q, " %s", amproc);
|
|
+
|
|
+ needComma = true;
|
|
+ }
|
|
+
|
|
+ PQclear(res);
|
|
+
|
|
+ appendPQExpBufferStr(q, ";\n");
|
|
+
|
|
+ appendPQExpBuffer(labelq, "OPERATOR CLASS %s",
|
|
+ fmtId(opcinfo->dobj.name));
|
|
+ appendPQExpBuffer(labelq, " USING %s",
|
|
+ fmtId(amname));
|
|
+
|
|
+ if (binary_upgrade)
|
|
+ binary_upgrade_extension_member(q, &opcinfo->dobj, labelq->data);
|
|
+
|
|
+ ArchiveEntry(fout, opcinfo->dobj.catId, opcinfo->dobj.dumpId,
|
|
+ opcinfo->dobj.name,
|
|
+ opcinfo->dobj.namespace->dobj.name,
|
|
+ NULL,
|
|
+ opcinfo->rolname,
|
|
+ false, "OPERATOR CLASS", SECTION_PRE_DATA,
|
|
+ q->data, delq->data, NULL,
|
|
+ NULL, 0,
|
|
+ NULL, NULL);
|
|
+
|
|
+ /* Dump Operator Class Comments */
|
|
+ dumpComment(fout, labelq->data,
|
|
+ NULL, opcinfo->rolname,
|
|
+ opcinfo->dobj.catId, 0, opcinfo->dobj.dumpId);
|
|
+
|
|
+ free(amname);
|
|
+ destroyPQExpBuffer(query);
|
|
+ destroyPQExpBuffer(q);
|
|
+ destroyPQExpBuffer(delq);
|
|
+ destroyPQExpBuffer(labelq);
|
|
+}
|
|
+
|
|
+/*
|
|
+ * dumpOpfamily
|
|
+ * write out a single operator family definition
|
|
+ *
|
|
+ * Note: this also dumps any "loose" operator members that aren't bound to a
|
|
+ * specific opclass within the opfamily.
|
|
+ */
|
|
+static void
|
|
+dumpOpfamily(Archive *fout, OpfamilyInfo *opfinfo)
|
|
+{
|
|
+ PQExpBuffer query;
|
|
+ PQExpBuffer q;
|
|
+ PQExpBuffer delq;
|
|
+ PQExpBuffer labelq;
|
|
+ PGresult *res;
|
|
+ PGresult *res_ops;
|
|
+ PGresult *res_procs;
|
|
+ int ntups;
|
|
+ int i_amname;
|
|
+ int i_amopstrategy;
|
|
+ int i_amopreqcheck;
|
|
+ int i_amopopr;
|
|
+ int i_sortfamily;
|
|
+ int i_sortfamilynsp;
|
|
+ int i_amprocnum;
|
|
+ int i_amproc;
|
|
+ int i_amproclefttype;
|
|
+ int i_amprocrighttype;
|
|
+ char *amname;
|
|
+ char *amopstrategy;
|
|
+ char *amopreqcheck;
|
|
+ char *amopopr;
|
|
+ char *sortfamily;
|
|
+ char *sortfamilynsp;
|
|
+ char *amprocnum;
|
|
+ char *amproc;
|
|
+ char *amproclefttype;
|
|
+ char *amprocrighttype;
|
|
+ bool needComma;
|
|
+ int i;
|
|
+
|
|
+ /* Skip if not to be dumped */
|
|
+ if (!opfinfo->dobj.dump || dataOnly)
|
|
+ return;
|
|
+
|
|
+ /*
|
|
+ * We want to dump the opfamily only if (1) it contains "loose" operators
|
|
+ * or functions, or (2) it contains an opclass with a different name or
|
|
+ * owner. Otherwise it's sufficient to let it be created during creation
|
|
+ * of the contained opclass, and not dumping it improves portability of
|
|
+ * the dump. Since we have to fetch the loose operators/funcs anyway, do
|
|
+ * that first.
|
|
+ */
|
|
+
|
|
+ query = createPQExpBuffer();
|
|
+ q = createPQExpBuffer();
|
|
+ delq = createPQExpBuffer();
|
|
+ labelq = createPQExpBuffer();
|
|
+
|
|
+ /* Make sure we are in proper schema so regoperator works correctly */
|
|
+ selectSourceSchema(fout, opfinfo->dobj.namespace->dobj.name);
|
|
+
|
|
+ /*
|
|
+ * Fetch only those opfamily members that are tied directly to the
|
|
+ * opfamily by pg_depend entries.
|
|
+ *
|
|
+ * XXX RECHECK is gone as of 8.4, but we'll still print it if dumping an
|
|
+ * older server's opclass in which it is used. This is to avoid
|
|
+ * hard-to-detect breakage if a newer pg_dump is used to dump from an
|
|
+ * older server and then reload into that old version. This can go away
|
|
+ * once 8.3 is so old as to not be of interest to anyone.
|
|
+ */
|
|
+ if (fout->remoteVersion >= 90100)
|
|
+ {
|
|
+ appendPQExpBuffer(query, "SELECT amopstrategy, false AS amopreqcheck, "
|
|
+ "amopopr::pg_catalog.regoperator, "
|
|
+ "opfname AS sortfamily, "
|
|
+ "nspname AS sortfamilynsp "
|
|
+ "FROM pg_catalog.pg_amop ao JOIN pg_catalog.pg_depend ON "
|
|
+ "(classid = 'pg_catalog.pg_amop'::pg_catalog.regclass AND objid = ao.oid) "
|
|
+ "LEFT JOIN pg_catalog.pg_opfamily f ON f.oid = amopsortfamily "
|
|
+ "LEFT JOIN pg_catalog.pg_namespace n ON n.oid = opfnamespace "
|
|
+ "WHERE refclassid = 'pg_catalog.pg_opfamily'::pg_catalog.regclass "
|
|
+ "AND refobjid = '%u'::pg_catalog.oid "
|
|
+ "AND amopfamily = '%u'::pg_catalog.oid "
|
|
+ "ORDER BY amopstrategy",
|
|
+ opfinfo->dobj.catId.oid,
|
|
+ opfinfo->dobj.catId.oid);
|
|
+ }
|
|
+ else if (fout->remoteVersion >= 80400)
|
|
+ {
|
|
+ appendPQExpBuffer(query, "SELECT amopstrategy, false AS amopreqcheck, "
|
|
+ "amopopr::pg_catalog.regoperator, "
|
|
+ "NULL AS sortfamily, "
|
|
+ "NULL AS sortfamilynsp "
|
|
+ "FROM pg_catalog.pg_amop ao, pg_catalog.pg_depend "
|
|
+ "WHERE refclassid = 'pg_catalog.pg_opfamily'::pg_catalog.regclass "
|
|
+ "AND refobjid = '%u'::pg_catalog.oid "
|
|
+ "AND classid = 'pg_catalog.pg_amop'::pg_catalog.regclass "
|
|
+ "AND objid = ao.oid "
|
|
+ "ORDER BY amopstrategy",
|
|
+ opfinfo->dobj.catId.oid);
|
|
+ }
|
|
+ else
|
|
+ {
|
|
+ appendPQExpBuffer(query, "SELECT amopstrategy, amopreqcheck, "
|
|
+ "amopopr::pg_catalog.regoperator, "
|
|
+ "NULL AS sortfamily, "
|
|
+ "NULL AS sortfamilynsp "
|
|
+ "FROM pg_catalog.pg_amop ao, pg_catalog.pg_depend "
|
|
+ "WHERE refclassid = 'pg_catalog.pg_opfamily'::pg_catalog.regclass "
|
|
+ "AND refobjid = '%u'::pg_catalog.oid "
|
|
+ "AND classid = 'pg_catalog.pg_amop'::pg_catalog.regclass "
|
|
+ "AND objid = ao.oid "
|
|
+ "ORDER BY amopstrategy",
|
|
+ opfinfo->dobj.catId.oid);
|
|
+ }
|
|
+
|
|
+ res_ops = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
|
|
+
|
|
+ resetPQExpBuffer(query);
|
|
+
|
|
+ appendPQExpBuffer(query, "SELECT amprocnum, "
|
|
+ "amproc::pg_catalog.regprocedure, "
|
|
+ "amproclefttype::pg_catalog.regtype, "
|
|
+ "amprocrighttype::pg_catalog.regtype "
|
|
+ "FROM pg_catalog.pg_amproc ap, pg_catalog.pg_depend "
|
|
+ "WHERE refclassid = 'pg_catalog.pg_opfamily'::pg_catalog.regclass "
|
|
+ "AND refobjid = '%u'::pg_catalog.oid "
|
|
+ "AND classid = 'pg_catalog.pg_amproc'::pg_catalog.regclass "
|
|
+ "AND objid = ap.oid "
|
|
+ "ORDER BY amprocnum",
|
|
+ opfinfo->dobj.catId.oid);
|
|
+
|
|
+ res_procs = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
|
|
+
|
|
+ if (PQntuples(res_ops) == 0 && PQntuples(res_procs) == 0)
|
|
+ {
|
|
+ /* No loose members, so check contained opclasses */
|
|
+ resetPQExpBuffer(query);
|
|
+
|
|
+ appendPQExpBuffer(query, "SELECT 1 "
|
|
+ "FROM pg_catalog.pg_opclass c, pg_catalog.pg_opfamily f, pg_catalog.pg_depend "
|
|
+ "WHERE f.oid = '%u'::pg_catalog.oid "
|
|
+ "AND refclassid = 'pg_catalog.pg_opfamily'::pg_catalog.regclass "
|
|
+ "AND refobjid = f.oid "
|
|
+ "AND classid = 'pg_catalog.pg_opclass'::pg_catalog.regclass "
|
|
+ "AND objid = c.oid "
|
|
+ "AND (opcname != opfname OR opcnamespace != opfnamespace OR opcowner != opfowner) "
|
|
+ "LIMIT 1",
|
|
+ opfinfo->dobj.catId.oid);
|
|
+
|
|
+ res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
|
|
+
|
|
+ if (PQntuples(res) == 0)
|
|
+ {
|
|
+ /* no need to dump it, so bail out */
|
|
+ PQclear(res);
|
|
+ PQclear(res_ops);
|
|
+ PQclear(res_procs);
|
|
+ destroyPQExpBuffer(query);
|
|
+ destroyPQExpBuffer(q);
|
|
+ destroyPQExpBuffer(delq);
|
|
+ destroyPQExpBuffer(labelq);
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ PQclear(res);
|
|
+ }
|
|
+
|
|
+ /* Get additional fields from the pg_opfamily row */
|
|
+ resetPQExpBuffer(query);
|
|
+
|
|
+ appendPQExpBuffer(query, "SELECT "
|
|
+ "(SELECT amname FROM pg_catalog.pg_am WHERE oid = opfmethod) AS amname "
|
|
+ "FROM pg_catalog.pg_opfamily "
|
|
+ "WHERE oid = '%u'::pg_catalog.oid",
|
|
+ opfinfo->dobj.catId.oid);
|
|
+
|
|
+ res = ExecuteSqlQueryForSingleRow(fout, query->data);
|
|
+
|
|
+ i_amname = PQfnumber(res, "amname");
|
|
+
|
|
+ /* amname will still be needed after we PQclear res */
|
|
+ amname = pg_strdup(PQgetvalue(res, 0, i_amname));
|
|
+
|
|
+ /*
|
|
+ * DROP must be fully qualified in case same name appears in pg_catalog
|
|
+ */
|
|
+ appendPQExpBuffer(delq, "DROP OPERATOR FAMILY %s",
|
|
+ fmtId(opfinfo->dobj.namespace->dobj.name));
|
|
+ appendPQExpBuffer(delq, ".%s",
|
|
+ fmtId(opfinfo->dobj.name));
|
|
+ appendPQExpBuffer(delq, " USING %s;\n",
|
|
+ fmtId(amname));
|
|
+
|
|
+ /* Build the fixed portion of the CREATE command */
|
|
+ appendPQExpBuffer(q, "CREATE OPERATOR FAMILY %s",
|
|
+ fmtId(opfinfo->dobj.name));
|
|
+ appendPQExpBuffer(q, " USING %s;\n",
|
|
+ fmtId(amname));
|
|
+
|
|
+ PQclear(res);
|
|
+
|
|
+ /* Do we need an ALTER to add loose members? */
|
|
+ if (PQntuples(res_ops) > 0 || PQntuples(res_procs) > 0)
|
|
+ {
|
|
+ appendPQExpBuffer(q, "ALTER OPERATOR FAMILY %s",
|
|
+ fmtId(opfinfo->dobj.name));
|
|
+ appendPQExpBuffer(q, " USING %s ADD\n ",
|
|
+ fmtId(amname));
|
|
+
|
|
+ needComma = false;
|
|
+
|
|
+ /*
|
|
+ * Now fetch and print the OPERATOR entries (pg_amop rows).
|
|
+ */
|
|
+ ntups = PQntuples(res_ops);
|
|
+
|
|
+ i_amopstrategy = PQfnumber(res_ops, "amopstrategy");
|
|
+ i_amopreqcheck = PQfnumber(res_ops, "amopreqcheck");
|
|
+ i_amopopr = PQfnumber(res_ops, "amopopr");
|
|
+ i_sortfamily = PQfnumber(res_ops, "sortfamily");
|
|
+ i_sortfamilynsp = PQfnumber(res_ops, "sortfamilynsp");
|
|
+
|
|
+ for (i = 0; i < ntups; i++)
|
|
+ {
|
|
+ amopstrategy = PQgetvalue(res_ops, i, i_amopstrategy);
|
|
+ amopreqcheck = PQgetvalue(res_ops, i, i_amopreqcheck);
|
|
+ amopopr = PQgetvalue(res_ops, i, i_amopopr);
|
|
+ sortfamily = PQgetvalue(res_ops, i, i_sortfamily);
|
|
+ sortfamilynsp = PQgetvalue(res_ops, i, i_sortfamilynsp);
|
|
+
|
|
+ if (needComma)
|
|
+ appendPQExpBufferStr(q, " ,\n ");
|
|
+
|
|
+ appendPQExpBuffer(q, "OPERATOR %s %s",
|
|
+ amopstrategy, amopopr);
|
|
+
|
|
+ if (strlen(sortfamily) > 0)
|
|
+ {
|
|
+ appendPQExpBufferStr(q, " FOR ORDER BY ");
|
|
+ if (strcmp(sortfamilynsp, opfinfo->dobj.namespace->dobj.name) != 0)
|
|
+ appendPQExpBuffer(q, "%s.", fmtId(sortfamilynsp));
|
|
+ appendPQExpBufferStr(q, fmtId(sortfamily));
|
|
+ }
|
|
+
|
|
+ if (strcmp(amopreqcheck, "t") == 0)
|
|
+ appendPQExpBufferStr(q, " RECHECK");
|
|
+
|
|
+ needComma = true;
|
|
+ }
|
|
+
|
|
+ /*
|
|
+ * Now fetch and print the FUNCTION entries (pg_amproc rows).
|
|
+ */
|
|
+ ntups = PQntuples(res_procs);
|
|
+
|
|
+ i_amprocnum = PQfnumber(res_procs, "amprocnum");
|
|
+ i_amproc = PQfnumber(res_procs, "amproc");
|
|
+ i_amproclefttype = PQfnumber(res_procs, "amproclefttype");
|
|
+ i_amprocrighttype = PQfnumber(res_procs, "amprocrighttype");
|
|
+
|
|
+ for (i = 0; i < ntups; i++)
|
|
+ {
|
|
+ amprocnum = PQgetvalue(res_procs, i, i_amprocnum);
|
|
+ amproc = PQgetvalue(res_procs, i, i_amproc);
|
|
+ amproclefttype = PQgetvalue(res_procs, i, i_amproclefttype);
|
|
+ amprocrighttype = PQgetvalue(res_procs, i, i_amprocrighttype);
|
|
+
|
|
+ if (needComma)
|
|
+ appendPQExpBufferStr(q, " ,\n ");
|
|
+
|
|
+ appendPQExpBuffer(q, "FUNCTION %s (%s, %s) %s",
|
|
+ amprocnum, amproclefttype, amprocrighttype,
|
|
+ amproc);
|
|
+
|
|
+ needComma = true;
|
|
+ }
|
|
+
|
|
+ appendPQExpBufferStr(q, ";\n");
|
|
+ }
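+
+ /*
+ * Illustration only (hypothetical object names): taken together, the
+ * commands assembled above typically read
+ *   CREATE OPERATOR FAMILY myfam USING btree;
+ *   ALTER OPERATOR FAMILY myfam USING btree ADD
+ *       OPERATOR 1 <(integer,bigint) ,
+ *       FUNCTION 1 (integer, bigint) btint48cmp(integer,bigint);
+ * The ALTER part is present only when loose members were found.
+ */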
+
|
|
+ appendPQExpBuffer(labelq, "OPERATOR FAMILY %s",
|
|
+ fmtId(opfinfo->dobj.name));
|
|
+ appendPQExpBuffer(labelq, " USING %s",
|
|
+ fmtId(amname));
|
|
+
|
|
+ if (binary_upgrade)
|
|
+ binary_upgrade_extension_member(q, &opfinfo->dobj, labelq->data);
|
|
+
|
|
+ ArchiveEntry(fout, opfinfo->dobj.catId, opfinfo->dobj.dumpId,
|
|
+ opfinfo->dobj.name,
|
|
+ opfinfo->dobj.namespace->dobj.name,
|
|
+ NULL,
|
|
+ opfinfo->rolname,
|
|
+ false, "OPERATOR FAMILY", SECTION_PRE_DATA,
|
|
+ q->data, delq->data, NULL,
|
|
+ NULL, 0,
|
|
+ NULL, NULL);
|
|
+
|
|
+ /* Dump Operator Family Comments */
|
|
+ dumpComment(fout, labelq->data,
|
|
+ NULL, opfinfo->rolname,
|
|
+ opfinfo->dobj.catId, 0, opfinfo->dobj.dumpId);
|
|
+
|
|
+ free(amname);
|
|
+ PQclear(res_ops);
|
|
+ PQclear(res_procs);
|
|
+ destroyPQExpBuffer(query);
|
|
+ destroyPQExpBuffer(q);
|
|
+ destroyPQExpBuffer(delq);
|
|
+ destroyPQExpBuffer(labelq);
|
|
+}
|
|
+
|
|
+/*
|
|
+ * dumpCollation
|
|
+ * write out a single collation definition
|
|
+ */
|
|
+static void
|
|
+dumpCollation(Archive *fout, CollInfo *collinfo)
|
|
+{
|
|
+ PQExpBuffer query;
|
|
+ PQExpBuffer q;
|
|
+ PQExpBuffer delq;
|
|
+ PQExpBuffer labelq;
|
|
+ PGresult *res;
|
|
+ int i_collcollate;
|
|
+ int i_collctype;
|
|
+ const char *collcollate;
|
|
+ const char *collctype;
|
|
+
|
|
+ /* Skip if not to be dumped */
|
|
+ if (!collinfo->dobj.dump || dataOnly)
|
|
+ return;
|
|
+
|
|
+ query = createPQExpBuffer();
|
|
+ q = createPQExpBuffer();
|
|
+ delq = createPQExpBuffer();
|
|
+ labelq = createPQExpBuffer();
|
|
+
|
|
+ /* Make sure we are in proper schema */
|
|
+ selectSourceSchema(fout, collinfo->dobj.namespace->dobj.name);
|
|
+
|
|
+ /* Get conversion-specific details */
|
|
+ appendPQExpBuffer(query, "SELECT "
|
|
+ "collcollate, "
|
|
+ "collctype "
|
|
+ "FROM pg_catalog.pg_collation c "
|
|
+ "WHERE c.oid = '%u'::pg_catalog.oid",
|
|
+ collinfo->dobj.catId.oid);
|
|
+
|
|
+ res = ExecuteSqlQueryForSingleRow(fout, query->data);
|
|
+
|
|
+ i_collcollate = PQfnumber(res, "collcollate");
|
|
+ i_collctype = PQfnumber(res, "collctype");
|
|
+
|
|
+ collcollate = PQgetvalue(res, 0, i_collcollate);
|
|
+ collctype = PQgetvalue(res, 0, i_collctype);
|
|
+
|
|
+ /*
|
|
+ * DROP must be fully qualified in case same name appears in pg_catalog
|
|
+ */
|
|
+ appendPQExpBuffer(delq, "DROP COLLATION %s",
|
|
+ fmtId(collinfo->dobj.namespace->dobj.name));
|
|
+ appendPQExpBuffer(delq, ".%s;\n",
|
|
+ fmtId(collinfo->dobj.name));
|
|
+
|
|
+ appendPQExpBuffer(q, "CREATE COLLATION %s (lc_collate = ",
|
|
+ fmtId(collinfo->dobj.name));
|
|
+ appendStringLiteralAH(q, collcollate, fout);
|
|
+ appendPQExpBufferStr(q, ", lc_ctype = ");
|
|
+ appendStringLiteralAH(q, collctype, fout);
|
|
+ appendPQExpBufferStr(q, ");\n");
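+
+ /*
+ * Illustration only (hypothetical name and locales): the command built
+ * above comes out as, e.g.
+ *   CREATE COLLATION mycoll (lc_collate = 'de_DE.utf8', lc_ctype = 'de_DE.utf8');
+ */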
+
|
|
+ appendPQExpBuffer(labelq, "COLLATION %s", fmtId(collinfo->dobj.name));
|
|
+
|
|
+ if (binary_upgrade)
|
|
+ binary_upgrade_extension_member(q, &collinfo->dobj, labelq->data);
|
|
+
|
|
+ ArchiveEntry(fout, collinfo->dobj.catId, collinfo->dobj.dumpId,
|
|
+ collinfo->dobj.name,
|
|
+ collinfo->dobj.namespace->dobj.name,
|
|
+ NULL,
|
|
+ collinfo->rolname,
|
|
+ false, "COLLATION", SECTION_PRE_DATA,
|
|
+ q->data, delq->data, NULL,
|
|
+ NULL, 0,
|
|
+ NULL, NULL);
|
|
+
|
|
+ /* Dump Collation Comments */
|
|
+ dumpComment(fout, labelq->data,
|
|
+ collinfo->dobj.namespace->dobj.name, collinfo->rolname,
|
|
+ collinfo->dobj.catId, 0, collinfo->dobj.dumpId);
|
|
+
|
|
+ PQclear(res);
|
|
+
|
|
+ destroyPQExpBuffer(query);
|
|
+ destroyPQExpBuffer(q);
|
|
+ destroyPQExpBuffer(delq);
|
|
+ destroyPQExpBuffer(labelq);
|
|
+}
|
|
+
|
|
+/*
|
|
+ * dumpConversion
|
|
+ * write out a single conversion definition
|
|
+ */
|
|
+static void
|
|
+dumpConversion(Archive *fout, ConvInfo *convinfo)
|
|
+{
|
|
+ PQExpBuffer query;
|
|
+ PQExpBuffer q;
|
|
+ PQExpBuffer delq;
|
|
+ PQExpBuffer labelq;
|
|
+ PGresult *res;
|
|
+ int i_conforencoding;
|
|
+ int i_contoencoding;
|
|
+ int i_conproc;
|
|
+ int i_condefault;
|
|
+ const char *conforencoding;
|
|
+ const char *contoencoding;
|
|
+ const char *conproc;
|
|
+ bool condefault;
|
|
+
|
|
+ /* Skip if not to be dumped */
|
|
+ if (!convinfo->dobj.dump || dataOnly)
|
|
+ return;
|
|
+
|
|
+ query = createPQExpBuffer();
|
|
+ q = createPQExpBuffer();
|
|
+ delq = createPQExpBuffer();
|
|
+ labelq = createPQExpBuffer();
|
|
+
|
|
+ /* Make sure we are in proper schema */
|
|
+ selectSourceSchema(fout, convinfo->dobj.namespace->dobj.name);
|
|
+
|
|
+ /* Get conversion-specific details */
|
|
+ appendPQExpBuffer(query, "SELECT "
|
|
+ "pg_catalog.pg_encoding_to_char(conforencoding) AS conforencoding, "
|
|
+ "pg_catalog.pg_encoding_to_char(contoencoding) AS contoencoding, "
|
|
+ "conproc, condefault "
|
|
+ "FROM pg_catalog.pg_conversion c "
|
|
+ "WHERE c.oid = '%u'::pg_catalog.oid",
|
|
+ convinfo->dobj.catId.oid);
|
|
+
|
|
+ res = ExecuteSqlQueryForSingleRow(fout, query->data);
|
|
+
|
|
+ i_conforencoding = PQfnumber(res, "conforencoding");
|
|
+ i_contoencoding = PQfnumber(res, "contoencoding");
|
|
+ i_conproc = PQfnumber(res, "conproc");
|
|
+ i_condefault = PQfnumber(res, "condefault");
|
|
+
|
|
+ conforencoding = PQgetvalue(res, 0, i_conforencoding);
|
|
+ contoencoding = PQgetvalue(res, 0, i_contoencoding);
|
|
+ conproc = PQgetvalue(res, 0, i_conproc);
|
|
+ condefault = (PQgetvalue(res, 0, i_condefault)[0] == 't');
|
|
+
|
|
+ /*
|
|
+ * DROP must be fully qualified in case same name appears in pg_catalog
|
|
+ */
|
|
+ appendPQExpBuffer(delq, "DROP CONVERSION %s",
|
|
+ fmtId(convinfo->dobj.namespace->dobj.name));
|
|
+ appendPQExpBuffer(delq, ".%s;\n",
|
|
+ fmtId(convinfo->dobj.name));
|
|
+
|
|
+ appendPQExpBuffer(q, "CREATE %sCONVERSION %s FOR ",
|
|
+ (condefault) ? "DEFAULT " : "",
|
|
+ fmtId(convinfo->dobj.name));
|
|
+ appendStringLiteralAH(q, conforencoding, fout);
|
|
+ appendPQExpBufferStr(q, " TO ");
|
|
+ appendStringLiteralAH(q, contoencoding, fout);
|
|
+ /* regproc is automatically quoted in 7.3 and above */
|
|
+ appendPQExpBuffer(q, " FROM %s;\n", conproc);
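+
+ /*
+ * Illustration only (hypothetical names): the command built above comes
+ * out as, e.g.
+ *   CREATE DEFAULT CONVERSION myconv FOR 'LATIN1' TO 'UTF8' FROM myconvproc;
+ * where the DEFAULT keyword appears only if condefault was true.
+ */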
+
|
|
+ appendPQExpBuffer(labelq, "CONVERSION %s", fmtId(convinfo->dobj.name));
|
|
+
|
|
+ if (binary_upgrade)
|
|
+ binary_upgrade_extension_member(q, &convinfo->dobj, labelq->data);
|
|
+
|
|
+ ArchiveEntry(fout, convinfo->dobj.catId, convinfo->dobj.dumpId,
|
|
+ convinfo->dobj.name,
|
|
+ convinfo->dobj.namespace->dobj.name,
|
|
+ NULL,
|
|
+ convinfo->rolname,
|
|
+ false, "CONVERSION", SECTION_PRE_DATA,
|
|
+ q->data, delq->data, NULL,
|
|
+ NULL, 0,
|
|
+ NULL, NULL);
|
|
+
|
|
+ /* Dump Conversion Comments */
|
|
+ dumpComment(fout, labelq->data,
|
|
+ convinfo->dobj.namespace->dobj.name, convinfo->rolname,
|
|
+ convinfo->dobj.catId, 0, convinfo->dobj.dumpId);
|
|
+
|
|
+ PQclear(res);
|
|
+
|
|
+ destroyPQExpBuffer(query);
|
|
+ destroyPQExpBuffer(q);
|
|
+ destroyPQExpBuffer(delq);
|
|
+ destroyPQExpBuffer(labelq);
|
|
+}
|
|
+
|
|
+/*
|
|
+ * format_aggregate_signature: generate aggregate name and argument list
|
|
+ *
|
|
+ * The argument type names are qualified if needed. The aggregate name
|
|
+ * is never qualified.
|
|
+ */
|
|
+static char *
|
|
+format_aggregate_signature(AggInfo *agginfo, Archive *fout, bool honor_quotes)
|
|
+{
|
|
+ PQExpBufferData buf;
|
|
+ int j;
|
|
+
|
|
+ initPQExpBuffer(&buf);
|
|
+ if (honor_quotes)
|
|
+ appendPQExpBufferStr(&buf, fmtId(agginfo->aggfn.dobj.name));
|
|
+ else
|
|
+ appendPQExpBufferStr(&buf, agginfo->aggfn.dobj.name);
|
|
+
|
|
+ if (agginfo->aggfn.nargs == 0)
|
|
+ appendPQExpBuffer(&buf, "(*)");
|
|
+ else
|
|
+ {
|
|
+ appendPQExpBufferChar(&buf, '(');
|
|
+ for (j = 0; j < agginfo->aggfn.nargs; j++)
|
|
+ {
|
|
+ char *typname;
|
|
+
|
|
+ typname = getFormattedTypeName(fout, agginfo->aggfn.argtypes[j],
|
|
+ zeroAsOpaque);
|
|
+
|
|
+ appendPQExpBuffer(&buf, "%s%s",
|
|
+ (j > 0) ? ", " : "",
|
|
+ typname);
|
|
+ free(typname);
|
|
+ }
|
|
+ appendPQExpBufferChar(&buf, ')');
|
|
+ }
|
|
+ return buf.data;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * dumpAgg
|
|
+ * write out a single aggregate definition
|
|
+ */
|
|
+static void
|
|
+dumpAgg(Archive *fout, AggInfo *agginfo)
|
|
+{
|
|
+ PQExpBuffer query;
|
|
+ PQExpBuffer q;
|
|
+ PQExpBuffer delq;
|
|
+ PQExpBuffer labelq;
|
|
+ PQExpBuffer details;
|
|
+ char *aggsig; /* identity signature */
|
|
+ char *aggfullsig = NULL; /* full signature */
|
|
+ char *aggsig_tag;
|
|
+ PGresult *res;
|
|
+ int i_aggtransfn;
|
|
+ int i_aggfinalfn;
|
|
+ int i_aggmtransfn;
|
|
+ int i_aggminvtransfn;
|
|
+ int i_aggmfinalfn;
|
|
+ int i_aggfinalextra;
|
|
+ int i_aggmfinalextra;
|
|
+ int i_aggsortop;
|
|
+ int i_hypothetical;
|
|
+ int i_aggtranstype;
|
|
+ int i_aggtransspace;
|
|
+ int i_aggmtranstype;
|
|
+ int i_aggmtransspace;
|
|
+ int i_agginitval;
|
|
+ int i_aggminitval;
|
|
+ int i_convertok;
|
|
+ const char *aggtransfn;
|
|
+ const char *aggfinalfn;
|
|
+ const char *aggmtransfn;
|
|
+ const char *aggminvtransfn;
|
|
+ const char *aggmfinalfn;
|
|
+ bool aggfinalextra;
|
|
+ bool aggmfinalextra;
|
|
+ const char *aggsortop;
|
|
+ char *aggsortconvop;
|
|
+ bool hypothetical;
|
|
+ const char *aggtranstype;
|
|
+ const char *aggtransspace;
|
|
+ const char *aggmtranstype;
|
|
+ const char *aggmtransspace;
|
|
+ const char *agginitval;
|
|
+ const char *aggminitval;
|
|
+ bool convertok;
|
|
+
|
|
+ /* Skip if not to be dumped */
|
|
+ if (!agginfo->aggfn.dobj.dump || dataOnly)
|
|
+ return;
|
|
+
|
|
+ query = createPQExpBuffer();
|
|
+ q = createPQExpBuffer();
|
|
+ delq = createPQExpBuffer();
|
|
+ labelq = createPQExpBuffer();
|
|
+ details = createPQExpBuffer();
|
|
+
|
|
+ /* Make sure we are in proper schema */
|
|
+ selectSourceSchema(fout, agginfo->aggfn.dobj.namespace->dobj.name);
|
|
+
|
|
+ /* Get aggregate-specific details */
|
|
+ if (fout->remoteVersion >= 90400)
|
|
+ {
|
|
+ appendPQExpBuffer(query, "SELECT aggtransfn, "
|
|
+ "aggfinalfn, aggtranstype::pg_catalog.regtype, "
|
|
+ "aggmtransfn, aggminvtransfn, aggmfinalfn, "
|
|
+ "aggmtranstype::pg_catalog.regtype, "
|
|
+ "aggfinalextra, aggmfinalextra, "
|
|
+ "aggsortop::pg_catalog.regoperator, "
|
|
+ "(aggkind = 'h') AS hypothetical, "
|
|
+ "aggtransspace, agginitval, "
|
|
+ "aggmtransspace, aggminitval, "
|
|
+ "true AS convertok, "
|
|
+ "pg_catalog.pg_get_function_arguments(p.oid) AS funcargs, "
|
|
+ "pg_catalog.pg_get_function_identity_arguments(p.oid) AS funciargs "
|
|
+ "FROM pg_catalog.pg_aggregate a, pg_catalog.pg_proc p "
|
|
+ "WHERE a.aggfnoid = p.oid "
|
|
+ "AND p.oid = '%u'::pg_catalog.oid",
|
|
+ agginfo->aggfn.dobj.catId.oid);
|
|
+ }
|
|
+ else if (fout->remoteVersion >= 80400)
|
|
+ {
|
|
+ appendPQExpBuffer(query, "SELECT aggtransfn, "
|
|
+ "aggfinalfn, aggtranstype::pg_catalog.regtype, "
|
|
+ "'-' AS aggmtransfn, '-' AS aggminvtransfn, "
|
|
+ "'-' AS aggmfinalfn, 0 AS aggmtranstype, "
|
|
+ "false AS aggfinalextra, false AS aggmfinalextra, "
|
|
+ "aggsortop::pg_catalog.regoperator, "
|
|
+ "false AS hypothetical, "
|
|
+ "0 AS aggtransspace, agginitval, "
|
|
+ "0 AS aggmtransspace, NULL AS aggminitval, "
|
|
+ "true AS convertok, "
|
|
+ "pg_catalog.pg_get_function_arguments(p.oid) AS funcargs, "
|
|
+ "pg_catalog.pg_get_function_identity_arguments(p.oid) AS funciargs "
|
|
+ "FROM pg_catalog.pg_aggregate a, pg_catalog.pg_proc p "
|
|
+ "WHERE a.aggfnoid = p.oid "
|
|
+ "AND p.oid = '%u'::pg_catalog.oid",
|
|
+ agginfo->aggfn.dobj.catId.oid);
|
|
+ }
|
|
+ else if (fout->remoteVersion >= 80100)
|
|
+ {
|
|
+ appendPQExpBuffer(query, "SELECT aggtransfn, "
|
|
+ "aggfinalfn, aggtranstype::pg_catalog.regtype, "
|
|
+ "'-' AS aggmtransfn, '-' AS aggminvtransfn, "
|
|
+ "'-' AS aggmfinalfn, 0 AS aggmtranstype, "
|
|
+ "false AS aggfinalextra, false AS aggmfinalextra, "
|
|
+ "aggsortop::pg_catalog.regoperator, "
|
|
+ "false AS hypothetical, "
|
|
+ "0 AS aggtransspace, agginitval, "
|
|
+ "0 AS aggmtransspace, NULL AS aggminitval, "
|
|
+ "true AS convertok "
|
|
+ "FROM pg_catalog.pg_aggregate a, pg_catalog.pg_proc p "
|
|
+ "WHERE a.aggfnoid = p.oid "
|
|
+ "AND p.oid = '%u'::pg_catalog.oid",
|
|
+ agginfo->aggfn.dobj.catId.oid);
|
|
+ }
|
|
+ else if (fout->remoteVersion >= 70300)
|
|
+ {
|
|
+ appendPQExpBuffer(query, "SELECT aggtransfn, "
|
|
+ "aggfinalfn, aggtranstype::pg_catalog.regtype, "
|
|
+ "'-' AS aggmtransfn, '-' AS aggminvtransfn, "
|
|
+ "'-' AS aggmfinalfn, 0 AS aggmtranstype, "
|
|
+ "false AS aggfinalextra, false AS aggmfinalextra, "
|
|
+ "0 AS aggsortop, "
|
|
+ "false AS hypothetical, "
|
|
+ "0 AS aggtransspace, agginitval, "
|
|
+ "0 AS aggmtransspace, NULL AS aggminitval, "
|
|
+ "true AS convertok "
|
|
+ "FROM pg_catalog.pg_aggregate a, pg_catalog.pg_proc p "
|
|
+ "WHERE a.aggfnoid = p.oid "
|
|
+ "AND p.oid = '%u'::pg_catalog.oid",
|
|
+ agginfo->aggfn.dobj.catId.oid);
|
|
+ }
|
|
+ else if (fout->remoteVersion >= 70100)
|
|
+ {
|
|
+ appendPQExpBuffer(query, "SELECT aggtransfn, aggfinalfn, "
|
|
+ "format_type(aggtranstype, NULL) AS aggtranstype, "
|
|
+ "'-' AS aggmtransfn, '-' AS aggminvtransfn, "
|
|
+ "'-' AS aggmfinalfn, 0 AS aggmtranstype, "
|
|
+ "false AS aggfinalextra, false AS aggmfinalextra, "
|
|
+ "0 AS aggsortop, "
|
|
+ "false AS hypothetical, "
|
|
+ "0 AS aggtransspace, agginitval, "
|
|
+ "0 AS aggmtransspace, NULL AS aggminitval, "
|
|
+ "true AS convertok "
|
|
+ "FROM pg_aggregate "
|
|
+ "WHERE oid = '%u'::oid",
|
|
+ agginfo->aggfn.dobj.catId.oid);
|
|
+ }
|
|
+ else
|
|
+ {
|
|
+ appendPQExpBuffer(query, "SELECT aggtransfn1 AS aggtransfn, "
|
|
+ "aggfinalfn, "
|
|
+ "(SELECT typname FROM pg_type WHERE oid = aggtranstype1) AS aggtranstype, "
|
|
+ "'-' AS aggmtransfn, '-' AS aggminvtransfn, "
|
|
+ "'-' AS aggmfinalfn, 0 AS aggmtranstype, "
|
|
+ "false AS aggfinalextra, false AS aggmfinalextra, "
|
|
+ "0 AS aggsortop, "
|
|
+ "false AS hypothetical, "
|
|
+ "0 AS aggtransspace, agginitval1 AS agginitval, "
|
|
+ "0 AS aggmtransspace, NULL AS aggminitval, "
|
|
+ "(aggtransfn2 = 0 and aggtranstype2 = 0 and agginitval2 is null) AS convertok "
|
|
+ "FROM pg_aggregate "
|
|
+ "WHERE oid = '%u'::oid",
|
|
+ agginfo->aggfn.dobj.catId.oid);
|
|
+ }
|
|
+
|
|
+ res = ExecuteSqlQueryForSingleRow(fout, query->data);
|
|
+
|
|
+ i_aggtransfn = PQfnumber(res, "aggtransfn");
|
|
+ i_aggfinalfn = PQfnumber(res, "aggfinalfn");
|
|
+ i_aggmtransfn = PQfnumber(res, "aggmtransfn");
|
|
+ i_aggminvtransfn = PQfnumber(res, "aggminvtransfn");
|
|
+ i_aggmfinalfn = PQfnumber(res, "aggmfinalfn");
|
|
+ i_aggfinalextra = PQfnumber(res, "aggfinalextra");
|
|
+ i_aggmfinalextra = PQfnumber(res, "aggmfinalextra");
|
|
+ i_aggsortop = PQfnumber(res, "aggsortop");
|
|
+ i_hypothetical = PQfnumber(res, "hypothetical");
|
|
+ i_aggtranstype = PQfnumber(res, "aggtranstype");
|
|
+ i_aggtransspace = PQfnumber(res, "aggtransspace");
|
|
+ i_aggmtranstype = PQfnumber(res, "aggmtranstype");
|
|
+ i_aggmtransspace = PQfnumber(res, "aggmtransspace");
|
|
+ i_agginitval = PQfnumber(res, "agginitval");
|
|
+ i_aggminitval = PQfnumber(res, "aggminitval");
|
|
+ i_convertok = PQfnumber(res, "convertok");
|
|
+
|
|
+ aggtransfn = PQgetvalue(res, 0, i_aggtransfn);
|
|
+ aggfinalfn = PQgetvalue(res, 0, i_aggfinalfn);
|
|
+ aggmtransfn = PQgetvalue(res, 0, i_aggmtransfn);
|
|
+ aggminvtransfn = PQgetvalue(res, 0, i_aggminvtransfn);
|
|
+ aggmfinalfn = PQgetvalue(res, 0, i_aggmfinalfn);
|
|
+ aggfinalextra = (PQgetvalue(res, 0, i_aggfinalextra)[0] == 't');
|
|
+ aggmfinalextra = (PQgetvalue(res, 0, i_aggmfinalextra)[0] == 't');
|
|
+ aggsortop = PQgetvalue(res, 0, i_aggsortop);
|
|
+ hypothetical = (PQgetvalue(res, 0, i_hypothetical)[0] == 't');
|
|
+ aggtranstype = PQgetvalue(res, 0, i_aggtranstype);
|
|
+ aggtransspace = PQgetvalue(res, 0, i_aggtransspace);
|
|
+ aggmtranstype = PQgetvalue(res, 0, i_aggmtranstype);
|
|
+ aggmtransspace = PQgetvalue(res, 0, i_aggmtransspace);
|
|
+ agginitval = PQgetvalue(res, 0, i_agginitval);
|
|
+ aggminitval = PQgetvalue(res, 0, i_aggminitval);
|
|
+ convertok = (PQgetvalue(res, 0, i_convertok)[0] == 't');
|
|
+
|
|
+ if (fout->remoteVersion >= 80400)
|
|
+ {
|
|
+ /* 8.4 or later; we rely on server-side code for most of the work */
|
|
+ char *funcargs;
|
|
+ char *funciargs;
|
|
+
|
|
+ funcargs = PQgetvalue(res, 0, PQfnumber(res, "funcargs"));
|
|
+ funciargs = PQgetvalue(res, 0, PQfnumber(res, "funciargs"));
|
|
+ aggfullsig = format_function_arguments(&agginfo->aggfn, funcargs, true);
|
|
+ aggsig = format_function_arguments(&agginfo->aggfn, funciargs, true);
|
|
+ }
|
|
+ else
|
|
+ /* pre-8.4, do it ourselves */
|
|
+ aggsig = format_aggregate_signature(agginfo, fout, true);
|
|
+
|
|
+ aggsig_tag = format_aggregate_signature(agginfo, fout, false);
|
|
+
|
|
+ if (!convertok)
|
|
+ {
|
|
+ write_msg(NULL, "WARNING: aggregate function %s could not be dumped correctly for this database version; ignored\n",
|
|
+ aggsig);
|
|
+
|
|
+ if (aggfullsig)
|
|
+ free(aggfullsig);
|
|
+
|
|
+ free(aggsig);
|
|
+
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ if (fout->remoteVersion >= 70300)
|
|
+ {
|
|
+ /* If using 7.3's regproc or regtype, data is already quoted */
|
|
+ appendPQExpBuffer(details, " SFUNC = %s,\n STYPE = %s",
|
|
+ aggtransfn,
|
|
+ aggtranstype);
|
|
+ }
|
|
+ else if (fout->remoteVersion >= 70100)
|
|
+ {
|
|
+ /* format_type quotes, regproc does not */
|
|
+ appendPQExpBuffer(details, " SFUNC = %s,\n STYPE = %s",
|
|
+ fmtId(aggtransfn),
|
|
+ aggtranstype);
|
|
+ }
|
|
+ else
|
|
+ {
|
|
+ /* need quotes all around */
|
|
+ appendPQExpBuffer(details, " SFUNC = %s,\n",
|
|
+ fmtId(aggtransfn));
|
|
+ appendPQExpBuffer(details, " STYPE = %s",
|
|
+ fmtId(aggtranstype));
|
|
+ }
|
|
+
|
|
+ if (strcmp(aggtransspace, "0") != 0)
|
|
+ {
|
|
+ appendPQExpBuffer(details, ",\n SSPACE = %s",
|
|
+ aggtransspace);
|
|
+ }
|
|
+
|
|
+ if (!PQgetisnull(res, 0, i_agginitval))
|
|
+ {
|
|
+ appendPQExpBufferStr(details, ",\n INITCOND = ");
|
|
+ appendStringLiteralAH(details, agginitval, fout);
|
|
+ }
|
|
+
|
|
+ if (strcmp(aggfinalfn, "-") != 0)
|
|
+ {
|
|
+ appendPQExpBuffer(details, ",\n FINALFUNC = %s",
|
|
+ aggfinalfn);
|
|
+ if (aggfinalextra)
|
|
+ appendPQExpBufferStr(details, ",\n FINALFUNC_EXTRA");
|
|
+ }
|
|
+
|
|
+ if (strcmp(aggmtransfn, "-") != 0)
|
|
+ {
|
|
+ appendPQExpBuffer(details, ",\n MSFUNC = %s,\n MINVFUNC = %s,\n MSTYPE = %s",
|
|
+ aggmtransfn,
|
|
+ aggminvtransfn,
|
|
+ aggmtranstype);
|
|
+ }
|
|
+
|
|
+ if (strcmp(aggmtransspace, "0") != 0)
|
|
+ {
|
|
+ appendPQExpBuffer(details, ",\n MSSPACE = %s",
|
|
+ aggmtransspace);
|
|
+ }
|
|
+
|
|
+ if (!PQgetisnull(res, 0, i_aggminitval))
|
|
+ {
|
|
+ appendPQExpBufferStr(details, ",\n MINITCOND = ");
|
|
+ appendStringLiteralAH(details, aggminitval, fout);
|
|
+ }
|
|
+
|
|
+ if (strcmp(aggmfinalfn, "-") != 0)
|
|
+ {
|
|
+ appendPQExpBuffer(details, ",\n MFINALFUNC = %s",
|
|
+ aggmfinalfn);
|
|
+ if (aggmfinalextra)
|
|
+ appendPQExpBufferStr(details, ",\n MFINALFUNC_EXTRA");
|
|
+ }
|
|
+
|
|
+ aggsortconvop = convertOperatorReference(fout, aggsortop);
|
|
+ if (aggsortconvop)
|
|
+ {
|
|
+ appendPQExpBuffer(details, ",\n SORTOP = %s",
|
|
+ aggsortconvop);
|
|
+ free(aggsortconvop);
|
|
+ }
|
|
+
|
|
+ if (hypothetical)
|
|
+ appendPQExpBufferStr(details, ",\n HYPOTHETICAL");
|
|
+
|
|
+ /*
|
|
+ * DROP must be fully qualified in case same name appears in pg_catalog
|
|
+ */
|
|
+ appendPQExpBuffer(delq, "DROP AGGREGATE %s.%s;\n",
|
|
+ fmtId(agginfo->aggfn.dobj.namespace->dobj.name),
|
|
+ aggsig);
|
|
+
|
|
+ appendPQExpBuffer(q, "CREATE AGGREGATE %s (\n%s\n);\n",
|
|
+ aggfullsig ? aggfullsig : aggsig, details->data);
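+
+ /*
+ * Illustration only (hypothetical names): the assembled command
+ * typically reads
+ *   CREATE AGGREGATE my_sum(integer) (
+ *       SFUNC = int4pl,
+ *       STYPE = integer,
+ *       INITCOND = '0'
+ *   );
+ * with further clauses (FINALFUNC, SORTOP, moving-aggregate support,
+ * HYPOTHETICAL, ...) appended only when the catalog row calls for them.
+ */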
+
|
|
+ appendPQExpBuffer(labelq, "AGGREGATE %s", aggsig);
|
|
+
|
|
+ if (binary_upgrade)
|
|
+ binary_upgrade_extension_member(q, &agginfo->aggfn.dobj, labelq->data);
|
|
+
|
|
+ ArchiveEntry(fout, agginfo->aggfn.dobj.catId, agginfo->aggfn.dobj.dumpId,
|
|
+ aggsig_tag,
|
|
+ agginfo->aggfn.dobj.namespace->dobj.name,
|
|
+ NULL,
|
|
+ agginfo->aggfn.rolname,
|
|
+ false, "AGGREGATE", SECTION_PRE_DATA,
|
|
+ q->data, delq->data, NULL,
|
|
+ NULL, 0,
|
|
+ NULL, NULL);
|
|
+
|
|
+ /* Dump Aggregate Comments */
|
|
+ dumpComment(fout, labelq->data,
|
|
+ agginfo->aggfn.dobj.namespace->dobj.name, agginfo->aggfn.rolname,
|
|
+ agginfo->aggfn.dobj.catId, 0, agginfo->aggfn.dobj.dumpId);
|
|
+ dumpSecLabel(fout, labelq->data,
|
|
+ agginfo->aggfn.dobj.namespace->dobj.name, agginfo->aggfn.rolname,
|
|
+ agginfo->aggfn.dobj.catId, 0, agginfo->aggfn.dobj.dumpId);
|
|
+
|
|
+ /*
|
|
+ * Since there is no GRANT ON AGGREGATE syntax, we have to make the ACL
|
|
+ * command look like a function's GRANT; in particular this affects the
|
|
+ * syntax for zero-argument aggregates and ordered-set aggregates.
|
|
+ */
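+
+ /*
+ * For example (hypothetical names), the ACL for my_sum(integer) is
+ * therefore emitted with function syntax:
+ *   GRANT ALL ON FUNCTION my_sum(integer) TO someuser;
+ */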
+ free(aggsig);
|
|
+ free(aggsig_tag);
|
|
+
|
|
+ aggsig = format_function_signature(fout, &agginfo->aggfn, true);
|
|
+ aggsig_tag = format_function_signature(fout, &agginfo->aggfn, false);
|
|
+
|
|
+ dumpACL(fout, agginfo->aggfn.dobj.catId, agginfo->aggfn.dobj.dumpId,
|
|
+ "FUNCTION",
|
|
+ aggsig, NULL, aggsig_tag,
|
|
+ agginfo->aggfn.dobj.namespace->dobj.name,
|
|
+ agginfo->aggfn.rolname, agginfo->aggfn.proacl);
|
|
+
|
|
+ free(aggsig);
|
|
+ if (aggfullsig)
|
|
+ free(aggfullsig);
|
|
+ free(aggsig_tag);
|
|
+
|
|
+ PQclear(res);
|
|
+
|
|
+ destroyPQExpBuffer(query);
|
|
+ destroyPQExpBuffer(q);
|
|
+ destroyPQExpBuffer(delq);
|
|
+ destroyPQExpBuffer(labelq);
|
|
+ destroyPQExpBuffer(details);
|
|
+}
|
|
+
|
|
+/*
|
|
+ * dumpTSParser
|
|
+ * write out a single text search parser
|
|
+ */
|
|
+static void
|
|
+dumpTSParser(Archive *fout, TSParserInfo *prsinfo)
|
|
+{
|
|
+ PQExpBuffer q;
|
|
+ PQExpBuffer delq;
|
|
+ PQExpBuffer labelq;
|
|
+
|
|
+ /* Skip if not to be dumped */
|
|
+ if (!prsinfo->dobj.dump || dataOnly)
|
|
+ return;
|
|
+
|
|
+ q = createPQExpBuffer();
|
|
+ delq = createPQExpBuffer();
|
|
+ labelq = createPQExpBuffer();
|
|
+
|
|
+ /* Make sure we are in proper schema */
|
|
+ selectSourceSchema(fout, prsinfo->dobj.namespace->dobj.name);
|
|
+
|
|
+ appendPQExpBuffer(q, "CREATE TEXT SEARCH PARSER %s (\n",
|
|
+ fmtId(prsinfo->dobj.name));
|
|
+
|
|
+ appendPQExpBuffer(q, " START = %s,\n",
|
|
+ convertTSFunction(fout, prsinfo->prsstart));
|
|
+ appendPQExpBuffer(q, " GETTOKEN = %s,\n",
|
|
+ convertTSFunction(fout, prsinfo->prstoken));
|
|
+ appendPQExpBuffer(q, " END = %s,\n",
|
|
+ convertTSFunction(fout, prsinfo->prsend));
|
|
+ if (prsinfo->prsheadline != InvalidOid)
|
|
+ appendPQExpBuffer(q, " HEADLINE = %s,\n",
|
|
+ convertTSFunction(fout, prsinfo->prsheadline));
|
|
+ appendPQExpBuffer(q, " LEXTYPES = %s );\n",
|
|
+ convertTSFunction(fout, prsinfo->prslextype));
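+
+ /*
+ * Illustration only (my_parser is a made-up name): for the built-in
+ * default parser support functions the generated command would read
+ * roughly
+ *   CREATE TEXT SEARCH PARSER my_parser (
+ *       START = prsd_start,
+ *       GETTOKEN = prsd_nexttoken,
+ *       END = prsd_end,
+ *       LEXTYPES = prsd_lextype );
+ * with a HEADLINE clause only when prsheadline is set.
+ */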
+
|
|
+ /*
|
|
+ * DROP must be fully qualified in case same name appears in pg_catalog
|
|
+ */
|
|
+ appendPQExpBuffer(delq, "DROP TEXT SEARCH PARSER %s",
|
|
+ fmtId(prsinfo->dobj.namespace->dobj.name));
|
|
+ appendPQExpBuffer(delq, ".%s;\n",
|
|
+ fmtId(prsinfo->dobj.name));
|
|
+
|
|
+ appendPQExpBuffer(labelq, "TEXT SEARCH PARSER %s",
|
|
+ fmtId(prsinfo->dobj.name));
|
|
+
|
|
+ if (binary_upgrade)
|
|
+ binary_upgrade_extension_member(q, &prsinfo->dobj, labelq->data);
|
|
+
|
|
+ ArchiveEntry(fout, prsinfo->dobj.catId, prsinfo->dobj.dumpId,
|
|
+ prsinfo->dobj.name,
|
|
+ prsinfo->dobj.namespace->dobj.name,
|
|
+ NULL,
|
|
+ "",
|
|
+ false, "TEXT SEARCH PARSER", SECTION_PRE_DATA,
|
|
+ q->data, delq->data, NULL,
|
|
+ NULL, 0,
|
|
+ NULL, NULL);
|
|
+
|
|
+ /* Dump Parser Comments */
|
|
+ dumpComment(fout, labelq->data,
|
|
+ NULL, "",
|
|
+ prsinfo->dobj.catId, 0, prsinfo->dobj.dumpId);
|
|
+
|
|
+ destroyPQExpBuffer(q);
|
|
+ destroyPQExpBuffer(delq);
|
|
+ destroyPQExpBuffer(labelq);
|
|
+}
|
|
+
|
|
+/*
|
|
+ * dumpTSDictionary
|
|
+ * write out a single text search dictionary
|
|
+ */
|
|
+static void
|
|
+dumpTSDictionary(Archive *fout, TSDictInfo *dictinfo)
|
|
+{
|
|
+ PQExpBuffer q;
|
|
+ PQExpBuffer delq;
|
|
+ PQExpBuffer labelq;
|
|
+ PQExpBuffer query;
|
|
+ PGresult *res;
|
|
+ char *nspname;
|
|
+ char *tmplname;
|
|
+
|
|
+ /* Skip if not to be dumped */
|
|
+ if (!dictinfo->dobj.dump || dataOnly)
|
|
+ return;
|
|
+
|
|
+ q = createPQExpBuffer();
|
|
+ delq = createPQExpBuffer();
|
|
+ labelq = createPQExpBuffer();
|
|
+ query = createPQExpBuffer();
|
|
+
|
|
+ /* Fetch name and namespace of the dictionary's template */
|
|
+ selectSourceSchema(fout, "pg_catalog");
|
|
+ appendPQExpBuffer(query, "SELECT nspname, tmplname "
|
|
+ "FROM pg_ts_template p, pg_namespace n "
|
|
+ "WHERE p.oid = '%u' AND n.oid = tmplnamespace",
|
|
+ dictinfo->dicttemplate);
|
|
+ res = ExecuteSqlQueryForSingleRow(fout, query->data);
|
|
+ nspname = PQgetvalue(res, 0, 0);
|
|
+ tmplname = PQgetvalue(res, 0, 1);
|
|
+
|
|
+ /* Make sure we are in proper schema */
|
|
+ selectSourceSchema(fout, dictinfo->dobj.namespace->dobj.name);
|
|
+
|
|
+ appendPQExpBuffer(q, "CREATE TEXT SEARCH DICTIONARY %s (\n",
|
|
+ fmtId(dictinfo->dobj.name));
|
|
+
|
|
+ appendPQExpBufferStr(q, " TEMPLATE = ");
|
|
+ if (strcmp(nspname, dictinfo->dobj.namespace->dobj.name) != 0)
|
|
+ appendPQExpBuffer(q, "%s.", fmtId(nspname));
|
|
+ appendPQExpBufferStr(q, fmtId(tmplname));
|
|
+
|
|
+ PQclear(res);
|
|
+
|
|
+ /* the dictinitoption can be dumped straight into the command */
|
|
+ if (dictinfo->dictinitoption)
|
|
+ appendPQExpBuffer(q, ",\n %s", dictinfo->dictinitoption);
|
|
+
|
|
+ appendPQExpBufferStr(q, " );\n");
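+
+ /*
+ * Illustration only (hypothetical dictionary name): the command built
+ * above comes out as, e.g.
+ *   CREATE TEXT SEARCH DICTIONARY my_dict (
+ *       TEMPLATE = snowball,
+ *       language = 'english', stopwords = 'english' );
+ * where everything after the template is the raw dictinitoption text.
+ */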
+
|
|
+ /*
|
|
+ * DROP must be fully qualified in case same name appears in pg_catalog
|
|
+ */
|
|
+ appendPQExpBuffer(delq, "DROP TEXT SEARCH DICTIONARY %s",
|
|
+ fmtId(dictinfo->dobj.namespace->dobj.name));
|
|
+ appendPQExpBuffer(delq, ".%s;\n",
|
|
+ fmtId(dictinfo->dobj.name));
|
|
+
|
|
+ appendPQExpBuffer(labelq, "TEXT SEARCH DICTIONARY %s",
|
|
+ fmtId(dictinfo->dobj.name));
|
|
+
|
|
+ if (binary_upgrade)
|
|
+ binary_upgrade_extension_member(q, &dictinfo->dobj, labelq->data);
|
|
+
|
|
+ ArchiveEntry(fout, dictinfo->dobj.catId, dictinfo->dobj.dumpId,
|
|
+ dictinfo->dobj.name,
|
|
+ dictinfo->dobj.namespace->dobj.name,
|
|
+ NULL,
|
|
+ dictinfo->rolname,
|
|
+ false, "TEXT SEARCH DICTIONARY", SECTION_PRE_DATA,
|
|
+ q->data, delq->data, NULL,
|
|
+ NULL, 0,
|
|
+ NULL, NULL);
|
|
+
|
|
+ /* Dump Dictionary Comments */
|
|
+ dumpComment(fout, labelq->data,
|
|
+ NULL, dictinfo->rolname,
|
|
+ dictinfo->dobj.catId, 0, dictinfo->dobj.dumpId);
|
|
+
|
|
+ destroyPQExpBuffer(q);
|
|
+ destroyPQExpBuffer(delq);
|
|
+ destroyPQExpBuffer(labelq);
|
|
+ destroyPQExpBuffer(query);
|
|
+}
|
|
+
|
|
+/*
|
|
+ * dumpTSTemplate
|
|
+ * write out a single text search template
|
|
+ */
|
|
+static void
|
|
+dumpTSTemplate(Archive *fout, TSTemplateInfo *tmplinfo)
|
|
+{
|
|
+ PQExpBuffer q;
|
|
+ PQExpBuffer delq;
|
|
+ PQExpBuffer labelq;
|
|
+
|
|
+ /* Skip if not to be dumped */
|
|
+ if (!tmplinfo->dobj.dump || dataOnly)
|
|
+ return;
|
|
+
|
|
+ q = createPQExpBuffer();
|
|
+ delq = createPQExpBuffer();
|
|
+ labelq = createPQExpBuffer();
|
|
+
|
|
+ /* Make sure we are in proper schema */
|
|
+ selectSourceSchema(fout, tmplinfo->dobj.namespace->dobj.name);
|
|
+
|
|
+ appendPQExpBuffer(q, "CREATE TEXT SEARCH TEMPLATE %s (\n",
|
|
+ fmtId(tmplinfo->dobj.name));
|
|
+
|
|
+ if (tmplinfo->tmplinit != InvalidOid)
|
|
+ appendPQExpBuffer(q, " INIT = %s,\n",
|
|
+ convertTSFunction(fout, tmplinfo->tmplinit));
|
|
+ appendPQExpBuffer(q, " LEXIZE = %s );\n",
|
|
+ convertTSFunction(fout, tmplinfo->tmpllexize));
|
|
+
|
|
+ /*
|
|
+ * DROP must be fully qualified in case same name appears in pg_catalog
|
|
+ */
|
|
+ appendPQExpBuffer(delq, "DROP TEXT SEARCH TEMPLATE %s",
|
|
+ fmtId(tmplinfo->dobj.namespace->dobj.name));
|
|
+ appendPQExpBuffer(delq, ".%s;\n",
|
|
+ fmtId(tmplinfo->dobj.name));
|
|
+
|
|
+ appendPQExpBuffer(labelq, "TEXT SEARCH TEMPLATE %s",
|
|
+ fmtId(tmplinfo->dobj.name));
|
|
+
|
|
+ if (binary_upgrade)
|
|
+ binary_upgrade_extension_member(q, &tmplinfo->dobj, labelq->data);
|
|
+
|
|
+ ArchiveEntry(fout, tmplinfo->dobj.catId, tmplinfo->dobj.dumpId,
|
|
+ tmplinfo->dobj.name,
|
|
+ tmplinfo->dobj.namespace->dobj.name,
|
|
+ NULL,
|
|
+ "",
|
|
+ false, "TEXT SEARCH TEMPLATE", SECTION_PRE_DATA,
|
|
+ q->data, delq->data, NULL,
|
|
+ NULL, 0,
|
|
+ NULL, NULL);
|
|
+
|
|
+ /* Dump Template Comments */
|
|
+ dumpComment(fout, labelq->data,
|
|
+ NULL, "",
|
|
+ tmplinfo->dobj.catId, 0, tmplinfo->dobj.dumpId);
|
|
+
|
|
+ destroyPQExpBuffer(q);
|
|
+ destroyPQExpBuffer(delq);
|
|
+ destroyPQExpBuffer(labelq);
|
|
+}
|
|
+
|
|
+/*
|
|
+ * dumpTSConfig
|
|
+ * write out a single text search configuration
|
|
+ */
|
|
+static void
|
|
+dumpTSConfig(Archive *fout, TSConfigInfo *cfginfo)
|
|
+{
|
|
+ PQExpBuffer q;
|
|
+ PQExpBuffer delq;
|
|
+ PQExpBuffer labelq;
|
|
+ PQExpBuffer query;
|
|
+ PGresult *res;
|
|
+ char *nspname;
|
|
+ char *prsname;
|
|
+ int ntups,
|
|
+ i;
|
|
+ int i_tokenname;
|
|
+ int i_dictname;
|
|
+
|
|
+ /* Skip if not to be dumped */
|
|
+ if (!cfginfo->dobj.dump || dataOnly)
|
|
+ return;
|
|
+
|
|
+ q = createPQExpBuffer();
|
|
+ delq = createPQExpBuffer();
|
|
+ labelq = createPQExpBuffer();
|
|
+ query = createPQExpBuffer();
|
|
+
|
|
+ /* Fetch name and namespace of the config's parser */
|
|
+ selectSourceSchema(fout, "pg_catalog");
|
|
+ appendPQExpBuffer(query, "SELECT nspname, prsname "
|
|
+ "FROM pg_ts_parser p, pg_namespace n "
|
|
+ "WHERE p.oid = '%u' AND n.oid = prsnamespace",
|
|
+ cfginfo->cfgparser);
|
|
+ res = ExecuteSqlQueryForSingleRow(fout, query->data);
|
|
+ nspname = PQgetvalue(res, 0, 0);
|
|
+ prsname = PQgetvalue(res, 0, 1);
|
|
+
|
|
+ /* Make sure we are in proper schema */
|
|
+ selectSourceSchema(fout, cfginfo->dobj.namespace->dobj.name);
|
|
+
|
|
+ appendPQExpBuffer(q, "CREATE TEXT SEARCH CONFIGURATION %s (\n",
|
|
+ fmtId(cfginfo->dobj.name));
|
|
+
|
|
+ appendPQExpBufferStr(q, " PARSER = ");
|
|
+ if (strcmp(nspname, cfginfo->dobj.namespace->dobj.name) != 0)
|
|
+ appendPQExpBuffer(q, "%s.", fmtId(nspname));
|
|
+ appendPQExpBuffer(q, "%s );\n", fmtId(prsname));
|
|
+
|
|
+ PQclear(res);
|
|
+
|
|
+ resetPQExpBuffer(query);
|
|
+ appendPQExpBuffer(query,
|
|
+ "SELECT \n"
|
|
+ " ( SELECT alias FROM pg_catalog.ts_token_type('%u'::pg_catalog.oid) AS t \n"
|
|
+ " WHERE t.tokid = m.maptokentype ) AS tokenname, \n"
|
|
+ " m.mapdict::pg_catalog.regdictionary AS dictname \n"
|
|
+ "FROM pg_catalog.pg_ts_config_map AS m \n"
|
|
+ "WHERE m.mapcfg = '%u' \n"
|
|
+ "ORDER BY m.mapcfg, m.maptokentype, m.mapseqno",
|
|
+ cfginfo->cfgparser, cfginfo->dobj.catId.oid);
|
|
+
|
|
+ res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
|
|
+ ntups = PQntuples(res);
|
|
+
|
|
+ i_tokenname = PQfnumber(res, "tokenname");
|
|
+ i_dictname = PQfnumber(res, "dictname");
|
|
+
|
|
+ for (i = 0; i < ntups; i++)
|
|
+ {
|
|
+ char *tokenname = PQgetvalue(res, i, i_tokenname);
|
|
+ char *dictname = PQgetvalue(res, i, i_dictname);
|
|
+
|
|
+ if (i == 0 ||
|
|
+ strcmp(tokenname, PQgetvalue(res, i - 1, i_tokenname)) != 0)
|
|
+ {
|
|
+ /* starting a new token type, so start a new command */
|
|
+ if (i > 0)
|
|
+ appendPQExpBufferStr(q, ";\n");
|
|
+ appendPQExpBuffer(q, "\nALTER TEXT SEARCH CONFIGURATION %s\n",
|
|
+ fmtId(cfginfo->dobj.name));
|
|
+ /* tokenname needs quoting, dictname does NOT */
|
|
+ appendPQExpBuffer(q, " ADD MAPPING FOR %s WITH %s",
|
|
+ fmtId(tokenname), dictname);
|
|
+ }
|
|
+ else
|
|
+ appendPQExpBuffer(q, ", %s", dictname);
|
|
+ }
|
|
+
|
|
+ if (ntups > 0)
|
|
+ appendPQExpBufferStr(q, ";\n");
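+
+ /*
+ * Illustration only (hypothetical configuration name): each generated
+ * mapping command reads like
+ *   ALTER TEXT SEARCH CONFIGURATION my_cfg
+ *       ADD MAPPING FOR asciiword WITH english_stem;
+ * with further dictionaries appended, comma-separated, when one token
+ * type maps to several of them.
+ */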
+
|
|
+ PQclear(res);
|
|
+
|
|
+ /*
|
|
+ * DROP must be fully qualified in case same name appears in pg_catalog
|
|
+ */
|
|
+ appendPQExpBuffer(delq, "DROP TEXT SEARCH CONFIGURATION %s",
|
|
+ fmtId(cfginfo->dobj.namespace->dobj.name));
|
|
+ appendPQExpBuffer(delq, ".%s;\n",
|
|
+ fmtId(cfginfo->dobj.name));
|
|
+
|
|
+ appendPQExpBuffer(labelq, "TEXT SEARCH CONFIGURATION %s",
|
|
+ fmtId(cfginfo->dobj.name));
|
|
+
|
|
+ if (binary_upgrade)
|
|
+ binary_upgrade_extension_member(q, &cfginfo->dobj, labelq->data);
|
|
+
|
|
+ ArchiveEntry(fout, cfginfo->dobj.catId, cfginfo->dobj.dumpId,
|
|
+ cfginfo->dobj.name,
|
|
+ cfginfo->dobj.namespace->dobj.name,
|
|
+ NULL,
|
|
+ cfginfo->rolname,
|
|
+ false, "TEXT SEARCH CONFIGURATION", SECTION_PRE_DATA,
|
|
+ q->data, delq->data, NULL,
|
|
+ NULL, 0,
|
|
+ NULL, NULL);
|
|
+
|
|
+ /* Dump Configuration Comments */
|
|
+ dumpComment(fout, labelq->data,
|
|
+ NULL, cfginfo->rolname,
|
|
+ cfginfo->dobj.catId, 0, cfginfo->dobj.dumpId);
|
|
+
|
|
+ destroyPQExpBuffer(q);
|
|
+ destroyPQExpBuffer(delq);
|
|
+ destroyPQExpBuffer(labelq);
|
|
+ destroyPQExpBuffer(query);
|
|
+}
|
|
+
|
|
+/*
|
|
+ * dumpForeignDataWrapper
|
|
+ * write out a single foreign-data wrapper definition
|
|
+ */
|
|
+static void
|
|
+dumpForeignDataWrapper(Archive *fout, FdwInfo *fdwinfo)
|
|
+{
|
|
+ PQExpBuffer q;
|
|
+ PQExpBuffer delq;
|
|
+ PQExpBuffer labelq;
|
|
+ char *qfdwname;
|
|
+
|
|
+ /* Skip if not to be dumped */
|
|
+ if (!fdwinfo->dobj.dump || dataOnly)
|
|
+ return;
|
|
+
|
|
+ /*
|
|
+ * FDWs that belong to an extension are dumped based on their "dump"
|
|
+ * field. Otherwise omit them if we are only dumping some specific object.
|
|
+ */
|
|
+ if (!fdwinfo->dobj.ext_member)
|
|
+ if (!include_everything)
|
|
+ return;
|
|
+
|
|
+ q = createPQExpBuffer();
|
|
+ delq = createPQExpBuffer();
|
|
+ labelq = createPQExpBuffer();
|
|
+
|
|
+ qfdwname = pg_strdup(fmtId(fdwinfo->dobj.name));
|
|
+
|
|
+ appendPQExpBuffer(q, "CREATE FOREIGN DATA WRAPPER %s",
|
|
+ qfdwname);
|
|
+
|
|
+ if (strcmp(fdwinfo->fdwhandler, "-") != 0)
|
|
+ appendPQExpBuffer(q, " HANDLER %s", fdwinfo->fdwhandler);
|
|
+
|
|
+ if (strcmp(fdwinfo->fdwvalidator, "-") != 0)
|
|
+ appendPQExpBuffer(q, " VALIDATOR %s", fdwinfo->fdwvalidator);
|
|
+
|
|
+ if (strlen(fdwinfo->fdwoptions) > 0)
|
|
+ appendPQExpBuffer(q, " OPTIONS (\n %s\n)", fdwinfo->fdwoptions);
|
|
+
|
|
+ appendPQExpBufferStr(q, ";\n");
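+
+ /*
+ * Illustration only: for a wrapper such as postgres_fdw the generated
+ * command would read roughly
+ *   CREATE FOREIGN DATA WRAPPER postgres_fdw
+ *       HANDLER postgres_fdw_handler VALIDATOR postgres_fdw_validator;
+ * HANDLER, VALIDATOR and OPTIONS clauses appear only when set.
+ */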
+
|
|
+ appendPQExpBuffer(delq, "DROP FOREIGN DATA WRAPPER %s;\n",
|
|
+ qfdwname);
|
|
+
|
|
+ appendPQExpBuffer(labelq, "FOREIGN DATA WRAPPER %s",
|
|
+ qfdwname);
|
|
+
|
|
+ if (binary_upgrade)
|
|
+ binary_upgrade_extension_member(q, &fdwinfo->dobj, labelq->data);
|
|
+
|
|
+ ArchiveEntry(fout, fdwinfo->dobj.catId, fdwinfo->dobj.dumpId,
|
|
+ fdwinfo->dobj.name,
|
|
+ NULL,
|
|
+ NULL,
|
|
+ fdwinfo->rolname,
|
|
+ false, "FOREIGN DATA WRAPPER", SECTION_PRE_DATA,
|
|
+ q->data, delq->data, NULL,
|
|
+ NULL, 0,
|
|
+ NULL, NULL);
|
|
+
|
|
+ /* Handle the ACL */
|
|
+ dumpACL(fout, fdwinfo->dobj.catId, fdwinfo->dobj.dumpId,
|
|
+ "FOREIGN DATA WRAPPER",
|
|
+ qfdwname, NULL, fdwinfo->dobj.name,
|
|
+ NULL, fdwinfo->rolname,
|
|
+ fdwinfo->fdwacl);
|
|
+
|
|
+ /* Dump Foreign Data Wrapper Comments */
|
|
+ dumpComment(fout, labelq->data,
|
|
+ NULL, fdwinfo->rolname,
|
|
+ fdwinfo->dobj.catId, 0, fdwinfo->dobj.dumpId);
|
|
+
|
|
+ free(qfdwname);
|
|
+
|
|
+ destroyPQExpBuffer(q);
|
|
+ destroyPQExpBuffer(delq);
|
|
+ destroyPQExpBuffer(labelq);
|
|
+}
|
|
+
|
|
+/*
|
|
+ * dumpForeignServer
|
|
+ * write out a foreign server definition
|
|
+ */
|
|
+static void
|
|
+dumpForeignServer(Archive *fout, ForeignServerInfo *srvinfo)
|
|
+{
|
|
+ PQExpBuffer q;
|
|
+ PQExpBuffer delq;
|
|
+ PQExpBuffer labelq;
|
|
+ PQExpBuffer query;
|
|
+ PGresult *res;
|
|
+ char *qsrvname;
|
|
+ char *fdwname;
|
|
+
|
|
+ /* Skip if not to be dumped */
|
|
+ if (!srvinfo->dobj.dump || dataOnly || !include_everything)
|
|
+ return;
|
|
+
|
|
+ q = createPQExpBuffer();
|
|
+ delq = createPQExpBuffer();
|
|
+ labelq = createPQExpBuffer();
|
|
+ query = createPQExpBuffer();
|
|
+
|
|
+ qsrvname = pg_strdup(fmtId(srvinfo->dobj.name));
|
|
+
|
|
+ /* look up the foreign-data wrapper */
|
|
+ selectSourceSchema(fout, "pg_catalog");
|
|
+ appendPQExpBuffer(query, "SELECT fdwname "
|
|
+ "FROM pg_foreign_data_wrapper w "
|
|
+ "WHERE w.oid = '%u'",
|
|
+ srvinfo->srvfdw);
|
|
+ res = ExecuteSqlQueryForSingleRow(fout, query->data);
|
|
+ fdwname = PQgetvalue(res, 0, 0);
|
|
+
|
|
+ appendPQExpBuffer(q, "CREATE SERVER %s", qsrvname);
|
|
+ if (srvinfo->srvtype && strlen(srvinfo->srvtype) > 0)
|
|
+ {
|
|
+ appendPQExpBufferStr(q, " TYPE ");
|
|
+ appendStringLiteralAH(q, srvinfo->srvtype, fout);
|
|
+ }
|
|
+ if (srvinfo->srvversion && strlen(srvinfo->srvversion) > 0)
|
|
+ {
|
|
+ appendPQExpBufferStr(q, " VERSION ");
|
|
+ appendStringLiteralAH(q, srvinfo->srvversion, fout);
|
|
+ }
|
|
+
|
|
+ appendPQExpBufferStr(q, " FOREIGN DATA WRAPPER ");
|
|
+ appendPQExpBufferStr(q, fmtId(fdwname));
|
|
+
|
|
+ if (srvinfo->srvoptions && strlen(srvinfo->srvoptions) > 0)
|
|
+ appendPQExpBuffer(q, " OPTIONS (\n %s\n)", srvinfo->srvoptions);
|
|
+
|
|
+ appendPQExpBufferStr(q, ";\n");
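+
+ /*
+ * Illustration only (hypothetical names): the command built above comes
+ * out as, e.g.
+ *   CREATE SERVER myserver FOREIGN DATA WRAPPER postgres_fdw OPTIONS (
+ *       host 'remote.example.com',
+ *       dbname 'otherdb'
+ *   );
+ * TYPE and VERSION clauses are included only when the server has them.
+ */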
+
|
|
+ appendPQExpBuffer(delq, "DROP SERVER %s;\n",
|
|
+ qsrvname);
|
|
+
|
|
+ appendPQExpBuffer(labelq, "SERVER %s", qsrvname);
|
|
+
|
|
+ if (binary_upgrade)
|
|
+ binary_upgrade_extension_member(q, &srvinfo->dobj, labelq->data);
|
|
+
|
|
+ ArchiveEntry(fout, srvinfo->dobj.catId, srvinfo->dobj.dumpId,
|
|
+ srvinfo->dobj.name,
|
|
+ NULL,
|
|
+ NULL,
|
|
+ srvinfo->rolname,
|
|
+ false, "SERVER", SECTION_PRE_DATA,
|
|
+ q->data, delq->data, NULL,
|
|
+ NULL, 0,
|
|
+ NULL, NULL);
|
|
+
|
|
+ /* Handle the ACL */
|
|
+ dumpACL(fout, srvinfo->dobj.catId, srvinfo->dobj.dumpId,
|
|
+ "FOREIGN SERVER",
|
|
+ qsrvname, NULL, srvinfo->dobj.name,
|
|
+ NULL, srvinfo->rolname,
|
|
+ srvinfo->srvacl);
|
|
+
|
|
+ /* Dump user mappings */
|
|
+ dumpUserMappings(fout,
|
|
+ srvinfo->dobj.name, NULL,
|
|
+ srvinfo->rolname,
|
|
+ srvinfo->dobj.catId, srvinfo->dobj.dumpId);
|
|
+
|
|
+ /* Dump Foreign Server Comments */
|
|
+ dumpComment(fout, labelq->data,
|
|
+ NULL, srvinfo->rolname,
|
|
+ srvinfo->dobj.catId, 0, srvinfo->dobj.dumpId);
|
|
+
|
|
+ free(qsrvname);
|
|
+
|
|
+ destroyPQExpBuffer(q);
|
|
+ destroyPQExpBuffer(delq);
|
|
+ destroyPQExpBuffer(labelq);
|
|
+}
|
|
+
|
|
+/*
|
|
+ * dumpUserMappings
|
|
+ *
|
|
+ * This routine is used to dump any user mappings associated with the
|
|
+ * server handed to this routine. Should be called after ArchiveEntry()
|
|
+ * for the server.
|
|
+ */
|
|
+static void
|
|
+dumpUserMappings(Archive *fout,
|
|
+ const char *servername, const char *namespace,
|
|
+ const char *owner,
|
|
+ CatalogId catalogId, DumpId dumpId)
|
|
+{
|
|
+ PQExpBuffer q;
|
|
+ PQExpBuffer delq;
|
|
+ PQExpBuffer query;
|
|
+ PQExpBuffer tag;
|
|
+ PGresult *res;
|
|
+ int ntups;
|
|
+ int i_usename;
|
|
+ int i_umoptions;
|
|
+ int i;
|
|
+
|
|
+ q = createPQExpBuffer();
|
|
+ tag = createPQExpBuffer();
|
|
+ delq = createPQExpBuffer();
|
|
+ query = createPQExpBuffer();
|
|
+
|
|
+ /*
|
|
+ * We read from the publicly accessible view pg_user_mappings, so as not
|
|
+ * to fail if run by a non-superuser. Note that the view will show
|
|
+ * umoptions as null if the user hasn't got privileges for the associated
|
|
+ * server; this means that pg_dump will dump such a mapping, but with no
|
|
+ * OPTIONS clause. A possible alternative is to skip such mappings
|
|
+ * altogether, but it's not clear that that's an improvement.
|
|
+ */
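+
+ /*
+ * Illustration only (hypothetical names): a mapping with visible options
+ * is dumped as
+ *   CREATE USER MAPPING FOR alice SERVER myserver OPTIONS (
+ *       "user" 'alice'
+ *   );
+ * whereas a mapping whose umoptions are hidden from us comes out with no
+ * OPTIONS clause at all.
+ */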
+ selectSourceSchema(fout, "pg_catalog");
|
|
+
|
|
+ appendPQExpBuffer(query,
|
|
+ "SELECT usename, "
|
|
+ "array_to_string(ARRAY("
|
|
+ "SELECT quote_ident(option_name) || ' ' || "
|
|
+ "quote_literal(option_value) "
|
|
+ "FROM pg_options_to_table(umoptions) "
|
|
+ "ORDER BY option_name"
|
|
+ "), E',\n ') AS umoptions "
|
|
+ "FROM pg_user_mappings "
|
|
+ "WHERE srvid = '%u' "
|
|
+ "ORDER BY usename",
|
|
+ catalogId.oid);
|
|
+
|
|
+ res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
|
|
+
|
|
+ ntups = PQntuples(res);
|
|
+ i_usename = PQfnumber(res, "usename");
|
|
+ i_umoptions = PQfnumber(res, "umoptions");
|
|
+
|
|
+ for (i = 0; i < ntups; i++)
|
|
+ {
|
|
+ char *usename;
|
|
+ char *umoptions;
|
|
+
|
|
+ usename = PQgetvalue(res, i, i_usename);
|
|
+ umoptions = PQgetvalue(res, i, i_umoptions);
|
|
+
|
|
+ resetPQExpBuffer(q);
|
|
+ appendPQExpBuffer(q, "CREATE USER MAPPING FOR %s", fmtId(usename));
|
|
+ appendPQExpBuffer(q, " SERVER %s", fmtId(servername));
|
|
+
|
|
+ if (umoptions && strlen(umoptions) > 0)
|
|
+ appendPQExpBuffer(q, " OPTIONS (\n %s\n)", umoptions);
|
|
+
|
|
+ appendPQExpBufferStr(q, ";\n");
|
|
+
|
|
+ resetPQExpBuffer(delq);
|
|
+ appendPQExpBuffer(delq, "DROP USER MAPPING FOR %s", fmtId(usename));
|
|
+ appendPQExpBuffer(delq, " SERVER %s;\n", fmtId(servername));
|
|
+
|
|
+ resetPQExpBuffer(tag);
|
|
+ appendPQExpBuffer(tag, "USER MAPPING %s SERVER %s",
|
|
+ usename, servername);
|
|
+
|
|
+ ArchiveEntry(fout, nilCatalogId, createDumpId(),
|
|
+ tag->data,
|
|
+ namespace,
|
|
+ NULL,
|
|
+ owner, false,
|
|
+ "USER MAPPING", SECTION_PRE_DATA,
|
|
+ q->data, delq->data, NULL,
|
|
+ &dumpId, 1,
|
|
+ NULL, NULL);
|
|
+ }
|
|
+
|
|
+ PQclear(res);
|
|
+
|
|
+ destroyPQExpBuffer(query);
|
|
+ destroyPQExpBuffer(delq);
|
|
+ destroyPQExpBuffer(tag);
|
|
+ destroyPQExpBuffer(q);
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Write out default privileges information
|
|
+ */
|
|
+static void
|
|
+dumpDefaultACL(Archive *fout, DefaultACLInfo *daclinfo)
|
|
+{
|
|
+ PQExpBuffer q;
|
|
+ PQExpBuffer tag;
|
|
+ const char *type;
|
|
+
|
|
+ /* Skip if not to be dumped */
|
|
+ if (!daclinfo->dobj.dump || dataOnly || aclsSkip)
|
|
+ return;
|
|
+
|
|
+ q = createPQExpBuffer();
|
|
+ tag = createPQExpBuffer();
|
|
+
|
|
+ switch (daclinfo->defaclobjtype)
|
|
+ {
|
|
+ case DEFACLOBJ_RELATION:
|
|
+ type = "TABLES";
|
|
+ break;
|
|
+ case DEFACLOBJ_SEQUENCE:
|
|
+ type = "SEQUENCES";
|
|
+ break;
|
|
+ case DEFACLOBJ_FUNCTION:
|
|
+ type = "FUNCTIONS";
|
|
+ break;
|
|
+ case DEFACLOBJ_TYPE:
|
|
+ type = "TYPES";
|
|
+ break;
|
|
+ default:
|
|
+ /* shouldn't get here */
|
|
+ exit_horribly(NULL,
|
|
+ "unrecognized object type in default privileges: %d\n",
|
|
+ (int) daclinfo->defaclobjtype);
|
|
+ type = ""; /* keep compiler quiet */
|
|
+ }
|
|
+
|
|
+ appendPQExpBuffer(tag, "DEFAULT PRIVILEGES FOR %s", type);
|
|
+
|
|
+ /* build the actual command(s) for this tuple */
|
|
+ if (!buildDefaultACLCommands(type,
|
|
+ daclinfo->dobj.namespace != NULL ?
|
|
+ daclinfo->dobj.namespace->dobj.name : NULL,
|
|
+ daclinfo->defaclacl,
|
|
+ daclinfo->defaclrole,
|
|
+ fout->remoteVersion,
|
|
+ q))
|
|
+ exit_horribly(NULL, "could not parse default ACL list (%s)\n",
|
|
+ daclinfo->defaclacl);
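+
+ /*
+ * Illustration only (hypothetical role/schema names): the commands that
+ * buildDefaultACLCommands() produces look like
+ *   ALTER DEFAULT PRIVILEGES FOR ROLE alice IN SCHEMA myschema
+ *       GRANT SELECT ON TABLES TO bob;
+ * with the IN SCHEMA clause present only for per-schema entries.
+ */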
+
|
|
+ ArchiveEntry(fout, daclinfo->dobj.catId, daclinfo->dobj.dumpId,
|
|
+ tag->data,
|
|
+ daclinfo->dobj.namespace ? daclinfo->dobj.namespace->dobj.name : NULL,
|
|
+ NULL,
|
|
+ daclinfo->defaclrole,
|
|
+ false, "DEFAULT ACL", SECTION_POST_DATA,
|
|
+ q->data, "", NULL,
|
|
+ NULL, 0,
|
|
+ NULL, NULL);
|
|
+
|
|
+ destroyPQExpBuffer(tag);
|
|
+ destroyPQExpBuffer(q);
|
|
+}
|
|
+
|
|
+/*----------
|
|
+ * Write out grant/revoke information
|
|
+ *
|
|
+ * 'objCatId' is the catalog ID of the underlying object.
|
|
+ * 'objDumpId' is the dump ID of the underlying object.
|
|
+ * 'type' must be one of
|
|
+ * TABLE, SEQUENCE, FUNCTION, LANGUAGE, SCHEMA, DATABASE, TABLESPACE,
|
|
+ * FOREIGN DATA WRAPPER, SERVER, or LARGE OBJECT.
|
|
+ * 'name' is the formatted name of the object. Must be quoted etc. already.
|
|
+ * 'subname' is the formatted name of the sub-object, if any. Must be quoted.
|
|
+ * 'tag' is the tag for the archive entry (typ. unquoted name of object).
|
|
+ * 'nspname' is the namespace the object is in (NULL if none).
|
|
+ * 'owner' is the owner, NULL if there is no owner (for languages).
|
|
+ * 'acls' is the string read out of the fooacl system catalog field;
|
|
+ * it will be parsed here.
|
|
+ *----------
|
|
+ */
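+
+ /*
+ * For illustration (hypothetical names): an 'acls' string such as
+ *   {alice=arwdDxt/alice,bob=r/alice}
+ * is expanded by buildACLCommands() into GRANT/REVOKE statements like
+ *   GRANT SELECT ON TABLE mytable TO bob;
+ */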
+static void
|
|
+dumpACL(Archive *fout, CatalogId objCatId, DumpId objDumpId,
|
|
+ const char *type, const char *name, const char *subname,
|
|
+ const char *tag, const char *nspname, const char *owner,
|
|
+ const char *acls)
|
|
+{
|
|
+ PQExpBuffer sql;
|
|
+
|
|
+ /* Do nothing if ACL dump is not enabled */
|
|
+ if (aclsSkip)
|
|
+ return;
|
|
+
|
|
+ /* --data-only skips ACLs *except* BLOB ACLs */
|
|
+ if (dataOnly && strcmp(type, "LARGE OBJECT") != 0)
|
|
+ return;
|
|
+
|
|
+ sql = createPQExpBuffer();
|
|
+
|
|
+ if (!buildACLCommands(name, subname, type, acls, owner,
|
|
+ "", fout->remoteVersion, sql))
|
|
+ exit_horribly(NULL,
|
|
+ "could not parse ACL list (%s) for object \"%s\" (%s)\n",
|
|
+ acls, name, type);
|
|
+
|
|
+ if (sql->len > 0)
|
|
+ ArchiveEntry(fout, nilCatalogId, createDumpId(),
|
|
+ tag, nspname,
|
|
+ NULL,
|
|
+ owner ? owner : "",
|
|
+ false, "ACL", SECTION_NONE,
|
|
+ sql->data, "", NULL,
|
|
+ &(objDumpId), 1,
|
|
+ NULL, NULL);
|
|
+
|
|
+ destroyPQExpBuffer(sql);
|
|
+}
|
|
+
|
|
+/*
|
|
+ * dumpSecLabel
|
|
+ *
|
|
+ * This routine is used to dump any security labels associated with the
|
|
+ * object handed to this routine. The routine takes a constant character
|
|
+ * string for the target part of the security-label command, plus
|
|
+ * the namespace and owner of the object (for labeling the ArchiveEntry),
|
|
+ * plus catalog ID and subid which are the lookup key for pg_seclabel,
|
|
+ * plus the dump ID for the object (for setting a dependency).
|
|
+ * If a matching pg_seclabel entry is found, it is dumped.
|
|
+ *
|
|
+ * Note: although this routine takes a dumpId for dependency purposes,
|
|
+ * that purpose is just to mark the dependency in the emitted dump file
|
|
+ * for possible future use by pg_restore. We do NOT use it for determining
|
|
+ * ordering of the label in the dump file, because this routine is called
|
|
+ * after dependency sorting occurs. This routine should be called just after
|
|
+ * calling ArchiveEntry() for the specified object.
|
|
+ */
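+
+ /*
+ * For illustration (hypothetical provider, target and label): the
+ * statements emitted here look like
+ *   SECURITY LABEL FOR selinux ON TABLE mytable IS '...';
+ * one per matching pg_seclabel row.
+ */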
+static void
|
|
+dumpSecLabel(Archive *fout, const char *target,
|
|
+ const char *namespace, const char *owner,
|
|
+ CatalogId catalogId, int subid, DumpId dumpId)
|
|
+{
|
|
+ SecLabelItem *labels;
|
|
+ int nlabels;
|
|
+ int i;
|
|
+ PQExpBuffer query;
|
|
+
|
|
+ /* do nothing, if --no-security-labels is supplied */
|
|
+ if (no_security_labels)
|
|
+ return;
|
|
+
|
|
+ /* Security labels are schema not data ... except blob labels are data */
+ if (strncmp(target, "LARGE OBJECT ", 13) != 0)
|
|
+ {
|
|
+ if (dataOnly)
|
|
+ return;
|
|
+ }
|
|
+ else
|
|
+ {
|
|
+ if (schemaOnly)
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ /* Search for security labels associated with catalogId, using table */
|
|
+ nlabels = findSecLabels(fout, catalogId.tableoid, catalogId.oid, &labels);
|
|
+
|
|
+ query = createPQExpBuffer();
|
|
+
|
|
+ for (i = 0; i < nlabels; i++)
|
|
+ {
|
|
+ /*
|
|
+ * Ignore label entries for which the subid doesn't match.
|
|
+ */
|
|
+ if (labels[i].objsubid != subid)
|
|
+ continue;
|
|
+
|
|
+ appendPQExpBuffer(query,
|
|
+ "SECURITY LABEL FOR %s ON %s IS ",
|
|
+ fmtId(labels[i].provider), target);
|
|
+ appendStringLiteralAH(query, labels[i].label, fout);
+ appendPQExpBufferStr(query, ";\n");
+ }
+
+ if (query->len > 0)
+ {
+ ArchiveEntry(fout, nilCatalogId, createDumpId(),
+ target, namespace, NULL, owner,
+ false, "SECURITY LABEL", SECTION_NONE,
+ query->data, "", NULL,
+ &(dumpId), 1,
+ NULL, NULL);
+ }
+ destroyPQExpBuffer(query);
+}
+
+/*
+ * dumpTableSecLabel
+ *
+ * As above, but dump security label for both the specified table (or view)
+ * and its columns.
+ */
+static void
+dumpTableSecLabel(Archive *fout, TableInfo *tbinfo, const char *reltypename)
+{
+ SecLabelItem *labels;
+ int nlabels;
+ int i;
+ PQExpBuffer query;
+ PQExpBuffer target;
+
+ /* do nothing, if --no-security-labels is supplied */
+ if (no_security_labels)
+ return;
+
+ /* Security labels are schema, not data */
+ if (dataOnly)
+ return;
+
+ /* Search for security labels associated with relation, using table */
+ nlabels = findSecLabels(fout,
+ tbinfo->dobj.catId.tableoid,
+ tbinfo->dobj.catId.oid,
+ &labels);
+
+ /* If security labels exist, build SECURITY LABEL statements */
+ if (nlabels <= 0)
+ return;
+
+ query = createPQExpBuffer();
+ target = createPQExpBuffer();
+
+ for (i = 0; i < nlabels; i++)
+ {
+ const char *colname;
+ const char *provider = labels[i].provider;
+ const char *label = labels[i].label;
+ int objsubid = labels[i].objsubid;
+
+ resetPQExpBuffer(target);
+ if (objsubid == 0)
+ {
+ appendPQExpBuffer(target, "%s %s", reltypename,
+ fmtId(tbinfo->dobj.name));
+ }
+ else
+ {
+ colname = getAttrName(objsubid, tbinfo);
+ /* first fmtId result must be consumed before calling it again */
+ appendPQExpBuffer(target, "COLUMN %s", fmtId(tbinfo->dobj.name));
+ appendPQExpBuffer(target, ".%s", fmtId(colname));
+ }
+ appendPQExpBuffer(query, "SECURITY LABEL FOR %s ON %s IS ",
+ fmtId(provider), target->data);
+ appendStringLiteralAH(query, label, fout);
+ appendPQExpBufferStr(query, ";\n");
+ }
+ if (query->len > 0)
+ {
+ resetPQExpBuffer(target);
+ appendPQExpBuffer(target, "%s %s", reltypename,
+ fmtId(tbinfo->dobj.name));
+ ArchiveEntry(fout, nilCatalogId, createDumpId(),
+ target->data,
+ tbinfo->dobj.namespace->dobj.name,
+ NULL, tbinfo->rolname,
+ false, "SECURITY LABEL", SECTION_NONE,
+ query->data, "", NULL,
+ &(tbinfo->dobj.dumpId), 1,
+ NULL, NULL);
+ }
+ destroyPQExpBuffer(query);
+ destroyPQExpBuffer(target);
+}
+
+/*
+ * findSecLabels
+ *
+ * Find the security label(s), if any, associated with the given object.
+ * All the objsubid values associated with the given classoid/objoid are
+ * found with one search.
+ */
+static int
+findSecLabels(Archive *fout, Oid classoid, Oid objoid, SecLabelItem **items)
+{
+ /* static storage for table of security labels */
+ static SecLabelItem *labels = NULL;
+ static int nlabels = -1;
+
+ SecLabelItem *middle = NULL;
+ SecLabelItem *low;
+ SecLabelItem *high;
+ int nmatch;
+
+ /* Get security labels if we didn't already */
+ if (nlabels < 0)
+ nlabels = collectSecLabels(fout, &labels);
+
+ if (nlabels <= 0) /* no labels, so no match is possible */
+ {
+ *items = NULL;
+ return 0;
+ }
+
+ /*
+ * Do binary search to find some item matching the object.
+ */
+ low = &labels[0];
+ high = &labels[nlabels - 1];
+ while (low <= high)
+ {
+ middle = low + (high - low) / 2;
+
+ if (classoid < middle->classoid)
+ high = middle - 1;
+ else if (classoid > middle->classoid)
+ low = middle + 1;
+ else if (objoid < middle->objoid)
+ high = middle - 1;
+ else if (objoid > middle->objoid)
+ low = middle + 1;
+ else
+ break; /* found a match */
+ }
+
+ if (low > high) /* no matches */
+ {
+ *items = NULL;
+ return 0;
+ }
+
+ /*
+ * Now determine how many items match the object. The search loop
+ * invariant still holds: only items between low and high inclusive could
+ * match.
+ */
+ nmatch = 1;
+ while (middle > low)
+ {
+ if (classoid != middle[-1].classoid ||
+ objoid != middle[-1].objoid)
+ break;
+ middle--;
+ nmatch++;
+ }
+
+ *items = middle;
+
+ middle += nmatch;
+ while (middle <= high)
+ {
+ if (classoid != middle->classoid ||
+ objoid != middle->objoid)
+ break;
+ middle++;
+ nmatch++;
+ }
+
+ return nmatch;
+}
+
+/*
+ * collectSecLabels
+ *
+ * Construct a table of all security labels available for database objects.
+ * It's much faster to pull them all at once.
+ *
+ * The table is sorted by classoid/objid/objsubid for speed in lookup.
+ */
+static int
+collectSecLabels(Archive *fout, SecLabelItem **items)
+{
+ PGresult *res;
+ PQExpBuffer query;
+ int i_label;
+ int i_provider;
+ int i_classoid;
|
|
+ int i_objoid;
|
|
+ int i_objsubid;
|
|
+ int ntups;
|
|
+ int i;
|
|
+ SecLabelItem *labels;
|
|
+
|
|
+ query = createPQExpBuffer();
|
|
+
|
|
+ appendPQExpBufferStr(query,
|
|
+ "SELECT label, provider, classoid, objoid, objsubid "
|
|
+ "FROM pg_catalog.pg_seclabel "
|
|
+ "ORDER BY classoid, objoid, objsubid");
|
|
+
|
|
+ res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
|
|
+
|
|
+ /* Construct lookup table containing OIDs in numeric form */
|
|
+ i_label = PQfnumber(res, "label");
|
|
+ i_provider = PQfnumber(res, "provider");
|
|
+ i_classoid = PQfnumber(res, "classoid");
|
|
+ i_objoid = PQfnumber(res, "objoid");
|
|
+ i_objsubid = PQfnumber(res, "objsubid");
|
|
+
|
|
+ ntups = PQntuples(res);
|
|
+
|
|
+ labels = (SecLabelItem *) pg_malloc(ntups * sizeof(SecLabelItem));
|
|
+
|
|
+ for (i = 0; i < ntups; i++)
|
|
+ {
|
|
+ labels[i].label = PQgetvalue(res, i, i_label);
|
|
+ labels[i].provider = PQgetvalue(res, i, i_provider);
|
|
+ labels[i].classoid = atooid(PQgetvalue(res, i, i_classoid));
|
|
+ labels[i].objoid = atooid(PQgetvalue(res, i, i_objoid));
|
|
+ labels[i].objsubid = atoi(PQgetvalue(res, i, i_objsubid));
|
|
+ }
|
|
+
|
|
+ /* Do NOT free the PGresult since we are keeping pointers into it */
|
|
+ destroyPQExpBuffer(query);
|
|
+
|
|
+ *items = labels;
|
|
+ return ntups;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * dumpTable
|
|
+ * write out to fout the declarations (not data) of a user-defined table
|
|
+ */
|
|
+static void
|
|
+dumpTable(Archive *fout, TableInfo *tbinfo)
|
|
+{
|
|
+ if (tbinfo->dobj.dump && !dataOnly)
|
|
+ {
|
|
+ char *namecopy;
|
|
+
|
|
+ if (tbinfo->relkind == RELKIND_SEQUENCE)
|
|
+ dumpSequence(fout, tbinfo);
|
|
+ else
|
|
+ dumpTableSchema(fout, tbinfo);
|
|
+
|
|
+ /* Handle the ACL here */
|
|
+ namecopy = pg_strdup(fmtId(tbinfo->dobj.name));
|
|
+ dumpACL(fout, tbinfo->dobj.catId, tbinfo->dobj.dumpId,
|
|
+ (tbinfo->relkind == RELKIND_SEQUENCE) ? "SEQUENCE" :
|
|
+ "TABLE",
|
|
+ namecopy, NULL, tbinfo->dobj.name,
|
|
+ tbinfo->dobj.namespace->dobj.name, tbinfo->rolname,
|
|
+ tbinfo->relacl);
|
|
+
|
|
+ /*
|
|
+ * Handle column ACLs, if any. Note: we pull these with a separate
|
|
+ * query rather than trying to fetch them during getTableAttrs, so
|
|
+ * that we won't miss ACLs on system columns.
|
|
+ */
|
|
+ if (fout->remoteVersion >= 80400)
|
|
+ {
|
|
+ PQExpBuffer query = createPQExpBuffer();
|
|
+ PGresult *res;
|
|
+ int i;
|
|
+
|
|
+ appendPQExpBuffer(query,
|
|
+ "SELECT attname, attacl FROM pg_catalog.pg_attribute "
|
|
+ "WHERE attrelid = '%u' AND NOT attisdropped AND attacl IS NOT NULL "
|
|
+ "ORDER BY attnum",
|
|
+ tbinfo->dobj.catId.oid);
|
|
+ res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
|
|
+
|
|
+ for (i = 0; i < PQntuples(res); i++)
|
|
+ {
|
|
+ char *attname = PQgetvalue(res, i, 0);
|
|
+ char *attacl = PQgetvalue(res, i, 1);
|
|
+ char *attnamecopy;
|
|
+ char *acltag;
|
|
+
|
|
+ attnamecopy = pg_strdup(fmtId(attname));
|
|
+ acltag = psprintf("%s.%s", tbinfo->dobj.name, attname);
|
|
+ /* Column's GRANT type is always TABLE */
|
|
+ dumpACL(fout, tbinfo->dobj.catId, tbinfo->dobj.dumpId, "TABLE",
|
|
+ namecopy, attnamecopy, acltag,
|
|
+ tbinfo->dobj.namespace->dobj.name, tbinfo->rolname,
|
|
+ attacl);
|
|
+ free(attnamecopy);
|
|
+ free(acltag);
|
|
+ }
|
|
+ PQclear(res);
|
|
+ destroyPQExpBuffer(query);
|
|
+ }
|
|
+
|
|
+ free(namecopy);
|
|
+ }
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Create the AS clause for a view or materialized view. The semicolon is
|
|
+ * stripped because a materialized view must add a WITH NO DATA clause.
|
|
+ *
|
|
+ * This returns a new buffer which must be freed by the caller.
|
|
+ */
|
|
+static PQExpBuffer
|
|
+createViewAsClause(Archive *fout, TableInfo *tbinfo)
|
|
+{
|
|
+ PQExpBuffer query = createPQExpBuffer();
|
|
+ PQExpBuffer result = createPQExpBuffer();
|
|
+ PGresult *res;
|
|
+ int len;
|
|
+
|
|
+ /* Fetch the view definition */
|
|
+ if (fout->remoteVersion >= 70300)
|
|
+ {
|
|
+ /* Beginning in 7.3, viewname is not unique; rely on OID */
|
|
+ appendPQExpBuffer(query,
|
|
+ "SELECT pg_catalog.pg_get_viewdef('%u'::pg_catalog.oid) AS viewdef",
|
|
+ tbinfo->dobj.catId.oid);
|
|
+ }
|
|
+ else
|
|
+ {
|
|
+ appendPQExpBufferStr(query, "SELECT definition AS viewdef "
|
|
+ "FROM pg_views WHERE viewname = ");
|
|
+ appendStringLiteralAH(query, tbinfo->dobj.name, fout);
|
|
+ }
|
|
+
|
|
+ res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
|
|
+
|
|
+ if (PQntuples(res) != 1)
|
|
+ {
|
|
+ if (PQntuples(res) < 1)
|
|
+ exit_horribly(NULL, "query to obtain definition of view \"%s\" returned no data\n",
|
|
+ tbinfo->dobj.name);
|
|
+ else
|
|
+ exit_horribly(NULL, "query to obtain definition of view \"%s\" returned more than one definition\n",
|
|
+ tbinfo->dobj.name);
|
|
+ }
|
|
+
|
|
+ len = PQgetlength(res, 0, 0);
|
|
+
|
|
+ if (len == 0)
|
|
+ exit_horribly(NULL, "definition of view \"%s\" appears to be empty (length zero)\n",
|
|
+ tbinfo->dobj.name);
|
|
+
|
|
+ /* Strip off the trailing semicolon so that other things may follow. */
|
|
+ Assert(PQgetvalue(res, 0, 0)[len - 1] == ';');
|
|
+ appendBinaryPQExpBuffer(result, PQgetvalue(res, 0, 0), len - 1);
|
|
+
|
|
+ PQclear(res);
|
|
+ destroyPQExpBuffer(query);
|
|
+
|
|
+ return result;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * dumpTableSchema
|
|
+ * write the declaration (not data) of one user-defined table or view
|
|
+ */
|
|
+static void
|
|
+dumpTableSchema(Archive *fout, TableInfo *tbinfo)
|
|
+{
|
|
+ PQExpBuffer q = createPQExpBuffer();
|
|
+ PQExpBuffer delq = createPQExpBuffer();
|
|
+ PQExpBuffer labelq = createPQExpBuffer();
|
|
+ int numParents;
|
|
+ TableInfo **parents;
|
|
+ int actual_atts; /* number of attrs in this CREATE statement */
|
|
+ const char *reltypename;
|
|
+ char *storage;
|
|
+ char *srvname;
|
|
+ char *ftoptions;
|
|
+ int j,
|
|
+ k;
|
|
+
|
|
+ /* Make sure we are in proper schema */
|
|
+ selectSourceSchema(fout, tbinfo->dobj.namespace->dobj.name);
|
|
+
|
|
+ if (binary_upgrade)
|
|
+ binary_upgrade_set_type_oids_by_rel_oid(fout, q,
|
|
+ tbinfo->dobj.catId.oid);
|
|
+
|
|
+ /* Is it a table or a view? */
|
|
+ if (tbinfo->relkind == RELKIND_VIEW)
|
|
+ {
|
|
+ PQExpBuffer result;
|
|
+
|
|
+ reltypename = "VIEW";
|
|
+
|
|
+ /*
|
|
+ * DROP must be fully qualified in case same name appears in
|
|
+ * pg_catalog
|
|
+ */
|
|
+ appendPQExpBuffer(delq, "DROP VIEW %s.",
|
|
+ fmtId(tbinfo->dobj.namespace->dobj.name));
|
|
+ appendPQExpBuffer(delq, "%s;\n",
|
|
+ fmtId(tbinfo->dobj.name));
|
|
+
|
|
+ if (binary_upgrade)
|
|
+ binary_upgrade_set_pg_class_oids(fout, q,
|
|
+ tbinfo->dobj.catId.oid, false);
|
|
+
|
|
+ appendPQExpBuffer(q, "CREATE VIEW %s", fmtId(tbinfo->dobj.name));
|
|
+ if (tbinfo->reloptions && strlen(tbinfo->reloptions) > 0)
|
|
+ appendPQExpBuffer(q, " WITH (%s)", tbinfo->reloptions);
|
|
+ result = createViewAsClause(fout, tbinfo);
|
|
+ appendPQExpBuffer(q, " AS\n%s", result->data);
|
|
+ destroyPQExpBuffer(result);
|
|
+
|
|
+ if (tbinfo->checkoption != NULL)
|
|
+ appendPQExpBuffer(q, "\n WITH %s CHECK OPTION", tbinfo->checkoption);
|
|
+ appendPQExpBufferStr(q, ";\n");
|
|
+
|
|
+ appendPQExpBuffer(labelq, "VIEW %s",
|
|
+ fmtId(tbinfo->dobj.name));
|
|
+ }
|
|
+ else
|
|
+ {
|
|
+ switch (tbinfo->relkind)
|
|
+ {
|
|
+ case (RELKIND_FOREIGN_TABLE):
|
|
+ {
|
|
+ PQExpBuffer query = createPQExpBuffer();
|
|
+ PGresult *res;
|
|
+ int i_srvname;
|
|
+ int i_ftoptions;
|
|
+
|
|
+ reltypename = "FOREIGN TABLE";
|
|
+
|
|
+ /* retrieve name of foreign server and generic options */
|
|
+ appendPQExpBuffer(query,
|
|
+ "SELECT fs.srvname, "
|
|
+ "pg_catalog.array_to_string(ARRAY("
|
|
+ "SELECT pg_catalog.quote_ident(option_name) || "
|
|
+ "' ' || pg_catalog.quote_literal(option_value) "
|
|
+ "FROM pg_catalog.pg_options_to_table(ftoptions) "
|
|
+ "ORDER BY option_name"
|
|
+ "), E',\n ') AS ftoptions "
|
|
+ "FROM pg_catalog.pg_foreign_table ft "
|
|
+ "JOIN pg_catalog.pg_foreign_server fs "
|
|
+ "ON (fs.oid = ft.ftserver) "
|
|
+ "WHERE ft.ftrelid = '%u'",
|
|
+ tbinfo->dobj.catId.oid);
|
|
+ res = ExecuteSqlQueryForSingleRow(fout, query->data);
|
|
+ i_srvname = PQfnumber(res, "srvname");
|
|
+ i_ftoptions = PQfnumber(res, "ftoptions");
|
|
+ srvname = pg_strdup(PQgetvalue(res, 0, i_srvname));
|
|
+ ftoptions = pg_strdup(PQgetvalue(res, 0, i_ftoptions));
|
|
+ PQclear(res);
|
|
+ destroyPQExpBuffer(query);
|
|
+ break;
|
|
+ }
|
|
+ case (RELKIND_MATVIEW):
|
|
+ reltypename = "MATERIALIZED VIEW";
|
|
+ srvname = NULL;
|
|
+ ftoptions = NULL;
|
|
+ break;
|
|
+ default:
|
|
+ reltypename = "TABLE";
|
|
+ srvname = NULL;
|
|
+ ftoptions = NULL;
|
|
+ }
|
|
+
|
|
+ numParents = tbinfo->numParents;
|
|
+ parents = tbinfo->parents;
|
|
+
|
|
+ /*
|
|
+ * DROP must be fully qualified in case same name appears in
|
|
+ * pg_catalog
|
|
+ */
|
|
+ appendPQExpBuffer(delq, "DROP %s %s.", reltypename,
|
|
+ fmtId(tbinfo->dobj.namespace->dobj.name));
|
|
+ appendPQExpBuffer(delq, "%s;\n",
|
|
+ fmtId(tbinfo->dobj.name));
|
|
+
|
|
+ appendPQExpBuffer(labelq, "%s %s", reltypename,
|
|
+ fmtId(tbinfo->dobj.name));
|
|
+
|
|
+ if (binary_upgrade)
|
|
+ binary_upgrade_set_pg_class_oids(fout, q,
|
|
+ tbinfo->dobj.catId.oid, false);
|
|
+
|
|
+ appendPQExpBuffer(q, "CREATE %s%s %s",
|
|
+ tbinfo->relpersistence == RELPERSISTENCE_UNLOGGED ?
|
|
+ "UNLOGGED " : "",
|
|
+ reltypename,
|
|
+ fmtId(tbinfo->dobj.name));
|
|
+
|
|
+ /*
|
|
+ * Attach to type, if reloftype; except in case of a binary upgrade,
|
|
+ * we dump the table normally and attach it to the type afterward.
|
|
+ */
|
|
+ if (tbinfo->reloftype && !binary_upgrade)
|
|
+ appendPQExpBuffer(q, " OF %s", tbinfo->reloftype);
|
|
+
|
|
+ if (tbinfo->relkind != RELKIND_MATVIEW)
|
|
+ {
|
|
+ /* Dump the attributes */
|
|
+ actual_atts = 0;
|
|
+ for (j = 0; j < tbinfo->numatts; j++)
|
|
+ {
|
|
+ /*
|
|
+ * Normally, dump if it's locally defined in this table, and
|
|
+ * not dropped. But for binary upgrade, we'll dump all the
|
|
+ * columns, and then fix up the dropped and nonlocal cases
|
|
+ * below.
|
|
+ */
|
|
+ if (shouldPrintColumn(tbinfo, j))
|
|
+ {
|
|
+ /*
|
|
+ * Default value --- suppress if to be printed separately.
|
|
+ */
|
|
+ bool has_default = (tbinfo->attrdefs[j] != NULL &&
|
|
+ !tbinfo->attrdefs[j]->separate);
|
|
+
|
|
+ /*
|
|
+ * Not Null constraint --- suppress if inherited, except
|
|
+ * in binary-upgrade case where that won't work.
|
|
+ */
|
|
+ bool has_notnull = (tbinfo->notnull[j] &&
|
|
+ (!tbinfo->inhNotNull[j] ||
|
|
+ binary_upgrade));
|
|
+
|
|
+ /* Skip column if fully defined by reloftype */
|
|
+ if (tbinfo->reloftype &&
|
|
+ !has_default && !has_notnull && !binary_upgrade)
|
|
+ continue;
|
|
+
|
|
+ /* Format properly if not first attr */
|
|
+ if (actual_atts == 0)
|
|
+ appendPQExpBufferStr(q, " (");
|
|
+ else
|
|
+ appendPQExpBufferStr(q, ",");
|
|
+ appendPQExpBufferStr(q, "\n ");
|
|
+ actual_atts++;
|
|
+
|
|
+ /* Attribute name */
|
|
+ appendPQExpBufferStr(q, fmtId(tbinfo->attnames[j]));
|
|
+
|
|
+ if (tbinfo->attisdropped[j])
|
|
+ {
|
|
+ /*
|
|
+ * ALTER TABLE DROP COLUMN clears
|
|
+ * pg_attribute.atttypid, so we will not have gotten a
|
|
+ * valid type name; insert INTEGER as a stopgap. We'll
|
|
+ * clean things up later.
|
|
+ */
|
|
+ appendPQExpBufferStr(q, " INTEGER /* dummy */");
|
|
+ /* Skip all the rest, too */
|
|
+ continue;
|
|
+ }
|
|
+
|
|
+ /* Attribute type */
|
|
+ if (tbinfo->reloftype && !binary_upgrade)
|
|
+ {
|
|
+ appendPQExpBufferStr(q, " WITH OPTIONS");
|
|
+ }
|
|
+ else if (fout->remoteVersion >= 70100)
|
|
+ {
|
|
+ appendPQExpBuffer(q, " %s",
|
|
+ tbinfo->atttypnames[j]);
|
|
+ }
|
|
+ else
|
|
+ {
|
|
+ /* If no format_type, fake it */
|
|
+ appendPQExpBuffer(q, " %s",
|
|
+ myFormatType(tbinfo->atttypnames[j],
|
|
+ tbinfo->atttypmod[j]));
|
|
+ }
|
|
+
|
|
+ /* Add collation if not default for the type */
|
|
+ if (OidIsValid(tbinfo->attcollation[j]))
|
|
+ {
|
|
+ CollInfo *coll;
|
|
+
|
|
+ coll = findCollationByOid(tbinfo->attcollation[j]);
|
|
+ if (coll)
|
|
+ {
|
|
+ /* always schema-qualify, don't try to be smart */
|
|
+ appendPQExpBuffer(q, " COLLATE %s.",
|
|
+ fmtId(coll->dobj.namespace->dobj.name));
|
|
+ appendPQExpBufferStr(q, fmtId(coll->dobj.name));
|
|
+ }
|
|
+ }
|
|
+
|
|
+ if (has_default)
|
|
+ appendPQExpBuffer(q, " DEFAULT %s",
|
|
+ tbinfo->attrdefs[j]->adef_expr);
|
|
+
|
|
+ if (has_notnull)
|
|
+ appendPQExpBufferStr(q, " NOT NULL");
|
|
+ }
|
|
+ }
|
|
+
|
|
+ /*
|
|
+ * Add non-inherited CHECK constraints, if any.
|
|
+ */
|
|
+ for (j = 0; j < tbinfo->ncheck; j++)
|
|
+ {
|
|
+ ConstraintInfo *constr = &(tbinfo->checkexprs[j]);
|
|
+
|
|
+ if (constr->separate || !constr->conislocal)
|
|
+ continue;
|
|
+
|
|
+ if (actual_atts == 0)
|
|
+ appendPQExpBufferStr(q, " (\n ");
|
|
+ else
|
|
+ appendPQExpBufferStr(q, ",\n ");
|
|
+
|
|
+ appendPQExpBuffer(q, "CONSTRAINT %s ",
|
|
+ fmtId(constr->dobj.name));
|
|
+ appendPQExpBufferStr(q, constr->condef);
|
|
+
|
|
+ actual_atts++;
|
|
+ }
|
|
+
|
|
+ if (actual_atts)
|
|
+ appendPQExpBufferStr(q, "\n)");
|
|
+ else if (!(tbinfo->reloftype && !binary_upgrade))
|
|
+ {
|
|
+ /*
|
|
+ * We must have a parenthesized attribute list, even though
|
|
+ * empty, when not using the OF TYPE syntax.
|
|
+ */
|
|
+ appendPQExpBufferStr(q, " (\n)");
|
|
+ }
|
|
+
|
|
+ if (numParents > 0 && !binary_upgrade)
|
|
+ {
|
|
+ appendPQExpBufferStr(q, "\nINHERITS (");
|
|
+ for (k = 0; k < numParents; k++)
|
|
+ {
|
|
+ TableInfo *parentRel = parents[k];
|
|
+
|
|
+ if (k > 0)
|
|
+ appendPQExpBufferStr(q, ", ");
|
|
+ if (parentRel->dobj.namespace != tbinfo->dobj.namespace)
|
|
+ appendPQExpBuffer(q, "%s.",
|
|
+ fmtId(parentRel->dobj.namespace->dobj.name));
|
|
+ appendPQExpBufferStr(q, fmtId(parentRel->dobj.name));
|
|
+ }
|
|
+ appendPQExpBufferChar(q, ')');
|
|
+ }
|
|
+
|
|
+ if (tbinfo->relkind == RELKIND_FOREIGN_TABLE)
|
|
+ appendPQExpBuffer(q, "\nSERVER %s", fmtId(srvname));
|
|
+ }
|
|
+
|
|
+ if ((tbinfo->reloptions && strlen(tbinfo->reloptions) > 0) ||
|
|
+ (tbinfo->toast_reloptions && strlen(tbinfo->toast_reloptions) > 0))
|
|
+ {
|
|
+ bool addcomma = false;
|
|
+
|
|
+ appendPQExpBufferStr(q, "\nWITH (");
|
|
+ if (tbinfo->reloptions && strlen(tbinfo->reloptions) > 0)
|
|
+ {
|
|
+ addcomma = true;
|
|
+ appendPQExpBufferStr(q, tbinfo->reloptions);
|
|
+ }
|
|
+ if (tbinfo->toast_reloptions && strlen(tbinfo->toast_reloptions) > 0)
|
|
+ {
|
|
+ appendPQExpBuffer(q, "%s%s", addcomma ? ", " : "",
|
|
+ tbinfo->toast_reloptions);
|
|
+ }
|
|
+ appendPQExpBufferChar(q, ')');
|
|
+ }
|
|
+
|
|
+ /* Dump generic options if any */
|
|
+ if (ftoptions && ftoptions[0])
|
|
+ appendPQExpBuffer(q, "\nOPTIONS (\n %s\n)", ftoptions);
|
|
+
|
|
+ /*
|
|
+ * For materialized views, create the AS clause just like a view. At
|
|
+ * this point, we always mark the view as not populated.
|
|
+ */
|
|
+ if (tbinfo->relkind == RELKIND_MATVIEW)
|
|
+ {
|
|
+ PQExpBuffer result;
|
|
+
|
|
+ result = createViewAsClause(fout, tbinfo);
|
|
+ appendPQExpBuffer(q, " AS\n%s\n WITH NO DATA;\n",
|
|
+ result->data);
|
|
+ destroyPQExpBuffer(result);
|
|
+ }
|
|
+ else
|
|
+ appendPQExpBufferStr(q, ";\n");
|
|
+
|
|
+ /*
|
|
+ * To create binary-compatible heap files, we have to ensure the same
|
|
+ * physical column order, including dropped columns, as in the
|
|
+ * original. Therefore, we create dropped columns above and drop them
|
|
+ * here, also updating their attlen/attalign values so that the
|
|
+ * dropped column can be skipped properly. (We do not bother with
|
|
+ * restoring the original attbyval setting.) Also, inheritance
|
|
+ * relationships are set up by doing ALTER INHERIT rather than using
|
|
+ * an INHERITS clause --- the latter would possibly mess up the column
|
|
+ * order. That also means we have to take care about setting
|
|
+ * attislocal correctly, plus fix up any inherited CHECK constraints.
|
|
+ * Analogously, we set up typed tables using ALTER TABLE / OF here.
|
|
+ */
|
|
+ if (binary_upgrade && (tbinfo->relkind == RELKIND_RELATION ||
|
|
+ tbinfo->relkind == RELKIND_FOREIGN_TABLE))
|
|
+ {
|
|
+ for (j = 0; j < tbinfo->numatts; j++)
|
|
+ {
|
|
+ if (tbinfo->attisdropped[j])
|
|
+ {
|
|
+ appendPQExpBufferStr(q, "\n-- For binary upgrade, recreate dropped column.\n");
|
|
+ appendPQExpBuffer(q, "UPDATE pg_catalog.pg_attribute\n"
|
|
+ "SET attlen = %d, "
|
|
+ "attalign = '%c', attbyval = false\n"
|
|
+ "WHERE attname = ",
|
|
+ tbinfo->attlen[j],
|
|
+ tbinfo->attalign[j]);
|
|
+ appendStringLiteralAH(q, tbinfo->attnames[j], fout);
|
|
+ appendPQExpBufferStr(q, "\n AND attrelid = ");
|
|
+ appendStringLiteralAH(q, fmtId(tbinfo->dobj.name), fout);
|
|
+ appendPQExpBufferStr(q, "::pg_catalog.regclass;\n");
|
|
+
|
|
+ if (tbinfo->relkind == RELKIND_RELATION)
|
|
+ appendPQExpBuffer(q, "ALTER TABLE ONLY %s ",
|
|
+ fmtId(tbinfo->dobj.name));
|
|
+ else
|
|
+ appendPQExpBuffer(q, "ALTER FOREIGN TABLE %s ",
|
|
+ fmtId(tbinfo->dobj.name));
|
|
+
|
|
+ appendPQExpBuffer(q, "DROP COLUMN %s;\n",
|
|
+ fmtId(tbinfo->attnames[j]));
|
|
+ }
|
|
+ else if (!tbinfo->attislocal[j])
|
|
+ {
|
|
+ Assert(tbinfo->relkind != RELKIND_FOREIGN_TABLE);
|
|
+ appendPQExpBufferStr(q, "\n-- For binary upgrade, recreate inherited column.\n");
|
|
+ appendPQExpBufferStr(q, "UPDATE pg_catalog.pg_attribute\n"
|
|
+ "SET attislocal = false\n"
|
|
+ "WHERE attname = ");
|
|
+ appendStringLiteralAH(q, tbinfo->attnames[j], fout);
|
|
+ appendPQExpBufferStr(q, "\n AND attrelid = ");
|
|
+ appendStringLiteralAH(q, fmtId(tbinfo->dobj.name), fout);
|
|
+ appendPQExpBufferStr(q, "::pg_catalog.regclass;\n");
|
|
+ }
|
|
+ }
|
|
+
|
|
+ for (k = 0; k < tbinfo->ncheck; k++)
|
|
+ {
|
|
+ ConstraintInfo *constr = &(tbinfo->checkexprs[k]);
|
|
+
|
|
+ if (constr->separate || constr->conislocal)
|
|
+ continue;
|
|
+
|
|
+ appendPQExpBufferStr(q, "\n-- For binary upgrade, set up inherited constraint.\n");
|
|
+ appendPQExpBuffer(q, "ALTER TABLE ONLY %s ",
|
|
+ fmtId(tbinfo->dobj.name));
|
|
+ appendPQExpBuffer(q, " ADD CONSTRAINT %s ",
|
|
+ fmtId(constr->dobj.name));
|
|
+ appendPQExpBuffer(q, "%s;\n", constr->condef);
|
|
+ appendPQExpBufferStr(q, "UPDATE pg_catalog.pg_constraint\n"
|
|
+ "SET conislocal = false\n"
|
|
+ "WHERE contype = 'c' AND conname = ");
|
|
+ appendStringLiteralAH(q, constr->dobj.name, fout);
|
|
+ appendPQExpBufferStr(q, "\n AND conrelid = ");
|
|
+ appendStringLiteralAH(q, fmtId(tbinfo->dobj.name), fout);
|
|
+ appendPQExpBufferStr(q, "::pg_catalog.regclass;\n");
|
|
+ }
|
|
+
|
|
+ if (numParents > 0)
|
|
+ {
|
|
+ appendPQExpBufferStr(q, "\n-- For binary upgrade, set up inheritance this way.\n");
|
|
+ for (k = 0; k < numParents; k++)
|
|
+ {
|
|
+ TableInfo *parentRel = parents[k];
|
|
+
|
|
+ appendPQExpBuffer(q, "ALTER TABLE ONLY %s INHERIT ",
|
|
+ fmtId(tbinfo->dobj.name));
|
|
+ if (parentRel->dobj.namespace != tbinfo->dobj.namespace)
|
|
+ appendPQExpBuffer(q, "%s.",
|
|
+ fmtId(parentRel->dobj.namespace->dobj.name));
|
|
+ appendPQExpBuffer(q, "%s;\n",
|
|
+ fmtId(parentRel->dobj.name));
|
|
+ }
|
|
+ }
|
|
+
|
|
+ if (tbinfo->reloftype)
|
|
+ {
|
|
+ appendPQExpBufferStr(q, "\n-- For binary upgrade, set up typed tables this way.\n");
|
|
+ appendPQExpBuffer(q, "ALTER TABLE ONLY %s OF %s;\n",
|
|
+ fmtId(tbinfo->dobj.name),
|
|
+ tbinfo->reloftype);
|
|
+ }
|
|
+
|
|
+ appendPQExpBufferStr(q, "\n-- For binary upgrade, set heap's relfrozenxid and relminmxid\n");
|
|
+ appendPQExpBuffer(q, "UPDATE pg_catalog.pg_class\n"
|
|
+ "SET relfrozenxid = '%u', relminmxid = '%u'\n"
|
|
+ "WHERE oid = ",
|
|
+ tbinfo->frozenxid, tbinfo->minmxid);
|
|
+ appendStringLiteralAH(q, fmtId(tbinfo->dobj.name), fout);
|
|
+ appendPQExpBufferStr(q, "::pg_catalog.regclass;\n");
|
|
+
|
|
+ if (tbinfo->toast_oid)
|
|
+ {
|
|
+ /* We preserve the toast oids, so we can use them during restore */
|
|
+ appendPQExpBufferStr(q, "\n-- For binary upgrade, set toast's relfrozenxid and relminmxid\n");
|
|
+ appendPQExpBuffer(q, "UPDATE pg_catalog.pg_class\n"
|
|
+ "SET relfrozenxid = '%u', relminmxid = '%u'\n"
|
|
+ "WHERE oid = '%u';\n",
|
|
+ tbinfo->toast_frozenxid,
|
|
+ tbinfo->toast_minmxid, tbinfo->toast_oid);
|
|
+ }
|
|
+ }
|
|
+
|
|
+ /*
|
|
+ * In binary_upgrade mode, restore matviews' populated status by
|
|
+ * poking pg_class directly. This is pretty ugly, but we can't use
|
|
+ * REFRESH MATERIALIZED VIEW since it's possible that some underlying
|
|
+ * matview is not populated even though this matview is.
|
|
+ */
|
|
+ if (binary_upgrade && tbinfo->relkind == RELKIND_MATVIEW &&
|
|
+ tbinfo->relispopulated)
|
|
+ {
|
|
+ appendPQExpBufferStr(q, "\n-- For binary upgrade, mark materialized view as populated\n");
|
|
+ appendPQExpBufferStr(q, "UPDATE pg_catalog.pg_class\n"
|
|
+ "SET relispopulated = 't'\n"
|
|
+ "WHERE oid = ");
|
|
+ appendStringLiteralAH(q, fmtId(tbinfo->dobj.name), fout);
|
|
+ appendPQExpBufferStr(q, "::pg_catalog.regclass;\n");
|
|
+ }
|
|
+
|
|
+ /*
|
|
+ * Dump additional per-column properties that we can't handle in the
|
|
+ * main CREATE TABLE command.
|
|
+ */
|
|
+ for (j = 0; j < tbinfo->numatts; j++)
|
|
+ {
|
|
+ /* None of this applies to dropped columns */
|
|
+ if (tbinfo->attisdropped[j])
|
|
+ continue;
|
|
+
|
|
+ /*
|
|
+ * If we didn't dump the column definition explicitly above, and
|
|
+ * it is NOT NULL and did not inherit that property from a parent,
|
|
+ * we have to mark it separately.
|
|
+ */
|
|
+ if (!shouldPrintColumn(tbinfo, j) &&
|
|
+ tbinfo->notnull[j] && !tbinfo->inhNotNull[j])
|
|
+ {
|
|
+ appendPQExpBuffer(q, "ALTER TABLE ONLY %s ",
|
|
+ fmtId(tbinfo->dobj.name));
|
|
+ appendPQExpBuffer(q, "ALTER COLUMN %s SET NOT NULL;\n",
|
|
+ fmtId(tbinfo->attnames[j]));
|
|
+ }
|
|
+
|
|
+ /*
|
|
+ * Dump per-column statistics information. We only issue an ALTER
|
|
+ * TABLE statement if the attstattarget entry for this column is
|
|
+ * non-negative (i.e. it's not the default value)
|
|
+ */
|
|
+ if (tbinfo->attstattarget[j] >= 0)
|
|
+ {
|
|
+ appendPQExpBuffer(q, "ALTER TABLE ONLY %s ",
|
|
+ fmtId(tbinfo->dobj.name));
|
|
+ appendPQExpBuffer(q, "ALTER COLUMN %s ",
|
|
+ fmtId(tbinfo->attnames[j]));
|
|
+ appendPQExpBuffer(q, "SET STATISTICS %d;\n",
|
|
+ tbinfo->attstattarget[j]);
|
|
+ }
|
|
+
|
|
+ /*
|
|
+ * Dump per-column storage information. The statement is only
|
|
+ * dumped if the storage has been changed from the type's default.
|
|
+ */
|
|
+ if (tbinfo->attstorage[j] != tbinfo->typstorage[j])
|
|
+ {
|
|
+ switch (tbinfo->attstorage[j])
|
|
+ {
|
|
+ case 'p':
|
|
+ storage = "PLAIN";
|
|
+ break;
|
|
+ case 'e':
|
|
+ storage = "EXTERNAL";
|
|
+ break;
|
|
+ case 'm':
|
|
+ storage = "MAIN";
|
|
+ break;
|
|
+ case 'x':
|
|
+ storage = "EXTENDED";
|
|
+ break;
|
|
+ default:
|
|
+ storage = NULL;
|
|
+ }
|
|
+
|
|
+ /*
|
|
+ * Only dump the statement if it's a storage type we recognize
|
|
+ */
|
|
+ if (storage != NULL)
|
|
+ {
|
|
+ appendPQExpBuffer(q, "ALTER TABLE ONLY %s ",
|
|
+ fmtId(tbinfo->dobj.name));
|
|
+ appendPQExpBuffer(q, "ALTER COLUMN %s ",
|
|
+ fmtId(tbinfo->attnames[j]));
|
|
+ appendPQExpBuffer(q, "SET STORAGE %s;\n",
|
|
+ storage);
|
|
+ }
|
|
+ }
|
|
+
|
|
+ /*
|
|
+ * Dump per-column attributes.
|
|
+ */
|
|
+ if (tbinfo->attoptions[j] && tbinfo->attoptions[j][0] != '\0')
|
|
+ {
|
|
+ appendPQExpBuffer(q, "ALTER TABLE ONLY %s ",
|
|
+ fmtId(tbinfo->dobj.name));
|
|
+ appendPQExpBuffer(q, "ALTER COLUMN %s ",
|
|
+ fmtId(tbinfo->attnames[j]));
|
|
+ appendPQExpBuffer(q, "SET (%s);\n",
|
|
+ tbinfo->attoptions[j]);
|
|
+ }
|
|
+
|
|
+ /*
|
|
+ * Dump per-column fdw options.
|
|
+ */
|
|
+ if (tbinfo->relkind == RELKIND_FOREIGN_TABLE &&
|
|
+ tbinfo->attfdwoptions[j] &&
|
|
+ tbinfo->attfdwoptions[j][0] != '\0')
|
|
+ {
|
|
+ appendPQExpBuffer(q, "ALTER FOREIGN TABLE %s ",
|
|
+ fmtId(tbinfo->dobj.name));
|
|
+ appendPQExpBuffer(q, "ALTER COLUMN %s ",
|
|
+ fmtId(tbinfo->attnames[j]));
|
|
+ appendPQExpBuffer(q, "OPTIONS (\n %s\n);\n",
|
|
+ tbinfo->attfdwoptions[j]);
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+
|
|
+ /*
|
|
+ * dump properties we only have ALTER TABLE syntax for
|
|
+ */
|
|
+ if ((tbinfo->relkind == RELKIND_RELATION || tbinfo->relkind == RELKIND_MATVIEW) &&
|
|
+ tbinfo->relreplident != REPLICA_IDENTITY_DEFAULT)
|
|
+ {
|
|
+ if (tbinfo->relreplident == REPLICA_IDENTITY_INDEX)
|
|
+ {
|
|
+ /* nothing to do, will be set when the index is dumped */
|
|
+ }
|
|
+ else if (tbinfo->relreplident == REPLICA_IDENTITY_NOTHING)
|
|
+ {
|
|
+ appendPQExpBuffer(q, "\nALTER TABLE ONLY %s REPLICA IDENTITY NOTHING;\n",
|
|
+ fmtId(tbinfo->dobj.name));
|
|
+ }
|
|
+ else if (tbinfo->relreplident == REPLICA_IDENTITY_FULL)
|
|
+ {
|
|
+ appendPQExpBuffer(q, "\nALTER TABLE ONLY %s REPLICA IDENTITY FULL;\n",
|
|
+ fmtId(tbinfo->dobj.name));
|
|
+ }
|
|
+ }
|
|
+
|
|
+ if (binary_upgrade)
|
|
+ binary_upgrade_extension_member(q, &tbinfo->dobj, labelq->data);
|
|
+
|
|
+ ArchiveEntry(fout, tbinfo->dobj.catId, tbinfo->dobj.dumpId,
|
|
+ tbinfo->dobj.name,
|
|
+ tbinfo->dobj.namespace->dobj.name,
|
|
+ (tbinfo->relkind == RELKIND_VIEW) ? NULL : tbinfo->reltablespace,
|
|
+ tbinfo->rolname,
|
|
+ (strcmp(reltypename, "TABLE") == 0) ? tbinfo->hasoids : false,
|
|
+ reltypename,
|
|
+ tbinfo->postponed_def ? SECTION_POST_DATA : SECTION_PRE_DATA,
|
|
+ q->data, delq->data, NULL,
|
|
+ NULL, 0,
|
|
+ NULL, NULL);
|
|
+
|
|
+
|
|
+ /* Dump Table Comments */
|
|
+ dumpTableComment(fout, tbinfo, reltypename);
|
|
+
|
|
+ /* Dump Table Security Labels */
|
|
+ dumpTableSecLabel(fout, tbinfo, reltypename);
|
|
+
|
|
+ /* Dump comments on inlined table constraints */
|
|
+ for (j = 0; j < tbinfo->ncheck; j++)
|
|
+ {
|
|
+ ConstraintInfo *constr = &(tbinfo->checkexprs[j]);
|
|
+
|
|
+ if (constr->separate || !constr->conislocal)
|
|
+ continue;
|
|
+
|
|
+ dumpTableConstraintComment(fout, constr);
|
|
+ }
|
|
+
|
|
+ destroyPQExpBuffer(q);
|
|
+ destroyPQExpBuffer(delq);
|
|
+ destroyPQExpBuffer(labelq);
|
|
+}
|
|
+
|
|
+/*
|
|
+ * dumpAttrDef --- dump an attribute's default-value declaration
|
|
+ */
|
|
+static void
|
|
+dumpAttrDef(Archive *fout, AttrDefInfo *adinfo)
|
|
+{
|
|
+ TableInfo *tbinfo = adinfo->adtable;
|
|
+ int adnum = adinfo->adnum;
|
|
+ PQExpBuffer q;
|
|
+ PQExpBuffer delq;
|
|
+
|
|
+ /* Skip if table definition not to be dumped */
|
|
+ if (!tbinfo->dobj.dump || dataOnly)
|
|
+ return;
|
|
+
|
|
+ /* Skip if not "separate"; it was dumped in the table's definition */
|
|
+ if (!adinfo->separate)
|
|
+ return;
|
|
+
|
|
+ q = createPQExpBuffer();
|
|
+ delq = createPQExpBuffer();
|
|
+
|
|
+ appendPQExpBuffer(q, "ALTER TABLE ONLY %s ",
|
|
+ fmtId(tbinfo->dobj.name));
|
|
+ appendPQExpBuffer(q, "ALTER COLUMN %s SET DEFAULT %s;\n",
|
|
+ fmtId(tbinfo->attnames[adnum - 1]),
|
|
+ adinfo->adef_expr);
|
|
+
|
|
+ /*
|
|
+ * DROP must be fully qualified in case same name appears in pg_catalog
|
|
+ */
|
|
+ appendPQExpBuffer(delq, "ALTER TABLE %s.",
|
|
+ fmtId(tbinfo->dobj.namespace->dobj.name));
|
|
+ appendPQExpBuffer(delq, "%s ",
|
|
+ fmtId(tbinfo->dobj.name));
|
|
+ appendPQExpBuffer(delq, "ALTER COLUMN %s DROP DEFAULT;\n",
|
|
+ fmtId(tbinfo->attnames[adnum - 1]));
|
|
+
|
|
+ ArchiveEntry(fout, adinfo->dobj.catId, adinfo->dobj.dumpId,
|
|
+ tbinfo->attnames[adnum - 1],
|
|
+ tbinfo->dobj.namespace->dobj.name,
|
|
+ NULL,
|
|
+ tbinfo->rolname,
|
|
+ false, "DEFAULT", SECTION_PRE_DATA,
|
|
+ q->data, delq->data, NULL,
|
|
+ NULL, 0,
|
|
+ NULL, NULL);
|
|
+
|
|
+ destroyPQExpBuffer(q);
|
|
+ destroyPQExpBuffer(delq);
|
|
+}
|
|
+
|
|
+/*
|
|
+ * getAttrName: extract the correct name for an attribute
|
|
+ *
|
|
+ * The array tblInfo->attnames[] only provides names of user attributes;
|
|
+ * if a system attribute number is supplied, we have to fake it.
|
|
+ * We also do a little bit of bounds checking for safety's sake.
|
|
+ */
|
|
+static const char *
|
|
+getAttrName(int attrnum, TableInfo *tblInfo)
|
|
+{
|
|
+ if (attrnum > 0 && attrnum <= tblInfo->numatts)
|
|
+ return tblInfo->attnames[attrnum - 1];
|
|
+ switch (attrnum)
|
|
+ {
|
|
+ case SelfItemPointerAttributeNumber:
|
|
+ return "ctid";
|
|
+ case ObjectIdAttributeNumber:
|
|
+ return "oid";
|
|
+ case MinTransactionIdAttributeNumber:
|
|
+ return "xmin";
|
|
+ case MinCommandIdAttributeNumber:
|
|
+ return "cmin";
|
|
+ case MaxTransactionIdAttributeNumber:
|
|
+ return "xmax";
|
|
+ case MaxCommandIdAttributeNumber:
|
|
+ return "cmax";
|
|
+ case TableOidAttributeNumber:
|
|
+ return "tableoid";
|
|
+ }
|
|
+ exit_horribly(NULL, "invalid column number %d for table \"%s\"\n",
|
|
+ attrnum, tblInfo->dobj.name);
|
|
+ return NULL; /* keep compiler quiet */
|
|
+}
|
|
+
|
|
+/*
|
|
+ * dumpIndex
|
|
+ * write out to fout a user-defined index
|
|
+ */
|
|
+static void
|
|
+dumpIndex(Archive *fout, IndxInfo *indxinfo)
|
|
+{
|
|
+ TableInfo *tbinfo = indxinfo->indextable;
|
|
+ bool is_constraint = (indxinfo->indexconstraint != 0);
|
|
+ PQExpBuffer q;
|
|
+ PQExpBuffer delq;
|
|
+ PQExpBuffer labelq;
|
|
+
|
|
+ if (dataOnly)
|
|
+ return;
|
|
+
|
|
+ q = createPQExpBuffer();
|
|
+ delq = createPQExpBuffer();
|
|
+ labelq = createPQExpBuffer();
|
|
+
|
|
+ appendPQExpBuffer(labelq, "INDEX %s",
|
|
+ fmtId(indxinfo->dobj.name));
|
|
+
|
|
+ /*
|
|
+ * If there's an associated constraint, don't dump the index per se, but
|
|
+ * do dump any comment for it. (This is safe because dependency ordering
|
|
+ * will have ensured the constraint is emitted first.) Note that the
|
|
+ * emitted comment has to be shown as depending on the constraint, not the
|
|
+ * index, in such cases.
|
|
+ */
|
|
+ if (!is_constraint)
|
|
+ {
|
|
+ if (binary_upgrade)
|
|
+ binary_upgrade_set_pg_class_oids(fout, q,
|
|
+ indxinfo->dobj.catId.oid, true);
|
|
+
|
|
+ /* Plain secondary index */
|
|
+ appendPQExpBuffer(q, "%s;\n", indxinfo->indexdef);
|
|
+
|
|
+ /* If the index is clustered, we need to record that. */
|
|
+ if (indxinfo->indisclustered)
|
|
+ {
|
|
+ appendPQExpBuffer(q, "\nALTER TABLE %s CLUSTER",
|
|
+ fmtId(tbinfo->dobj.name));
|
|
+ appendPQExpBuffer(q, " ON %s;\n",
|
|
+ fmtId(indxinfo->dobj.name));
|
|
+ }
|
|
+
|
|
+ /* If the index defines identity, we need to record that. */
|
|
+ if (indxinfo->indisreplident)
|
|
+ {
|
|
+ appendPQExpBuffer(q, "\nALTER TABLE ONLY %s REPLICA IDENTITY USING",
|
|
+ fmtId(tbinfo->dobj.name));
|
|
+ appendPQExpBuffer(q, " INDEX %s;\n",
|
|
+ fmtId(indxinfo->dobj.name));
|
|
+ }
|
|
+
|
|
+ /*
|
|
+ * DROP must be fully qualified in case same name appears in
|
|
+ * pg_catalog
|
|
+ */
|
|
+ appendPQExpBuffer(delq, "DROP INDEX %s.",
|
|
+ fmtId(tbinfo->dobj.namespace->dobj.name));
|
|
+ appendPQExpBuffer(delq, "%s;\n",
|
|
+ fmtId(indxinfo->dobj.name));
|
|
+
|
|
+ ArchiveEntry(fout, indxinfo->dobj.catId, indxinfo->dobj.dumpId,
|
|
+ indxinfo->dobj.name,
|
|
+ tbinfo->dobj.namespace->dobj.name,
|
|
+ indxinfo->tablespace,
|
|
+ tbinfo->rolname, false,
|
|
+ "INDEX", SECTION_POST_DATA,
|
|
+ q->data, delq->data, NULL,
|
|
+ NULL, 0,
|
|
+ NULL, NULL);
|
|
+ }
|
|
+
|
|
+ /* Dump Index Comments */
|
|
+ dumpComment(fout, labelq->data,
|
|
+ tbinfo->dobj.namespace->dobj.name,
|
|
+ tbinfo->rolname,
|
|
+ indxinfo->dobj.catId, 0,
|
|
+ is_constraint ? indxinfo->indexconstraint :
|
|
+ indxinfo->dobj.dumpId);
|
|
+
|
|
+ destroyPQExpBuffer(q);
|
|
+ destroyPQExpBuffer(delq);
|
|
+ destroyPQExpBuffer(labelq);
|
|
+}
|
|
+
|
|
+/*
|
|
+ * dumpConstraint
|
|
+ * write out to fout a user-defined constraint
|
|
+ */
|
|
+static void
|
|
+dumpConstraint(Archive *fout, ConstraintInfo *coninfo)
|
|
+{
|
|
+ TableInfo *tbinfo = coninfo->contable;
|
|
+ PQExpBuffer q;
|
|
+ PQExpBuffer delq;
|
|
+
|
|
+ /* Skip if not to be dumped */
|
|
+ if (!coninfo->dobj.dump || dataOnly)
|
|
+ return;
|
|
+
|
|
+ q = createPQExpBuffer();
|
|
+ delq = createPQExpBuffer();
|
|
+
|
|
+ if (coninfo->contype == 'p' ||
|
|
+ coninfo->contype == 'u' ||
|
|
+ coninfo->contype == 'x')
|
|
+ {
|
|
+ /* Index-related constraint */
|
|
+ IndxInfo *indxinfo;
|
|
+ int k;
|
|
+
|
|
+ indxinfo = (IndxInfo *) findObjectByDumpId(coninfo->conindex);
|
|
+
|
|
+ if (indxinfo == NULL)
|
|
+ exit_horribly(NULL, "missing index for constraint \"%s\"\n",
|
|
+ coninfo->dobj.name);
|
|
+
|
|
+ if (binary_upgrade)
|
|
+ binary_upgrade_set_pg_class_oids(fout, q,
|
|
+ indxinfo->dobj.catId.oid, true);
|
|
+
|
|
+ appendPQExpBuffer(q, "ALTER TABLE ONLY %s\n",
|
|
+ fmtId(tbinfo->dobj.name));
|
|
+ appendPQExpBuffer(q, " ADD CONSTRAINT %s ",
|
|
+ fmtId(coninfo->dobj.name));
|
|
+
|
|
+ if (coninfo->condef)
|
|
+ {
|
|
+ /* pg_get_constraintdef should have provided everything */
|
|
+ appendPQExpBuffer(q, "%s;\n", coninfo->condef);
|
|
+ }
|
|
+ else
|
|
+ {
|
|
+ appendPQExpBuffer(q, "%s (",
|
|
+ coninfo->contype == 'p' ? "PRIMARY KEY" : "UNIQUE");
|
|
+ for (k = 0; k < indxinfo->indnkeys; k++)
|
|
+ {
|
|
+ int indkey = (int) indxinfo->indkeys[k];
|
|
+ const char *attname;
|
|
+
|
|
+ if (indkey == InvalidAttrNumber)
|
|
+ break;
|
|
+ attname = getAttrName(indkey, tbinfo);
|
|
+
|
|
+ appendPQExpBuffer(q, "%s%s",
|
|
+ (k == 0) ? "" : ", ",
|
|
+ fmtId(attname));
|
|
+ }
|
|
+
|
|
+ appendPQExpBufferChar(q, ')');
|
|
+
|
|
+ if (indxinfo->options && strlen(indxinfo->options) > 0)
|
|
+ appendPQExpBuffer(q, " WITH (%s)", indxinfo->options);
|
|
+
|
|
+ if (coninfo->condeferrable)
|
|
+ {
|
|
+ appendPQExpBufferStr(q, " DEFERRABLE");
|
|
+ if (coninfo->condeferred)
|
|
+ appendPQExpBufferStr(q, " INITIALLY DEFERRED");
|
|
+ }
|
|
+
|
|
+ appendPQExpBufferStr(q, ";\n");
|
|
+ }
|
|
+
|
|
+ /* If the index is clustered, we need to record that. */
|
|
+ if (indxinfo->indisclustered)
|
|
+ {
|
|
+ appendPQExpBuffer(q, "\nALTER TABLE %s CLUSTER",
|
|
+ fmtId(tbinfo->dobj.name));
|
|
+ appendPQExpBuffer(q, " ON %s;\n",
|
|
+ fmtId(indxinfo->dobj.name));
|
|
+ }
|
|
+
|
|
+ /*
|
|
+ * DROP must be fully qualified in case same name appears in
|
|
+ * pg_catalog
|
|
+ */
|
|
+ appendPQExpBuffer(delq, "ALTER TABLE ONLY %s.",
|
|
+ fmtId(tbinfo->dobj.namespace->dobj.name));
|
|
+ appendPQExpBuffer(delq, "%s ",
|
|
+ fmtId(tbinfo->dobj.name));
|
|
+ appendPQExpBuffer(delq, "DROP CONSTRAINT %s;\n",
|
|
+ fmtId(coninfo->dobj.name));
|
|
+
|
|
+ ArchiveEntry(fout, coninfo->dobj.catId, coninfo->dobj.dumpId,
|
|
+ coninfo->dobj.name,
|
|
+ tbinfo->dobj.namespace->dobj.name,
|
|
+ indxinfo->tablespace,
|
|
+ tbinfo->rolname, false,
|
|
+ "CONSTRAINT", SECTION_POST_DATA,
|
|
+ q->data, delq->data, NULL,
|
|
+ NULL, 0,
|
|
+ NULL, NULL);
|
|
+ }
|
|
+ else if (coninfo->contype == 'f')
|
|
+ {
|
|
+ /*
|
|
+ * XXX Potentially wrap in a 'SET CONSTRAINTS OFF' block so that the
|
|
+ * current table data is not processed
|
|
+ */
|
|
+ appendPQExpBuffer(q, "ALTER TABLE ONLY %s\n",
|
|
+ fmtId(tbinfo->dobj.name));
|
|
+ appendPQExpBuffer(q, " ADD CONSTRAINT %s %s;\n",
|
|
+ fmtId(coninfo->dobj.name),
|
|
+ coninfo->condef);
|
|
+
|
|
+ /*
|
|
+ * DROP must be fully qualified in case same name appears in
|
|
+ * pg_catalog
|
|
+ */
|
|
+ appendPQExpBuffer(delq, "ALTER TABLE ONLY %s.",
|
|
+ fmtId(tbinfo->dobj.namespace->dobj.name));
|
|
+ appendPQExpBuffer(delq, "%s ",
|
|
+ fmtId(tbinfo->dobj.name));
|
|
+ appendPQExpBuffer(delq, "DROP CONSTRAINT %s;\n",
|
|
+ fmtId(coninfo->dobj.name));
|
|
+
|
|
+ ArchiveEntry(fout, coninfo->dobj.catId, coninfo->dobj.dumpId,
|
|
+ coninfo->dobj.name,
|
|
+ tbinfo->dobj.namespace->dobj.name,
|
|
+ NULL,
|
|
+ tbinfo->rolname, false,
|
|
+ "FK CONSTRAINT", SECTION_POST_DATA,
|
|
+ q->data, delq->data, NULL,
|
|
+ NULL, 0,
|
|
+ NULL, NULL);
|
|
+ }
|
|
+ else if (coninfo->contype == 'c' && tbinfo)
|
|
+ {
|
|
+ /* CHECK constraint on a table */
|
|
+
|
|
+ /* Ignore if not to be dumped separately, or if it was inherited */
|
|
+ if (coninfo->separate && coninfo->conislocal)
|
|
+ {
|
|
+ /* not ONLY since we want it to propagate to children */
|
|
+ appendPQExpBuffer(q, "ALTER TABLE %s\n",
|
|
+ fmtId(tbinfo->dobj.name));
|
|
+ appendPQExpBuffer(q, " ADD CONSTRAINT %s %s;\n",
|
|
+ fmtId(coninfo->dobj.name),
|
|
+ coninfo->condef);
|
|
+
|
|
+ /*
|
|
+ * DROP must be fully qualified in case same name appears in
|
|
+ * pg_catalog
|
|
+ */
|
|
+ appendPQExpBuffer(delq, "ALTER TABLE %s.",
|
|
+ fmtId(tbinfo->dobj.namespace->dobj.name));
|
|
+ appendPQExpBuffer(delq, "%s ",
|
|
+ fmtId(tbinfo->dobj.name));
|
|
+ appendPQExpBuffer(delq, "DROP CONSTRAINT %s;\n",
|
|
+ fmtId(coninfo->dobj.name));
|
|
+
|
|
+ ArchiveEntry(fout, coninfo->dobj.catId, coninfo->dobj.dumpId,
|
|
+ coninfo->dobj.name,
|
|
+ tbinfo->dobj.namespace->dobj.name,
|
|
+ NULL,
|
|
+ tbinfo->rolname, false,
|
|
+ "CHECK CONSTRAINT", SECTION_POST_DATA,
|
|
+ q->data, delq->data, NULL,
|
|
+ NULL, 0,
|
|
+ NULL, NULL);
|
|
+ }
|
|
+ }
|
|
+ else if (coninfo->contype == 'c' && tbinfo == NULL)
|
|
+ {
|
|
+ /* CHECK constraint on a domain */
|
|
+ TypeInfo *tyinfo = coninfo->condomain;
|
|
+
|
|
+ /* Ignore if not to be dumped separately */
|
|
+ if (coninfo->separate)
|
|
+ {
|
|
+ appendPQExpBuffer(q, "ALTER DOMAIN %s\n",
|
|
+ fmtId(tyinfo->dobj.name));
|
|
+ appendPQExpBuffer(q, " ADD CONSTRAINT %s %s;\n",
|
|
+ fmtId(coninfo->dobj.name),
|
|
+ coninfo->condef);
|
|
+
|
|
+ /*
|
|
+ * DROP must be fully qualified in case same name appears in
|
|
+ * pg_catalog
|
|
+ */
|
|
+ appendPQExpBuffer(delq, "ALTER DOMAIN %s.",
|
|
+ fmtId(tyinfo->dobj.namespace->dobj.name));
|
|
+ appendPQExpBuffer(delq, "%s ",
|
|
+ fmtId(tyinfo->dobj.name));
|
|
+ appendPQExpBuffer(delq, "DROP CONSTRAINT %s;\n",
|
|
+ fmtId(coninfo->dobj.name));
|
|
+
|
|
+ ArchiveEntry(fout, coninfo->dobj.catId, coninfo->dobj.dumpId,
|
|
+ coninfo->dobj.name,
|
|
+ tyinfo->dobj.namespace->dobj.name,
|
|
+ NULL,
|
|
+ tyinfo->rolname, false,
|
|
+ "CHECK CONSTRAINT", SECTION_POST_DATA,
|
|
+ q->data, delq->data, NULL,
|
|
+ NULL, 0,
|
|
+ NULL, NULL);
|
|
+ }
|
|
+ }
|
|
+ else
|
|
+ {
|
|
+ exit_horribly(NULL, "unrecognized constraint type: %c\n",
|
|
+ coninfo->contype);
|
|
+ }
|
|
+
|
|
+ /* Dump Constraint Comments --- only works for table constraints */
|
|
+ if (tbinfo && coninfo->separate)
|
|
+ dumpTableConstraintComment(fout, coninfo);
|
|
+
|
|
+ destroyPQExpBuffer(q);
|
|
+ destroyPQExpBuffer(delq);
|
|
+}
|
|
+
|
|
+/*
|
|
+ * dumpTableConstraintComment --- dump a constraint's comment if any
|
|
+ *
|
|
+ * This is split out because we need the function in two different places
|
|
+ * depending on whether the constraint is dumped as part of CREATE TABLE
|
|
+ * or as a separate ALTER command.
|
|
+ */
|
|
+static void
|
|
+dumpTableConstraintComment(Archive *fout, ConstraintInfo *coninfo)
|
|
+{
|
|
+ TableInfo *tbinfo = coninfo->contable;
|
|
+ PQExpBuffer labelq = createPQExpBuffer();
|
|
+
|
|
+ appendPQExpBuffer(labelq, "CONSTRAINT %s ",
|
|
+ fmtId(coninfo->dobj.name));
|
|
+ appendPQExpBuffer(labelq, "ON %s",
|
|
+ fmtId(tbinfo->dobj.name));
|
|
+ dumpComment(fout, labelq->data,
|
|
+ tbinfo->dobj.namespace->dobj.name,
|
|
+ tbinfo->rolname,
|
|
+ coninfo->dobj.catId, 0,
|
|
+ coninfo->separate ? coninfo->dobj.dumpId : tbinfo->dobj.dumpId);
|
|
+
|
|
+ destroyPQExpBuffer(labelq);
|
|
+}
|
|
+
|
|
+/*
|
|
+ * findLastBuiltInOid -
|
|
+ * find the last built in oid
|
|
+ *
|
|
+ * For 7.1 and 7.2, we do this by retrieving datlastsysoid from the
|
|
+ * pg_database entry for the current database
|
|
+ */
|
|
+static Oid
|
|
+findLastBuiltinOid_V71(Archive *fout, const char *dbname)
|
|
+{
|
|
+ PGresult *res;
|
|
+ Oid last_oid;
|
|
+ PQExpBuffer query = createPQExpBuffer();
|
|
+
|
|
+ resetPQExpBuffer(query);
|
|
+ appendPQExpBufferStr(query, "SELECT datlastsysoid from pg_database where datname = ");
|
|
+ appendStringLiteralAH(query, dbname, fout);
|
|
+
|
|
+ res = ExecuteSqlQueryForSingleRow(fout, query->data);
|
|
+ last_oid = atooid(PQgetvalue(res, 0, PQfnumber(res, "datlastsysoid")));
|
|
+ PQclear(res);
|
|
+ destroyPQExpBuffer(query);
|
|
+ return last_oid;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * findLastBuiltInOid -
|
|
+ * find the last built in oid
|
|
+ *
|
|
+ * For 7.0, we do this by assuming that the last thing that initdb does is to
|
|
+ * create the pg_indexes view. This sucks in general, but seeing that 7.0.x
|
|
+ * initdb won't be changing anymore, it'll do.
|
|
+ */
|
|
+static Oid
|
|
+findLastBuiltinOid_V70(Archive *fout)
|
|
+{
|
|
+ PGresult *res;
|
|
+ int last_oid;
|
|
+
|
|
+ res = ExecuteSqlQueryForSingleRow(fout,
|
|
+ "SELECT oid FROM pg_class WHERE relname = 'pg_indexes'");
|
|
+ last_oid = atooid(PQgetvalue(res, 0, PQfnumber(res, "oid")));
|
|
+ PQclear(res);
|
|
+ return last_oid;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * dumpSequence
|
|
+ * write the declaration (not data) of one user-defined sequence
|
|
+ */
|
|
+static void
|
|
+dumpSequence(Archive *fout, TableInfo *tbinfo)
|
|
+{
|
|
+ PGresult *res;
|
|
+ char *startv,
|
|
+ *incby,
|
|
+ *maxv = NULL,
|
|
+ *minv = NULL,
|
|
+ *cache;
|
|
+ char bufm[100],
|
|
+ bufx[100];
|
|
+ bool cycled;
|
|
+ PQExpBuffer query = createPQExpBuffer();
|
|
+ PQExpBuffer delqry = createPQExpBuffer();
|
|
+ PQExpBuffer labelq = createPQExpBuffer();
|
|
+
|
|
+ /* Make sure we are in proper schema */
|
|
+ selectSourceSchema(fout, tbinfo->dobj.namespace->dobj.name);
|
|
+
|
|
+ snprintf(bufm, sizeof(bufm), INT64_FORMAT, SEQ_MINVALUE);
|
|
+ snprintf(bufx, sizeof(bufx), INT64_FORMAT, SEQ_MAXVALUE);
|
|
+
|
|
+ if (fout->remoteVersion >= 80400)
|
|
+ {
|
|
+ appendPQExpBuffer(query,
|
|
+ "SELECT sequence_name, "
|
|
+ "start_value, increment_by, "
|
|
+ "CASE WHEN increment_by > 0 AND max_value = %s THEN NULL "
|
|
+ " WHEN increment_by < 0 AND max_value = -1 THEN NULL "
|
|
+ " ELSE max_value "
|
|
+ "END AS max_value, "
|
|
+ "CASE WHEN increment_by > 0 AND min_value = 1 THEN NULL "
|
|
+ " WHEN increment_by < 0 AND min_value = %s THEN NULL "
|
|
+ " ELSE min_value "
|
|
+ "END AS min_value, "
|
|
+ "cache_value, is_cycled FROM %s",
|
|
+ bufx, bufm,
|
|
+ fmtId(tbinfo->dobj.name));
|
|
+ }
|
|
+ else
|
|
+ {
|
|
+ appendPQExpBuffer(query,
|
|
+ "SELECT sequence_name, "
|
|
+ "0 AS start_value, increment_by, "
|
|
+ "CASE WHEN increment_by > 0 AND max_value = %s THEN NULL "
|
|
+ " WHEN increment_by < 0 AND max_value = -1 THEN NULL "
|
|
+ " ELSE max_value "
|
|
+ "END AS max_value, "
|
|
+ "CASE WHEN increment_by > 0 AND min_value = 1 THEN NULL "
|
|
+ " WHEN increment_by < 0 AND min_value = %s THEN NULL "
|
|
+ " ELSE min_value "
|
|
+ "END AS min_value, "
|
|
+ "cache_value, is_cycled FROM %s",
|
|
+ bufx, bufm,
|
|
+ fmtId(tbinfo->dobj.name));
|
|
+ }
|
|
+
|
|
+ res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
|
|
+
|
|
+ if (PQntuples(res) != 1)
|
|
+ {
|
|
+ write_msg(NULL, ngettext("query to get data of sequence \"%s\" returned %d row (expected 1)\n",
|
|
+ "query to get data of sequence \"%s\" returned %d rows (expected 1)\n",
|
|
+ PQntuples(res)),
|
|
+ tbinfo->dobj.name, PQntuples(res));
|
|
+ exit_nicely(1);
|
|
+ }
|
|
+
|
|
+ /* Disable this check: it fails if sequence has been renamed */
|
|
+#ifdef NOT_USED
|
|
+ if (strcmp(PQgetvalue(res, 0, 0), tbinfo->dobj.name) != 0)
|
|
+ {
|
|
+ write_msg(NULL, "query to get data of sequence \"%s\" returned name \"%s\"\n",
|
|
+ tbinfo->dobj.name, PQgetvalue(res, 0, 0));
|
|
+ exit_nicely(1);
|
|
+ }
|
|
+#endif
|
|
+
|
|
+ startv = PQgetvalue(res, 0, 1);
|
|
+ incby = PQgetvalue(res, 0, 2);
|
|
+ if (!PQgetisnull(res, 0, 3))
|
|
+ maxv = PQgetvalue(res, 0, 3);
|
|
+ if (!PQgetisnull(res, 0, 4))
|
|
+ minv = PQgetvalue(res, 0, 4);
|
|
+ cache = PQgetvalue(res, 0, 5);
|
|
+ cycled = (strcmp(PQgetvalue(res, 0, 6), "t") == 0);
|
|
+
|
|
+ /*
|
|
+ * DROP must be fully qualified in case same name appears in pg_catalog
|
|
+ */
|
|
+ appendPQExpBuffer(delqry, "DROP SEQUENCE %s.",
|
|
+ fmtId(tbinfo->dobj.namespace->dobj.name));
|
|
+ appendPQExpBuffer(delqry, "%s;\n",
|
|
+ fmtId(tbinfo->dobj.name));
|
|
+
|
|
+ resetPQExpBuffer(query);
|
|
+
|
|
+ if (binary_upgrade)
|
|
+ {
|
|
+ binary_upgrade_set_pg_class_oids(fout, query,
|
|
+ tbinfo->dobj.catId.oid, false);
|
|
+ binary_upgrade_set_type_oids_by_rel_oid(fout, query,
|
|
+ tbinfo->dobj.catId.oid);
|
|
+ }
|
|
+
|
|
+ appendPQExpBuffer(query,
|
|
+ "CREATE SEQUENCE %s\n",
|
|
+ fmtId(tbinfo->dobj.name));
|
|
+
|
|
+ if (fout->remoteVersion >= 80400)
|
|
+ appendPQExpBuffer(query, " START WITH %s\n", startv);
|
|
+
|
|
+ appendPQExpBuffer(query, " INCREMENT BY %s\n", incby);
|
|
+
|
|
+ if (minv)
|
|
+ appendPQExpBuffer(query, " MINVALUE %s\n", minv);
|
|
+ else
|
|
+ appendPQExpBufferStr(query, " NO MINVALUE\n");
|
|
+
|
|
+ if (maxv)
|
|
+ appendPQExpBuffer(query, " MAXVALUE %s\n", maxv);
|
|
+ else
|
|
+ appendPQExpBufferStr(query, " NO MAXVALUE\n");
|
|
+
|
|
+ appendPQExpBuffer(query,
|
|
+ " CACHE %s%s",
|
|
+ cache, (cycled ? "\n CYCLE" : ""));
|
|
+
|
|
+ appendPQExpBufferStr(query, ";\n");
|
|
+
|
|
+ appendPQExpBuffer(labelq, "SEQUENCE %s", fmtId(tbinfo->dobj.name));
|
|
+
|
|
+ /* binary_upgrade: no need to clear TOAST table oid */
|
|
+
|
|
+ if (binary_upgrade)
|
|
+ binary_upgrade_extension_member(query, &tbinfo->dobj,
|
|
+ labelq->data);
|
|
+
|
|
+ ArchiveEntry(fout, tbinfo->dobj.catId, tbinfo->dobj.dumpId,
|
|
+ tbinfo->dobj.name,
|
|
+ tbinfo->dobj.namespace->dobj.name,
|
|
+ NULL,
|
|
+ tbinfo->rolname,
|
|
+ false, "SEQUENCE", SECTION_PRE_DATA,
|
|
+ query->data, delqry->data, NULL,
|
|
+ NULL, 0,
|
|
+ NULL, NULL);
|
|
+
|
|
+ /*
|
|
+ * If the sequence is owned by a table column, emit the ALTER for it as a
|
|
+ * separate TOC entry immediately following the sequence's own entry. It's
|
|
+ * OK to do this rather than using full sorting logic, because the
|
|
+ * dependency that tells us it's owned will have forced the table to be
|
|
+ * created first. We can't just include the ALTER in the TOC entry
|
|
+ * because it will fail if we haven't reassigned the sequence owner to
|
|
+ * match the table's owner.
|
|
+ *
|
|
+ * We need not schema-qualify the table reference because both sequence
|
|
+ * and table must be in the same schema.
|
|
+ */
|
|
+ if (OidIsValid(tbinfo->owning_tab))
|
|
+ {
|
|
+ TableInfo *owning_tab = findTableByOid(tbinfo->owning_tab);
|
|
+
|
|
+ if (owning_tab && owning_tab->dobj.dump)
|
|
+ {
|
|
+ resetPQExpBuffer(query);
|
|
+ appendPQExpBuffer(query, "ALTER SEQUENCE %s",
|
|
+ fmtId(tbinfo->dobj.name));
|
|
+ appendPQExpBuffer(query, " OWNED BY %s",
|
|
+ fmtId(owning_tab->dobj.name));
|
|
+ appendPQExpBuffer(query, ".%s;\n",
|
|
+ fmtId(owning_tab->attnames[tbinfo->owning_col - 1]));
|
|
+
|
|
+ ArchiveEntry(fout, nilCatalogId, createDumpId(),
|
|
+ tbinfo->dobj.name,
|
|
+ tbinfo->dobj.namespace->dobj.name,
|
|
+ NULL,
|
|
+ tbinfo->rolname,
|
|
+ false, "SEQUENCE OWNED BY", SECTION_PRE_DATA,
|
|
+ query->data, "", NULL,
|
|
+ &(tbinfo->dobj.dumpId), 1,
|
|
+ NULL, NULL);
|
|
+ }
|
|
+ }
|
|
+
|
|
+ /* Dump Sequence Comments and Security Labels */
|
|
+ dumpComment(fout, labelq->data,
|
|
+ tbinfo->dobj.namespace->dobj.name, tbinfo->rolname,
|
|
+ tbinfo->dobj.catId, 0, tbinfo->dobj.dumpId);
|
|
+ dumpSecLabel(fout, labelq->data,
|
|
+ tbinfo->dobj.namespace->dobj.name, tbinfo->rolname,
|
|
+ tbinfo->dobj.catId, 0, tbinfo->dobj.dumpId);
|
|
+
|
|
+ PQclear(res);
|
|
+
|
|
+ destroyPQExpBuffer(query);
|
|
+ destroyPQExpBuffer(delqry);
|
|
+ destroyPQExpBuffer(labelq);
|
|
+}
|
|
+
|
|
+/*
|
|
+ * dumpSequenceData
|
|
+ * write the data of one user-defined sequence
|
|
+ */
|
|
+static void
|
|
+dumpSequenceData(Archive *fout, TableDataInfo *tdinfo)
|
|
+{
|
|
+ TableInfo *tbinfo = tdinfo->tdtable;
|
|
+ PGresult *res;
|
|
+ char *last;
|
|
+ bool called;
|
|
+ PQExpBuffer query = createPQExpBuffer();
|
|
+
|
|
+ /* Make sure we are in proper schema */
|
|
+ selectSourceSchema(fout, tbinfo->dobj.namespace->dobj.name);
|
|
+
|
|
+ appendPQExpBuffer(query,
|
|
+ "SELECT last_value, is_called FROM %s",
|
|
+ fmtId(tbinfo->dobj.name));
|
|
+
|
|
+ res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
|
|
+
|
|
+ if (PQntuples(res) != 1)
|
|
+ {
|
|
+ write_msg(NULL, ngettext("query to get data of sequence \"%s\" returned %d row (expected 1)\n",
|
|
+ "query to get data of sequence \"%s\" returned %d rows (expected 1)\n",
|
|
+ PQntuples(res)),
|
|
+ tbinfo->dobj.name, PQntuples(res));
|
|
+ exit_nicely(1);
|
|
+ }
|
|
+
|
|
+ last = PQgetvalue(res, 0, 0);
|
|
+ called = (strcmp(PQgetvalue(res, 0, 1), "t") == 0);
|
|
+
|
|
+ resetPQExpBuffer(query);
|
|
+ appendPQExpBufferStr(query, "SELECT pg_catalog.setval(");
|
|
+ appendStringLiteralAH(query, fmtId(tbinfo->dobj.name), fout);
|
|
+ appendPQExpBuffer(query, ", %s, %s);\n",
|
|
+ last, (called ? "true" : "false"));
|
|
+
|
|
+ ArchiveEntry(fout, nilCatalogId, createDumpId(),
|
|
+ tbinfo->dobj.name,
|
|
+ tbinfo->dobj.namespace->dobj.name,
|
|
+ NULL,
|
|
+ tbinfo->rolname,
|
|
+ false, "SEQUENCE SET", SECTION_DATA,
|
|
+ query->data, "", NULL,
|
|
+ &(tbinfo->dobj.dumpId), 1,
|
|
+ NULL, NULL);
|
|
+
|
|
+ PQclear(res);
|
|
+
|
|
+ destroyPQExpBuffer(query);
|
|
+}
|
|
+
|
|
+/*
|
|
+ * dumpTrigger
|
|
+ * write the declaration of one user-defined table trigger
|
|
+ */
|
|
+static void
|
|
+dumpTrigger(Archive *fout, TriggerInfo *tginfo)
|
|
+{
|
|
+ TableInfo *tbinfo = tginfo->tgtable;
|
|
+ PQExpBuffer query;
|
|
+ PQExpBuffer delqry;
|
|
+ PQExpBuffer labelq;
|
|
+ char *tgargs;
|
|
+ size_t lentgargs;
|
|
+ const char *p;
|
|
+ int findx;
|
|
+
|
|
+ /*
|
|
+ * we needn't check dobj.dump because TriggerInfo wouldn't have been
|
|
+ * created in the first place for non-dumpable triggers
|
|
+ */
|
|
+ if (dataOnly)
|
|
+ return;
|
|
+
|
|
+ query = createPQExpBuffer();
|
|
+ delqry = createPQExpBuffer();
|
|
+ labelq = createPQExpBuffer();
|
|
+
|
|
+ /*
|
|
+ * DROP must be fully qualified in case same name appears in pg_catalog
|
|
+ */
|
|
+ appendPQExpBuffer(delqry, "DROP TRIGGER %s ",
|
|
+ fmtId(tginfo->dobj.name));
|
|
+ appendPQExpBuffer(delqry, "ON %s.",
|
|
+ fmtId(tbinfo->dobj.namespace->dobj.name));
|
|
+ appendPQExpBuffer(delqry, "%s;\n",
|
|
+ fmtId(tbinfo->dobj.name));
|
|
+
|
|
+ if (tginfo->tgdef)
|
|
+ {
|
|
+ appendPQExpBuffer(query, "%s;\n", tginfo->tgdef);
|
|
+ }
|
|
+ else
|
|
+ {
|
|
+ if (tginfo->tgisconstraint)
|
|
+ {
|
|
+ appendPQExpBufferStr(query, "CREATE CONSTRAINT TRIGGER ");
|
|
+ appendPQExpBufferStr(query, fmtId(tginfo->tgconstrname));
|
|
+ }
|
|
+ else
|
|
+ {
|
|
+ appendPQExpBufferStr(query, "CREATE TRIGGER ");
|
|
+ appendPQExpBufferStr(query, fmtId(tginfo->dobj.name));
|
|
+ }
|
|
+ appendPQExpBufferStr(query, "\n ");
|
|
+
|
|
+ /* Trigger type */
|
|
+ if (TRIGGER_FOR_BEFORE(tginfo->tgtype))
|
|
+ appendPQExpBufferStr(query, "BEFORE");
|
|
+ else if (TRIGGER_FOR_AFTER(tginfo->tgtype))
|
|
+ appendPQExpBufferStr(query, "AFTER");
|
|
+ else if (TRIGGER_FOR_INSTEAD(tginfo->tgtype))
|
|
+ appendPQExpBufferStr(query, "INSTEAD OF");
|
|
+ else
|
|
+ {
|
|
+ write_msg(NULL, "unexpected tgtype value: %d\n", tginfo->tgtype);
|
|
+ exit_nicely(1);
|
|
+ }
|
|
+
|
|
+ findx = 0;
|
|
+ if (TRIGGER_FOR_INSERT(tginfo->tgtype))
|
|
+ {
|
|
+ appendPQExpBufferStr(query, " INSERT");
|
|
+ findx++;
|
|
+ }
|
|
+ if (TRIGGER_FOR_DELETE(tginfo->tgtype))
|
|
+ {
|
|
+ if (findx > 0)
|
|
+ appendPQExpBufferStr(query, " OR DELETE");
|
|
+ else
|
|
+ appendPQExpBufferStr(query, " DELETE");
|
|
+ findx++;
|
|
+ }
|
|
+ if (TRIGGER_FOR_UPDATE(tginfo->tgtype))
|
|
+ {
|
|
+ if (findx > 0)
|
|
+ appendPQExpBufferStr(query, " OR UPDATE");
|
|
+ else
|
|
+ appendPQExpBufferStr(query, " UPDATE");
|
|
+ findx++;
|
|
+ }
|
|
+ if (TRIGGER_FOR_TRUNCATE(tginfo->tgtype))
|
|
+ {
|
|
+ if (findx > 0)
|
|
+ appendPQExpBufferStr(query, " OR TRUNCATE");
|
|
+ else
|
|
+ appendPQExpBufferStr(query, " TRUNCATE");
|
|
+ findx++;
|
|
+ }
|
|
+ appendPQExpBuffer(query, " ON %s\n",
|
|
+ fmtId(tbinfo->dobj.name));
|
|
+
|
|
+ if (tginfo->tgisconstraint)
|
|
+ {
|
|
+ if (OidIsValid(tginfo->tgconstrrelid))
|
|
+ {
|
|
+ /* If we are using regclass, name is already quoted */
|
|
+ if (fout->remoteVersion >= 70300)
|
|
+ appendPQExpBuffer(query, " FROM %s\n ",
|
|
+ tginfo->tgconstrrelname);
|
|
+ else
|
|
+ appendPQExpBuffer(query, " FROM %s\n ",
|
|
+ fmtId(tginfo->tgconstrrelname));
|
|
+ }
|
|
+ if (!tginfo->tgdeferrable)
|
|
+ appendPQExpBufferStr(query, "NOT ");
|
|
+ appendPQExpBufferStr(query, "DEFERRABLE INITIALLY ");
|
|
+ if (tginfo->tginitdeferred)
|
|
+ appendPQExpBufferStr(query, "DEFERRED\n");
|
|
+ else
|
|
+ appendPQExpBufferStr(query, "IMMEDIATE\n");
|
|
+ }
|
|
+
|
|
+ if (TRIGGER_FOR_ROW(tginfo->tgtype))
|
|
+ appendPQExpBufferStr(query, " FOR EACH ROW\n ");
|
|
+ else
|
|
+ appendPQExpBufferStr(query, " FOR EACH STATEMENT\n ");
|
|
+
|
|
+ /* In 7.3, result of regproc is already quoted */
|
|
+ if (fout->remoteVersion >= 70300)
|
|
+ appendPQExpBuffer(query, "EXECUTE PROCEDURE %s(",
|
|
+ tginfo->tgfname);
|
|
+ else
|
|
+ appendPQExpBuffer(query, "EXECUTE PROCEDURE %s(",
|
|
+ fmtId(tginfo->tgfname));
|
|
+
|
|
+ tgargs = (char *) PQunescapeBytea((unsigned char *) tginfo->tgargs,
|
|
+ &lentgargs);
|
|
+ p = tgargs;
|
|
+ for (findx = 0; findx < tginfo->tgnargs; findx++)
|
|
+ {
|
|
+ /* find the embedded null that terminates this trigger argument */
|
|
+ size_t tlen = strlen(p);
|
|
+
|
|
+ if (p + tlen >= tgargs + lentgargs)
|
|
+ {
|
|
+ /* hm, not found before end of bytea value... */
|
|
+ write_msg(NULL, "invalid argument string (%s) for trigger \"%s\" on table \"%s\"\n",
|
|
+ tginfo->tgargs,
|
|
+ tginfo->dobj.name,
|
|
+ tbinfo->dobj.name);
|
|
+ exit_nicely(1);
|
|
+ }
|
|
+
|
|
+ if (findx > 0)
|
|
+ appendPQExpBufferStr(query, ", ");
|
|
+ appendStringLiteralAH(query, p, fout);
|
|
+ p += tlen + 1;
|
|
+ }
|
|
+ free(tgargs);
|
|
+ appendPQExpBufferStr(query, ");\n");
|
|
+ }
|
|
+
|
|
+ if (tginfo->tgenabled != 't' && tginfo->tgenabled != 'O')
|
|
+ {
|
|
+ appendPQExpBuffer(query, "\nALTER TABLE %s ",
|
|
+ fmtId(tbinfo->dobj.name));
|
|
+ switch (tginfo->tgenabled)
|
|
+ {
|
|
+ case 'D':
|
|
+ case 'f':
|
|
+ appendPQExpBufferStr(query, "DISABLE");
|
|
+ break;
|
|
+ case 'A':
|
|
+ appendPQExpBufferStr(query, "ENABLE ALWAYS");
|
|
+ break;
|
|
+ case 'R':
|
|
+ appendPQExpBufferStr(query, "ENABLE REPLICA");
|
|
+ break;
|
|
+ default:
|
|
+ appendPQExpBufferStr(query, "ENABLE");
|
|
+ break;
|
|
+ }
|
|
+ appendPQExpBuffer(query, " TRIGGER %s;\n",
|
|
+ fmtId(tginfo->dobj.name));
|
|
+ }
|
|
+
|
|
+ appendPQExpBuffer(labelq, "TRIGGER %s ",
|
|
+ fmtId(tginfo->dobj.name));
|
|
+ appendPQExpBuffer(labelq, "ON %s",
|
|
+ fmtId(tbinfo->dobj.name));
|
|
+
|
|
+ ArchiveEntry(fout, tginfo->dobj.catId, tginfo->dobj.dumpId,
|
|
+ tginfo->dobj.name,
|
|
+ tbinfo->dobj.namespace->dobj.name,
|
|
+ NULL,
|
|
+ tbinfo->rolname, false,
|
|
+ "TRIGGER", SECTION_POST_DATA,
|
|
+ query->data, delqry->data, NULL,
|
|
+ NULL, 0,
|
|
+ NULL, NULL);
|
|
+
|
|
+ dumpComment(fout, labelq->data,
|
|
+ tbinfo->dobj.namespace->dobj.name, tbinfo->rolname,
|
|
+ tginfo->dobj.catId, 0, tginfo->dobj.dumpId);
|
|
+
|
|
+ destroyPQExpBuffer(query);
|
|
+ destroyPQExpBuffer(delqry);
|
|
+ destroyPQExpBuffer(labelq);
|
|
+}
|
|
+
|
|
+/*
|
|
+ * dumpEventTrigger
|
|
+ * write the declaration of one user-defined event trigger
|
|
+ */
|
|
+static void
|
|
+dumpEventTrigger(Archive *fout, EventTriggerInfo *evtinfo)
|
|
+{
|
|
+ PQExpBuffer query;
|
|
+ PQExpBuffer labelq;
|
|
+
|
|
+ /* Skip if not to be dumped */
|
|
+ if (!evtinfo->dobj.dump || dataOnly)
|
|
+ return;
|
|
+
|
|
+ query = createPQExpBuffer();
|
|
+ labelq = createPQExpBuffer();
|
|
+
|
|
+ appendPQExpBufferStr(query, "CREATE EVENT TRIGGER ");
|
|
+ appendPQExpBufferStr(query, fmtId(evtinfo->dobj.name));
|
|
+ appendPQExpBufferStr(query, " ON ");
|
|
+ appendPQExpBufferStr(query, fmtId(evtinfo->evtevent));
|
|
+
|
|
+ if (strcmp("", evtinfo->evttags) != 0)
|
|
+ {
|
|
+ appendPQExpBufferStr(query, "\n WHEN TAG IN (");
|
|
+ appendPQExpBufferStr(query, evtinfo->evttags);
|
|
+ appendPQExpBufferChar(query, ')');
|
|
+ }
|
|
+
|
|
+ appendPQExpBufferStr(query, "\n EXECUTE PROCEDURE ");
|
|
+ appendPQExpBufferStr(query, evtinfo->evtfname);
|
|
+ appendPQExpBufferStr(query, "();\n");
|
|
+
|
|
+ if (evtinfo->evtenabled != 'O')
|
|
+ {
|
|
+ appendPQExpBuffer(query, "\nALTER EVENT TRIGGER %s ",
|
|
+ fmtId(evtinfo->dobj.name));
|
|
+ switch (evtinfo->evtenabled)
|
|
+ {
|
|
+ case 'D':
|
|
+ appendPQExpBufferStr(query, "DISABLE");
|
|
+ break;
|
|
+ case 'A':
|
|
+ appendPQExpBufferStr(query, "ENABLE ALWAYS");
|
|
+ break;
|
|
+ case 'R':
|
|
+ appendPQExpBufferStr(query, "ENABLE REPLICA");
|
|
+ break;
|
|
+ default:
|
|
+ appendPQExpBufferStr(query, "ENABLE");
|
|
+ break;
|
|
+ }
|
|
+ appendPQExpBufferStr(query, ";\n");
|
|
+ }
|
|
+ appendPQExpBuffer(labelq, "EVENT TRIGGER %s",
|
|
+ fmtId(evtinfo->dobj.name));
|
|
+
|
|
+ ArchiveEntry(fout, evtinfo->dobj.catId, evtinfo->dobj.dumpId,
|
|
+ evtinfo->dobj.name, NULL, NULL, evtinfo->evtowner, false,
|
|
+ "EVENT TRIGGER", SECTION_POST_DATA,
|
|
+ query->data, "", NULL, NULL, 0, NULL, NULL);
|
|
+
|
|
+ dumpComment(fout, labelq->data,
|
|
+ NULL, evtinfo->evtowner,
|
|
+ evtinfo->dobj.catId, 0, evtinfo->dobj.dumpId);
|
|
+
|
|
+ destroyPQExpBuffer(query);
|
|
+ destroyPQExpBuffer(labelq);
|
|
+}
|
|
+
|
|
+/*
|
|
+ * dumpRule
|
|
+ * Dump a rule
|
|
+ */
|
|
+static void
|
|
+dumpRule(Archive *fout, RuleInfo *rinfo)
|
|
+{
|
|
+ TableInfo *tbinfo = rinfo->ruletable;
|
|
+ PQExpBuffer query;
|
|
+ PQExpBuffer cmd;
|
|
+ PQExpBuffer delcmd;
|
|
+ PQExpBuffer labelq;
|
|
+ PGresult *res;
|
|
+
|
|
+ /* Skip if not to be dumped */
|
|
+ if (!rinfo->dobj.dump || dataOnly)
|
|
+ return;
|
|
+
|
|
+ /*
|
|
+ * If it is an ON SELECT rule that is created implicitly by CREATE VIEW,
|
|
+ * we do not want to dump it as a separate object.
|
|
+ */
|
|
+ if (!rinfo->separate)
|
|
+ return;
|
|
+
|
|
+ /*
|
|
+ * Make sure we are in proper schema.
|
|
+ */
|
|
+ selectSourceSchema(fout, tbinfo->dobj.namespace->dobj.name);
|
|
+
|
|
+ query = createPQExpBuffer();
|
|
+ cmd = createPQExpBuffer();
|
|
+ delcmd = createPQExpBuffer();
|
|
+ labelq = createPQExpBuffer();
|
|
+
|
|
+ if (fout->remoteVersion >= 70300)
|
|
+ {
|
|
+ appendPQExpBuffer(query,
|
|
+ "SELECT pg_catalog.pg_get_ruledef('%u'::pg_catalog.oid) AS definition",
|
|
+ rinfo->dobj.catId.oid);
|
|
+ }
|
|
+ else
|
|
+ {
|
|
+ /* Rule name was unique before 7.3 ... */
|
|
+ appendPQExpBuffer(query,
|
|
+ "SELECT pg_get_ruledef('%s') AS definition",
|
|
+ rinfo->dobj.name);
|
|
+ }
|
|
+
|
|
+ res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
|
|
+
|
|
+ if (PQntuples(res) != 1)
|
|
+ {
|
|
+ write_msg(NULL, "query to get rule \"%s\" for table \"%s\" failed: wrong number of rows returned\n",
|
|
+ rinfo->dobj.name, tbinfo->dobj.name);
|
|
+ exit_nicely(1);
|
|
+ }
|
|
+
|
|
+ printfPQExpBuffer(cmd, "%s\n", PQgetvalue(res, 0, 0));
|
|
+
|
|
+ /*
|
|
+ * Add the command to alter the rules replication firing semantics if it
|
|
+ * differs from the default.
|
|
+ */
|
|
+ if (rinfo->ev_enabled != 'O')
|
|
+ {
|
|
+ appendPQExpBuffer(cmd, "ALTER TABLE %s ", fmtId(tbinfo->dobj.name));
|
|
+ switch (rinfo->ev_enabled)
|
|
+ {
|
|
+ case 'A':
|
|
+ appendPQExpBuffer(cmd, "ENABLE ALWAYS RULE %s;\n",
|
|
+ fmtId(rinfo->dobj.name));
|
|
+ break;
|
|
+ case 'R':
|
|
+ appendPQExpBuffer(cmd, "ENABLE REPLICA RULE %s;\n",
|
|
+ fmtId(rinfo->dobj.name));
|
|
+ break;
|
|
+ case 'D':
|
|
+ appendPQExpBuffer(cmd, "DISABLE RULE %s;\n",
|
|
+ fmtId(rinfo->dobj.name));
|
|
+ break;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ /*
|
|
+ * Apply view's reloptions when its ON SELECT rule is separate.
|
|
+ */
|
|
+ if (rinfo->reloptions && strlen(rinfo->reloptions) > 0)
|
|
+ {
|
|
+ appendPQExpBuffer(cmd, "ALTER VIEW %s SET (%s);\n",
|
|
+ fmtId(tbinfo->dobj.name),
|
|
+ rinfo->reloptions);
|
|
+ }
|
|
+
|
|
+ /*
|
|
+ * DROP must be fully qualified in case same name appears in pg_catalog
|
|
+ */
|
|
+ appendPQExpBuffer(delcmd, "DROP RULE %s ",
|
|
+ fmtId(rinfo->dobj.name));
|
|
+ appendPQExpBuffer(delcmd, "ON %s.",
|
|
+ fmtId(tbinfo->dobj.namespace->dobj.name));
|
|
+ appendPQExpBuffer(delcmd, "%s;\n",
|
|
+ fmtId(tbinfo->dobj.name));
|
|
+
|
|
+ appendPQExpBuffer(labelq, "RULE %s",
|
|
+ fmtId(rinfo->dobj.name));
|
|
+ appendPQExpBuffer(labelq, " ON %s",
|
|
+ fmtId(tbinfo->dobj.name));
|
|
+
|
|
+ ArchiveEntry(fout, rinfo->dobj.catId, rinfo->dobj.dumpId,
|
|
+ rinfo->dobj.name,
|
|
+ tbinfo->dobj.namespace->dobj.name,
|
|
+ NULL,
|
|
+ tbinfo->rolname, false,
|
|
+ "RULE", SECTION_POST_DATA,
|
|
+ cmd->data, delcmd->data, NULL,
|
|
+ NULL, 0,
|
|
+ NULL, NULL);
|
|
+
|
|
+ /* Dump rule comments */
|
|
+ dumpComment(fout, labelq->data,
|
|
+ tbinfo->dobj.namespace->dobj.name,
|
|
+ tbinfo->rolname,
|
|
+ rinfo->dobj.catId, 0, rinfo->dobj.dumpId);
|
|
+
|
|
+ PQclear(res);
|
|
+
|
|
+ destroyPQExpBuffer(query);
|
|
+ destroyPQExpBuffer(cmd);
|
|
+ destroyPQExpBuffer(delcmd);
|
|
+ destroyPQExpBuffer(labelq);
|
|
+}
|
|
+
|
|
+/*
+ * getExtensionMembership --- obtain extension membership data
+ *
+ * There are three main parts to this process:
+ *
+ * 1. Identify objects which are members of extensions
+ *
+ * Generally speaking, this is to mark them as *not* being dumped, as most
+ * extension objects are created by the single CREATE EXTENSION command.
+ * The one exception is binary upgrades with pg_upgrade will still dump the
+ * non-table objects.
+ *
+ * 2. Identify and create dump records for extension configuration tables.
+ *
+ * Extensions can mark tables as "configuration", which means that the user
+ * is able and expected to modify those tables after the extension has been
+ * loaded. For these tables, we dump out only the data- the structure is
+ * expected to be handled at CREATE EXTENSION time, including any indexes or
+ * foreign keys, which brings us to-
+ *
+ * 3. Record FK dependencies between configuration tables.
+ *
+ * Due to the FKs being created at CREATE EXTENSION time and therefore before
+ * the data is loaded, we have to work out what the best order for reloading
+ * the data is, to avoid FK violations when the tables are restored. This is
+ * not perfect- we can't handle circular dependencies and if any exist they
+ * will cause an invalid dump to be produced (though at least all of the data
+ * is included for a user to manually restore). This is currently documented
+ * but perhaps we can provide a better solution in the future.
+ */
|
|
+void
|
|
+getExtensionMembership(Archive *fout, ExtensionInfo extinfo[],
|
|
+ int numExtensions)
|
|
+{
|
|
+ PQExpBuffer query;
|
|
+ PGresult *res;
|
|
+ int ntups,
|
|
+ i;
|
|
+ int i_classid,
|
|
+ i_objid,
|
|
+ i_refclassid,
|
|
+ i_refobjid,
|
|
+ i_conrelid,
|
|
+ i_confrelid;
|
|
+ DumpableObject *dobj,
|
|
+ *refdobj;
|
|
+
|
|
+ /* Nothing to do if no extensions */
|
|
+ if (numExtensions == 0)
|
|
+ return;
|
|
+
|
|
+ /* Make sure we are in proper schema */
|
|
+ selectSourceSchema(fout, "pg_catalog");
|
|
+
|
|
+ query = createPQExpBuffer();
|
|
+
|
|
+ /* refclassid constraint is redundant but may speed the search */
|
|
+ appendPQExpBufferStr(query, "SELECT "
|
|
+ "classid, objid, refclassid, refobjid "
|
|
+ "FROM pg_depend "
|
|
+ "WHERE refclassid = 'pg_extension'::regclass "
|
|
+ "AND deptype = 'e' "
|
|
+ "ORDER BY 3,4");
|
|
+
|
|
+ res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
|
|
+
|
|
+ ntups = PQntuples(res);
|
|
+
|
|
+ i_classid = PQfnumber(res, "classid");
|
|
+ i_objid = PQfnumber(res, "objid");
|
|
+ i_refclassid = PQfnumber(res, "refclassid");
|
|
+ i_refobjid = PQfnumber(res, "refobjid");
|
|
+
|
|
+ /*
|
|
+ * Since we ordered the SELECT by referenced ID, we can expect that
|
|
+ * multiple entries for the same extension will appear together; this
|
|
+ * saves on searches.
|
|
+ */
|
|
+ refdobj = NULL;
|
|
+
|
|
+ for (i = 0; i < ntups; i++)
|
|
+ {
|
|
+ CatalogId objId;
|
|
+ CatalogId refobjId;
|
|
+
|
|
+ objId.tableoid = atooid(PQgetvalue(res, i, i_classid));
|
|
+ objId.oid = atooid(PQgetvalue(res, i, i_objid));
|
|
+ refobjId.tableoid = atooid(PQgetvalue(res, i, i_refclassid));
|
|
+ refobjId.oid = atooid(PQgetvalue(res, i, i_refobjid));
|
|
+
|
|
+ if (refdobj == NULL ||
|
|
+ refdobj->catId.tableoid != refobjId.tableoid ||
|
|
+ refdobj->catId.oid != refobjId.oid)
|
|
+ refdobj = findObjectByCatalogId(refobjId);
|
|
+
|
|
+ /*
|
|
+ * Failure to find objects mentioned in pg_depend is not unexpected,
|
|
+ * since for example we don't collect info about TOAST tables.
|
|
+ */
|
|
+ if (refdobj == NULL)
|
|
+ {
|
|
+#ifdef NOT_USED
|
|
+ fprintf(stderr, "no referenced object %u %u\n",
|
|
+ refobjId.tableoid, refobjId.oid);
|
|
+#endif
|
|
+ continue;
|
|
+ }
|
|
+
|
|
+ dobj = findObjectByCatalogId(objId);
|
|
+
|
|
+ if (dobj == NULL)
|
|
+ {
|
|
+#ifdef NOT_USED
|
|
+ fprintf(stderr, "no referencing object %u %u\n",
|
|
+ objId.tableoid, objId.oid);
|
|
+#endif
|
|
+ continue;
|
|
+ }
|
|
+
|
|
+ /* Record dependency so that getDependencies needn't repeat this */
|
|
+ addObjectDependency(dobj, refdobj->dumpId);
|
|
+
|
|
+ dobj->ext_member = true;
|
|
+
|
|
+ /*
|
|
+ * Normally, mark the member object as not to be dumped. But in
|
|
+ * binary upgrades, we still dump the members individually, since the
|
|
+ * idea is to exactly reproduce the database contents rather than
|
|
+ * replace the extension contents with something different.
|
|
+ */
|
|
+ if (!binary_upgrade)
|
|
+ dobj->dump = false;
|
|
+ else
|
|
+ dobj->dump = refdobj->dump;
|
|
+ }
|
|
+
|
|
+ PQclear(res);
|
|
+
|
|
+ /*
|
|
+ * Now identify extension configuration tables and create TableDataInfo
|
|
+ * objects for them, ensuring their data will be dumped even though the
|
|
+ * tables themselves won't be.
|
|
+ *
|
|
+ * Note that we create TableDataInfo objects even in schemaOnly mode, ie,
|
|
+ * user data in a configuration table is treated like schema data. This
|
|
+ * seems appropriate since system data in a config table would get
|
|
+ * reloaded by CREATE EXTENSION.
|
|
+ */
|
|
+ for (i = 0; i < numExtensions; i++)
|
|
+ {
|
|
+ ExtensionInfo *curext = &(extinfo[i]);
|
|
+ char *extconfig = curext->extconfig;
|
|
+ char *extcondition = curext->extcondition;
|
|
+ char **extconfigarray = NULL;
|
|
+ char **extconditionarray = NULL;
|
|
+ int nconfigitems;
|
|
+ int nconditionitems;
|
|
+
|
|
+ if (parsePGArray(extconfig, &extconfigarray, &nconfigitems) &&
|
|
+ parsePGArray(extcondition, &extconditionarray, &nconditionitems) &&
|
|
+ nconfigitems == nconditionitems)
|
|
+ {
|
|
+ int j;
|
|
+
|
|
+ for (j = 0; j < nconfigitems; j++)
|
|
+ {
|
|
+ TableInfo *configtbl;
|
|
+ Oid configtbloid = atooid(extconfigarray[j]);
|
|
+ bool dumpobj = curext->dobj.dump;
|
|
+
|
|
+ configtbl = findTableByOid(configtbloid);
|
|
+ if (configtbl == NULL)
|
|
+ continue;
|
|
+
|
|
+ /*
|
|
+ * Tables of not-to-be-dumped extensions shouldn't be dumped
|
|
+ * unless the table or its schema is explicitly included
|
|
+ */
|
|
+ if (!curext->dobj.dump)
|
|
+ {
|
|
+ /* check table explicitly requested */
|
|
+ if (table_include_oids.head != NULL &&
|
|
+ simple_oid_list_member(&table_include_oids,
|
|
+ configtbloid))
|
|
+ dumpobj = true;
|
|
+
|
|
+ /* check table's schema explicitly requested */
|
|
+ if (configtbl->dobj.namespace->dobj.dump)
|
|
+ dumpobj = true;
|
|
+ }
|
|
+
|
|
+ /* check table excluded by an exclusion switch */
|
|
+ if (table_exclude_oids.head != NULL &&
|
|
+ simple_oid_list_member(&table_exclude_oids,
|
|
+ configtbloid))
|
|
+ dumpobj = false;
|
|
+
|
|
+ /* check schema excluded by an exclusion switch */
|
|
+ if (simple_oid_list_member(&schema_exclude_oids,
|
|
+ configtbl->dobj.namespace->dobj.catId.oid))
|
|
+ dumpobj = false;
|
|
+
|
|
+ if (dumpobj)
|
|
+ {
|
|
+ /*
|
|
+ * Note: config tables are dumped without OIDs regardless
|
|
+ * of the --oids setting. This is because row filtering
|
|
+ * conditions aren't compatible with dumping OIDs.
|
|
+ */
|
|
+ makeTableDataInfo(configtbl, false);
|
|
+ if (configtbl->dataObj != NULL)
|
|
+ {
|
|
+ if (strlen(extconditionarray[j]) > 0)
|
|
+ configtbl->dataObj->filtercond = pg_strdup(extconditionarray[j]);
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+ if (extconfigarray)
|
|
+ free(extconfigarray);
|
|
+ if (extconditionarray)
|
|
+ free(extconditionarray);
|
|
+ }
|
|
+
|
|
+ /*
|
|
+ * Now that all the TableInfoData objects have been created for all
|
|
+ * the extensions, check their FK dependencies and register them to
|
|
+ * try and dump the data out in an order which they can be restored
|
|
+ * in.
|
|
+ *
|
|
+ * Note that this is not a problem for user tables as their FKs are
|
|
+ * recreated after the data has been loaded.
|
|
+ */
|
|
+ printfPQExpBuffer(query,
|
|
+ "SELECT conrelid, confrelid "
|
|
+ "FROM pg_constraint "
|
|
+ "JOIN pg_depend ON (objid = confrelid) "
|
|
+ "WHERE contype = 'f' "
|
|
+ "AND refclassid = 'pg_extension'::regclass "
|
|
+ "AND classid = 'pg_class'::regclass;");
|
|
+
|
|
+ res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
|
|
+ ntups = PQntuples(res);
|
|
+
|
|
+ i_conrelid = PQfnumber(res, "conrelid");
|
|
+ i_confrelid = PQfnumber(res, "confrelid");
|
|
+
|
|
+ /* Now get the dependencies and register them */
|
|
+ for (i = 0; i < ntups; i++)
|
|
+ {
|
|
+ Oid conrelid, confrelid;
|
|
+ TableInfo *reftable, *contable;
|
|
+
|
|
+ conrelid = atooid(PQgetvalue(res, i, i_conrelid));
|
|
+ confrelid = atooid(PQgetvalue(res, i, i_confrelid));
|
|
+ contable = findTableByOid(conrelid);
|
|
+ reftable = findTableByOid(confrelid);
|
|
+
|
|
+ if (reftable == NULL ||
|
|
+ reftable->dataObj == NULL ||
|
|
+ contable == NULL ||
|
|
+ contable->dataObj == NULL)
|
|
+ continue;
|
|
+
|
|
+ /*
|
|
+ * Make referencing TABLE_DATA object depend on the
|
|
+ * referenced table's TABLE_DATA object.
|
|
+ */
|
|
+ addObjectDependency(&contable->dataObj->dobj,
|
|
+ reftable->dataObj->dobj.dumpId);
|
|
+ }
|
|
+ PQclear(res);
|
|
+ destroyPQExpBuffer(query);
|
|
+}
|
|
+
|
|
+/*
|
|
+ * getDependencies --- obtain available dependency data
|
|
+ */
|
|
+static void
|
|
+getDependencies(Archive *fout)
|
|
+{
|
|
+ PQExpBuffer query;
|
|
+ PGresult *res;
|
|
+ int ntups,
|
|
+ i;
|
|
+ int i_classid,
|
|
+ i_objid,
|
|
+ i_refclassid,
|
|
+ i_refobjid,
|
|
+ i_deptype;
|
|
+ DumpableObject *dobj,
|
|
+ *refdobj;
|
|
+
|
|
+ /* No dependency info available before 7.3 */
|
|
+ if (fout->remoteVersion < 70300)
|
|
+ return;
|
|
+
|
|
+ if (g_verbose)
|
|
+ write_msg(NULL, "reading dependency data\n");
|
|
+
|
|
+ /* Make sure we are in proper schema */
|
|
+ selectSourceSchema(fout, "pg_catalog");
|
|
+
|
|
+ query = createPQExpBuffer();
|
|
+
|
|
+ /*
|
|
+ * PIN dependencies aren't interesting, and EXTENSION dependencies were
|
|
+ * already processed by getExtensionMembership.
|
|
+ */
|
|
+ appendPQExpBufferStr(query, "SELECT "
|
|
+ "classid, objid, refclassid, refobjid, deptype "
|
|
+ "FROM pg_depend "
|
|
+ "WHERE deptype != 'p' AND deptype != 'e' "
|
|
+ "ORDER BY 1,2");
|
|
+
|
|
+ res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
|
|
+
|
|
+ ntups = PQntuples(res);
|
|
+
|
|
+ i_classid = PQfnumber(res, "classid");
|
|
+ i_objid = PQfnumber(res, "objid");
|
|
+ i_refclassid = PQfnumber(res, "refclassid");
|
|
+ i_refobjid = PQfnumber(res, "refobjid");
|
|
+ i_deptype = PQfnumber(res, "deptype");
|
|
+
|
|
+ /*
|
|
+ * Since we ordered the SELECT by referencing ID, we can expect that
|
|
+ * multiple entries for the same object will appear together; this saves
|
|
+ * on searches.
|
|
+ */
|
|
+ dobj = NULL;
|
|
+
|
|
+ for (i = 0; i < ntups; i++)
|
|
+ {
|
|
+ CatalogId objId;
|
|
+ CatalogId refobjId;
|
|
+ char deptype;
|
|
+
|
|
+ objId.tableoid = atooid(PQgetvalue(res, i, i_classid));
|
|
+ objId.oid = atooid(PQgetvalue(res, i, i_objid));
|
|
+ refobjId.tableoid = atooid(PQgetvalue(res, i, i_refclassid));
|
|
+ refobjId.oid = atooid(PQgetvalue(res, i, i_refobjid));
|
|
+ deptype = *(PQgetvalue(res, i, i_deptype));
|
|
+
|
|
+ if (dobj == NULL ||
|
|
+ dobj->catId.tableoid != objId.tableoid ||
|
|
+ dobj->catId.oid != objId.oid)
|
|
+ dobj = findObjectByCatalogId(objId);
|
|
+
|
|
+ /*
|
|
+ * Failure to find objects mentioned in pg_depend is not unexpected,
|
|
+ * since for example we don't collect info about TOAST tables.
|
|
+ */
|
|
+ if (dobj == NULL)
|
|
+ {
|
|
+#ifdef NOT_USED
|
|
+ fprintf(stderr, "no referencing object %u %u\n",
|
|
+ objId.tableoid, objId.oid);
|
|
+#endif
|
|
+ continue;
|
|
+ }
|
|
+
|
|
+ refdobj = findObjectByCatalogId(refobjId);
|
|
+
|
|
+ if (refdobj == NULL)
|
|
+ {
|
|
+#ifdef NOT_USED
|
|
+ fprintf(stderr, "no referenced object %u %u\n",
|
|
+ refobjId.tableoid, refobjId.oid);
|
|
+#endif
|
|
+ continue;
|
|
+ }
|
|
+
|
|
+ /*
|
|
+ * Ordinarily, table rowtypes have implicit dependencies on their
|
|
+ * tables. However, for a composite type the implicit dependency goes
|
|
+ * the other way in pg_depend; which is the right thing for DROP but
|
|
+ * it doesn't produce the dependency ordering we need. So in that one
|
|
+ * case, we reverse the direction of the dependency.
|
|
+ */
|
|
+ if (deptype == 'i' &&
|
|
+ dobj->objType == DO_TABLE &&
|
|
+ refdobj->objType == DO_TYPE)
|
|
+ addObjectDependency(refdobj, dobj->dumpId);
|
|
+ else
|
|
+ /* normal case */
|
|
+ addObjectDependency(dobj, refdobj->dumpId);
|
|
+ }
|
|
+
|
|
+ PQclear(res);
|
|
+
|
|
+ destroyPQExpBuffer(query);
|
|
+}
|
|
+
|
|
+
|
|
+/*
|
|
+ * createBoundaryObjects - create dummy DumpableObjects to represent
|
|
+ * dump section boundaries.
|
|
+ */
|
|
+static DumpableObject *
|
|
+createBoundaryObjects(void)
|
|
+{
|
|
+ DumpableObject *dobjs;
|
|
+
|
|
+ dobjs = (DumpableObject *) pg_malloc(2 * sizeof(DumpableObject));
|
|
+
|
|
+ dobjs[0].objType = DO_PRE_DATA_BOUNDARY;
|
|
+ dobjs[0].catId = nilCatalogId;
|
|
+ AssignDumpId(dobjs + 0);
|
|
+ dobjs[0].name = pg_strdup("PRE-DATA BOUNDARY");
|
|
+
|
|
+ dobjs[1].objType = DO_POST_DATA_BOUNDARY;
|
|
+ dobjs[1].catId = nilCatalogId;
|
|
+ AssignDumpId(dobjs + 1);
|
|
+ dobjs[1].name = pg_strdup("POST-DATA BOUNDARY");
|
|
+
|
|
+ return dobjs;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * addBoundaryDependencies - add dependencies as needed to enforce the dump
|
|
+ * section boundaries.
|
|
+ */
|
|
+static void
|
|
+addBoundaryDependencies(DumpableObject **dobjs, int numObjs,
|
|
+ DumpableObject *boundaryObjs)
|
|
+{
|
|
+ DumpableObject *preDataBound = boundaryObjs + 0;
|
|
+ DumpableObject *postDataBound = boundaryObjs + 1;
|
|
+ int i;
|
|
+
|
|
+ for (i = 0; i < numObjs; i++)
|
|
+ {
|
|
+ DumpableObject *dobj = dobjs[i];
|
|
+
|
|
+ /*
|
|
+ * The classification of object types here must match the SECTION_xxx
|
|
+ * values assigned during subsequent ArchiveEntry calls!
|
|
+ */
|
|
+ switch (dobj->objType)
|
|
+ {
|
|
+ case DO_NAMESPACE:
|
|
+ case DO_EXTENSION:
|
|
+ case DO_TYPE:
|
|
+ case DO_SHELL_TYPE:
|
|
+ case DO_FUNC:
|
|
+ case DO_AGG:
|
|
+ case DO_OPERATOR:
|
|
+ case DO_OPCLASS:
|
|
+ case DO_OPFAMILY:
|
|
+ case DO_COLLATION:
|
|
+ case DO_CONVERSION:
|
|
+ case DO_TABLE:
|
|
+ case DO_ATTRDEF:
|
|
+ case DO_PROCLANG:
|
|
+ case DO_CAST:
|
|
+ case DO_DUMMY_TYPE:
|
|
+ case DO_TSPARSER:
|
|
+ case DO_TSDICT:
|
|
+ case DO_TSTEMPLATE:
|
|
+ case DO_TSCONFIG:
|
|
+ case DO_FDW:
|
|
+ case DO_FOREIGN_SERVER:
|
|
+ case DO_BLOB:
|
|
+ /* Pre-data objects: must come before the pre-data boundary */
|
|
+ addObjectDependency(preDataBound, dobj->dumpId);
|
|
+ break;
|
|
+ case DO_TABLE_DATA:
|
|
+ case DO_BLOB_DATA:
|
|
+ /* Data objects: must come between the boundaries */
|
|
+ addObjectDependency(dobj, preDataBound->dumpId);
|
|
+ addObjectDependency(postDataBound, dobj->dumpId);
|
|
+ break;
|
|
+ case DO_INDEX:
|
|
+ case DO_REFRESH_MATVIEW:
|
|
+ case DO_TRIGGER:
|
|
+ case DO_EVENT_TRIGGER:
|
|
+ case DO_DEFAULT_ACL:
|
|
+ /* Post-data objects: must come after the post-data boundary */
|
|
+ addObjectDependency(dobj, postDataBound->dumpId);
|
|
+ break;
|
|
+ case DO_RULE:
|
|
+ /* Rules are post-data, but only if dumped separately */
|
|
+ if (((RuleInfo *) dobj)->separate)
|
|
+ addObjectDependency(dobj, postDataBound->dumpId);
|
|
+ break;
|
|
+ case DO_CONSTRAINT:
|
|
+ case DO_FK_CONSTRAINT:
|
|
+ /* Constraints are post-data, but only if dumped separately */
|
|
+ if (((ConstraintInfo *) dobj)->separate)
|
|
+ addObjectDependency(dobj, postDataBound->dumpId);
|
|
+ break;
|
|
+ case DO_PRE_DATA_BOUNDARY:
|
|
+ /* nothing to do */
|
|
+ break;
|
|
+ case DO_POST_DATA_BOUNDARY:
|
|
+ /* must come after the pre-data boundary */
|
|
+ addObjectDependency(dobj, preDataBound->dumpId);
|
|
+ break;
|
|
+ }
|
|
+ }
|
|
+}
|
|
+
|
|
+
|
|
+/*
|
|
+ * BuildArchiveDependencies - create dependency data for archive TOC entries
|
|
+ *
|
|
+ * The raw dependency data obtained by getDependencies() is not terribly
|
|
+ * useful in an archive dump, because in many cases there are dependency
|
|
+ * chains linking through objects that don't appear explicitly in the dump.
|
|
+ * For example, a view will depend on its _RETURN rule while the _RETURN rule
|
|
+ * will depend on other objects --- but the rule will not appear as a separate
|
|
+ * object in the dump. We need to adjust the view's dependencies to include
|
|
+ * whatever the rule depends on that is included in the dump.
|
|
+ *
|
|
+ * Just to make things more complicated, there are also "special" dependencies
|
|
+ * such as the dependency of a TABLE DATA item on its TABLE, which we must
|
|
+ * not rearrange because pg_restore knows that TABLE DATA only depends on
|
|
+ * its table. In these cases we must leave the dependencies strictly as-is
|
|
+ * even if they refer to not-to-be-dumped objects.
|
|
+ *
|
|
+ * To handle this, the convention is that "special" dependencies are created
|
|
+ * during ArchiveEntry calls, and an archive TOC item that has any such
|
|
+ * entries will not be touched here. Otherwise, we recursively search the
|
|
+ * DumpableObject data structures to build the correct dependencies for each
|
|
+ * archive TOC item.
|
|
+ */
|
|
+static void
|
|
+BuildArchiveDependencies(Archive *fout)
|
|
+{
|
|
+ ArchiveHandle *AH = (ArchiveHandle *) fout;
|
|
+ TocEntry *te;
|
|
+
|
|
+ /* Scan all TOC entries in the archive */
|
|
+ for (te = AH->toc->next; te != AH->toc; te = te->next)
|
|
+ {
|
|
+ DumpableObject *dobj;
|
|
+ DumpId *dependencies;
|
|
+ int nDeps;
|
|
+ int allocDeps;
|
|
+
|
|
+ /* No need to process entries that will not be dumped */
|
|
+ if (te->reqs == 0)
|
|
+ continue;
|
|
+ /* Ignore entries that already have "special" dependencies */
|
|
+ if (te->nDeps > 0)
|
|
+ continue;
|
|
+ /* Otherwise, look up the item's original DumpableObject, if any */
|
|
+ dobj = findObjectByDumpId(te->dumpId);
|
|
+ if (dobj == NULL)
|
|
+ continue;
|
|
+ /* No work if it has no dependencies */
|
|
+ if (dobj->nDeps <= 0)
|
|
+ continue;
|
|
+ /* Set up work array */
|
|
+ allocDeps = 64;
|
|
+ dependencies = (DumpId *) pg_malloc(allocDeps * sizeof(DumpId));
|
|
+ nDeps = 0;
|
|
+ /* Recursively find all dumpable dependencies */
|
|
+ findDumpableDependencies(AH, dobj,
|
|
+ &dependencies, &nDeps, &allocDeps);
|
|
+ /* And save 'em ... */
|
|
+ if (nDeps > 0)
|
|
+ {
|
|
+ dependencies = (DumpId *) pg_realloc(dependencies,
|
|
+ nDeps * sizeof(DumpId));
|
|
+ te->dependencies = dependencies;
|
|
+ te->nDeps = nDeps;
|
|
+ }
|
|
+ else
|
|
+ free(dependencies);
|
|
+ }
|
|
+}
|
|
+
|
|
+/* Recursive search subroutine for BuildArchiveDependencies */
|
|
+static void
|
|
+findDumpableDependencies(ArchiveHandle *AH, DumpableObject *dobj,
|
|
+ DumpId **dependencies, int *nDeps, int *allocDeps)
|
|
+{
|
|
+ int i;
|
|
+
|
|
+ /*
|
|
+ * Ignore section boundary objects: if we search through them, we'll
|
|
+ * report lots of bogus dependencies.
|
|
+ */
|
|
+ if (dobj->objType == DO_PRE_DATA_BOUNDARY ||
|
|
+ dobj->objType == DO_POST_DATA_BOUNDARY)
|
|
+ return;
|
|
+
|
|
+ for (i = 0; i < dobj->nDeps; i++)
|
|
+ {
|
|
+ DumpId depid = dobj->dependencies[i];
|
|
+
|
|
+ if (TocIDRequired(AH, depid) != 0)
|
|
+ {
|
|
+ /* Object will be dumped, so just reference it as a dependency */
|
|
+ if (*nDeps >= *allocDeps)
|
|
+ {
|
|
+ *allocDeps *= 2;
|
|
+ *dependencies = (DumpId *) pg_realloc(*dependencies,
|
|
+ *allocDeps * sizeof(DumpId));
|
|
+ }
|
|
+ (*dependencies)[*nDeps] = depid;
|
|
+ (*nDeps)++;
|
|
+ }
|
|
+ else
|
|
+ {
|
|
+ /*
|
|
+ * Object will not be dumped, so recursively consider its deps. We
|
|
+ * rely on the assumption that sortDumpableObjects already broke
|
|
+ * any dependency loops, else we might recurse infinitely.
|
|
+ */
|
|
+ DumpableObject *otherdobj = findObjectByDumpId(depid);
|
|
+
|
|
+ if (otherdobj)
|
|
+ findDumpableDependencies(AH, otherdobj,
|
|
+ dependencies, nDeps, allocDeps);
|
|
+ }
|
|
+ }
|
|
+}
|
|
+
|
|
+
|
|
+/*
|
|
+ * selectSourceSchema - make the specified schema the active search path
|
|
+ * in the source database.
|
|
+ *
|
|
+ * NB: pg_catalog is explicitly searched after the specified schema;
|
|
+ * so user names are only qualified if they are cross-schema references,
|
|
+ * and system names are only qualified if they conflict with a user name
|
|
+ * in the current schema.
|
|
+ *
|
|
+ * Whenever the selected schema is not pg_catalog, be careful to qualify
|
|
+ * references to system catalogs and types in our emitted commands!
|
|
+ *
|
|
+ * This function is called only from selectSourceSchemaOnAH and
|
|
+ * selectSourceSchema.
|
|
+ */
|
|
+static void
|
|
+selectSourceSchema(Archive *fout, const char *schemaName)
|
|
+{
|
|
+ PQExpBuffer query;
|
|
+
|
|
+ /* This is checked by the callers already */
|
|
+ Assert(schemaName != NULL && *schemaName != '\0');
|
|
+
|
|
+ /* Not relevant if fetching from pre-7.3 DB */
|
|
+ if (fout->remoteVersion < 70300)
|
|
+ return;
|
|
+
|
|
+ query = createPQExpBuffer();
|
|
+ appendPQExpBuffer(query, "SET search_path = %s",
|
|
+ fmtId(schemaName));
|
|
+ if (strcmp(schemaName, "pg_catalog") != 0)
|
|
+ appendPQExpBufferStr(query, ", pg_catalog");
|
|
+
|
|
+ ExecuteSqlStatement(fout, query->data);
|
|
+
|
|
+ destroyPQExpBuffer(query);
|
|
+}
|
|
+
|
|
+/*
|
|
+ * getFormattedTypeName - retrieve a nicely-formatted type name for the
|
|
+ * given type name.
|
|
+ *
|
|
+ * NB: in 7.3 and up the result may depend on the currently-selected
|
|
+ * schema; this is why we don't try to cache the names.
|
|
+ */
|
|
+static char *
|
|
+getFormattedTypeName(Archive *fout, Oid oid, OidOptions opts)
|
|
+{
|
|
+ char *result;
|
|
+ PQExpBuffer query;
|
|
+ PGresult *res;
|
|
+
|
|
+ if (oid == 0)
|
|
+ {
|
|
+ if ((opts & zeroAsOpaque) != 0)
|
|
+ return pg_strdup(g_opaque_type);
|
|
+ else if ((opts & zeroAsAny) != 0)
|
|
+ return pg_strdup("'any'");
|
|
+ else if ((opts & zeroAsStar) != 0)
|
|
+ return pg_strdup("*");
|
|
+ else if ((opts & zeroAsNone) != 0)
|
|
+ return pg_strdup("NONE");
|
|
+ }
|
|
+
|
|
+ query = createPQExpBuffer();
|
|
+ if (fout->remoteVersion >= 70300)
|
|
+ {
|
|
+ appendPQExpBuffer(query, "SELECT pg_catalog.format_type('%u'::pg_catalog.oid, NULL)",
|
|
+ oid);
|
|
+ }
|
|
+ else if (fout->remoteVersion >= 70100)
|
|
+ {
|
|
+ appendPQExpBuffer(query, "SELECT format_type('%u'::oid, NULL)",
|
|
+ oid);
|
|
+ }
|
|
+ else
|
|
+ {
|
|
+ appendPQExpBuffer(query, "SELECT typname "
|
|
+ "FROM pg_type "
|
|
+ "WHERE oid = '%u'::oid",
|
|
+ oid);
|
|
+ }
|
|
+
|
|
+ res = ExecuteSqlQueryForSingleRow(fout, query->data);
|
|
+
|
|
+ if (fout->remoteVersion >= 70100)
|
|
+ {
|
|
+ /* already quoted */
|
|
+ result = pg_strdup(PQgetvalue(res, 0, 0));
|
|
+ }
|
|
+ else
|
|
+ {
|
|
+ /* may need to quote it */
|
|
+ result = pg_strdup(fmtId(PQgetvalue(res, 0, 0)));
|
|
+ }
|
|
+
|
|
+ PQclear(res);
|
|
+ destroyPQExpBuffer(query);
|
|
+
|
|
+ return result;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * myFormatType --- local implementation of format_type for use with 7.0.
|
|
+ */
|
|
+static char *
|
|
+myFormatType(const char *typname, int32 typmod)
|
|
+{
|
|
+ char *result;
|
|
+ bool isarray = false;
|
|
+ PQExpBuffer buf = createPQExpBuffer();
|
|
+
|
|
+ /* Handle array types */
|
|
+ if (typname[0] == '_')
|
|
+ {
|
|
+ isarray = true;
|
|
+ typname++;
|
|
+ }
|
|
+
|
|
+ /* Show lengths on bpchar and varchar */
|
|
+ if (strcmp(typname, "bpchar") == 0)
|
|
+ {
|
|
+ int len = (typmod - VARHDRSZ);
|
|
+
|
|
+ appendPQExpBufferStr(buf, "character");
|
|
+ if (len > 1)
|
|
+ appendPQExpBuffer(buf, "(%d)",
|
|
+ typmod - VARHDRSZ);
|
|
+ }
|
|
+ else if (strcmp(typname, "varchar") == 0)
|
|
+ {
|
|
+ appendPQExpBufferStr(buf, "character varying");
|
|
+ if (typmod != -1)
|
|
+ appendPQExpBuffer(buf, "(%d)",
|
|
+ typmod - VARHDRSZ);
|
|
+ }
|
|
+ else if (strcmp(typname, "numeric") == 0)
|
|
+ {
|
|
+ appendPQExpBufferStr(buf, "numeric");
|
|
+ if (typmod != -1)
|
|
+ {
|
|
+ int32 tmp_typmod;
|
|
+ int precision;
|
|
+ int scale;
|
|
+
|
|
+ tmp_typmod = typmod - VARHDRSZ;
|
|
+ precision = (tmp_typmod >> 16) & 0xffff;
|
|
+ scale = tmp_typmod & 0xffff;
|
|
+ appendPQExpBuffer(buf, "(%d,%d)",
|
|
+ precision, scale);
|
|
+ }
|
|
+ }
|
|
+
|
|
+ /*
|
|
+ * char is an internal single-byte data type; Let's make sure we force it
|
|
+ * through with quotes. - thomas 1998-12-13
|
|
+ */
|
|
+ else if (strcmp(typname, "char") == 0)
|
|
+ appendPQExpBufferStr(buf, "\"char\"");
|
|
+ else
|
|
+ appendPQExpBufferStr(buf, fmtId(typname));
|
|
+
|
|
+ /* Append array qualifier for array types */
|
|
+ if (isarray)
|
|
+ appendPQExpBufferStr(buf, "[]");
|
|
+
|
|
+ result = pg_strdup(buf->data);
|
|
+ destroyPQExpBuffer(buf);
|
|
+
|
|
+ return result;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Return a column list clause for the given relation.
|
|
+ *
|
|
+ * Special case: if there are no undropped columns in the relation, return
|
|
+ * "", not an invalid "()" column list.
|
|
+ */
|
|
+static const char *
|
|
+fmtCopyColumnList(const TableInfo *ti, PQExpBuffer buffer)
|
|
+{
|
|
+ int numatts = ti->numatts;
|
|
+ char **attnames = ti->attnames;
|
|
+ bool *attisdropped = ti->attisdropped;
|
|
+ bool needComma;
|
|
+ int i;
|
|
+
|
|
+ appendPQExpBufferChar(buffer, '(');
|
|
+ needComma = false;
|
|
+ for (i = 0; i < numatts; i++)
|
|
+ {
|
|
+ if (attisdropped[i])
|
|
+ continue;
|
|
+ if (needComma)
|
|
+ appendPQExpBufferStr(buffer, ", ");
|
|
+ appendPQExpBufferStr(buffer, fmtId(attnames[i]));
|
|
+ needComma = true;
|
|
+ }
|
|
+
|
|
+ if (!needComma)
|
|
+ return ""; /* no undropped columns */
|
|
+
|
|
+ appendPQExpBufferChar(buffer, ')');
|
|
+ return buffer->data;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Execute an SQL query and verify that we got exactly one row back.
|
|
+ */
|
|
+static PGresult *
|
|
+ExecuteSqlQueryForSingleRow(Archive *fout, char *query)
|
|
+{
|
|
+ PGresult *res;
|
|
+ int ntups;
|
|
+
|
|
+ res = ExecuteSqlQuery(fout, query, PGRES_TUPLES_OK);
|
|
+
|
|
+ /* Expecting a single result only */
|
|
+ ntups = PQntuples(res);
|
|
+ if (ntups != 1)
|
|
+ exit_horribly(NULL,
|
|
+ ngettext("query returned %d row instead of one: %s\n",
|
|
+ "query returned %d rows instead of one: %s\n",
|
|
+ ntups),
|
|
+ ntups, query);
|
|
+
|
|
+ return res;
|
|
+}
|
|
--- /dev/null
|
|
+++ pglogical-2.2.2/pglogical_dump/pg_dump.h
|
|
@@ -0,0 +1,580 @@
|
|
+/*-------------------------------------------------------------------------
|
|
+ *
|
|
+ * pg_dump.h
|
|
+ * Common header file for the pg_dump utility
|
|
+ *
|
|
+ * Portions Copyright (c) 1996-2014, PostgreSQL Global Development Group
|
|
+ * Portions Copyright (c) 1994, Regents of the University of California
|
|
+ *
|
|
+ * src/bin/pg_dump/pg_dump.h
|
|
+ *
|
|
+ *-------------------------------------------------------------------------
|
|
+ */
|
|
+
|
|
+#ifndef PG_DUMP_H
|
|
+#define PG_DUMP_H
|
|
+
|
|
+#include "postgres_fe.h"
|
|
+
|
|
+/*
|
|
+ * pg_dump uses two different mechanisms for identifying database objects:
|
|
+ *
|
|
+ * CatalogId represents an object by the tableoid and oid of its defining
|
|
+ * entry in the system catalogs. We need this to interpret pg_depend entries,
|
|
+ * for instance.
|
|
+ *
|
|
+ * DumpId is a simple sequential integer counter assigned as dumpable objects
|
|
+ * are identified during a pg_dump run. We use DumpId internally in preference
|
|
+ * to CatalogId for two reasons: it's more compact, and we can assign DumpIds
|
|
+ * to "objects" that don't have a separate CatalogId. For example, it is
|
|
+ * convenient to consider a table, its data, and its ACL as three separate
|
|
+ * dumpable "objects" with distinct DumpIds --- this lets us reason about the
|
|
+ * order in which to dump these things.
|
|
+ */
|
|
+
|
|
+typedef struct
|
|
+{
|
|
+ Oid tableoid;
|
|
+ Oid oid;
|
|
+} CatalogId;
|
|
+
|
|
+typedef int DumpId;
|
|
+
|
|
+/*
|
|
+ * Data structures for simple lists of OIDs and strings. The support for
|
|
+ * these is very primitive compared to the backend's List facilities, but
|
|
+ * it's all we need in pg_dump.
|
|
+ */
|
|
+
|
|
+typedef struct SimpleOidListCell
|
|
+{
|
|
+ struct SimpleOidListCell *next;
|
|
+ Oid val;
|
|
+} SimpleOidListCell;
|
|
+
|
|
+typedef struct SimpleOidList
|
|
+{
|
|
+ SimpleOidListCell *head;
|
|
+ SimpleOidListCell *tail;
|
|
+} SimpleOidList;
|
|
+
|
|
+
|
|
+/*
|
|
+ * The data structures used to store system catalog information. Every
|
|
+ * dumpable object is a subclass of DumpableObject.
|
|
+ *
|
|
+ * NOTE: the structures described here live for the entire pg_dump run;
|
|
+ * and in most cases we make a struct for every object we can find in the
|
|
+ * catalogs, not only those we are actually going to dump. Hence, it's
|
|
+ * best to store a minimal amount of per-object info in these structs,
|
|
+ * and retrieve additional per-object info when and if we dump a specific
|
|
+ * object. In particular, try to avoid retrieving expensive-to-compute
|
|
+ * information until it's known to be needed. We do, however, have to
|
|
+ * store enough info to determine whether an object should be dumped and
|
|
+ * what order to dump in.
|
|
+ */
|
|
+
|
|
+typedef enum
|
|
+{
|
|
+ /* When modifying this enum, update priority tables in pg_dump_sort.c! */
|
|
+ DO_NAMESPACE,
|
|
+ DO_EXTENSION,
|
|
+ DO_TYPE,
|
|
+ DO_SHELL_TYPE,
|
|
+ DO_FUNC,
|
|
+ DO_AGG,
|
|
+ DO_OPERATOR,
|
|
+ DO_OPCLASS,
|
|
+ DO_OPFAMILY,
|
|
+ DO_COLLATION,
|
|
+ DO_CONVERSION,
|
|
+ DO_TABLE,
|
|
+ DO_ATTRDEF,
|
|
+ DO_INDEX,
|
|
+ DO_RULE,
|
|
+ DO_TRIGGER,
|
|
+ DO_CONSTRAINT,
|
|
+ DO_FK_CONSTRAINT, /* see note for ConstraintInfo */
|
|
+ DO_PROCLANG,
|
|
+ DO_CAST,
|
|
+ DO_TABLE_DATA,
|
|
+ DO_DUMMY_TYPE,
|
|
+ DO_TSPARSER,
|
|
+ DO_TSDICT,
|
|
+ DO_TSTEMPLATE,
|
|
+ DO_TSCONFIG,
|
|
+ DO_FDW,
|
|
+ DO_FOREIGN_SERVER,
|
|
+ DO_DEFAULT_ACL,
|
|
+ DO_BLOB,
|
|
+ DO_BLOB_DATA,
|
|
+ DO_PRE_DATA_BOUNDARY,
|
|
+ DO_POST_DATA_BOUNDARY,
|
|
+ DO_EVENT_TRIGGER,
|
|
+ DO_REFRESH_MATVIEW
|
|
+} DumpableObjectType;
|
|
+
|
|
+typedef struct _dumpableObject
|
|
+{
|
|
+ DumpableObjectType objType;
|
|
+ CatalogId catId; /* zero if not a cataloged object */
|
|
+ DumpId dumpId; /* assigned by AssignDumpId() */
|
|
+ char *name; /* object name (should never be NULL) */
|
|
+ struct _namespaceInfo *namespace; /* containing namespace, or NULL */
|
|
+ bool dump; /* true if we want to dump this object */
|
|
+ bool ext_member; /* true if object is member of extension */
|
|
+ DumpId *dependencies; /* dumpIds of objects this one depends on */
|
|
+ int nDeps; /* number of valid dependencies */
|
|
+ int allocDeps; /* allocated size of dependencies[] */
|
|
+} DumpableObject;
|
|
+
|
|
+typedef struct _namespaceInfo
|
|
+{
|
|
+ DumpableObject dobj;
|
|
+ char *rolname; /* name of owner, or empty string */
|
|
+ char *nspacl;
|
|
+} NamespaceInfo;
|
|
+
|
|
+typedef struct _extensionInfo
|
|
+{
|
|
+ DumpableObject dobj;
|
|
+ char *namespace; /* schema containing extension's objects */
|
|
+ bool relocatable;
|
|
+ char *extversion;
|
|
+ char *extconfig; /* info about configuration tables */
|
|
+ char *extcondition;
|
|
+} ExtensionInfo;
|
|
+
|
|
+typedef struct _typeInfo
|
|
+{
|
|
+ DumpableObject dobj;
|
|
+
|
|
+ /*
|
|
+ * Note: dobj.name is the pg_type.typname entry. format_type() might
|
|
+ * produce something different than typname
|
|
+ */
|
|
+ char *rolname; /* name of owner, or empty string */
|
|
+ char *typacl;
|
|
+ Oid typelem;
|
|
+ Oid typrelid;
|
|
+ char typrelkind; /* 'r', 'v', 'c', etc */
|
|
+ char typtype; /* 'b', 'c', etc */
|
|
+ bool isArray; /* true if auto-generated array type */
|
|
+ bool isDefined; /* true if typisdefined */
|
|
+ /* If needed, we'll create a "shell type" entry for it; link that here: */
|
|
+ struct _shellTypeInfo *shellType; /* shell-type entry, or NULL */
|
|
+ /* If it's a domain, we store links to its constraints here: */
|
|
+ int nDomChecks;
|
|
+ struct _constraintInfo *domChecks;
|
|
+} TypeInfo;
|
|
+
|
|
+typedef struct _shellTypeInfo
|
|
+{
|
|
+ DumpableObject dobj;
|
|
+
|
|
+ TypeInfo *baseType; /* back link to associated base type */
|
|
+} ShellTypeInfo;
|
|
+
|
|
+typedef struct _funcInfo
|
|
+{
|
|
+ DumpableObject dobj;
|
|
+ char *rolname; /* name of owner, or empty string */
|
|
+ Oid lang;
|
|
+ int nargs;
|
|
+ Oid *argtypes;
|
|
+ Oid prorettype;
|
|
+ char *proacl;
|
|
+} FuncInfo;
|
|
+
|
|
+/* AggInfo is a superset of FuncInfo */
|
|
+typedef struct _aggInfo
|
|
+{
|
|
+ FuncInfo aggfn;
|
|
+ /* we don't require any other fields at the moment */
|
|
+} AggInfo;
|
|
+
|
|
+typedef struct _oprInfo
|
|
+{
|
|
+ DumpableObject dobj;
|
|
+ char *rolname;
|
|
+ char oprkind;
|
|
+ Oid oprcode;
|
|
+} OprInfo;
|
|
+
|
|
+typedef struct _opclassInfo
|
|
+{
|
|
+ DumpableObject dobj;
|
|
+ char *rolname;
|
|
+} OpclassInfo;
|
|
+
|
|
+typedef struct _opfamilyInfo
|
|
+{
|
|
+ DumpableObject dobj;
|
|
+ char *rolname;
|
|
+} OpfamilyInfo;
|
|
+
|
|
+typedef struct _collInfo
|
|
+{
|
|
+ DumpableObject dobj;
|
|
+ char *rolname;
|
|
+} CollInfo;
|
|
+
|
|
+typedef struct _convInfo
|
|
+{
|
|
+ DumpableObject dobj;
|
|
+ char *rolname;
|
|
+} ConvInfo;
|
|
+
|
|
+typedef struct _tableInfo
|
|
+{
|
|
+ /*
|
|
+ * These fields are collected for every table in the database.
|
|
+ */
|
|
+ DumpableObject dobj;
|
|
+ char *rolname; /* name of owner, or empty string */
|
|
+ char *relacl;
|
|
+ char relkind;
|
|
+ char relpersistence; /* relation persistence */
|
|
+ bool relispopulated; /* relation is populated */
|
|
+ char relreplident; /* replica identifier */
|
|
+ char *reltablespace; /* relation tablespace */
|
|
+ char *reloptions; /* options specified by WITH (...) */
|
|
+ char *checkoption; /* WITH CHECK OPTION */
|
|
+ char *toast_reloptions; /* WITH options for the TOAST table */
|
|
+ bool hasindex; /* does it have any indexes? */
|
|
+ bool hasrules; /* does it have any rules? */
|
|
+ bool hastriggers; /* does it have any triggers? */
|
|
+ bool hasoids; /* does it have OIDs? */
|
|
+ uint32 frozenxid; /* table's relfrozenxid */
|
|
+ uint32 minmxid; /* table's relminmxid */
|
|
+ Oid toast_oid; /* toast table's OID, or 0 if none */
|
|
+ uint32 toast_frozenxid; /* toast table's relfrozenxid, if any */
|
|
+ uint32 toast_minmxid; /* toast table's relminmxid */
|
|
+ int ncheck; /* # of CHECK expressions */
|
|
+ char *reloftype; /* underlying type for typed table */
|
|
+ /* these two are set only if table is a sequence owned by a column: */
|
|
+ Oid owning_tab; /* OID of table owning sequence */
|
|
+ int owning_col; /* attr # of column owning sequence */
|
|
+ int relpages; /* table's size in pages (from pg_class) */
|
|
+
|
|
+ bool interesting; /* true if need to collect more data */
|
|
+ bool postponed_def; /* matview must be postponed into post-data */
|
|
+
|
|
+ /*
|
|
+ * These fields are computed only if we decide the table is interesting
|
|
+ * (it's either a table to dump, or a direct parent of a dumpable table).
|
|
+ */
|
|
+ int numatts; /* number of attributes */
|
|
+ char **attnames; /* the attribute names */
|
|
+ char **atttypnames; /* attribute type names */
|
|
+ int *atttypmod; /* type-specific type modifiers */
|
|
+ int *attstattarget; /* attribute statistics targets */
|
|
+ char *attstorage; /* attribute storage scheme */
|
|
+ char *typstorage; /* type storage scheme */
|
|
+ bool *attisdropped; /* true if attr is dropped; don't dump it */
|
|
+ int *attlen; /* attribute length, used by binary_upgrade */
|
|
+ char *attalign; /* attribute align, used by binary_upgrade */
|
|
+ bool *attislocal; /* true if attr has local definition */
|
|
+ char **attoptions; /* per-attribute options */
|
|
+ Oid *attcollation; /* per-attribute collation selection */
|
|
+ char **attfdwoptions; /* per-attribute fdw options */
|
|
+ bool *notnull; /* NOT NULL constraints on attributes */
|
|
+ bool *inhNotNull; /* true if NOT NULL is inherited */
|
|
+ struct _attrDefInfo **attrdefs; /* DEFAULT expressions */
|
|
+ struct _constraintInfo *checkexprs; /* CHECK constraints */
|
|
+
|
|
+ /*
|
|
+ * Stuff computed only for dumpable tables.
|
|
+ */
|
|
+ int numParents; /* number of (immediate) parent tables */
|
|
+ struct _tableInfo **parents; /* TableInfos of immediate parents */
|
|
+ struct _tableDataInfo *dataObj; /* TableDataInfo, if dumping its data */
|
|
+} TableInfo;
|
|
+
|
|
+typedef struct _attrDefInfo
|
|
+{
|
|
+ DumpableObject dobj; /* note: dobj.name is name of table */
|
|
+ TableInfo *adtable; /* link to table of attribute */
|
|
+ int adnum;
|
|
+ char *adef_expr; /* decompiled DEFAULT expression */
|
|
+ bool separate; /* TRUE if must dump as separate item */
|
|
+} AttrDefInfo;
|
|
+
|
|
+typedef struct _tableDataInfo
|
|
+{
|
|
+ DumpableObject dobj;
|
|
+ TableInfo *tdtable; /* link to table to dump */
|
|
+ bool oids; /* include OIDs in data? */
|
|
+ char *filtercond; /* WHERE condition to limit rows dumped */
|
|
+} TableDataInfo;
|
|
+
|
|
+typedef struct _indxInfo
|
|
+{
|
|
+ DumpableObject dobj;
|
|
+ TableInfo *indextable; /* link to table the index is for */
|
|
+ char *indexdef;
|
|
+ char *tablespace; /* tablespace in which index is stored */
|
|
+ char *options; /* options specified by WITH (...) */
|
|
+ int indnkeys;
|
|
+ Oid *indkeys;
|
|
+ bool indisclustered;
|
|
+ bool indisreplident;
|
|
+ /* if there is an associated constraint object, its dumpId: */
|
|
+ DumpId indexconstraint;
|
|
+ int relpages; /* relpages of the underlying table */
|
|
+} IndxInfo;
|
|
+
|
|
+typedef struct _ruleInfo
|
|
+{
|
|
+ DumpableObject dobj;
|
|
+ TableInfo *ruletable; /* link to table the rule is for */
|
|
+ char ev_type;
|
|
+ bool is_instead;
|
|
+ char ev_enabled;
|
|
+ bool separate; /* TRUE if must dump as separate item */
|
|
+ /* separate is always true for non-ON SELECT rules */
|
|
+ char *reloptions; /* options specified by WITH (...) */
|
|
+ /* reloptions is only set if we need to dump the options with the rule */
|
|
+} RuleInfo;
|
|
+
|
|
+typedef struct _triggerInfo
|
|
+{
|
|
+ DumpableObject dobj;
|
|
+ TableInfo *tgtable; /* link to table the trigger is for */
|
|
+ char *tgfname;
|
|
+ int tgtype;
|
|
+ int tgnargs;
|
|
+ char *tgargs;
|
|
+ bool tgisconstraint;
|
|
+ char *tgconstrname;
|
|
+ Oid tgconstrrelid;
|
|
+ char *tgconstrrelname;
|
|
+ char tgenabled;
|
|
+ bool tgdeferrable;
|
|
+ bool tginitdeferred;
|
|
+ char *tgdef;
|
|
+} TriggerInfo;
|
|
+
|
|
+typedef struct _evttriggerInfo
|
|
+{
|
|
+ DumpableObject dobj;
|
|
+ char *evtname;
|
|
+ char *evtevent;
|
|
+ char *evtowner;
|
|
+ char *evttags;
|
|
+ char *evtfname;
|
|
+ char evtenabled;
|
|
+} EventTriggerInfo;
|
|
+
|
|
+/*
|
|
+ * struct ConstraintInfo is used for all constraint types. However we
|
|
+ * use a different objType for foreign key constraints, to make it easier
|
|
+ * to sort them the way we want.
|
|
+ *
|
|
+ * Note: condeferrable and condeferred are currently only valid for
|
|
+ * unique/primary-key constraints. Otherwise that info is in condef.
|
|
+ */
|
|
+typedef struct _constraintInfo
|
|
+{
|
|
+ DumpableObject dobj;
|
|
+ TableInfo *contable; /* NULL if domain constraint */
|
|
+ TypeInfo *condomain; /* NULL if table constraint */
|
|
+ char contype;
|
|
+ char *condef; /* definition, if CHECK or FOREIGN KEY */
|
|
+ Oid confrelid; /* referenced table, if FOREIGN KEY */
|
|
+ DumpId conindex; /* identifies associated index if any */
|
|
+ bool condeferrable; /* TRUE if constraint is DEFERRABLE */
|
|
+ bool condeferred; /* TRUE if constraint is INITIALLY DEFERRED */
|
|
+ bool conislocal; /* TRUE if constraint has local definition */
|
|
+ bool separate; /* TRUE if must dump as separate item */
|
|
+} ConstraintInfo;
|
|
+
|
|
+typedef struct _procLangInfo
|
|
+{
|
|
+ DumpableObject dobj;
|
|
+ bool lanpltrusted;
|
|
+ Oid lanplcallfoid;
|
|
+ Oid laninline;
|
|
+ Oid lanvalidator;
|
|
+ char *lanacl;
|
|
+ char *lanowner; /* name of owner, or empty string */
|
|
+} ProcLangInfo;
|
|
+
|
|
+typedef struct _castInfo
|
|
+{
|
|
+ DumpableObject dobj;
|
|
+ Oid castsource;
|
|
+ Oid casttarget;
|
|
+ Oid castfunc;
|
|
+ char castcontext;
|
|
+ char castmethod;
|
|
+} CastInfo;
|
|
+
|
|
+/* InhInfo isn't a DumpableObject, just temporary state */
|
|
+typedef struct _inhInfo
|
|
+{
|
|
+ Oid inhrelid; /* OID of a child table */
|
|
+ Oid inhparent; /* OID of its parent */
|
|
+} InhInfo;
|
|
+
|
|
+typedef struct _prsInfo
|
|
+{
|
|
+ DumpableObject dobj;
|
|
+ Oid prsstart;
|
|
+ Oid prstoken;
|
|
+ Oid prsend;
|
|
+ Oid prsheadline;
|
|
+ Oid prslextype;
|
|
+} TSParserInfo;
|
|
+
|
|
+typedef struct _dictInfo
|
|
+{
|
|
+ DumpableObject dobj;
|
|
+ char *rolname;
|
|
+ Oid dicttemplate;
|
|
+ char *dictinitoption;
|
|
+} TSDictInfo;
|
|
+
|
|
+typedef struct _tmplInfo
|
|
+{
|
|
+ DumpableObject dobj;
|
|
+ Oid tmplinit;
|
|
+ Oid tmpllexize;
|
|
+} TSTemplateInfo;
|
|
+
|
|
+typedef struct _cfgInfo
|
|
+{
|
|
+ DumpableObject dobj;
|
|
+ char *rolname;
|
|
+ Oid cfgparser;
|
|
+} TSConfigInfo;
|
|
+
|
|
+typedef struct _fdwInfo
|
|
+{
|
|
+ DumpableObject dobj;
|
|
+ char *rolname;
|
|
+ char *fdwhandler;
|
|
+ char *fdwvalidator;
|
|
+ char *fdwoptions;
|
|
+ char *fdwacl;
|
|
+} FdwInfo;
|
|
+
|
|
+typedef struct _foreignServerInfo
|
|
+{
|
|
+ DumpableObject dobj;
|
|
+ char *rolname;
|
|
+ Oid srvfdw;
|
|
+ char *srvtype;
|
|
+ char *srvversion;
|
|
+ char *srvacl;
|
|
+ char *srvoptions;
|
|
+} ForeignServerInfo;
|
|
+
|
|
+typedef struct _defaultACLInfo
|
|
+{
|
|
+ DumpableObject dobj;
|
|
+ char *defaclrole;
|
|
+ char defaclobjtype;
|
|
+ char *defaclacl;
|
|
+} DefaultACLInfo;
|
|
+
|
|
+typedef struct _blobInfo
|
|
+{
|
|
+ DumpableObject dobj;
|
|
+ char *rolname;
|
|
+ char *blobacl;
|
|
+} BlobInfo;
|
|
+
|
|
+/* global decls */
|
|
+extern bool force_quotes; /* double-quotes for identifiers flag */
|
|
+extern bool g_verbose; /* verbose flag */
|
|
+
|
|
+/* placeholders for comment starting and ending delimiters */
|
|
+extern char g_comment_start[10];
|
|
+extern char g_comment_end[10];
|
|
+
|
|
+extern char g_opaque_type[10]; /* name for the opaque type */
|
|
+
|
|
+/*
|
|
+ * common utility functions
|
|
+ */
|
|
+
|
|
+struct Archive;
|
|
+typedef struct Archive Archive;
|
|
+
|
|
+extern TableInfo *getSchemaData(Archive *, int *numTablesPtr);
|
|
+
|
|
+typedef enum _OidOptions
|
|
+{
|
|
+ zeroAsOpaque = 1,
|
|
+ zeroAsAny = 2,
|
|
+ zeroAsStar = 4,
|
|
+ zeroAsNone = 8
|
|
+} OidOptions;
|
|
+
|
|
+extern void AssignDumpId(DumpableObject *dobj);
|
|
+extern DumpId createDumpId(void);
|
|
+extern DumpId getMaxDumpId(void);
|
|
+extern DumpableObject *findObjectByDumpId(DumpId dumpId);
|
|
+extern DumpableObject *findObjectByCatalogId(CatalogId catalogId);
|
|
+extern void getDumpableObjects(DumpableObject ***objs, int *numObjs);
|
|
+
|
|
+extern void addObjectDependency(DumpableObject *dobj, DumpId refId);
|
|
+extern void removeObjectDependency(DumpableObject *dobj, DumpId refId);
|
|
+
|
|
+extern TableInfo *findTableByOid(Oid oid);
|
|
+extern TypeInfo *findTypeByOid(Oid oid);
|
|
+extern FuncInfo *findFuncByOid(Oid oid);
|
|
+extern OprInfo *findOprByOid(Oid oid);
|
|
+extern CollInfo *findCollationByOid(Oid oid);
|
|
+extern NamespaceInfo *findNamespaceByOid(Oid oid);
|
|
+
|
|
+extern void simple_oid_list_append(SimpleOidList *list, Oid val);
|
|
+extern bool simple_oid_list_member(SimpleOidList *list, Oid val);
|
|
+
|
|
+extern void parseOidArray(const char *str, Oid *array, int arraysize);
|
|
+
|
|
+extern void sortDumpableObjects(DumpableObject **objs, int numObjs,
|
|
+ DumpId preBoundaryId, DumpId postBoundaryId);
|
|
+extern void sortDumpableObjectsByTypeName(DumpableObject **objs, int numObjs);
|
|
+extern void sortDumpableObjectsByTypeOid(DumpableObject **objs, int numObjs);
|
|
+extern void sortDataAndIndexObjectsBySize(DumpableObject **objs, int numObjs);
|
|
+
|
|
+/*
|
|
+ * version specific routines
|
|
+ */
|
|
+extern NamespaceInfo *getNamespaces(Archive *fout, int *numNamespaces);
|
|
+extern ExtensionInfo *getExtensions(Archive *fout, int *numExtensions);
|
|
+extern TypeInfo *getTypes(Archive *fout, int *numTypes);
|
|
+extern FuncInfo *getFuncs(Archive *fout, int *numFuncs);
|
|
+extern AggInfo *getAggregates(Archive *fout, int *numAggregates);
|
|
+extern OprInfo *getOperators(Archive *fout, int *numOperators);
|
|
+extern OpclassInfo *getOpclasses(Archive *fout, int *numOpclasses);
|
|
+extern OpfamilyInfo *getOpfamilies(Archive *fout, int *numOpfamilies);
|
|
+extern CollInfo *getCollations(Archive *fout, int *numCollations);
|
|
+extern ConvInfo *getConversions(Archive *fout, int *numConversions);
|
|
+extern TableInfo *getTables(Archive *fout, int *numTables);
|
|
+extern void getOwnedSeqs(Archive *fout, TableInfo tblinfo[], int numTables);
|
|
+extern InhInfo *getInherits(Archive *fout, int *numInherits);
|
|
+extern void getIndexes(Archive *fout, TableInfo tblinfo[], int numTables);
|
|
+extern void getConstraints(Archive *fout, TableInfo tblinfo[], int numTables);
|
|
+extern RuleInfo *getRules(Archive *fout, int *numRules);
|
|
+extern void getTriggers(Archive *fout, TableInfo tblinfo[], int numTables);
|
|
+extern ProcLangInfo *getProcLangs(Archive *fout, int *numProcLangs);
|
|
+extern CastInfo *getCasts(Archive *fout, int *numCasts);
|
|
+extern void getTableAttrs(Archive *fout, TableInfo *tbinfo, int numTables);
|
|
+extern bool shouldPrintColumn(TableInfo *tbinfo, int colno);
|
|
+extern TSParserInfo *getTSParsers(Archive *fout, int *numTSParsers);
|
|
+extern TSDictInfo *getTSDictionaries(Archive *fout, int *numTSDicts);
|
|
+extern TSTemplateInfo *getTSTemplates(Archive *fout, int *numTSTemplates);
|
|
+extern TSConfigInfo *getTSConfigurations(Archive *fout, int *numTSConfigs);
|
|
+extern FdwInfo *getForeignDataWrappers(Archive *fout,
|
|
+ int *numForeignDataWrappers);
|
|
+extern ForeignServerInfo *getForeignServers(Archive *fout,
|
|
+ int *numForeignServers);
|
|
+extern DefaultACLInfo *getDefaultACLs(Archive *fout, int *numDefaultACLs);
|
|
+extern void getExtensionMembership(Archive *fout, ExtensionInfo extinfo[],
|
|
+ int numExtensions);
|
|
+extern EventTriggerInfo *getEventTriggers(Archive *fout, int *numEventTriggers);
|
|
+
|
|
+#endif /* PG_DUMP_H */
|
|
--- /dev/null
|
|
+++ pglogical-2.2.2/pglogical_dump/pg_dump_sort.c
|
|
@@ -0,0 +1,1470 @@
|
|
+/*-------------------------------------------------------------------------
|
|
+ *
|
|
+ * pg_dump_sort.c
|
|
+ * Sort the items of a dump into a safe order for dumping
|
|
+ *
|
|
+ *
|
|
+ * Portions Copyright (c) 1996-2014, PostgreSQL Global Development Group
|
|
+ * Portions Copyright (c) 1994, Regents of the University of California
|
|
+ *
|
|
+ *
|
|
+ * IDENTIFICATION
|
|
+ * src/bin/pg_dump/pg_dump_sort.c
|
|
+ *
|
|
+ *-------------------------------------------------------------------------
|
|
+ */
|
|
+#include "pg_backup_archiver.h"
|
|
+#include "pg_backup_utils.h"
|
|
+#include "parallel.h"
|
|
+
|
|
+/* translator: this is a module name */
|
|
+static const char *modulename = gettext_noop("sorter");
|
|
+
|
|
+/*
|
|
+ * Sort priority for object types when dumping a pre-7.3 database.
|
|
+ * Objects are sorted by priority levels, and within an equal priority level
|
|
+ * by OID. (This is a relatively crude hack to provide semi-reasonable
|
|
+ * behavior for old databases without full dependency info.) Note: collations,
|
|
+ * extensions, text search, foreign-data, materialized view, event trigger,
|
|
+ * and default ACL objects can't really happen here, so the rather bogus
|
|
+ * priorities for them don't matter.
|
|
+ *
|
|
+ * NOTE: object-type priorities must match the section assignments made in
|
|
+ * pg_dump.c; that is, PRE_DATA objects must sort before DO_PRE_DATA_BOUNDARY,
|
|
+ * POST_DATA objects must sort after DO_POST_DATA_BOUNDARY, and DATA objects
|
|
+ * must sort between them.
|
|
+ */
|
|
+static const int oldObjectTypePriority[] =
|
|
+{
|
|
+ 1, /* DO_NAMESPACE */
|
|
+ 1, /* DO_EXTENSION */
|
|
+ 2, /* DO_TYPE */
|
|
+ 2, /* DO_SHELL_TYPE */
|
|
+ 2, /* DO_FUNC */
|
|
+ 3, /* DO_AGG */
|
|
+ 3, /* DO_OPERATOR */
|
|
+ 4, /* DO_OPCLASS */
|
|
+ 4, /* DO_OPFAMILY */
|
|
+ 4, /* DO_COLLATION */
|
|
+ 5, /* DO_CONVERSION */
|
|
+ 6, /* DO_TABLE */
|
|
+ 8, /* DO_ATTRDEF */
|
|
+ 15, /* DO_INDEX */
|
|
+ 16, /* DO_RULE */
|
|
+ 17, /* DO_TRIGGER */
|
|
+ 14, /* DO_CONSTRAINT */
|
|
+ 18, /* DO_FK_CONSTRAINT */
|
|
+ 2, /* DO_PROCLANG */
|
|
+ 2, /* DO_CAST */
|
|
+ 11, /* DO_TABLE_DATA */
|
|
+ 7, /* DO_DUMMY_TYPE */
|
|
+ 4, /* DO_TSPARSER */
|
|
+ 4, /* DO_TSDICT */
|
|
+ 4, /* DO_TSTEMPLATE */
|
|
+ 4, /* DO_TSCONFIG */
|
|
+ 4, /* DO_FDW */
|
|
+ 4, /* DO_FOREIGN_SERVER */
|
|
+ 19, /* DO_DEFAULT_ACL */
|
|
+ 9, /* DO_BLOB */
|
|
+ 12, /* DO_BLOB_DATA */
|
|
+ 10, /* DO_PRE_DATA_BOUNDARY */
|
|
+ 13, /* DO_POST_DATA_BOUNDARY */
|
|
+ 20, /* DO_EVENT_TRIGGER */
|
|
+ 15 /* DO_REFRESH_MATVIEW */
|
|
+};
|
|
+
|
|
+/*
|
|
+ * Sort priority for object types when dumping newer databases.
|
|
+ * Objects are sorted by type, and within a type by name.
|
|
+ *
|
|
+ * NOTE: object-type priorities must match the section assignments made in
|
|
+ * pg_dump.c; that is, PRE_DATA objects must sort before DO_PRE_DATA_BOUNDARY,
|
|
+ * POST_DATA objects must sort after DO_POST_DATA_BOUNDARY, and DATA objects
|
|
+ * must sort between them.
|
|
+ */
|
|
+static const int newObjectTypePriority[] =
|
|
+{
|
|
+ 1, /* DO_NAMESPACE */
|
|
+ 4, /* DO_EXTENSION */
|
|
+ 5, /* DO_TYPE */
|
|
+ 5, /* DO_SHELL_TYPE */
|
|
+ 6, /* DO_FUNC */
|
|
+ 7, /* DO_AGG */
|
|
+ 8, /* DO_OPERATOR */
|
|
+ 9, /* DO_OPCLASS */
|
|
+ 9, /* DO_OPFAMILY */
|
|
+ 3, /* DO_COLLATION */
|
|
+ 11, /* DO_CONVERSION */
|
|
+ 18, /* DO_TABLE */
|
|
+ 20, /* DO_ATTRDEF */
|
|
+ 27, /* DO_INDEX */
|
|
+ 28, /* DO_RULE */
|
|
+ 29, /* DO_TRIGGER */
|
|
+ 26, /* DO_CONSTRAINT */
|
|
+ 30, /* DO_FK_CONSTRAINT */
|
|
+ 2, /* DO_PROCLANG */
|
|
+ 10, /* DO_CAST */
|
|
+ 23, /* DO_TABLE_DATA */
|
|
+ 19, /* DO_DUMMY_TYPE */
|
|
+ 12, /* DO_TSPARSER */
|
|
+ 14, /* DO_TSDICT */
|
|
+ 13, /* DO_TSTEMPLATE */
|
|
+ 15, /* DO_TSCONFIG */
|
|
+ 16, /* DO_FDW */
|
|
+ 17, /* DO_FOREIGN_SERVER */
|
|
+ 31, /* DO_DEFAULT_ACL */
|
|
+ 21, /* DO_BLOB */
|
|
+ 24, /* DO_BLOB_DATA */
|
|
+ 22, /* DO_PRE_DATA_BOUNDARY */
|
|
+ 25, /* DO_POST_DATA_BOUNDARY */
|
|
+ 32, /* DO_EVENT_TRIGGER */
|
|
+ 33 /* DO_REFRESH_MATVIEW */
|
|
+};
|
|
+
|
|
+static DumpId preDataBoundId;
|
|
+static DumpId postDataBoundId;
|
|
+
|
|
+
|
|
+static int DOTypeNameCompare(const void *p1, const void *p2);
|
|
+static int DOTypeOidCompare(const void *p1, const void *p2);
|
|
+static bool TopoSort(DumpableObject **objs,
|
|
+ int numObjs,
|
|
+ DumpableObject **ordering,
|
|
+ int *nOrdering);
|
|
+static void addHeapElement(int val, int *heap, int heapLength);
|
|
+static int removeHeapElement(int *heap, int heapLength);
|
|
+static void findDependencyLoops(DumpableObject **objs, int nObjs, int totObjs);
|
|
+static int findLoop(DumpableObject *obj,
|
|
+ DumpId startPoint,
|
|
+ bool *processed,
|
|
+ DumpId *searchFailed,
|
|
+ DumpableObject **workspace,
|
|
+ int depth);
|
|
+static void repairDependencyLoop(DumpableObject **loop,
|
|
+ int nLoop);
|
|
+static void describeDumpableObject(DumpableObject *obj,
|
|
+ char *buf, int bufsize);
|
|
+
|
|
+static int DOSizeCompare(const void *p1, const void *p2);
|
|
+
|
|
+static int
|
|
+findFirstEqualType(DumpableObjectType type, DumpableObject **objs, int numObjs)
|
|
+{
|
|
+ int i;
|
|
+
|
|
+ for (i = 0; i < numObjs; i++)
|
|
+ if (objs[i]->objType == type)
|
|
+ return i;
|
|
+ return -1;
|
|
+}
|
|
+
|
|
+static int
|
|
+findFirstDifferentType(DumpableObjectType type, DumpableObject **objs, int numObjs, int start)
|
|
+{
|
|
+ int i;
|
|
+
|
|
+ for (i = start; i < numObjs; i++)
|
|
+ if (objs[i]->objType != type)
|
|
+ return i;
|
|
+ return numObjs - 1;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * When we do a parallel dump, we want to start with the largest items first.
|
|
+ *
|
|
+ * Say we have the objects in this order:
|
|
+ * ....DDDDD....III....
|
|
+ *
|
|
+ * with D = Table data, I = Index, . = other object
|
|
+ *
|
|
+ * This sorting function now takes each of the D or I blocks and sorts them
|
|
+ * according to their size.
|
|
+ */
|
|
+void
|
|
+sortDataAndIndexObjectsBySize(DumpableObject **objs, int numObjs)
|
|
+{
|
|
+ int startIdx,
|
|
+ endIdx;
|
|
+ void *startPtr;
|
|
+
|
|
+ if (numObjs <= 1)
|
|
+ return;
|
|
+
|
|
+ startIdx = findFirstEqualType(DO_TABLE_DATA, objs, numObjs);
|
|
+ if (startIdx >= 0)
|
|
+ {
|
|
+ endIdx = findFirstDifferentType(DO_TABLE_DATA, objs, numObjs, startIdx);
|
|
+ startPtr = objs + startIdx;
|
|
+ qsort(startPtr, endIdx - startIdx, sizeof(DumpableObject *),
|
|
+ DOSizeCompare);
|
|
+ }
|
|
+
|
|
+ startIdx = findFirstEqualType(DO_INDEX, objs, numObjs);
|
|
+ if (startIdx >= 0)
|
|
+ {
|
|
+ endIdx = findFirstDifferentType(DO_INDEX, objs, numObjs, startIdx);
|
|
+ startPtr = objs + startIdx;
|
|
+ qsort(startPtr, endIdx - startIdx, sizeof(DumpableObject *),
|
|
+ DOSizeCompare);
|
|
+ }
|
|
+}
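An illustrative walk-through of the block-wise sort above (editorial note, not part of the upstream patch; the relpages figures are invented):

/*
 * Illustration only (not part of the upstream patch).  Suppose the dump
 * list contains a run of three consecutive DO_TABLE_DATA items whose
 * tables have relpages 10, 500 and 50.  findFirstEqualType() locates the
 * start of that run, findFirstDifferentType() the first entry past it,
 * and the qsort() with DOSizeCompare reorders just that run to
 * 500, 50, 10, so a parallel dump hands out the largest table first.
 * The DO_INDEX run is treated the same way.
 */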
+
|
|
+static int
|
|
+DOSizeCompare(const void *p1, const void *p2)
|
|
+{
|
|
+ DumpableObject *obj1 = *(DumpableObject **) p1;
|
|
+ DumpableObject *obj2 = *(DumpableObject **) p2;
|
|
+ int obj1_size = 0;
|
|
+ int obj2_size = 0;
|
|
+
|
|
+ if (obj1->objType == DO_TABLE_DATA)
|
|
+ obj1_size = ((TableDataInfo *) obj1)->tdtable->relpages;
|
|
+ if (obj1->objType == DO_INDEX)
|
|
+ obj1_size = ((IndxInfo *) obj1)->relpages;
|
|
+
|
|
+ if (obj2->objType == DO_TABLE_DATA)
|
|
+ obj2_size = ((TableDataInfo *) obj2)->tdtable->relpages;
|
|
+ if (obj2->objType == DO_INDEX)
|
|
+ obj2_size = ((IndxInfo *) obj2)->relpages;
|
|
+
|
|
+ /* we want to see the biggest item go first */
|
|
+ if (obj1_size > obj2_size)
|
|
+ return -1;
|
|
+ if (obj2_size > obj1_size)
|
|
+ return 1;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Sort the given objects into a type/name-based ordering
|
|
+ *
|
|
+ * Normally this is just the starting point for the dependency-based
|
|
+ * ordering.
|
|
+ */
|
|
+void
|
|
+sortDumpableObjectsByTypeName(DumpableObject **objs, int numObjs)
|
|
+{
|
|
+ if (numObjs > 1)
|
|
+ qsort((void *) objs, numObjs, sizeof(DumpableObject *),
|
|
+ DOTypeNameCompare);
|
|
+}
|
|
+
|
|
+static int
|
|
+DOTypeNameCompare(const void *p1, const void *p2)
|
|
+{
|
|
+ DumpableObject *obj1 = *(DumpableObject *const *) p1;
|
|
+ DumpableObject *obj2 = *(DumpableObject *const *) p2;
|
|
+ int cmpval;
|
|
+
|
|
+ /* Sort by type */
|
|
+ cmpval = newObjectTypePriority[obj1->objType] -
|
|
+ newObjectTypePriority[obj2->objType];
|
|
+
|
|
+ if (cmpval != 0)
|
|
+ return cmpval;
|
|
+
|
|
+ /*
|
|
+ * Sort by namespace. Note that all objects of the same type should
|
|
+ * either have or not have a namespace link, so we needn't be fancy about
|
|
+ * cases where one link is null and the other not.
|
|
+ */
|
|
+ if (obj1->namespace && obj2->namespace)
|
|
+ {
|
|
+ cmpval = strcmp(obj1->namespace->dobj.name,
|
|
+ obj2->namespace->dobj.name);
|
|
+ if (cmpval != 0)
|
|
+ return cmpval;
|
|
+ }
|
|
+
|
|
+ /* Sort by name */
|
|
+ cmpval = strcmp(obj1->name, obj2->name);
|
|
+ if (cmpval != 0)
|
|
+ return cmpval;
|
|
+
|
|
+ /* To have a stable sort order, break ties for some object types */
|
|
+ if (obj1->objType == DO_FUNC || obj1->objType == DO_AGG)
|
|
+ {
|
|
+ FuncInfo *fobj1 = *(FuncInfo *const *) p1;
|
|
+ FuncInfo *fobj2 = *(FuncInfo *const *) p2;
|
|
+ int i;
|
|
+
|
|
+ cmpval = fobj1->nargs - fobj2->nargs;
|
|
+ if (cmpval != 0)
|
|
+ return cmpval;
|
|
+ for (i = 0; i < fobj1->nargs; i++)
|
|
+ {
|
|
+ TypeInfo *argtype1 = findTypeByOid(fobj1->argtypes[i]);
|
|
+ TypeInfo *argtype2 = findTypeByOid(fobj2->argtypes[i]);
|
|
+
|
|
+ if (argtype1 && argtype2)
|
|
+ {
|
|
+ if (argtype1->dobj.namespace && argtype2->dobj.namespace)
|
|
+ {
|
|
+ cmpval = strcmp(argtype1->dobj.namespace->dobj.name,
|
|
+ argtype2->dobj.namespace->dobj.name);
|
|
+ if (cmpval != 0)
|
|
+ return cmpval;
|
|
+ }
|
|
+ cmpval = strcmp(argtype1->dobj.name, argtype2->dobj.name);
|
|
+ if (cmpval != 0)
|
|
+ return cmpval;
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+ else if (obj1->objType == DO_OPERATOR)
|
|
+ {
|
|
+ OprInfo *oobj1 = *(OprInfo *const *) p1;
|
|
+ OprInfo *oobj2 = *(OprInfo *const *) p2;
|
|
+
|
|
+ /* oprkind is 'l', 'r', or 'b'; this sorts prefix, postfix, infix */
|
|
+ cmpval = (oobj2->oprkind - oobj1->oprkind);
|
|
+ if (cmpval != 0)
|
|
+ return cmpval;
|
|
+ }
|
|
+ else if (obj1->objType == DO_ATTRDEF)
|
|
+ {
|
|
+ AttrDefInfo *adobj1 = *(AttrDefInfo *const *) p1;
|
|
+ AttrDefInfo *adobj2 = *(AttrDefInfo *const *) p2;
|
|
+
|
|
+ cmpval = (adobj1->adnum - adobj2->adnum);
|
|
+ if (cmpval != 0)
|
|
+ return cmpval;
|
|
+ }
|
|
+
|
|
+ /* Usually shouldn't get here, but if we do, sort by OID */
|
|
+ return oidcmp(obj1->catId.oid, obj2->catId.oid);
|
|
+}
|
|
+
|
|
+
|
|
+/*
|
|
+ * Sort the given objects into a type/OID-based ordering
|
|
+ *
|
|
+ * This is used with pre-7.3 source databases as a crude substitute for the
|
|
+ * lack of dependency information.
|
|
+ */
|
|
+void
|
|
+sortDumpableObjectsByTypeOid(DumpableObject **objs, int numObjs)
|
|
+{
|
|
+ if (numObjs > 1)
|
|
+ qsort((void *) objs, numObjs, sizeof(DumpableObject *),
|
|
+ DOTypeOidCompare);
|
|
+}
|
|
+
|
|
+static int
|
|
+DOTypeOidCompare(const void *p1, const void *p2)
|
|
+{
|
|
+ DumpableObject *obj1 = *(DumpableObject *const *) p1;
|
|
+ DumpableObject *obj2 = *(DumpableObject *const *) p2;
|
|
+ int cmpval;
|
|
+
|
|
+ cmpval = oldObjectTypePriority[obj1->objType] -
|
|
+ oldObjectTypePriority[obj2->objType];
|
|
+
|
|
+ if (cmpval != 0)
|
|
+ return cmpval;
|
|
+
|
|
+ return oidcmp(obj1->catId.oid, obj2->catId.oid);
|
|
+}
|
|
+
|
|
+
|
|
+/*
|
|
+ * Sort the given objects into a safe dump order using dependency
|
|
+ * information (to the extent we have it available).
|
|
+ *
|
|
+ * The DumpIds of the PRE_DATA_BOUNDARY and POST_DATA_BOUNDARY objects are
|
|
+ * passed in separately, in case we need them during dependency loop repair.
|
|
+ */
|
|
+void
|
|
+sortDumpableObjects(DumpableObject **objs, int numObjs,
|
|
+ DumpId preBoundaryId, DumpId postBoundaryId)
|
|
+{
|
|
+ DumpableObject **ordering;
|
|
+ int nOrdering;
|
|
+
|
|
+ if (numObjs <= 0) /* can't happen anymore ... */
|
|
+ return;
|
|
+
|
|
+ /*
|
|
+ * Saving the boundary IDs in static variables is a bit grotty, but seems
|
|
+ * better than adding them to parameter lists of subsidiary functions.
|
|
+ */
|
|
+ preDataBoundId = preBoundaryId;
|
|
+ postDataBoundId = postBoundaryId;
|
|
+
|
|
+ ordering = (DumpableObject **) pg_malloc(numObjs * sizeof(DumpableObject *));
|
|
+ while (!TopoSort(objs, numObjs, ordering, &nOrdering))
|
|
+ findDependencyLoops(ordering, nOrdering, numObjs);
|
|
+
|
|
+ memcpy(objs, ordering, numObjs * sizeof(DumpableObject *));
|
|
+
|
|
+ free(ordering);
|
|
+}
|
|
+
|
|
+/*
|
|
+ * TopoSort -- topological sort of a dump list
|
|
+ *
|
|
+ * Generate a re-ordering of the dump list that satisfies all the dependency
|
|
+ * constraints shown in the dump list. (Each such constraint is a fact of a
|
|
+ * partial ordering.) Minimize rearrangement of the list not needed to
|
|
+ * achieve the partial ordering.
|
|
+ *
|
|
+ * The input is the list of numObjs objects in objs[]. This list is not
|
|
+ * modified.
|
|
+ *
|
|
+ * Returns TRUE if able to build an ordering that satisfies all the
|
|
+ * constraints, FALSE if not (there are contradictory constraints).
|
|
+ *
|
|
+ * On success (TRUE result), ordering[] is filled with a sorted array of
|
|
+ * DumpableObject pointers, of length equal to the input list length.
|
|
+ *
|
|
+ * On failure (FALSE result), ordering[] is filled with an unsorted array of
|
|
+ * DumpableObject pointers of length *nOrdering, listing the objects that
|
|
+ * prevented the sort from being completed. In general, these objects either
|
|
+ * participate directly in a dependency cycle, or are depended on by objects
|
|
+ * that are in a cycle. (The latter objects are not actually problematic,
|
|
+ * but it takes further analysis to identify which are which.)
|
|
+ *
|
|
+ * The caller is responsible for allocating sufficient space at *ordering.
|
|
+ */
|
|
+static bool
|
|
+TopoSort(DumpableObject **objs,
|
|
+ int numObjs,
|
|
+ DumpableObject **ordering, /* output argument */
|
|
+ int *nOrdering) /* output argument */
|
|
+{
|
|
+ DumpId maxDumpId = getMaxDumpId();
|
|
+ int *pendingHeap;
|
|
+ int *beforeConstraints;
|
|
+ int *idMap;
|
|
+ DumpableObject *obj;
|
|
+ int heapLength;
|
|
+ int i,
|
|
+ j,
|
|
+ k;
|
|
+
|
|
+ /*
|
|
+ * This is basically the same algorithm shown for topological sorting in
|
|
+ * Knuth's Volume 1. However, we would like to minimize unnecessary
|
|
+ * rearrangement of the input ordering; that is, when we have a choice of
|
|
+ * which item to output next, we always want to take the one highest in
|
|
+ * the original list. Therefore, instead of maintaining an unordered
|
|
+ * linked list of items-ready-to-output as Knuth does, we maintain a heap
|
|
+ * of their item numbers, which we can use as a priority queue. This
|
|
+ * turns the algorithm from O(N) to O(N log N) because each insertion or
|
|
+ * removal of a heap item takes O(log N) time. However, that's still
|
|
+ * plenty fast enough for this application.
|
|
+ */
|
|
+
|
|
+ *nOrdering = numObjs; /* for success return */
|
|
+
|
|
+ /* Eliminate the null case */
|
|
+ if (numObjs <= 0)
|
|
+ return true;
|
|
+
|
|
+ /* Create workspace for the above-described heap */
|
|
+ pendingHeap = (int *) pg_malloc(numObjs * sizeof(int));
|
|
+
|
|
+ /*
|
|
+ * Scan the constraints, and for each item in the input, generate a count
|
|
+ * of the number of constraints that say it must be before something else.
|
|
+ * The count for the item with dumpId j is stored in beforeConstraints[j].
|
|
+ * We also make a map showing the input-order index of the item with
|
|
+ * dumpId j.
|
|
+ */
|
|
+ beforeConstraints = (int *) pg_malloc((maxDumpId + 1) * sizeof(int));
|
|
+ memset(beforeConstraints, 0, (maxDumpId + 1) * sizeof(int));
|
|
+ idMap = (int *) pg_malloc((maxDumpId + 1) * sizeof(int));
|
|
+ for (i = 0; i < numObjs; i++)
|
|
+ {
|
|
+ obj = objs[i];
|
|
+ j = obj->dumpId;
|
|
+ if (j <= 0 || j > maxDumpId)
|
|
+ exit_horribly(modulename, "invalid dumpId %d\n", j);
|
|
+ idMap[j] = i;
|
|
+ for (j = 0; j < obj->nDeps; j++)
|
|
+ {
|
|
+ k = obj->dependencies[j];
|
|
+ if (k <= 0 || k > maxDumpId)
|
|
+ exit_horribly(modulename, "invalid dependency %d\n", k);
|
|
+ beforeConstraints[k]++;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ /*
|
|
+ * Now initialize the heap of items-ready-to-output by filling it with the
|
|
+ * indexes of items that already have beforeConstraints[id] == 0.
|
|
+ *
|
|
+ * The essential property of a heap is heap[(j-1)/2] >= heap[j] for each j
|
|
+ * in the range 1..heapLength-1 (note we are using 0-based subscripts
|
|
+ * here, while the discussion in Knuth assumes 1-based subscripts). So, if
|
|
+ * we simply enter the indexes into pendingHeap[] in decreasing order, we
|
|
+ * a-fortiori have the heap invariant satisfied at completion of this
|
|
+ * loop, and don't need to do any sift-up comparisons.
|
|
+ */
|
|
+ heapLength = 0;
|
|
+ for (i = numObjs; --i >= 0;)
|
|
+ {
|
|
+ if (beforeConstraints[objs[i]->dumpId] == 0)
|
|
+ pendingHeap[heapLength++] = i;
|
|
+ }
|
|
+
|
|
+ /*--------------------
|
|
+ * Now emit objects, working backwards in the output list. At each step,
|
|
+ * we use the priority heap to select the last item that has no remaining
|
|
+ * before-constraints. We remove that item from the heap, output it to
|
|
+ * ordering[], and decrease the beforeConstraints count of each of the
|
|
+ * items it was constrained against. Whenever an item's beforeConstraints
|
|
+ * count is thereby decreased to zero, we insert it into the priority heap
|
|
+ * to show that it is a candidate to output. We are done when the heap
|
|
+ * becomes empty; if we have output every element then we succeeded,
|
|
+ * otherwise we failed.
|
|
+ * i = number of ordering[] entries left to output
|
|
+ * j = objs[] index of item we are outputting
|
|
+ * k = temp for scanning constraint list for item j
|
|
+ *--------------------
|
|
+ */
|
|
+ i = numObjs;
|
|
+ while (heapLength > 0)
|
|
+ {
|
|
+ /* Select object to output by removing largest heap member */
|
|
+ j = removeHeapElement(pendingHeap, heapLength--);
|
|
+ obj = objs[j];
|
|
+ /* Output candidate to ordering[] */
|
|
+ ordering[--i] = obj;
|
|
+ /* Update beforeConstraints counts of its predecessors */
|
|
+ for (k = 0; k < obj->nDeps; k++)
|
|
+ {
|
|
+ int id = obj->dependencies[k];
|
|
+
|
|
+ if ((--beforeConstraints[id]) == 0)
|
|
+ addHeapElement(idMap[id], pendingHeap, heapLength++);
|
|
+ }
|
|
+ }
|
|
+
|
|
+ /*
|
|
+ * If we failed, report the objects that couldn't be output; these are the
|
|
+ * ones with beforeConstraints[] still nonzero.
|
|
+ */
|
|
+ if (i != 0)
|
|
+ {
|
|
+ k = 0;
|
|
+ for (j = 1; j <= maxDumpId; j++)
|
|
+ {
|
|
+ if (beforeConstraints[j] != 0)
|
|
+ ordering[k++] = objs[idMap[j]];
|
|
+ }
|
|
+ *nOrdering = k;
|
|
+ }
|
|
+
|
|
+ /* Done */
|
|
+ free(pendingHeap);
|
|
+ free(beforeConstraints);
|
|
+ free(idMap);
|
|
+
|
|
+ return (i == 0);
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Add an item to a heap (priority queue)
|
|
+ *
|
|
+ * heapLength is the current heap size; caller is responsible for increasing
|
|
+ * its value after the call. There must be sufficient storage at *heap.
|
|
+ */
|
|
+static void
|
|
+addHeapElement(int val, int *heap, int heapLength)
|
|
+{
|
|
+ int j;
|
|
+
|
|
+ /*
|
|
+ * Sift-up the new entry, per Knuth 5.2.3 exercise 16. Note that Knuth is
|
|
+ * using 1-based array indexes, not 0-based.
|
|
+ */
|
|
+ j = heapLength;
|
|
+ while (j > 0)
|
|
+ {
|
|
+ int i = (j - 1) >> 1;
|
|
+
|
|
+ if (val <= heap[i])
|
|
+ break;
|
|
+ heap[j] = heap[i];
|
|
+ j = i;
|
|
+ }
|
|
+ heap[j] = val;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Remove the largest item present in a heap (priority queue)
|
|
+ *
|
|
+ * heapLength is the current heap size; caller is responsible for decreasing
|
|
+ * its value after the call.
|
|
+ *
|
|
+ * We remove and return heap[0], which is always the largest element of
|
|
+ * the heap, and then "sift up" to maintain the heap invariant.
|
|
+ */
|
|
+static int
|
|
+removeHeapElement(int *heap, int heapLength)
|
|
+{
|
|
+ int result = heap[0];
|
|
+ int val;
|
|
+ int i;
|
|
+
|
|
+ if (--heapLength <= 0)
|
|
+ return result;
|
|
+ val = heap[heapLength]; /* value that must be reinserted */
|
|
+ i = 0; /* i is where the "hole" is */
|
|
+ for (;;)
|
|
+ {
|
|
+ int j = 2 * i + 1;
|
|
+
|
|
+ if (j >= heapLength)
|
|
+ break;
|
|
+ if (j + 1 < heapLength &&
|
|
+ heap[j] < heap[j + 1])
|
|
+ j++;
|
|
+ if (val >= heap[j])
|
|
+ break;
|
|
+ heap[i] = heap[j];
|
|
+ i = j;
|
|
+ }
|
|
+ heap[i] = val;
|
|
+ return result;
|
|
+}
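A minimal sketch of how TopoSort drives these two helpers as a max-heap priority queue (illustration only, not part of the upstream patch; the values are invented and the function would have to live in this file, since the helpers are static):

static void
demo_heap_helpers(void)
{
	int			heap[4];
	int			n = 0;
	int			inputs[4] = {3, 7, 1, 5};
	int			i;

	/* the caller grows heapLength after each insertion, as TopoSort does */
	for (i = 0; i < 4; i++)
		addHeapElement(inputs[i], heap, n++);

	/* ... and shrinks it after each removal; this prints "7 5 3 1" */
	while (n > 0)
		printf("%d ", removeHeapElement(heap, n--));
	printf("\n");
}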
+
|
|
+/*
|
|
+ * findDependencyLoops - identify loops in TopoSort's failure output,
|
|
+ * and pass each such loop to repairDependencyLoop() for action
|
|
+ *
|
|
+ * In general there may be many loops in the set of objects returned by
|
|
+ * TopoSort; for speed we should try to repair as many loops as we can
|
|
+ * before trying TopoSort again. We can safely repair loops that are
|
|
+ * disjoint (have no members in common); if we find overlapping loops
|
|
+ * then we repair only the first one found, because the action taken to
|
|
+ * repair the first might have repaired the other as well. (If not,
|
|
+ * we'll fix it on the next go-round.)
|
|
+ *
|
|
+ * objs[] lists the objects TopoSort couldn't sort
|
|
+ * nObjs is the number of such objects
|
|
+ * totObjs is the total number of objects in the universe
|
|
+ */
|
|
+static void
|
|
+findDependencyLoops(DumpableObject **objs, int nObjs, int totObjs)
|
|
+{
|
|
+ /*
|
|
+ * We use three data structures here:
|
|
+ *
|
|
+ * processed[] is a bool array indexed by dump ID, marking the objects
|
|
+ * already processed during this invocation of findDependencyLoops().
|
|
+ *
|
|
+ * searchFailed[] is another array indexed by dump ID. searchFailed[j] is
|
|
+ * set to dump ID k if we have proven that there is no dependency path
|
|
+ * leading from object j back to start point k. This allows us to skip
|
|
+ * useless searching when there are multiple dependency paths from k to j,
|
|
+ * which is a common situation. We could use a simple bool array for
|
|
+ * this, but then we'd need to re-zero it for each start point, resulting
|
|
+ * in O(N^2) zeroing work. Using the start point's dump ID as the "true"
|
|
+ * value lets us skip clearing the array before we consider the next start
|
|
+ * point.
|
|
+ *
|
|
+ * workspace[] is an array of DumpableObject pointers, in which we try to
|
|
+ * build lists of objects constituting loops. We make workspace[] large
|
|
+ * enough to hold all the objects in TopoSort's output, which is huge
|
|
+ * overkill in most cases but could theoretically be necessary if there is
|
|
+ * a single dependency chain linking all the objects.
|
|
+ */
|
|
+ bool *processed;
|
|
+ DumpId *searchFailed;
|
|
+ DumpableObject **workspace;
|
|
+ bool fixedloop;
|
|
+ int i;
|
|
+
|
|
+ processed = (bool *) pg_malloc0((getMaxDumpId() + 1) * sizeof(bool));
|
|
+ searchFailed = (DumpId *) pg_malloc0((getMaxDumpId() + 1) * sizeof(DumpId));
|
|
+ workspace = (DumpableObject **) pg_malloc(totObjs * sizeof(DumpableObject *));
|
|
+ fixedloop = false;
|
|
+
|
|
+ for (i = 0; i < nObjs; i++)
|
|
+ {
|
|
+ DumpableObject *obj = objs[i];
|
|
+ int looplen;
|
|
+ int j;
|
|
+
|
|
+ looplen = findLoop(obj,
|
|
+ obj->dumpId,
|
|
+ processed,
|
|
+ searchFailed,
|
|
+ workspace,
|
|
+ 0);
|
|
+
|
|
+ if (looplen > 0)
|
|
+ {
|
|
+ /* Found a loop, repair it */
|
|
+ repairDependencyLoop(workspace, looplen);
|
|
+ fixedloop = true;
|
|
+ /* Mark loop members as processed */
|
|
+ for (j = 0; j < looplen; j++)
|
|
+ processed[workspace[j]->dumpId] = true;
|
|
+ }
|
|
+ else
|
|
+ {
|
|
+ /*
|
|
+ * There's no loop starting at this object, but mark it processed
|
|
+ * anyway. This is not necessary for correctness, but saves later
|
|
+ * invocations of findLoop() from uselessly chasing references to
|
|
+ * such an object.
|
|
+ */
|
|
+ processed[obj->dumpId] = true;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ /* We'd better have fixed at least one loop */
|
|
+ if (!fixedloop)
|
|
+ exit_horribly(modulename, "could not identify dependency loop\n");
|
|
+
|
|
+ free(workspace);
|
|
+ free(searchFailed);
|
|
+ free(processed);
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Recursively search for a circular dependency loop that doesn't include
|
|
+ * any already-processed objects.
|
|
+ *
|
|
+ * obj: object we are examining now
|
|
+ * startPoint: dumpId of starting object for the hoped-for circular loop
|
|
+ * processed[]: flag array marking already-processed objects
|
|
+ * searchFailed[]: flag array marking already-unsuccessfully-visited objects
|
|
+ * workspace[]: work array in which we are building list of loop members
|
|
+ * depth: number of valid entries in workspace[] at call
|
|
+ *
|
|
+ * On success, the length of the loop is returned, and workspace[] is filled
|
|
+ * with pointers to the members of the loop. On failure, we return 0.
|
|
+ *
|
|
+ * Note: it is possible that the given starting object is a member of more
|
|
+ * than one cycle; if so, we will find an arbitrary one of the cycles.
|
|
+ */
|
|
+static int
|
|
+findLoop(DumpableObject *obj,
|
|
+ DumpId startPoint,
|
|
+ bool *processed,
|
|
+ DumpId *searchFailed,
|
|
+ DumpableObject **workspace,
|
|
+ int depth)
|
|
+{
|
|
+ int i;
|
|
+
|
|
+ /*
|
|
+ * Reject if obj is already processed. This test prevents us from finding
|
|
+ * loops that overlap previously-processed loops.
|
|
+ */
|
|
+ if (processed[obj->dumpId])
|
|
+ return 0;
|
|
+
|
|
+ /*
|
|
+ * If we've already proven there is no path from this object back to the
|
|
+ * startPoint, forget it.
|
|
+ */
|
|
+ if (searchFailed[obj->dumpId] == startPoint)
|
|
+ return 0;
|
|
+
|
|
+ /*
|
|
+ * Reject if obj is already present in workspace. This test prevents us
|
|
+ * from going into infinite recursion if we are given a startPoint object
|
|
+ * that links to a cycle it's not a member of, and it guarantees that we
|
|
+ * can't overflow the allocated size of workspace[].
|
|
+ */
|
|
+ for (i = 0; i < depth; i++)
|
|
+ {
|
|
+ if (workspace[i] == obj)
|
|
+ return 0;
|
|
+ }
|
|
+
|
|
+ /*
|
|
+ * Okay, tentatively add obj to workspace
|
|
+ */
|
|
+ workspace[depth++] = obj;
|
|
+
|
|
+ /*
|
|
+ * See if we've found a loop back to the desired startPoint; if so, done
|
|
+ */
|
|
+ for (i = 0; i < obj->nDeps; i++)
|
|
+ {
|
|
+ if (obj->dependencies[i] == startPoint)
|
|
+ return depth;
|
|
+ }
|
|
+
|
|
+ /*
|
|
+ * Recurse down each outgoing branch
|
|
+ */
|
|
+ for (i = 0; i < obj->nDeps; i++)
|
|
+ {
|
|
+ DumpableObject *nextobj = findObjectByDumpId(obj->dependencies[i]);
|
|
+ int newDepth;
|
|
+
|
|
+ if (!nextobj)
|
|
+ continue; /* ignore dependencies on undumped objects */
|
|
+ newDepth = findLoop(nextobj,
|
|
+ startPoint,
|
|
+ processed,
|
|
+ searchFailed,
|
|
+ workspace,
|
|
+ depth);
|
|
+ if (newDepth > 0)
|
|
+ return newDepth;
|
|
+ }
|
|
+
|
|
+ /*
|
|
+ * Remember there is no path from here back to startPoint
|
|
+ */
|
|
+ searchFailed[obj->dumpId] = startPoint;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * A user-defined datatype will have a dependency loop with each of its
|
|
+ * I/O functions (since those have the datatype as input or output).
|
|
+ * Similarly, a range type will have a loop with its canonicalize function,
|
|
+ * if any. Break the loop by making the function depend on the associated
|
|
+ * shell type, instead.
|
|
+ */
|
|
+static void
|
|
+repairTypeFuncLoop(DumpableObject *typeobj, DumpableObject *funcobj)
|
|
+{
|
|
+ TypeInfo *typeInfo = (TypeInfo *) typeobj;
|
|
+
|
|
+ /* remove function's dependency on type */
|
|
+ removeObjectDependency(funcobj, typeobj->dumpId);
|
|
+
|
|
+ /* add function's dependency on shell type, instead */
|
|
+ if (typeInfo->shellType)
|
|
+ {
|
|
+ addObjectDependency(funcobj, typeInfo->shellType->dobj.dumpId);
|
|
+ /* Mark shell type as to be dumped if any such function is */
|
|
+ if (funcobj->dump)
|
|
+ typeInfo->shellType->dobj.dump = true;
|
|
+ }
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Because we force a view to depend on its ON SELECT rule, while there
|
|
+ * will be an implicit dependency in the other direction, we need to break
|
|
+ * the loop. If there are no other objects in the loop then we can remove
|
|
+ * the implicit dependency and leave the ON SELECT rule non-separate.
|
|
+ * This applies to matviews, as well.
|
|
+ */
|
|
+static void
|
|
+repairViewRuleLoop(DumpableObject *viewobj,
|
|
+ DumpableObject *ruleobj)
|
|
+{
|
|
+ /* remove rule's dependency on view */
|
|
+ removeObjectDependency(ruleobj, viewobj->dumpId);
|
|
+}
|
|
+
|
|
+/*
|
|
+ * However, if there are other objects in the loop, we must break the loop
|
|
+ * by making the ON SELECT rule a separately-dumped object.
|
|
+ *
|
|
+ * Because findLoop() finds shorter cycles before longer ones, it's likely
|
|
+ * that we will have previously fired repairViewRuleLoop() and removed the
|
|
+ * rule's dependency on the view. Put it back to ensure the rule won't be
|
|
+ * emitted before the view.
|
|
+ *
|
|
+ * Note: this approach does *not* work for matviews, at the moment.
|
|
+ */
|
|
+static void
|
|
+repairViewRuleMultiLoop(DumpableObject *viewobj,
|
|
+ DumpableObject *ruleobj)
|
|
+{
|
|
+ TableInfo *viewinfo = (TableInfo *) viewobj;
|
|
+ RuleInfo *ruleinfo = (RuleInfo *) ruleobj;
|
|
+
|
|
+ /* remove view's dependency on rule */
|
|
+ removeObjectDependency(viewobj, ruleobj->dumpId);
|
|
+ /* pretend view is a plain table and dump it that way */
|
|
+ viewinfo->relkind = 'r'; /* RELKIND_RELATION */
|
|
+ /* mark rule as needing its own dump */
|
|
+ ruleinfo->separate = true;
|
|
+ /* move any reloptions from view to rule */
|
|
+ if (viewinfo->reloptions)
|
|
+ {
|
|
+ ruleinfo->reloptions = viewinfo->reloptions;
|
|
+ viewinfo->reloptions = NULL;
|
|
+ }
|
|
+ /* put back rule's dependency on view */
|
|
+ addObjectDependency(ruleobj, viewobj->dumpId);
|
|
+ /* now that rule is separate, it must be post-data */
|
|
+ addObjectDependency(ruleobj, postDataBoundId);
|
|
+}
|
|
+
|
|
+/*
|
|
+ * If a matview is involved in a multi-object loop, we can't currently fix
|
|
+ * that by splitting off the rule. As a stopgap, we try to fix it by
|
|
+ * dropping the constraint that the matview be dumped in the pre-data section.
|
|
+ * This is sufficient to handle cases where a matview depends on some unique
|
|
+ * index, as can happen if it has a GROUP BY for example.
|
|
+ *
|
|
+ * Note that the "next object" is not necessarily the matview itself;
|
|
+ * it could be the matview's rowtype, for example. We may come through here
|
|
+ * several times while removing all the pre-data linkages.
|
|
+ */
|
|
+static void
|
|
+repairMatViewBoundaryMultiLoop(DumpableObject *matviewobj,
|
|
+ DumpableObject *boundaryobj,
|
|
+ DumpableObject *nextobj)
|
|
+{
|
|
+ TableInfo *matviewinfo = (TableInfo *) matviewobj;
|
|
+
|
|
+ /* remove boundary's dependency on object after it in loop */
|
|
+ removeObjectDependency(boundaryobj, nextobj->dumpId);
|
|
+ /* mark matview as postponed into post-data section */
|
|
+ matviewinfo->postponed_def = true;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Because we make tables depend on their CHECK constraints, while there
|
|
+ * will be an automatic dependency in the other direction, we need to break
|
|
+ * the loop. If there are no other objects in the loop then we can remove
|
|
+ * the automatic dependency and leave the CHECK constraint non-separate.
|
|
+ */
|
|
+static void
|
|
+repairTableConstraintLoop(DumpableObject *tableobj,
|
|
+ DumpableObject *constraintobj)
|
|
+{
|
|
+ /* remove constraint's dependency on table */
|
|
+ removeObjectDependency(constraintobj, tableobj->dumpId);
|
|
+}
|
|
+
|
|
+/*
|
|
+ * However, if there are other objects in the loop, we must break the loop
|
|
+ * by making the CHECK constraint a separately-dumped object.
|
|
+ *
|
|
+ * Because findLoop() finds shorter cycles before longer ones, it's likely
|
|
+ * that we will have previously fired repairTableConstraintLoop() and
|
|
+ * removed the constraint's dependency on the table. Put it back to ensure
|
|
+ * the constraint won't be emitted before the table...
|
|
+ */
|
|
+static void
|
|
+repairTableConstraintMultiLoop(DumpableObject *tableobj,
|
|
+ DumpableObject *constraintobj)
|
|
+{
|
|
+ /* remove table's dependency on constraint */
|
|
+ removeObjectDependency(tableobj, constraintobj->dumpId);
|
|
+ /* mark constraint as needing its own dump */
|
|
+ ((ConstraintInfo *) constraintobj)->separate = true;
|
|
+ /* put back constraint's dependency on table */
|
|
+ addObjectDependency(constraintobj, tableobj->dumpId);
|
|
+ /* now that constraint is separate, it must be post-data */
|
|
+ addObjectDependency(constraintobj, postDataBoundId);
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Attribute defaults behave exactly the same as CHECK constraints...
|
|
+ */
|
|
+static void
|
|
+repairTableAttrDefLoop(DumpableObject *tableobj,
|
|
+ DumpableObject *attrdefobj)
|
|
+{
|
|
+ /* remove attrdef's dependency on table */
|
|
+ removeObjectDependency(attrdefobj, tableobj->dumpId);
|
|
+}
|
|
+
|
|
+static void
|
|
+repairTableAttrDefMultiLoop(DumpableObject *tableobj,
|
|
+ DumpableObject *attrdefobj)
|
|
+{
|
|
+ /* remove table's dependency on attrdef */
|
|
+ removeObjectDependency(tableobj, attrdefobj->dumpId);
|
|
+ /* mark attrdef as needing its own dump */
|
|
+ ((AttrDefInfo *) attrdefobj)->separate = true;
|
|
+ /* put back attrdef's dependency on table */
|
|
+ addObjectDependency(attrdefobj, tableobj->dumpId);
|
|
+}
|
|
+
|
|
+/*
|
|
+ * CHECK constraints on domains work just like those on tables ...
|
|
+ */
|
|
+static void
|
|
+repairDomainConstraintLoop(DumpableObject *domainobj,
|
|
+ DumpableObject *constraintobj)
|
|
+{
|
|
+ /* remove constraint's dependency on domain */
|
|
+ removeObjectDependency(constraintobj, domainobj->dumpId);
|
|
+}
|
|
+
|
|
+static void
|
|
+repairDomainConstraintMultiLoop(DumpableObject *domainobj,
|
|
+ DumpableObject *constraintobj)
|
|
+{
|
|
+ /* remove domain's dependency on constraint */
|
|
+ removeObjectDependency(domainobj, constraintobj->dumpId);
|
|
+ /* mark constraint as needing its own dump */
|
|
+ ((ConstraintInfo *) constraintobj)->separate = true;
|
|
+ /* put back constraint's dependency on domain */
|
|
+ addObjectDependency(constraintobj, domainobj->dumpId);
|
|
+ /* now that constraint is separate, it must be post-data */
|
|
+ addObjectDependency(constraintobj, postDataBoundId);
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Fix a dependency loop, or die trying ...
|
|
+ *
|
|
+ * This routine is mainly concerned with reducing the multiple ways that
|
|
+ * a loop might appear to common cases, which it passes off to the
|
|
+ * "fixer" routines above.
|
|
+ */
|
|
+static void
|
|
+repairDependencyLoop(DumpableObject **loop,
|
|
+ int nLoop)
|
|
+{
|
|
+ int i,
|
|
+ j;
|
|
+
|
|
+ /* Datatype and one of its I/O or canonicalize functions */
|
|
+ if (nLoop == 2 &&
|
|
+ loop[0]->objType == DO_TYPE &&
|
|
+ loop[1]->objType == DO_FUNC)
|
|
+ {
|
|
+ repairTypeFuncLoop(loop[0], loop[1]);
|
|
+ return;
|
|
+ }
|
|
+ if (nLoop == 2 &&
|
|
+ loop[1]->objType == DO_TYPE &&
|
|
+ loop[0]->objType == DO_FUNC)
|
|
+ {
|
|
+ repairTypeFuncLoop(loop[1], loop[0]);
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ /* View (including matview) and its ON SELECT rule */
|
|
+ if (nLoop == 2 &&
|
|
+ loop[0]->objType == DO_TABLE &&
|
|
+ loop[1]->objType == DO_RULE &&
|
|
+ (((TableInfo *) loop[0])->relkind == 'v' || /* RELKIND_VIEW */
|
|
+ ((TableInfo *) loop[0])->relkind == 'm') && /* RELKIND_MATVIEW */
|
|
+ ((RuleInfo *) loop[1])->ev_type == '1' &&
|
|
+ ((RuleInfo *) loop[1])->is_instead &&
|
|
+ ((RuleInfo *) loop[1])->ruletable == (TableInfo *) loop[0])
|
|
+ {
|
|
+ repairViewRuleLoop(loop[0], loop[1]);
|
|
+ return;
|
|
+ }
|
|
+ if (nLoop == 2 &&
|
|
+ loop[1]->objType == DO_TABLE &&
|
|
+ loop[0]->objType == DO_RULE &&
|
|
+ (((TableInfo *) loop[1])->relkind == 'v' || /* RELKIND_VIEW */
|
|
+ ((TableInfo *) loop[1])->relkind == 'm') && /* RELKIND_MATVIEW */
|
|
+ ((RuleInfo *) loop[0])->ev_type == '1' &&
|
|
+ ((RuleInfo *) loop[0])->is_instead &&
|
|
+ ((RuleInfo *) loop[0])->ruletable == (TableInfo *) loop[1])
|
|
+ {
|
|
+ repairViewRuleLoop(loop[1], loop[0]);
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ /* Indirect loop involving view (but not matview) and ON SELECT rule */
|
|
+ if (nLoop > 2)
|
|
+ {
|
|
+ for (i = 0; i < nLoop; i++)
|
|
+ {
|
|
+ if (loop[i]->objType == DO_TABLE &&
|
|
+ ((TableInfo *) loop[i])->relkind == 'v') /* RELKIND_VIEW */
|
|
+ {
|
|
+ for (j = 0; j < nLoop; j++)
|
|
+ {
|
|
+ if (loop[j]->objType == DO_RULE &&
|
|
+ ((RuleInfo *) loop[j])->ev_type == '1' &&
|
|
+ ((RuleInfo *) loop[j])->is_instead &&
|
|
+ ((RuleInfo *) loop[j])->ruletable == (TableInfo *) loop[i])
|
|
+ {
|
|
+ repairViewRuleMultiLoop(loop[i], loop[j]);
|
|
+ return;
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+
|
|
+ /* Indirect loop involving matview and data boundary */
|
|
+ if (nLoop > 2)
|
|
+ {
|
|
+ for (i = 0; i < nLoop; i++)
|
|
+ {
|
|
+ if (loop[i]->objType == DO_TABLE &&
|
|
+ ((TableInfo *) loop[i])->relkind == 'm') /* RELKIND_MATVIEW */
|
|
+ {
|
|
+ for (j = 0; j < nLoop; j++)
|
|
+ {
|
|
+ if (loop[j]->objType == DO_PRE_DATA_BOUNDARY)
|
|
+ {
|
|
+ DumpableObject *nextobj;
|
|
+
|
|
+ nextobj = (j < nLoop - 1) ? loop[j + 1] : loop[0];
|
|
+ repairMatViewBoundaryMultiLoop(loop[i], loop[j],
|
|
+ nextobj);
|
|
+ return;
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+
|
|
+ /* Table and CHECK constraint */
|
|
+ if (nLoop == 2 &&
|
|
+ loop[0]->objType == DO_TABLE &&
|
|
+ loop[1]->objType == DO_CONSTRAINT &&
|
|
+ ((ConstraintInfo *) loop[1])->contype == 'c' &&
|
|
+ ((ConstraintInfo *) loop[1])->contable == (TableInfo *) loop[0])
|
|
+ {
|
|
+ repairTableConstraintLoop(loop[0], loop[1]);
|
|
+ return;
|
|
+ }
|
|
+ if (nLoop == 2 &&
|
|
+ loop[1]->objType == DO_TABLE &&
|
|
+ loop[0]->objType == DO_CONSTRAINT &&
|
|
+ ((ConstraintInfo *) loop[0])->contype == 'c' &&
|
|
+ ((ConstraintInfo *) loop[0])->contable == (TableInfo *) loop[1])
|
|
+ {
|
|
+ repairTableConstraintLoop(loop[1], loop[0]);
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ /* Indirect loop involving table and CHECK constraint */
|
|
+ if (nLoop > 2)
|
|
+ {
|
|
+ for (i = 0; i < nLoop; i++)
|
|
+ {
|
|
+ if (loop[i]->objType == DO_TABLE)
|
|
+ {
|
|
+ for (j = 0; j < nLoop; j++)
|
|
+ {
|
|
+ if (loop[j]->objType == DO_CONSTRAINT &&
|
|
+ ((ConstraintInfo *) loop[j])->contype == 'c' &&
|
|
+ ((ConstraintInfo *) loop[j])->contable == (TableInfo *) loop[i])
|
|
+ {
|
|
+ repairTableConstraintMultiLoop(loop[i], loop[j]);
|
|
+ return;
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+
|
|
+ /* Table and attribute default */
|
|
+ if (nLoop == 2 &&
|
|
+ loop[0]->objType == DO_TABLE &&
|
|
+ loop[1]->objType == DO_ATTRDEF &&
|
|
+ ((AttrDefInfo *) loop[1])->adtable == (TableInfo *) loop[0])
|
|
+ {
|
|
+ repairTableAttrDefLoop(loop[0], loop[1]);
|
|
+ return;
|
|
+ }
|
|
+ if (nLoop == 2 &&
|
|
+ loop[1]->objType == DO_TABLE &&
|
|
+ loop[0]->objType == DO_ATTRDEF &&
|
|
+ ((AttrDefInfo *) loop[0])->adtable == (TableInfo *) loop[1])
|
|
+ {
|
|
+ repairTableAttrDefLoop(loop[1], loop[0]);
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ /* Indirect loop involving table and attribute default */
|
|
+ if (nLoop > 2)
|
|
+ {
|
|
+ for (i = 0; i < nLoop; i++)
|
|
+ {
|
|
+ if (loop[i]->objType == DO_TABLE)
|
|
+ {
|
|
+ for (j = 0; j < nLoop; j++)
|
|
+ {
|
|
+ if (loop[j]->objType == DO_ATTRDEF &&
|
|
+ ((AttrDefInfo *) loop[j])->adtable == (TableInfo *) loop[i])
|
|
+ {
|
|
+ repairTableAttrDefMultiLoop(loop[i], loop[j]);
|
|
+ return;
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+
|
|
+ /* Domain and CHECK constraint */
|
|
+ if (nLoop == 2 &&
|
|
+ loop[0]->objType == DO_TYPE &&
|
|
+ loop[1]->objType == DO_CONSTRAINT &&
|
|
+ ((ConstraintInfo *) loop[1])->contype == 'c' &&
|
|
+ ((ConstraintInfo *) loop[1])->condomain == (TypeInfo *) loop[0])
|
|
+ {
|
|
+ repairDomainConstraintLoop(loop[0], loop[1]);
|
|
+ return;
|
|
+ }
|
|
+ if (nLoop == 2 &&
|
|
+ loop[1]->objType == DO_TYPE &&
|
|
+ loop[0]->objType == DO_CONSTRAINT &&
|
|
+ ((ConstraintInfo *) loop[0])->contype == 'c' &&
|
|
+ ((ConstraintInfo *) loop[0])->condomain == (TypeInfo *) loop[1])
|
|
+ {
|
|
+ repairDomainConstraintLoop(loop[1], loop[0]);
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ /* Indirect loop involving domain and CHECK constraint */
|
|
+ if (nLoop > 2)
|
|
+ {
|
|
+ for (i = 0; i < nLoop; i++)
|
|
+ {
|
|
+ if (loop[i]->objType == DO_TYPE)
|
|
+ {
|
|
+ for (j = 0; j < nLoop; j++)
|
|
+ {
|
|
+ if (loop[j]->objType == DO_CONSTRAINT &&
|
|
+ ((ConstraintInfo *) loop[j])->contype == 'c' &&
|
|
+ ((ConstraintInfo *) loop[j])->condomain == (TypeInfo *) loop[i])
|
|
+ {
|
|
+ repairDomainConstraintMultiLoop(loop[i], loop[j]);
|
|
+ return;
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+
|
|
+ /*
|
|
+ * If all the objects are TABLE_DATA items, what we must have is a
|
|
+ * circular set of foreign key constraints (or a single self-referential
|
|
+ * table). Print an appropriate complaint and break the loop arbitrarily.
|
|
+ */
|
|
+ for (i = 0; i < nLoop; i++)
|
|
+ {
|
|
+ if (loop[i]->objType != DO_TABLE_DATA)
|
|
+ break;
|
|
+ }
|
|
+ if (i >= nLoop)
|
|
+ {
|
|
+ write_msg(NULL, "NOTICE: there are circular foreign-key constraints among these table(s):\n");
|
|
+ for (i = 0; i < nLoop; i++)
|
|
+ write_msg(NULL, " %s\n", loop[i]->name);
|
|
+ write_msg(NULL, "You might not be able to restore the dump without using --disable-triggers or temporarily dropping the constraints.\n");
|
|
+ write_msg(NULL, "Consider using a full dump instead of a --data-only dump to avoid this problem.\n");
|
|
+ if (nLoop > 1)
|
|
+ removeObjectDependency(loop[0], loop[1]->dumpId);
|
|
+ else /* must be a self-dependency */
|
|
+ removeObjectDependency(loop[0], loop[0]->dumpId);
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ /*
|
|
+ * If we can't find a principled way to break the loop, complain and break
|
|
+ * it in an arbitrary fashion.
|
|
+ */
|
|
+ write_msg(modulename, "WARNING: could not resolve dependency loop among these items:\n");
|
|
+ for (i = 0; i < nLoop; i++)
|
|
+ {
|
|
+ char buf[1024];
|
|
+
|
|
+ describeDumpableObject(loop[i], buf, sizeof(buf));
|
|
+ write_msg(modulename, " %s\n", buf);
|
|
+ }
|
|
+
|
|
+ if (nLoop > 1)
|
|
+ removeObjectDependency(loop[0], loop[1]->dumpId);
|
|
+ else /* must be a self-dependency */
|
|
+ removeObjectDependency(loop[0], loop[0]->dumpId);
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Describe a dumpable object usefully for errors
|
|
+ *
|
|
+ * This should probably go somewhere else...
|
|
+ */
|
|
+static void
|
|
+describeDumpableObject(DumpableObject *obj, char *buf, int bufsize)
|
|
+{
|
|
+ switch (obj->objType)
|
|
+ {
|
|
+ case DO_NAMESPACE:
|
|
+ snprintf(buf, bufsize,
|
|
+ "SCHEMA %s (ID %d OID %u)",
|
|
+ obj->name, obj->dumpId, obj->catId.oid);
|
|
+ return;
|
|
+ case DO_EXTENSION:
|
|
+ snprintf(buf, bufsize,
|
|
+ "EXTENSION %s (ID %d OID %u)",
|
|
+ obj->name, obj->dumpId, obj->catId.oid);
|
|
+ return;
|
|
+ case DO_TYPE:
|
|
+ snprintf(buf, bufsize,
|
|
+ "TYPE %s (ID %d OID %u)",
|
|
+ obj->name, obj->dumpId, obj->catId.oid);
|
|
+ return;
|
|
+ case DO_SHELL_TYPE:
|
|
+ snprintf(buf, bufsize,
|
|
+ "SHELL TYPE %s (ID %d OID %u)",
|
|
+ obj->name, obj->dumpId, obj->catId.oid);
|
|
+ return;
|
|
+ case DO_FUNC:
|
|
+ snprintf(buf, bufsize,
|
|
+ "FUNCTION %s (ID %d OID %u)",
|
|
+ obj->name, obj->dumpId, obj->catId.oid);
|
|
+ return;
|
|
+ case DO_AGG:
|
|
+ snprintf(buf, bufsize,
|
|
+ "AGGREGATE %s (ID %d OID %u)",
|
|
+ obj->name, obj->dumpId, obj->catId.oid);
|
|
+ return;
|
|
+ case DO_OPERATOR:
|
|
+ snprintf(buf, bufsize,
|
|
+ "OPERATOR %s (ID %d OID %u)",
|
|
+ obj->name, obj->dumpId, obj->catId.oid);
|
|
+ return;
|
|
+ case DO_OPCLASS:
|
|
+ snprintf(buf, bufsize,
|
|
+ "OPERATOR CLASS %s (ID %d OID %u)",
|
|
+ obj->name, obj->dumpId, obj->catId.oid);
|
|
+ return;
|
|
+ case DO_OPFAMILY:
|
|
+ snprintf(buf, bufsize,
|
|
+ "OPERATOR FAMILY %s (ID %d OID %u)",
|
|
+ obj->name, obj->dumpId, obj->catId.oid);
|
|
+ return;
|
|
+ case DO_COLLATION:
|
|
+ snprintf(buf, bufsize,
|
|
+ "COLLATION %s (ID %d OID %u)",
|
|
+ obj->name, obj->dumpId, obj->catId.oid);
|
|
+ return;
|
|
+ case DO_CONVERSION:
|
|
+ snprintf(buf, bufsize,
|
|
+ "CONVERSION %s (ID %d OID %u)",
|
|
+ obj->name, obj->dumpId, obj->catId.oid);
|
|
+ return;
|
|
+ case DO_TABLE:
|
|
+ snprintf(buf, bufsize,
|
|
+ "TABLE %s (ID %d OID %u)",
|
|
+ obj->name, obj->dumpId, obj->catId.oid);
|
|
+ return;
|
|
+ case DO_ATTRDEF:
|
|
+ snprintf(buf, bufsize,
|
|
+ "ATTRDEF %s.%s (ID %d OID %u)",
|
|
+ ((AttrDefInfo *) obj)->adtable->dobj.name,
|
|
+ ((AttrDefInfo *) obj)->adtable->attnames[((AttrDefInfo *) obj)->adnum - 1],
|
|
+ obj->dumpId, obj->catId.oid);
|
|
+ return;
|
|
+ case DO_INDEX:
|
|
+ snprintf(buf, bufsize,
|
|
+ "INDEX %s (ID %d OID %u)",
|
|
+ obj->name, obj->dumpId, obj->catId.oid);
|
|
+ return;
|
|
+ case DO_REFRESH_MATVIEW:
|
|
+ snprintf(buf, bufsize,
|
|
+ "REFRESH MATERIALIZED VIEW %s (ID %d OID %u)",
|
|
+ obj->name, obj->dumpId, obj->catId.oid);
|
|
+ return;
|
|
+ case DO_RULE:
|
|
+ snprintf(buf, bufsize,
|
|
+ "RULE %s (ID %d OID %u)",
|
|
+ obj->name, obj->dumpId, obj->catId.oid);
|
|
+ return;
|
|
+ case DO_TRIGGER:
|
|
+ snprintf(buf, bufsize,
|
|
+ "TRIGGER %s (ID %d OID %u)",
|
|
+ obj->name, obj->dumpId, obj->catId.oid);
|
|
+ return;
|
|
+ case DO_EVENT_TRIGGER:
|
|
+ snprintf(buf, bufsize,
|
|
+ "EVENT TRIGGER %s (ID %d OID %u)",
|
|
+ obj->name, obj->dumpId, obj->catId.oid);
|
|
+ return;
|
|
+ case DO_CONSTRAINT:
|
|
+ snprintf(buf, bufsize,
|
|
+ "CONSTRAINT %s (ID %d OID %u)",
|
|
+ obj->name, obj->dumpId, obj->catId.oid);
|
|
+ return;
|
|
+ case DO_FK_CONSTRAINT:
|
|
+ snprintf(buf, bufsize,
|
|
+ "FK CONSTRAINT %s (ID %d OID %u)",
|
|
+ obj->name, obj->dumpId, obj->catId.oid);
|
|
+ return;
|
|
+ case DO_PROCLANG:
|
|
+ snprintf(buf, bufsize,
|
|
+ "PROCEDURAL LANGUAGE %s (ID %d OID %u)",
|
|
+ obj->name, obj->dumpId, obj->catId.oid);
|
|
+ return;
|
|
+ case DO_CAST:
|
|
+ snprintf(buf, bufsize,
|
|
+ "CAST %u to %u (ID %d OID %u)",
|
|
+ ((CastInfo *) obj)->castsource,
|
|
+ ((CastInfo *) obj)->casttarget,
|
|
+ obj->dumpId, obj->catId.oid);
|
|
+ return;
|
|
+ case DO_TABLE_DATA:
|
|
+ snprintf(buf, bufsize,
|
|
+ "TABLE DATA %s (ID %d OID %u)",
|
|
+ obj->name, obj->dumpId, obj->catId.oid);
|
|
+ return;
|
|
+ case DO_DUMMY_TYPE:
|
|
+ snprintf(buf, bufsize,
|
|
+ "DUMMY TYPE %s (ID %d OID %u)",
|
|
+ obj->name, obj->dumpId, obj->catId.oid);
|
|
+ return;
|
|
+ case DO_TSPARSER:
|
|
+ snprintf(buf, bufsize,
|
|
+ "TEXT SEARCH PARSER %s (ID %d OID %u)",
|
|
+ obj->name, obj->dumpId, obj->catId.oid);
|
|
+ return;
|
|
+ case DO_TSDICT:
|
|
+ snprintf(buf, bufsize,
|
|
+ "TEXT SEARCH DICTIONARY %s (ID %d OID %u)",
|
|
+ obj->name, obj->dumpId, obj->catId.oid);
|
|
+ return;
|
|
+ case DO_TSTEMPLATE:
|
|
+ snprintf(buf, bufsize,
|
|
+ "TEXT SEARCH TEMPLATE %s (ID %d OID %u)",
|
|
+ obj->name, obj->dumpId, obj->catId.oid);
|
|
+ return;
|
|
+ case DO_TSCONFIG:
|
|
+ snprintf(buf, bufsize,
|
|
+ "TEXT SEARCH CONFIGURATION %s (ID %d OID %u)",
|
|
+ obj->name, obj->dumpId, obj->catId.oid);
|
|
+ return;
|
|
+ case DO_FDW:
|
|
+ snprintf(buf, bufsize,
|
|
+ "FOREIGN DATA WRAPPER %s (ID %d OID %u)",
|
|
+ obj->name, obj->dumpId, obj->catId.oid);
|
|
+ return;
|
|
+ case DO_FOREIGN_SERVER:
|
|
+ snprintf(buf, bufsize,
|
|
+ "FOREIGN SERVER %s (ID %d OID %u)",
|
|
+ obj->name, obj->dumpId, obj->catId.oid);
|
|
+ return;
|
|
+ case DO_DEFAULT_ACL:
|
|
+ snprintf(buf, bufsize,
|
|
+ "DEFAULT ACL %s (ID %d OID %u)",
|
|
+ obj->name, obj->dumpId, obj->catId.oid);
|
|
+ return;
|
|
+ case DO_BLOB:
|
|
+ snprintf(buf, bufsize,
|
|
+ "BLOB (ID %d OID %u)",
|
|
+ obj->dumpId, obj->catId.oid);
|
|
+ return;
|
|
+ case DO_BLOB_DATA:
|
|
+ snprintf(buf, bufsize,
|
|
+ "BLOB DATA (ID %d)",
|
|
+ obj->dumpId);
|
|
+ return;
|
|
+ case DO_PRE_DATA_BOUNDARY:
|
|
+ snprintf(buf, bufsize,
|
|
+ "PRE-DATA BOUNDARY (ID %d)",
|
|
+ obj->dumpId);
|
|
+ return;
|
|
+ case DO_POST_DATA_BOUNDARY:
|
|
+ snprintf(buf, bufsize,
|
|
+ "POST-DATA BOUNDARY (ID %d)",
|
|
+ obj->dumpId);
|
|
+ return;
|
|
+ }
|
|
+ /* shouldn't get here */
|
|
+ snprintf(buf, bufsize,
|
|
+ "object type %d (ID %d OID %u)",
|
|
+ (int) obj->objType,
|
|
+ obj->dumpId, obj->catId.oid);
|
|
+}
--- /dev/null
+++ pglogical-2.2.2/pglogical_dump/pgtar.h
@@ -0,0 +1,17 @@
+/*-------------------------------------------------------------------------
+ *
+ * pgtar.h
+ *	  Functions for manipulating tarfile datastructures (src/port/tar.c)
+ *
+ *
+ * Portions Copyright (c) 1996-2014, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ * src/include/pgtar.h
+ *
+ *-------------------------------------------------------------------------
+ */
+extern void tarCreateHeader(char *h, const char *filename, const char *linktarget,
+				pgoff_t size, mode_t mode, uid_t uid, gid_t gid, time_t mtime);
+extern uint64 read_tar_number(const char *s, int len);
+extern int	tarChecksum(char *header);
--- /dev/null
+++ pglogical-2.2.2/pglogical_dump/tar.c
@@ -0,0 +1,196 @@
+#include "c.h"
+#include "pgtar.h"
+#include <sys/stat.h>
+
+/*
+ * Print a numeric field in a tar header.  The field starts at *s and is of
+ * length len; val is the value to be written.
+ *
+ * Per POSIX, the way to write a number is in octal with leading zeroes and
+ * one trailing space (or NUL, but we use space) at the end of the specified
+ * field width.
+ *
+ * However, the given value may not fit in the available space in octal form.
+ * If that's true, we use the GNU extension of writing \200 followed by the
+ * number in base-256 form (ie, stored in binary MSB-first).  (Note: here we
+ * support only non-negative numbers, so we don't worry about the GNU rules
+ * for handling negative numbers.)
+ */
+static void
+print_tar_number(char *s, int len, uint64 val)
+{
+	if (val < (((uint64) 1) << ((len - 1) * 3)))
+	{
+		/* Use octal with trailing space */
+		s[--len] = ' ';
+		while (len)
+		{
+			s[--len] = (val & 7) + '0';
+			val >>= 3;
+		}
+	}
+	else
+	{
+		/* Use base-256 with leading \200 */
+		s[0] = '\200';
+		while (len > 1)
+		{
+			s[--len] = (val & 255);
+			val >>= 8;
+		}
+	}
+}
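A worked example of the two encodings (editorial illustration, not part of the upstream patch):

/*
 * Illustration only (not part of the upstream patch):
 *
 *   print_tar_number(field, 8, 0644)  stores "0000644 " -- octal digits
 *   padded with leading zeroes plus one trailing space.
 *
 *   An 8-byte field holds octal values up to 8^7 - 1 and a 12-byte field
 *   up to 8^11 - 1 (one byte is reserved for the trailing space), so
 *   print_tar_number(field, 12, (uint64) 8589934592) -- an 8 GB
 *   (2^33-byte) file size -- no longer fits in octal and is stored as
 *   '\200' followed by the value in binary, most significant byte first.
 */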
+
|
|
+
|
|
+/*
|
|
+ * Read a numeric field in a tar header. The field starts at *s and is of
|
|
+ * length len.
|
|
+ *
|
|
+ * The POSIX-approved format for a number is octal, ending with a space or
|
|
+ * NUL. However, for values that don't fit, we recognize the GNU extension
|
|
+ * of \200 followed by the number in base-256 form (ie, stored in binary
|
|
+ * MSB-first). (Note: here we support only non-negative numbers, so we don't
|
|
+ * worry about the GNU rules for handling negative numbers.)
|
|
+ */
|
|
+uint64
|
|
+read_tar_number(const char *s, int len)
|
|
+{
|
|
+ uint64 result = 0;
|
|
+
|
|
+ if (*s == '\200')
|
|
+ {
|
|
+ /* base-256 */
|
|
+ while (--len)
|
|
+ {
|
|
+ result <<= 8;
|
|
+ result |= (unsigned char) (*++s);
|
|
+ }
|
|
+ }
|
|
+ else
|
|
+ {
|
|
+ /* octal */
|
|
+ while (len-- && *s >= '0' && *s <= '7')
|
|
+ {
|
|
+ result <<= 3;
|
|
+ result |= (*s - '0');
|
|
+ s++;
|
|
+ }
|
|
+ }
|
|
+ return result;
|
|
+}
|
|
+
|
|
+
|
|
+/*
|
|
+ * Calculate the tar checksum for a header. The header is assumed to always
|
|
+ * be 512 bytes, per the tar standard.
|
|
+ */
|
|
+int
|
|
+tarChecksum(char *header)
|
|
+{
|
|
+ int i,
|
|
+ sum;
|
|
+
|
|
+ /*
|
|
+ * Per POSIX, the checksum is the simple sum of all bytes in the header,
|
|
+ * treating the bytes as unsigned, and treating the checksum field (at
|
|
+ * offset 148) as though it contained 8 spaces.
|
|
+ */
|
|
+ sum = 8 * ' '; /* presumed value for checksum field */
|
|
+ for (i = 0; i < 512; i++)
|
|
+ if (i < 148 || i >= 156)
|
|
+ sum += 0xFF & header[i];
|
|
+ return sum;
|
|
+}
|
|
+
|
|
+
|
|
+/*
|
|
+ * Fill in the buffer pointed to by h with a tar format header. This buffer
|
|
+ * must always have space for 512 characters, which is a requirement of
|
|
+ * the tar format.
|
|
+ */
|
|
+void
|
|
+tarCreateHeader(char *h, const char *filename, const char *linktarget,
|
|
+ pgoff_t size, mode_t mode, uid_t uid, gid_t gid, time_t mtime)
|
|
+{
|
|
+ memset(h, 0, 512); /* assume tar header size */
|
|
+
|
|
+ /* Name 100 */
|
|
+ strlcpy(&h[0], filename, 100);
|
|
+ if (linktarget != NULL || S_ISDIR(mode))
|
|
+ {
|
|
+ /*
|
|
+ * We only support symbolic links to directories, and this is
|
|
+ * indicated in the tar format by adding a slash at the end of the
|
|
+ * name, the same as for regular directories.
|
|
+ */
|
|
+ int flen = strlen(filename);
|
|
+
|
|
+ flen = Min(flen, 99);
|
|
+ h[flen] = '/';
|
|
+ h[flen + 1] = '\0';
|
|
+ }
|
|
+
|
|
+ /* Mode 8 - this doesn't include the file type bits (S_IFMT) */
|
|
+ print_tar_number(&h[100], 8, (mode & 07777));
|
|
+
|
|
+ /* User ID 8 */
|
|
+ print_tar_number(&h[108], 8, uid);
|
|
+
|
|
+ /* Group 8 */
|
|
+ print_tar_number(&h[116], 8, gid);
|
|
+
|
|
+ /* File size 12 */
|
|
+ if (linktarget != NULL || S_ISDIR(mode))
|
|
+ /* Symbolic link or directory has size zero */
|
|
+ print_tar_number(&h[124], 12, 0);
|
|
+ else
|
|
+ print_tar_number(&h[124], 12, size);
|
|
+
|
|
+ /* Mod Time 12 */
|
|
+ print_tar_number(&h[136], 12, mtime);
|
|
+
|
|
+ /* Checksum 8 cannot be calculated until we've filled all other fields */
|
|
+
|
|
+ if (linktarget != NULL)
|
|
+ {
|
|
+ /* Type - Symbolic link */
|
|
+ h[156] = '2';
|
|
+ /* Link Name 100 */
|
|
+ strlcpy(&h[157], linktarget, 100);
|
|
+ }
|
|
+ else if (S_ISDIR(mode))
|
|
+ {
|
|
+ /* Type - directory */
|
|
+ h[156] = '5';
|
|
+ }
|
|
+ else
|
|
+ {
|
|
+ /* Type - regular file */
|
|
+ h[156] = '0';
|
|
+ }
|
|
+
|
|
+ /* Magic 6 */
|
|
+ strcpy(&h[257], "ustar");
|
|
+
|
|
+ /* Version 2 */
|
|
+ memcpy(&h[263], "00", 2);
|
|
+
|
|
+ /* User 32 */
|
|
+ /* XXX: Do we need to care about setting correct username? */
|
|
+ strlcpy(&h[265], "postgres", 32);
|
|
+
|
|
+ /* Group 32 */
|
|
+ /* XXX: Do we need to care about setting correct group name? */
|
|
+ strlcpy(&h[297], "postgres", 32);
|
|
+
|
|
+ /* Major Dev 8 */
|
|
+ print_tar_number(&h[329], 8, 0);
|
|
+
|
|
+ /* Minor Dev 8 */
|
|
+ print_tar_number(&h[337], 8, 0);
|
|
+
|
|
+ /* Prefix 155 - not used, leave as nulls */
|
|
+
|
|
+ /* Finally, compute and insert the checksum */
|
|
+ print_tar_number(&h[148], 8, tarChecksum(h));
|
|
+}
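A minimal usage sketch tying the pieces together (illustration only, not part of the upstream patch; the file name, size and ownership are invented and the code is compiled out):

#ifdef NOT_USED
static void
demo_tar_header(void)
{
	char		h[512];

	/* regular file "demo.dat", 1234 bytes, mode 0644, uid/gid 1000 */
	tarCreateHeader(h, "demo.dat", NULL, (pgoff_t) 1234,
					0644, 1000, 1000, (time_t) 0);

	/* the size field is 12 bytes at offset 124; this reads back 1234 */
	if (read_tar_number(&h[124], 12) != 1234)
		fprintf(stderr, "unexpected size field\n");

	/* the checksum at offset 148 matches an independent recomputation */
	if (read_tar_number(&h[148], 8) != (uint64) tarChecksum(h))
		fprintf(stderr, "unexpected checksum\n");
}
#endif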