| author | Bruce Momjian <bruce@momjian.us> | 2017-05-17 16:31:56 -0400 |
|---|---|---|
| committer | Bruce Momjian <bruce@momjian.us> | 2017-05-17 16:31:56 -0400 |
| commit | a6fd7b7a5f7bf3a8aa3f3d076cf09d922c1c6dd2 | |
| tree | d10454411c05d459abe06df161ab3c1156c5f477 /src/backend/utils/adt | |
| parent | 8a943324780259757c77c56cfc597347d1150cdb | |
| download | postgresql-a6fd7b7a5f7bf3a8aa3f3d076cf09d922c1c6dd2.tar.gz | |
Post-PG 10 beta1 pgindent run
perltidy run not included.
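The diff below is pure reformatting. For readers unfamiliar with pgindent: it aligns declarator names in variable declarations to a common column, moves trailing comments out to a standard comment column, re-wraps block comments at the standard width, and pushes `#endif` annotations far to the right. A minimal before/after sketch of the declaration style (hypothetical code, not taken from this commit):

```c
/* Hypothetical struct, not from this commit: layout before pgindent. */
typedef struct DemoBefore
{
    void *lex;
    void *action_state; /* context for iteration */
} DemoBefore;

/*
 * The same declarations as pgindent would lay them out: declarator names
 * aligned to a shared tab stop, the trailing comment moved out to the
 * standard comment column.
 */
typedef struct DemoAfter
{
    void       *lex;
    void       *action_state;   /* context for iteration */
} DemoAfter;
```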
Diffstat (limited to 'src/backend/utils/adt')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | src/backend/utils/adt/cash.c | 12 |
| -rw-r--r-- | src/backend/utils/adt/dbsize.c | 8 |
| -rw-r--r-- | src/backend/utils/adt/formatting.c | 63 |
| -rw-r--r-- | src/backend/utils/adt/genfile.c | 4 |
| -rw-r--r-- | src/backend/utils/adt/json.c | 2 |
| -rw-r--r-- | src/backend/utils/adt/jsonb.c | 2 |
| -rw-r--r-- | src/backend/utils/adt/jsonfuncs.c | 428 |
| -rw-r--r-- | src/backend/utils/adt/like.c | 8 |
| -rw-r--r-- | src/backend/utils/adt/mac.c | 2 |
| -rw-r--r-- | src/backend/utils/adt/mac8.c | 2 |
| -rw-r--r-- | src/backend/utils/adt/pg_locale.c | 44 |
| -rw-r--r-- | src/backend/utils/adt/ruleutils.c | 14 |
| -rw-r--r-- | src/backend/utils/adt/selfuncs.c | 44 |
| -rw-r--r-- | src/backend/utils/adt/txid.c | 16 |
| -rw-r--r-- | src/backend/utils/adt/varlena.c | 35 |
| -rw-r--r-- | src/backend/utils/adt/xml.c | 9 |
16 files changed, 363 insertions(+), 330 deletions(-)
```diff
diff --git a/src/backend/utils/adt/cash.c b/src/backend/utils/adt/cash.c
index 5afadb65d1..5cb086e50e 100644
--- a/src/backend/utils/adt/cash.c
+++ b/src/backend/utils/adt/cash.c
@@ -203,7 +203,7 @@ cash_in(PG_FUNCTION_ARGS)
             /* than the required number of decimal places */
             if (isdigit((unsigned char) *s) && (!seen_dot || dec < fpoint))
             {
-                Cash    newvalue = (value * 10) - (*s - '0');
+                Cash        newvalue = (value * 10) - (*s - '0');
 
                 if (newvalue / 10 != value)
                     ereport(ERROR,
@@ -230,7 +230,7 @@ cash_in(PG_FUNCTION_ARGS)
 
     /* round off if there's another digit */
     if (isdigit((unsigned char) *s) && *s >= '5')
-        value--;    /* remember we build the value in the negative */
+        value--;                /* remember we build the value in the negative */
 
     if (value > 0)
         ereport(ERROR,
@@ -241,7 +241,7 @@ cash_in(PG_FUNCTION_ARGS)
     /* adjust for less than required decimal places */
     for (; dec < fpoint; dec++)
     {
-        Cash    newvalue = value * 10;
+        Cash        newvalue = value * 10;
 
         if (newvalue / 10 != value)
             ereport(ERROR,
@@ -279,8 +279,10 @@ cash_in(PG_FUNCTION_ARGS)
                         "money", str)));
     }
 
-    /* If the value is supposed to be positive, flip the sign, but check for
-     * the most negative number. */
+    /*
+     * If the value is supposed to be positive, flip the sign, but check for
+     * the most negative number.
+     */
     if (sgn > 0)
     {
         result = -value;
diff --git a/src/backend/utils/adt/dbsize.c b/src/backend/utils/adt/dbsize.c
index b0418b18dc..f0725860b4 100644
--- a/src/backend/utils/adt/dbsize.c
+++ b/src/backend/utils/adt/dbsize.c
@@ -90,8 +90,8 @@ calculate_database_size(Oid dbOid)
     AclResult   aclresult;
 
     /*
-     * User must have connect privilege for target database
-     * or be a member of pg_read_all_stats
+     * User must have connect privilege for target database or be a member of
+     * pg_read_all_stats
      */
     aclresult = pg_database_aclcheck(dbOid, GetUserId(), ACL_CONNECT);
     if (aclresult != ACLCHECK_OK &&
@@ -180,8 +180,8 @@ calculate_tablespace_size(Oid tblspcOid)
 
     /*
      * User must be a member of pg_read_all_stats or have CREATE privilege for
-     * target tablespace, either explicitly granted or implicitly because
-     * it is default for current database.
+     * target tablespace, either explicitly granted or implicitly because it
+     * is default for current database.
      */
     if (tblspcOid != MyDatabaseTableSpace &&
         !is_member_of_role(GetUserId(), DEFAULT_ROLE_READ_ALL_STATS))
diff --git a/src/backend/utils/adt/formatting.c b/src/backend/utils/adt/formatting.c
index 1e21dd5c68..4127bece12 100644
--- a/src/backend/utils/adt/formatting.c
+++ b/src/backend/utils/adt/formatting.c
@@ -1449,10 +1449,10 @@ str_numth(char *dest, char *num, int type)
 
 #ifdef USE_ICU
 
-typedef int32_t (*ICU_Convert_Func)(UChar *dest, int32_t destCapacity,
-                                    const UChar *src, int32_t srcLength,
-                                    const char *locale,
-                                    UErrorCode *pErrorCode);
+typedef int32_t (*ICU_Convert_Func) (UChar *dest, int32_t destCapacity,
+                                     const UChar *src, int32_t srcLength,
+                                     const char *locale,
+                                     UErrorCode *pErrorCode);
 
 static int32_t
 icu_convert_case(ICU_Convert_Func func, pg_locale_t mylocale,
@@ -1461,7 +1461,7 @@ icu_convert_case(ICU_Convert_Func func, pg_locale_t mylocale,
     UErrorCode  status;
     int32_t     len_dest;
 
-    len_dest = len_source;  /* try first with same length */
+    len_dest = len_source;      /* try first with same length */
     *buff_dest = palloc(len_dest * sizeof(**buff_dest));
     status = U_ZERO_ERROR;
     len_dest = func(*buff_dest, len_dest, buff_source, len_source,
@@ -1491,7 +1491,7 @@ u_strToTitle_default_BI(UChar *dest, int32_t destCapacity,
                          NULL, locale, pErrorCode);
 }
 
-#endif   /* USE_ICU */
+#endif                          /* USE_ICU */
 
 /*
  * If the system provides the needed functions for wide-character manipulation
@@ -1592,7 +1592,10 @@ str_tolower(const char *buff, size_t nbytes, Oid collid)
                 workspace[curr_char] = towlower(workspace[curr_char]);
         }
 
-        /* Make result large enough; case change might change number of bytes */
+        /*
+         * Make result large enough; case change might change number
+         * of bytes
+         */
         result_size = curr_char * pg_database_encoding_max_length() + 1;
         result = palloc(result_size);
 
@@ -1607,11 +1610,11 @@ str_tolower(const char *buff, size_t nbytes, Oid collid)
         result = pnstrdup(buff, nbytes);
 
         /*
-         * Note: we assume that tolower_l() will not be so broken as to need
-         * an isupper_l() guard test.  When using the default collation, we
-         * apply the traditional Postgres behavior that forces ASCII-style
-         * treatment of I/i, but in non-default collations you get exactly
-         * what the collation says.
+         * Note: we assume that tolower_l() will not be so broken as
+         * to need an isupper_l() guard test.  When using the default
+         * collation, we apply the traditional Postgres behavior that
+         * forces ASCII-style treatment of I/i, but in non-default
+         * collations you get exactly what the collation says.
          */
         for (p = result; *p; p++)
         {
@@ -1672,7 +1675,8 @@ str_toupper(const char *buff, size_t nbytes, Oid collid)
 #ifdef USE_ICU
         if (mylocale && mylocale->provider == COLLPROVIDER_ICU)
         {
-            int32_t len_uchar, len_conv;
+            int32_t     len_uchar,
+                        len_conv;
             UChar      *buff_uchar;
             UChar      *buff_conv;
 
@@ -1711,7 +1715,10 @@ str_toupper(const char *buff, size_t nbytes, Oid collid)
                 workspace[curr_char] = towupper(workspace[curr_char]);
         }
 
-        /* Make result large enough; case change might change number of bytes */
+        /*
+         * Make result large enough; case change might change number
+         * of bytes
+         */
         result_size = curr_char * pg_database_encoding_max_length() + 1;
         result = palloc(result_size);
 
@@ -1726,11 +1733,11 @@ str_toupper(const char *buff, size_t nbytes, Oid collid)
         result = pnstrdup(buff, nbytes);
 
         /*
-         * Note: we assume that toupper_l() will not be so broken as to need
-         * an islower_l() guard test.  When using the default collation, we
-         * apply the traditional Postgres behavior that forces ASCII-style
-         * treatment of I/i, but in non-default collations you get exactly
-         * what the collation says.
+         * Note: we assume that toupper_l() will not be so broken as
+         * to need an islower_l() guard test.  When using the default
+         * collation, we apply the traditional Postgres behavior that
+         * forces ASCII-style treatment of I/i, but in non-default
+         * collations you get exactly what the collation says.
          */
         for (p = result; *p; p++)
         {
@@ -1792,7 +1799,8 @@ str_initcap(const char *buff, size_t nbytes, Oid collid)
 #ifdef USE_ICU
         if (mylocale && mylocale->provider == COLLPROVIDER_ICU)
         {
-            int32_t len_uchar, len_conv;
+            int32_t     len_uchar,
+                        len_conv;
             UChar      *buff_uchar;
             UChar      *buff_conv;
 
@@ -1843,7 +1851,10 @@ str_initcap(const char *buff, size_t nbytes, Oid collid)
             }
         }
 
-        /* Make result large enough; case change might change number of bytes */
+        /*
+         * Make result large enough; case change might change number
+         * of bytes
+         */
         result_size = curr_char * pg_database_encoding_max_length() + 1;
         result = palloc(result_size);
 
@@ -1858,11 +1869,11 @@ str_initcap(const char *buff, size_t nbytes, Oid collid)
         result = pnstrdup(buff, nbytes);
 
         /*
-         * Note: we assume that toupper_l()/tolower_l() will not be so broken
-         * as to need guard tests.  When using the default collation, we apply
-         * the traditional Postgres behavior that forces ASCII-style treatment
-         * of I/i, but in non-default collations you get exactly what the
-         * collation says.
+         * Note: we assume that toupper_l()/tolower_l() will not be so
+         * broken as to need guard tests.  When using the default
+         * collation, we apply the traditional Postgres behavior that
+         * forces ASCII-style treatment of I/i, but in non-default
+         * collations you get exactly what the collation says.
          */
         for (p = result; *p; p++)
         {
diff --git a/src/backend/utils/adt/genfile.c b/src/backend/utils/adt/genfile.c
index 32d6a66688..5b15562ba5 100644
--- a/src/backend/utils/adt/genfile.c
+++ b/src/backend/utils/adt/genfile.c
@@ -486,7 +486,7 @@ pg_ls_dir_files(FunctionCallInfo fcinfo, char *dir)
     if (SRF_IS_FIRSTCALL())
     {
         MemoryContext oldcontext;
-        TupleDesc tupdesc;
+        TupleDesc   tupdesc;
 
         funcctx = SRF_FIRSTCALL_INIT();
         oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx);
@@ -523,7 +523,7 @@ pg_ls_dir_files(FunctionCallInfo fcinfo, char *dir)
         Datum       values[3];
         bool        nulls[3];
         char        path[MAXPGPATH * 2];
-        struct stat attrib;
+        struct stat attrib;
         HeapTuple   tuple;
 
         /* Skip hidden files */
diff --git a/src/backend/utils/adt/json.c b/src/backend/utils/adt/json.c
index 9fb0e480bf..0c6572d03e 100644
--- a/src/backend/utils/adt/json.c
+++ b/src/backend/utils/adt/json.c
@@ -1400,7 +1400,7 @@ json_categorize_type(Oid typoid,
             if (OidIsValid(get_element_type(typoid)) || typoid == ANYARRAYOID ||
                 typoid == RECORDARRAYOID)
                 *tcategory = JSONTYPE_ARRAY;
-            else if (type_is_rowtype(typoid))   /* includes RECORDOID */
+            else if (type_is_rowtype(typoid))   /* includes RECORDOID */
                 *tcategory = JSONTYPE_COMPOSITE;
             else
             {
diff --git a/src/backend/utils/adt/jsonb.c b/src/backend/utils/adt/jsonb.c
index 164f57ef77..952040d5bb 100644
--- a/src/backend/utils/adt/jsonb.c
+++ b/src/backend/utils/adt/jsonb.c
@@ -647,7 +647,7 @@ jsonb_categorize_type(Oid typoid,
             if (OidIsValid(get_element_type(typoid)) || typoid == ANYARRAYOID ||
                 typoid == RECORDARRAYOID)
                 *tcategory = JSONBTYPE_ARRAY;
-            else if (type_is_rowtype(typoid))   /* includes RECORDOID */
+            else if (type_is_rowtype(typoid))   /* includes RECORDOID */
                 *tcategory = JSONBTYPE_COMPOSITE;
             else
             {
diff --git a/src/backend/utils/adt/jsonfuncs.c b/src/backend/utils/adt/jsonfuncs.c
index 3966e43dd5..173584fef6 100644
--- a/src/backend/utils/adt/jsonfuncs.c
+++ b/src/backend/utils/adt/jsonfuncs.c
@@ -56,20 +56,20 @@ typedef struct OkeysState
 /* state for iterate_json_string_values function */
 typedef struct IterateJsonStringValuesState
 {
-    JsonLexContext *lex;
-    JsonIterateStringValuesAction action; /* an action that will be applied
-                                             to each json value */
-    void       *action_state;   /* any necessary context for iteration */
+    JsonLexContext *lex;
+    JsonIterateStringValuesAction action;   /* an action that will be
+                                             * applied to each json value */
+    void       *action_state;   /* any necessary context for iteration */
 } IterateJsonStringValuesState;
 
 /* state for transform_json_string_values function */
 typedef struct TransformJsonStringValuesState
 {
-    JsonLexContext *lex;
-    StringInfo  strval;         /* resulting json */
-    JsonTransformStringValuesAction action; /* an action that will be applied
-                                               to each json value */
-    void       *action_state;   /* any necessary context for transformation */
+    JsonLexContext *lex;
+    StringInfo  strval;         /* resulting json */
+    JsonTransformStringValuesAction action; /* an action that will be
+                                             * applied to each json value */
+    void       *action_state;   /* any necessary context for transformation */
 } TransformJsonStringValuesState;
 
 /* state for json_get* functions */
@@ -154,29 +154,29 @@ typedef struct RecordIOData RecordIOData;
 /* structure to cache metadata needed for populate_array() */
 typedef struct ArrayIOData
 {
-    ColumnIOData *element_info; /* metadata cache */
-    Oid         element_type;   /* array element type id */
-    int32       element_typmod; /* array element type modifier */
+    ColumnIOData *element_info; /* metadata cache */
+    Oid         element_type;   /* array element type id */
+    int32       element_typmod; /* array element type modifier */
 } ArrayIOData;
 
 /* structure to cache metadata needed for populate_composite() */
 typedef struct CompositeIOData
 {
     /*
-     * We use pointer to a RecordIOData here because variable-length
-     * struct RecordIOData can't be used directly in ColumnIOData.io union
+     * We use pointer to a RecordIOData here because variable-length struct
+     * RecordIOData can't be used directly in ColumnIOData.io union
      */
-    RecordIOData *record_io;    /* metadata cache for populate_record() */
-    TupleDesc   tupdesc;        /* cached tuple descriptor */
+    RecordIOData *record_io;    /* metadata cache for populate_record() */
+    TupleDesc   tupdesc;        /* cached tuple descriptor */
 } CompositeIOData;
 
 /* structure to cache metadata needed for populate_domain() */
 typedef struct DomainIOData
 {
-    ColumnIOData *base_io;      /* metadata cache */
-    Oid         base_typid;     /* base type id */
-    int32       base_typmod;    /* base type modifier */
-    void       *domain_info;    /* opaque cache for domain checks */
+    ColumnIOData *base_io;      /* metadata cache */
+    Oid         base_typid;     /* base type id */
+    int32       base_typmod;    /* base type modifier */
+    void       *domain_info;    /* opaque cache for domain checks */
 } DomainIOData;
 
 /* enumeration type categories */
@@ -193,17 +193,18 @@ typedef enum TypeCat
 /* structure to cache record metadata needed for populate_record_field() */
 struct ColumnIOData
 {
-    Oid         typid;          /* column type id */
-    int32       typmod;         /* column type modifier */
-    TypeCat     typcat;         /* column type category */
-    ScalarIOData scalar_io;     /* metadata cache for directi conversion
-                                   through input function */
+    Oid         typid;          /* column type id */
+    int32       typmod;         /* column type modifier */
+    TypeCat     typcat;         /* column type category */
+    ScalarIOData scalar_io;     /* metadata cache for directi conversion
+                                 * through input function */
     union
     {
-        ArrayIOData array;
-        CompositeIOData composite;
-        DomainIOData domain;
-    }           io;             /* metadata cache for various column type categories */
+        ArrayIOData array;
+        CompositeIOData composite;
+        DomainIOData domain;
+    }           io;             /* metadata cache for various column type
+                                 * categories */
 };
 
 /* structure to cache record metadata needed for populate_record() */
@@ -234,31 +235,32 @@ typedef struct PopulateRecordsetState
 /* structure to cache metadata needed for populate_record_worker() */
 typedef struct PopulateRecordCache
 {
-    Oid         argtype;    /* verified row type of the first argument */
+    Oid         argtype;        /* verified row type of the first argument */
     CompositeIOData io;         /* metadata cache for populate_composite() */
 } PopulateRecordCache;
 
 /* common data for populate_array_json() and populate_array_dim_jsonb() */
 typedef struct PopulateArrayContext
 {
-    ArrayBuildState *astate;    /* array build state */
-    ArrayIOData *aio;           /* metadata cache */
-    MemoryContext acxt;         /* array build memory context */
-    MemoryContext mcxt;         /* cache memory context */
-    const char *colname;        /* for diagnostics only */
-    int        *dims;           /* dimensions */
-    int        *sizes;          /* current dimension counters */
-    int         ndims;          /* number of dimensions */
+    ArrayBuildState *astate;    /* array build state */
+    ArrayIOData *aio;           /* metadata cache */
+    MemoryContext acxt;         /* array build memory context */
+    MemoryContext mcxt;         /* cache memory context */
+    const char *colname;        /* for diagnostics only */
+    int        *dims;           /* dimensions */
+    int        *sizes;          /* current dimension counters */
+    int         ndims;          /* number of dimensions */
 } PopulateArrayContext;
 
 /* state for populate_array_json() */
 typedef struct PopulateArrayState
 {
-    JsonLexContext *lex;        /* json lexer */
+    JsonLexContext *lex;    /* json lexer */
     PopulateArrayContext *ctx;  /* context */
-    char       *element_start;  /* start of the current array element */
-    char       *element_scalar; /* current array element token if it is a scalar */
-    JsonTokenType element_type; /* current array element type */
+    char       *element_start;  /* start of the current array element */
+    char       *element_scalar; /* current array element token if it is a
+                                 * scalar */
+    JsonTokenType element_type; /* current array element type */
 } PopulateArrayState;
 
 /* state for json_strip_nulls */
@@ -272,18 +274,18 @@ typedef struct StripnullState
 /* structure for generalized json/jsonb value passing */
 typedef struct JsValue
 {
-    bool        is_json;    /* json/jsonb */
+    bool        is_json;        /* json/jsonb */
     union
     {
         struct
         {
-            char       *str;    /* json string */
-            int         len;    /* json string length or -1 if null-terminated */
-            JsonTokenType type; /* json type */
-        }           json;   /* json value */
+            char       *str;    /* json string */
+            int         len;    /* json string length or -1 if null-terminated */
+            JsonTokenType type; /* json type */
+        }           json;       /* json value */
         JsonbValue *jsonb;      /* jsonb value */
-    }           val;
+    }           val;
 } JsValue;
 
 typedef struct JsObject
@@ -291,9 +293,9 @@ typedef struct JsObject
     bool        is_json;        /* json/jsonb */
     union
     {
-        HTAB       *json_hash;
+        HTAB       *json_hash;
         JsonbContainer *jsonb_cont;
-    }           val;
+    }           val;
 } JsObject;
 
 /* useful macros for testing JsValue properties */
@@ -406,39 +408,39 @@ static void sn_scalar(void *state, char *token, JsonTokenType tokentype);
 static Datum populate_recordset_worker(FunctionCallInfo fcinfo, const char *funcname,
                            bool have_record_arg);
 static Datum populate_record_worker(FunctionCallInfo fcinfo, const char *funcname,
-                        bool have_record_arg);
+                     bool have_record_arg);
 
 /* helper functions for populate_record[set] */
-static HeapTupleHeader populate_record(TupleDesc tupdesc, RecordIOData **record_info,
-                HeapTupleHeader template, MemoryContext mcxt,
-                JsObject *obj);
+static HeapTupleHeader populate_record(TupleDesc tupdesc, RecordIOData **record_info,
+                HeapTupleHeader template, MemoryContext mcxt,
+                JsObject *obj);
 static Datum populate_record_field(ColumnIOData *col, Oid typid, int32 typmod,
-                      const char *colname, MemoryContext mcxt,
-                      Datum defaultval, JsValue *jsv, bool *isnull);
+                      const char *colname, MemoryContext mcxt,
+                      Datum defaultval, JsValue *jsv, bool *isnull);
 static void JsValueToJsObject(JsValue *jsv, JsObject *jso);
 static Datum populate_composite(CompositeIOData *io, Oid typid, int32 typmod,
-                   const char *colname, MemoryContext mcxt,
-                   HeapTupleHeader defaultval, JsValue *jsv);
+                   const char *colname, MemoryContext mcxt,
+                   HeapTupleHeader defaultval, JsValue *jsv);
 static Datum populate_scalar(ScalarIOData *io, Oid typid, int32 typmod,
                 JsValue *jsv);
 static void prepare_column_cache(ColumnIOData *column, Oid typid, int32 typmod,
-                     MemoryContext mcxt, bool json);
+                     MemoryContext mcxt, bool json);
 static Datum populate_record_field(ColumnIOData *col, Oid typid, int32 typmod,
-                      const char *colname, MemoryContext mcxt, Datum defaultval,
-                      JsValue *jsv, bool *isnull);
-static RecordIOData * allocate_record_info(MemoryContext mcxt, int ncolumns);
+                      const char *colname, MemoryContext mcxt, Datum defaultval,
+                      JsValue *jsv, bool *isnull);
+static RecordIOData *allocate_record_info(MemoryContext mcxt, int ncolumns);
 static bool JsObjectGetField(JsObject *obj, char *field, JsValue *jsv);
 static void populate_recordset_record(PopulateRecordsetState *state, JsObject *obj);
 static void populate_array_json(PopulateArrayContext *ctx, char *json, int len);
-static void populate_array_dim_jsonb(PopulateArrayContext *ctx, JsonbValue *jbv,
-                    int ndim);
+static void populate_array_dim_jsonb(PopulateArrayContext *ctx, JsonbValue *jbv,
+                         int ndim);
 static void populate_array_report_expected_array(PopulateArrayContext *ctx, int ndim);
 static void populate_array_assign_ndims(PopulateArrayContext *ctx, int ndims);
 static void populate_array_check_dimension(PopulateArrayContext *ctx, int ndim);
 static void populate_array_element(PopulateArrayContext *ctx, int ndim, JsValue *jsv);
-static Datum populate_array(ArrayIOData *aio, const char *colname,
-               MemoryContext mcxt, JsValue *jsv);
-static Datum populate_domain(DomainIOData *io, Oid typid, const char *colname,
-                MemoryContext mcxt, JsValue *jsv, bool isnull);
+static Datum populate_array(ArrayIOData *aio, const char *colname,
+               MemoryContext mcxt, JsValue *jsv);
+static Datum populate_domain(DomainIOData *io, Oid typid, const char *colname,
+                MemoryContext mcxt, JsValue *jsv, bool isnull);
 
 /* Worker that takes care of common setup for us */
 static JsonbValue *findJsonbValueFromContainerLen(JsonbContainer *container,
@@ -2319,8 +2321,8 @@ populate_array_report_expected_array(PopulateArrayContext *ctx, int ndim)
     }
     else
     {
-        StringInfoData indices;
-        int     i;
+        StringInfoData indices;
+        int         i;
 
         initStringInfo(&indices);
 
@@ -2348,7 +2350,7 @@ populate_array_report_expected_array(PopulateArrayContext *ctx, int ndim)
 static void
 populate_array_assign_ndims(PopulateArrayContext *ctx, int ndims)
 {
-    int     i;
+    int         i;
 
     Assert(ctx->ndims <= 0);
 
@@ -2360,17 +2362,17 @@ populate_array_assign_ndims(PopulateArrayContext *ctx, int ndims)
     ctx->sizes = palloc0(sizeof(int) * ndims);
 
     for (i = 0; i < ndims; i++)
-        ctx->dims[i] = -1;  /* dimensions are unknown yet */
+        ctx->dims[i] = -1;      /* dimensions are unknown yet */
 }
 
 /* check the populated subarray dimension */
 static void
 populate_array_check_dimension(PopulateArrayContext *ctx, int ndim)
 {
-    int     dim = ctx->sizes[ndim];  /* current dimension counter */
+    int         dim = ctx->sizes[ndim];     /* current dimension counter */
 
     if (ctx->dims[ndim] == -1)
-        ctx->dims[ndim] = dim;  /* assign dimension if not yet known */
+        ctx->dims[ndim] = dim;  /* assign dimension if not yet known */
     else if (ctx->dims[ndim] != dim)
         ereport(ERROR,
                 (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
@@ -2389,8 +2391,8 @@ populate_array_check_dimension(PopulateArrayContext *ctx, int ndim)
 static void
 populate_array_element(PopulateArrayContext *ctx, int ndim, JsValue *jsv)
 {
-    Datum   element;
-    bool    element_isnull;
+    Datum       element;
+    bool        element_isnull;
 
     /* populate the array element */
     element = populate_record_field(ctx->aio->element_info,
@@ -2400,10 +2402,10 @@ populate_array_element(PopulateArrayContext *ctx, int ndim, JsValue *jsv)
                                     jsv, &element_isnull);
 
     accumArrayResult(ctx->astate, element, element_isnull,
-                    ctx->aio->element_type, ctx->acxt);
+                     ctx->aio->element_type, ctx->acxt);
 
     Assert(ndim > 0);
-    ctx->sizes[ndim - 1]++; /* increment current dimension counter */
+    ctx->sizes[ndim - 1]++;     /* increment current dimension counter */
 }
 
 /* json object start handler for populate_array_json() */
@@ -2411,7 +2413,7 @@ static void
 populate_array_object_start(void *_state)
 {
     PopulateArrayState *state = (PopulateArrayState *) _state;
-    int     ndim = state->lex->lex_level;
+    int         ndim = state->lex->lex_level;
 
     if (state->ctx->ndims <= 0)
         populate_array_assign_ndims(state->ctx, ndim);
@@ -2423,9 +2425,9 @@ populate_array_object_start(void *_state)
 static void
 populate_array_array_end(void *_state)
 {
-    PopulateArrayState *state = (PopulateArrayState *) _state;
-    PopulateArrayContext *ctx = state->ctx;
-    int     ndim = state->lex->lex_level;
+    PopulateArrayState *state = (PopulateArrayState *) _state;
+    PopulateArrayContext *ctx = state->ctx;
+    int         ndim = state->lex->lex_level;
 
     if (ctx->ndims <= 0)
         populate_array_assign_ndims(ctx, ndim + 1);
@@ -2439,7 +2441,7 @@ static void
 populate_array_element_start(void *_state, bool isnull)
 {
     PopulateArrayState *state = (PopulateArrayState *) _state;
-    int     ndim = state->lex->lex_level;
+    int         ndim = state->lex->lex_level;
 
     if (state->ctx->ndims <= 0 || ndim == state->ctx->ndims)
     {
@@ -2454,9 +2456,9 @@ populate_array_element_start(void *_state, bool isnull)
 static void
 populate_array_element_end(void *_state, bool isnull)
 {
-    PopulateArrayState *state = (PopulateArrayState *) _state;
-    PopulateArrayContext *ctx = state->ctx;
-    int     ndim = state->lex->lex_level;
+    PopulateArrayState *state = (PopulateArrayState *) _state;
+    PopulateArrayContext *ctx = state->ctx;
+    int         ndim = state->lex->lex_level;
 
     Assert(ctx->ndims > 0);
 
@@ -2476,7 +2478,7 @@ populate_array_element_end(void *_state, bool isnull)
     else if (state->element_scalar)
     {
         jsv.val.json.str = state->element_scalar;
-        jsv.val.json.len = -1;  /* null-terminated */
+        jsv.val.json.len = -1;  /* null-terminated */
     }
     else
     {
@@ -2493,9 +2495,9 @@ populate_array_element_end(void *_state, bool isnull)
 static void
 populate_array_scalar(void *_state, char *token, JsonTokenType tokentype)
 {
-    PopulateArrayState *state = (PopulateArrayState *) _state;
-    PopulateArrayContext *ctx = state->ctx;
-    int     ndim = state->lex->lex_level;
+    PopulateArrayState *state = (PopulateArrayState *) _state;
+    PopulateArrayContext *ctx = state->ctx;
+    int         ndim = state->lex->lex_level;
 
     if (ctx->ndims <= 0)
         populate_array_assign_ndims(ctx, ndim);
@@ -2515,8 +2517,8 @@ populate_array_scalar(void *_state, char *token, JsonTokenType tokentype)
 static void
 populate_array_json(PopulateArrayContext *ctx, char *json, int len)
 {
-    PopulateArrayState state;
-    JsonSemAction sem;
+    PopulateArrayState state;
+    JsonSemAction sem;
 
     state.lex = makeJsonLexContextCstringLen(json, len, true);
     state.ctx = ctx;
@@ -2539,18 +2541,18 @@ populate_array_json(PopulateArrayContext *ctx, char *json, int len)
 
 /*
  * populate_array_dim_jsonb() -- Iterate recursively through jsonb sub-array
- *      elements and accumulate result using given ArrayBuildState.
+ * elements and accumulate result using given ArrayBuildState.
  */
 static void
-populate_array_dim_jsonb(PopulateArrayContext *ctx, /* context */
-                    JsonbValue *jbv,    /* jsonb sub-array */
-                    int ndim)   /* current dimension */
+populate_array_dim_jsonb(PopulateArrayContext *ctx, /* context */
+                         JsonbValue *jbv,   /* jsonb sub-array */
+                         int ndim)  /* current dimension */
 {
-    JsonbContainer *jbc = jbv->val.binary.data;
-    JsonbIterator *it;
-    JsonbIteratorToken tok;
-    JsonbValue val;
-    JsValue jsv;
+    JsonbContainer *jbc = jbv->val.binary.data;
+    JsonbIterator *it;
+    JsonbIteratorToken tok;
+    JsonbValue  val;
+    JsValue     jsv;
 
     check_stack_depth();
 
@@ -2567,9 +2569,9 @@ populate_array_dim_jsonb(PopulateArrayContext *ctx, /* context */
     tok = JsonbIteratorNext(&it, &val, true);
 
     /*
-     * If the number of dimensions is not yet known and
-     * we have found end of the array, or the first child element is not
-     * an array, then assign the number of dimensions now.
+     * If the number of dimensions is not yet known and we have found end of
+     * the array, or the first child element is not an array, then assign the
+     * number of dimensions now.
      */
     if (ctx->ndims <= 0 &&
         (tok == WJB_END_ARRAY ||
@@ -2585,8 +2587,8 @@ populate_array_dim_jsonb(PopulateArrayContext *ctx, /* context */
     while (tok == WJB_ELEM)
     {
         /*
-         * Recurse only if the dimensions of dimensions is still unknown or
-         * if it is not the innermost dimension.
+         * Recurse only if the dimensions of dimensions is still unknown or if
+         * it is not the innermost dimension.
          */
         if (ctx->ndims > 0 && ndim >= ctx->ndims)
             populate_array_element(ctx, ndim, &jsv);
@@ -2613,29 +2615,29 @@ populate_array_dim_jsonb(PopulateArrayContext *ctx, /* context */
 
 /* recursively populate an array from json/jsonb */
 static Datum
-populate_array(ArrayIOData *aio,
-        const char *colname,
-        MemoryContext mcxt,
-        JsValue *jsv)
-{
-    PopulateArrayContext ctx;
-    Datum   result;
-    int    *lbs;
-    int     i;
+populate_array(ArrayIOData *aio,
+               const char *colname,
+               MemoryContext mcxt,
+               JsValue *jsv)
+{
+    PopulateArrayContext ctx;
+    Datum       result;
+    int        *lbs;
+    int         i;
 
     ctx.aio = aio;
     ctx.mcxt = mcxt;
     ctx.acxt = CurrentMemoryContext;
     ctx.astate = initArrayResult(aio->element_type, ctx.acxt, true);
     ctx.colname = colname;
-    ctx.ndims = 0;  /* unknown yet */
+    ctx.ndims = 0;              /* unknown yet */
     ctx.dims = NULL;
     ctx.sizes = NULL;
 
     if (jsv->is_json)
         populate_array_json(&ctx, jsv->val.json.str,
                             jsv->val.json.len >= 0 ? jsv->val.json.len
-                            : strlen(jsv->val.json.str));
+                            : strlen(jsv->val.json.str));
     else
     {
         populate_array_dim_jsonb(&ctx, jsv->val.jsonb, 1);
@@ -2644,7 +2646,7 @@ populate_array(ArrayIOData *aio,
 
     Assert(ctx.ndims > 0);
 
-    lbs = palloc(sizeof(int) * ctx.ndims);
+    lbs = palloc(sizeof(int) * ctx.ndims);
 
     for (i = 0; i < ctx.ndims; i++)
         lbs[i] = 1;
@@ -2668,11 +2670,11 @@ JsValueToJsObject(JsValue *jsv, JsObject *jso)
     {
         /* convert plain-text json into a hash table */
         jso->val.json_hash =
-            get_json_object_as_hash(jsv->val.json.str,
-                        jsv->val.json.len >= 0
-                        ? jsv->val.json.len
-                        : strlen(jsv->val.json.str),
-                        "populate_composite");
+            get_json_object_as_hash(jsv->val.json.str,
+                                    jsv->val.json.len >= 0
+                                    ? jsv->val.json.len
+                                    : strlen(jsv->val.json.str),
+                                    "populate_composite");
     }
     else
     {
@@ -2689,23 +2691,23 @@ JsValueToJsObject(JsValue *jsv, JsObject *jso)
 /* recursively populate a composite (row type) value from json/jsonb */
 static Datum
 populate_composite(CompositeIOData *io,
-            Oid typid,
-            int32 typmod,
-            const char *colname,
-            MemoryContext mcxt,
-            HeapTupleHeader defaultval,
-            JsValue *jsv)
+                   Oid typid,
+                   int32 typmod,
+                   const char *colname,
+                   MemoryContext mcxt,
+                   HeapTupleHeader defaultval,
+                   JsValue *jsv)
 {
-    HeapTupleHeader tuple;
-    JsObject jso;
+    HeapTupleHeader tuple;
+    JsObject    jso;
 
     /* acquire cached tuple descriptor */
     if (!io->tupdesc ||
         io->tupdesc->tdtypeid != typid ||
         io->tupdesc->tdtypmod != typmod)
     {
-        TupleDesc tupdesc = lookup_rowtype_tupdesc(typid, typmod);
-        MemoryContext oldcxt;
+        TupleDesc   tupdesc = lookup_rowtype_tupdesc(typid, typmod);
+        MemoryContext oldcxt;
 
         if (io->tupdesc)
             FreeTupleDesc(io->tupdesc);
@@ -2750,8 +2752,8 @@ populate_scalar(ScalarIOData *io, Oid typid, int32 typmod, JsValue *jsv)
         jsv->val.json.type == JSON_TOKEN_STRING)
     {
         /*
-         * Add quotes around string value (should be already escaped)
-         * if converting to json/jsonb.
+         * Add quotes around string value (should be already escaped) if
+         * converting to json/jsonb.
          */
 
         if (len < 0)
@@ -2771,7 +2773,7 @@ populate_scalar(ScalarIOData *io, Oid typid, int32 typmod, JsValue *jsv)
             str[len] = '\0';
         }
         else
-            str = json;     /* null-terminated string */
+            str = json;         /* null-terminated string */
     }
     else
     {
@@ -2779,7 +2781,8 @@ populate_scalar(ScalarIOData *io, Oid typid, int32 typmod, JsValue *jsv)
 
         if (typid == JSONBOID)
         {
-            Jsonb      *jsonb = JsonbValueToJsonb(jbv); /* directly use jsonb */
+            Jsonb      *jsonb = JsonbValueToJsonb(jbv); /* directly use jsonb */
+
             return JsonbGetDatum(jsonb);
         }
         /* convert jsonb to string for typio call */
@@ -2789,19 +2792,20 @@ populate_scalar(ScalarIOData *io, Oid typid, int32 typmod, JsValue *jsv)
              * Convert scalar jsonb (non-scalars are passed here as jbvBinary)
              * to json string, preserving quotes around top-level strings.
              */
-            Jsonb      *jsonb = JsonbValueToJsonb(jbv);
+            Jsonb      *jsonb = JsonbValueToJsonb(jbv);
+
             str = JsonbToCString(NULL, &jsonb->root, VARSIZE(jsonb));
         }
-        else if (jbv->type == jbvString)    /* quotes are stripped */
+        else if (jbv->type == jbvString)    /* quotes are stripped */
             str = pnstrdup(jbv->val.string.val, jbv->val.string.len);
         else if (jbv->type == jbvBool)
             str = pstrdup(jbv->val.boolean ? "true" : "false");
         else if (jbv->type == jbvNumeric)
             str = DatumGetCString(DirectFunctionCall1(numeric_out,
-                                PointerGetDatum(jbv->val.numeric)));
+                                      PointerGetDatum(jbv->val.numeric)));
         else if (jbv->type == jbvBinary)
             str = JsonbToCString(NULL, jbv->val.binary.data,
-                            jbv->val.binary.len);
+                                 jbv->val.binary.len);
         else
             elog(ERROR, "unrecognized jsonb type: %d", (int) jbv->type);
     }
@@ -2816,12 +2820,12 @@ populate_scalar(ScalarIOData *io, Oid typid, int32 typmod, JsValue *jsv)
 }
 
 static Datum
-populate_domain(DomainIOData *io,
-        Oid typid,
-        const char *colname,
-        MemoryContext mcxt,
-        JsValue *jsv,
-        bool isnull)
+populate_domain(DomainIOData *io,
+                Oid typid,
+                const char *colname,
+                MemoryContext mcxt,
+                JsValue *jsv,
+                bool isnull)
 {
     Datum       res;
 
@@ -2843,14 +2847,14 @@ populate_domain(DomainIOData *io,
 
 /* prepare column metadata cache for the given type */
 static void
-prepare_column_cache(ColumnIOData *column,
-        Oid typid,
-        int32 typmod,
-        MemoryContext mcxt,
-        bool json)
+prepare_column_cache(ColumnIOData *column,
+                     Oid typid,
+                     int32 typmod,
+                     MemoryContext mcxt,
+                     bool json)
 {
-    HeapTuple tup;
-    Form_pg_type type;
+    HeapTuple   tup;
+    Form_pg_type type;
 
     column->typid = typid;
     column->typmod = typmod;
@@ -2867,7 +2871,7 @@ prepare_column_cache(ColumnIOData *column,
         column->io.domain.base_typid = type->typbasetype;
         column->io.domain.base_typmod = type->typtypmod;
         column->io.domain.base_io = MemoryContextAllocZero(mcxt,
-                                            sizeof(ColumnIOData));
+                                                           sizeof(ColumnIOData));
         column->io.domain.domain_info = NULL;
     }
     else if (type->typtype == TYPTYPE_COMPOSITE || typid == RECORDOID)
@@ -2880,7 +2884,7 @@ prepare_column_cache(ColumnIOData *column,
     {
         column->typcat = TYPECAT_ARRAY;
         column->io.array.element_info = MemoryContextAllocZero(mcxt,
-                                            sizeof(ColumnIOData));
+                                                               sizeof(ColumnIOData));
         column->io.array.element_type = type->typelem;
         /* array element typemod stored in attribute's typmod */
         column->io.array.element_typmod = typmod;
@@ -2891,7 +2895,7 @@ prepare_column_cache(ColumnIOData *column,
         /* don't need input function when converting from jsonb to jsonb */
         if (json || typid != JSONBOID)
         {
-            Oid     typioproc;
+            Oid         typioproc;
 
             getTypeInputInfo(typid, &typioproc, &column->scalar_io.typioparam);
             fmgr_info_cxt(typioproc, &column->scalar_io.typiofunc, mcxt);
@@ -2903,13 +2907,13 @@ prepare_column_cache(ColumnIOData *column,
 /* recursively populate a record field or an array element from a json/jsonb value */
 static Datum
 populate_record_field(ColumnIOData *col,
-            Oid typid,
-            int32 typmod,
-            const char *colname,
-            MemoryContext mcxt,
-            Datum defaultval,
-            JsValue *jsv,
-            bool *isnull)
+                      Oid typid,
+                      int32 typmod,
+                      const char *colname,
+                      MemoryContext mcxt,
+                      Datum defaultval,
+                      JsValue *jsv,
+                      bool *isnull)
 {
     TypeCat     typcat;
 
@@ -2962,9 +2966,9 @@ static RecordIOData *
 allocate_record_info(MemoryContext mcxt, int ncolumns)
 {
     RecordIOData *data = (RecordIOData *)
-        MemoryContextAlloc(mcxt,
-                    offsetof(RecordIOData, columns) +
-                    ncolumns * sizeof(ColumnIOData));
+    MemoryContextAlloc(mcxt,
+                       offsetof(RecordIOData, columns) +
+                       ncolumns * sizeof(ColumnIOData));
 
     data->record_type = InvalidOid;
     data->record_typmod = 0;
@@ -2986,7 +2990,7 @@ JsObjectGetField(JsObject *obj, char *field, JsValue *jsv)
         jsv->val.json.type = hashentry ? hashentry->type : JSON_TOKEN_NULL;
         jsv->val.json.str = jsv->val.json.type == JSON_TOKEN_NULL ? NULL :
-            hashentry->val;
+            hashentry->val;
         jsv->val.json.len = jsv->val.json.str ? -1 : 0; /* null-terminated */
 
         return hashentry != NULL;
@@ -2994,8 +2998,8 @@ JsObjectGetField(JsObject *obj, char *field, JsValue *jsv)
     else
     {
         jsv->val.jsonb = !obj->val.jsonb_cont ? NULL :
-            findJsonbValueFromContainerLen(obj->val.jsonb_cont, JB_FOBJECT,
-                    field, strlen(field));
+            findJsonbValueFromContainerLen(obj->val.jsonb_cont, JB_FOBJECT,
+                                           field, strlen(field));
 
         return jsv->val.jsonb != NULL;
     }
@@ -3003,23 +3007,23 @@ JsObjectGetField(JsObject *obj, char *field, JsValue *jsv)
 
 /* populate a record tuple from json/jsonb value */
 static HeapTupleHeader
-populate_record(TupleDesc tupdesc,
-        RecordIOData **precord,
-        HeapTupleHeader defaultval,
-        MemoryContext mcxt,
-        JsObject *obj)
-{
-    RecordIOData *record = *precord;
-    Datum      *values;
-    bool       *nulls;
-    HeapTuple   res;
-    int     ncolumns = tupdesc->natts;
-    int     i;
+populate_record(TupleDesc tupdesc,
+                RecordIOData **precord,
+                HeapTupleHeader defaultval,
+                MemoryContext mcxt,
+                JsObject *obj)
+{
+    RecordIOData *record = *precord;
+    Datum      *values;
+    bool       *nulls;
+    HeapTuple   res;
+    int         ncolumns = tupdesc->natts;
+    int         i;
 
     /*
-     * if the input json is empty, we can only skip the rest if we were
-     * passed in a non-null record, since otherwise there may be issues
-     * with domain nulls.
+     * if the input json is empty, we can only skip the rest if we were passed
+     * in a non-null record, since otherwise there may be issues with domain
+     * nulls.
      */
     if (defaultval && JsObjectIsEmpty(obj))
         return defaultval;
@@ -3034,7 +3038,7 @@ populate_record(TupleDesc tupdesc,
         record->record_typmod != tupdesc->tdtypmod)
     {
         MemSet(record, 0, offsetof(RecordIOData, columns) +
-               ncolumns * sizeof(ColumnIOData));
+               ncolumns * sizeof(ColumnIOData));
         record->record_type = tupdesc->tdtypeid;
         record->record_typmod = tupdesc->tdtypmod;
         record->ncolumns = ncolumns;
@@ -3067,10 +3071,10 @@ populate_record(TupleDesc tupdesc,
     for (i = 0; i < ncolumns; ++i)
     {
-        Form_pg_attribute att = tupdesc->attrs[i];
-        char       *colname = NameStr(att->attname);
-        JsValue     field = { 0 };
-        bool        found;
+        Form_pg_attribute att = tupdesc->attrs[i];
+        char       *colname = NameStr(att->attname);
+        JsValue     field = {0};
+        bool        found;
 
         /* Ignore dropped columns in datatype */
         if (att->attisdropped)
@@ -3116,7 +3120,7 @@ populate_record_worker(FunctionCallInfo fcinfo, const char *funcname,
 {
     int         json_arg_num = have_record_arg ? 1 : 0;
     Oid         jtype = get_fn_expr_argtype(fcinfo->flinfo, json_arg_num);
-    JsValue     jsv = { 0 };
+    JsValue     jsv = {0};
     HeapTupleHeader rec = NULL;
     Oid         tupType;
     int32       tupTypmod;
@@ -3134,7 +3138,7 @@ populate_record_worker(FunctionCallInfo fcinfo, const char *funcname,
      */
     if (!cache)
         fcinfo->flinfo->fn_extra = cache =
-            MemoryContextAllocZero(fnmcxt, sizeof(*cache));
+            MemoryContextAllocZero(fnmcxt, sizeof(*cache));
 
     if (have_record_arg)
     {
@@ -3210,7 +3214,8 @@ populate_record_worker(FunctionCallInfo fcinfo, const char *funcname,
 
         jsv.val.json.str = VARDATA_ANY(json);
         jsv.val.json.len = VARSIZE_ANY_EXHDR(json);
-        jsv.val.json.type = JSON_TOKEN_INVALID; /* not used in populate_composite() */
+        jsv.val.json.type = JSON_TOKEN_INVALID; /* not used in
+                                                 * populate_composite() */
     }
     else
     {
@@ -3417,8 +3422,8 @@ json_to_recordset(PG_FUNCTION_ARGS)
 static void
 populate_recordset_record(PopulateRecordsetState *state, JsObject *obj)
 {
-    HeapTupleData tuple;
-    HeapTupleHeader tuphead = populate_record(state->ret_tdesc,
+    HeapTupleData tuple;
+    HeapTupleHeader tuphead = populate_record(state->ret_tdesc,
                                               state->my_extra,
                                               state->rec,
                                               state->fn_mcxt,
@@ -4793,9 +4798,9 @@ setPathArray(JsonbIterator **it, Datum *path_elems, bool *path_nulls,
 void
 iterate_jsonb_string_values(Jsonb *jb, void *state, JsonIterateStringValuesAction action)
 {
-    JsonbIterator *it;
-    JsonbValue v;
-    JsonbIteratorToken type;
+    JsonbIterator *it;
+    JsonbValue  v;
+    JsonbIteratorToken type;
 
     it = JsonbIteratorInit(&jb->root);
 
@@ -4817,7 +4822,7 @@ iterate_json_string_values(text *json, void *action_state, JsonIterateStringValu
 {
     JsonLexContext *lex = makeJsonLexContext(json, true);
     JsonSemAction *sem = palloc0(sizeof(JsonSemAction));
-    IterateJsonStringValuesState *state = palloc0(sizeof(IterateJsonStringValuesState));
+    IterateJsonStringValuesState *state = palloc0(sizeof(IterateJsonStringValuesState));
 
     state->lex = lex;
     state->action = action;
@@ -4836,7 +4841,8 @@ iterate_json_string_values(text *json, void *action_state, JsonIterateStringValu
 static void
 iterate_string_values_scalar(void *state, char *token, JsonTokenType tokentype)
 {
-    IterateJsonStringValuesState *_state = (IterateJsonStringValuesState *) state;
+    IterateJsonStringValuesState *_state = (IterateJsonStringValuesState *) state;
+
     if (tokentype == JSON_TOKEN_STRING)
         (*_state->action) (_state->action_state, token, strlen(token));
 }
@@ -4849,14 +4855,15 @@ iterate_string_values_scalar(void *state, char *token, JsonTokenType tokentype)
  */
 Jsonb *
 transform_jsonb_string_values(Jsonb *jsonb, void *action_state,
-                    JsonTransformStringValuesAction transform_action)
+                              JsonTransformStringValuesAction transform_action)
 {
-    JsonbIterator *it;
-    JsonbValue v, *res = NULL;
-    JsonbIteratorToken type;
-    JsonbParseState *st = NULL;
-    text       *out;
-    bool        is_scalar = false;
+    JsonbIterator *it;
+    JsonbValue  v,
+               *res = NULL;
+    JsonbIteratorToken type;
+    JsonbParseState *st = NULL;
+    text       *out;
+    bool        is_scalar = false;
 
     it = JsonbIteratorInit(&jsonb->root);
     is_scalar = it->isScalar;
@@ -4928,6 +4935,7 @@ static void
 transform_string_values_object_start(void *state)
 {
     TransformJsonStringValuesState *_state = (TransformJsonStringValuesState *) state;
+
     appendStringInfoCharMacro(_state->strval, '{');
 }
 
@@ -4935,6 +4943,7 @@ static void
 transform_string_values_object_end(void *state)
 {
     TransformJsonStringValuesState *_state = (TransformJsonStringValuesState *) state;
+
     appendStringInfoCharMacro(_state->strval, '}');
 }
 
@@ -4942,6 +4951,7 @@ static void
 transform_string_values_array_start(void *state)
 {
     TransformJsonStringValuesState *_state = (TransformJsonStringValuesState *) state;
+
     appendStringInfoCharMacro(_state->strval, '[');
 }
 
@@ -4949,6 +4959,7 @@ static void
 transform_string_values_array_end(void *state)
 {
     TransformJsonStringValuesState *_state = (TransformJsonStringValuesState *) state;
+
     appendStringInfoCharMacro(_state->strval, ']');
 }
 
@@ -4984,7 +4995,8 @@ transform_string_values_scalar(void *state, char *token, JsonTokenType tokentype
 
     if (tokentype == JSON_TOKEN_STRING)
     {
-        text       *out = (*_state->action) (_state->action_state, token, strlen(token));
+        text       *out = (*_state->action) (_state->action_state, token, strlen(token));
+
         escape_json(_state->strval, text_to_cstring(out));
     }
     else
diff --git a/src/backend/utils/adt/like.c b/src/backend/utils/adt/like.c
index b9806069c2..d4d173480d 100644
--- a/src/backend/utils/adt/like.c
+++ b/src/backend/utils/adt/like.c
@@ -180,7 +180,7 @@ Generic_Text_IC_like(text *str, text *pat, Oid collation)
          */
         ereport(ERROR,
                 (errcode(ERRCODE_INDETERMINATE_COLLATION),
-                 errmsg("could not determine which collation to use for ILIKE"),
+            errmsg("could not determine which collation to use for ILIKE"),
                  errhint("Use the COLLATE clause to set the collation explicitly.")));
     }
     locale = pg_newlocale_from_collation(collation);
@@ -189,9 +189,9 @@ Generic_Text_IC_like(text *str, text *pat, Oid collation)
     /*
      * For efficiency reasons, in the single byte case we don't call lower()
      * on the pattern and text, but instead call SB_lower_char on each
-     * character.  In the multi-byte case we don't have much choice :-(.
-     * Also, ICU does not support single-character case folding, so we go the
-     * long way.
+     * character.  In the multi-byte case we don't have much choice :-(. Also,
+     * ICU does not support single-character case folding, so we go the long
+     * way.
      */
 
     if (pg_database_encoding_max_length() > 1 ||
         (locale && locale->provider == COLLPROVIDER_ICU))
diff --git a/src/backend/utils/adt/mac.c b/src/backend/utils/adt/mac.c
index eff4529a6a..c2b52d8046 100644
--- a/src/backend/utils/adt/mac.c
+++ b/src/backend/utils/adt/mac.c
@@ -40,7 +40,7 @@ typedef struct
     bool        estimating;     /* true if estimating cardinality */
 
     hyperLogLogState abbr_card; /* cardinality estimator */
-} macaddr_sortsupport_state;
+}           macaddr_sortsupport_state;
 
 static int  macaddr_cmp_internal(macaddr *a1, macaddr *a2);
 static int  macaddr_fast_cmp(Datum x, Datum y, SortSupport ssup);
diff --git a/src/backend/utils/adt/mac8.c b/src/backend/utils/adt/mac8.c
index c442eae6c1..1ed4183be7 100644
--- a/src/backend/utils/adt/mac8.c
+++ b/src/backend/utils/adt/mac8.c
@@ -103,7 +103,7 @@ invalid_input:
 Datum
 macaddr8_in(PG_FUNCTION_ARGS)
 {
-    const unsigned char *str = (unsigned char*) PG_GETARG_CSTRING(0);
+    const unsigned char *str = (unsigned char *) PG_GETARG_CSTRING(0);
     const unsigned char *ptr = str;
     macaddr8   *result;
     unsigned char a = 0,
diff --git a/src/backend/utils/adt/pg_locale.c b/src/backend/utils/adt/pg_locale.c
index e2ccac2d2a..24ae3c6886 100644
--- a/src/backend/utils/adt/pg_locale.c
+++ b/src/backend/utils/adt/pg_locale.c
@@ -1282,7 +1282,7 @@ pg_newlocale_from_collation(Oid collid)
         Form_pg_collation collform;
         const char *collcollate;
         const char *collctype pg_attribute_unused();
-        pg_locale_t result;
+        pg_locale_t result;
         Datum       collversion;
         bool        isnull;
 
@@ -1294,8 +1294,8 @@ pg_newlocale_from_collation(Oid collid)
         collcollate = NameStr(collform->collcollate);
         collctype = NameStr(collform->collctype);
 
-        result = malloc(sizeof(* result));
-        memset(result, 0, sizeof(* result));
+        result = malloc(sizeof(*result));
+        memset(result, 0, sizeof(*result));
         result->provider = collform->collprovider;
 
         if (collform->collprovider == COLLPROVIDER_LIBC)
@@ -1308,7 +1308,7 @@ pg_newlocale_from_collation(Oid collid)
                 /* Normal case where they're the same */
 #ifndef WIN32
                 loc = newlocale(LC_COLLATE_MASK | LC_CTYPE_MASK, collcollate,
-                        NULL);
+                                NULL);
 #else
                 loc = _create_locale(LC_ALL, collcollate);
 #endif
@@ -1330,9 +1330,9 @@ pg_newlocale_from_collation(Oid collid)
 #else
 
                 /*
-                 * XXX The _create_locale() API doesn't appear to support this.
-                 * Could perhaps be worked around by changing pg_locale_t to
-                 * contain two separate fields.
+                 * XXX The _create_locale() API doesn't appear to support
+                 * this.  Could perhaps be worked around by changing
+                 * pg_locale_t to contain two separate fields.
                  */
                 ereport(ERROR,
                         (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
@@ -1358,18 +1358,18 @@ pg_newlocale_from_collation(Oid collid)
             collator = ucol_open(collcollate, &status);
             if (U_FAILURE(status))
                 ereport(ERROR,
-                    (errmsg("could not open collator for locale \"%s\": %s",
-                        collcollate, u_errorName(status))));
+                        (errmsg("could not open collator for locale \"%s\": %s",
+                                collcollate, u_errorName(status))));
 
             result->info.icu.locale = strdup(collcollate);
             result->info.icu.ucol = collator;
-#else           /* not USE_ICU */
+#else                           /* not USE_ICU */
             /* could get here if a collation was created by a build with ICU */
             ereport(ERROR,
                     (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
                      errmsg("ICU is not supported in this build"), \
-                errhint("You need to rebuild PostgreSQL using --with-icu.")));
-#endif          /* not USE_ICU */
+                     errhint("You need to rebuild PostgreSQL using --with-icu.")));
+#endif                          /* not USE_ICU */
         }
 
         collversion = SysCacheGetAttr(COLLOID, tp, Anum_pg_collation_collversion,
@@ -1382,9 +1382,11 @@ pg_newlocale_from_collation(Oid collid)
             actual_versionstr = get_collation_actual_version(collform->collprovider, collcollate);
             if (!actual_versionstr)
             {
-                /* This could happen when specifying a version in CREATE
-                 * COLLATION for a libc locale, or manually creating a mess
-                 * in the catalogs. */
+                /*
+                 * This could happen when specifying a version in CREATE
+                 * COLLATION for a libc locale, or manually creating a mess in
+                 * the catalogs.
+                 */
                 ereport(ERROR,
                         (errmsg("collation \"%s\" has no actual version, but a version was specified",
                                 NameStr(collform->collname))));
@@ -1396,13 +1398,13 @@ pg_newlocale_from_collation(Oid collid)
                         (errmsg("collation \"%s\" has version mismatch",
                                 NameStr(collform->collname)),
                          errdetail("The collation in the database was created using version %s, "
-                            "but the operating system provides version %s.",
+                                   "but the operating system provides version %s.",
                                    collversionstr, actual_versionstr),
                          errhint("Rebuild all objects affected by this collation and run "
                                  "ALTER COLLATION %s REFRESH VERSION, "
-                            "or build PostgreSQL with the right library version.",
+                                 "or build PostgreSQL with the right library version.",
                                  quote_qualified_identifier(get_namespace_name(collform->collnamespace),
-                                    NameStr(collform->collname)))));
+                                                            NameStr(collform->collname)))));
             }
 
             ReleaseSysCache(tp);
@@ -1478,8 +1480,8 @@ init_icu_converter(void)
     conv = ucnv_open(icu_encoding_name, &status);
     if (U_FAILURE(status))
         ereport(ERROR,
-            (errmsg("could not open ICU converter for encoding \"%s\": %s",
-                icu_encoding_name, u_errorName(status))));
+                (errmsg("could not open ICU converter for encoding \"%s\": %s",
+                        icu_encoding_name, u_errorName(status))));
 
     icu_converter = conv;
 }
@@ -1492,7 +1494,7 @@ icu_to_uchar(UChar **buff_uchar, const char *buff, size_t nbytes)
 
     init_icu_converter();
 
-    len_uchar = 2 * nbytes; /* max length per docs */
+    len_uchar = 2 * nbytes;     /* max length per docs */
     *buff_uchar = palloc(len_uchar * sizeof(**buff_uchar));
     status = U_ZERO_ERROR;
     len_uchar = ucnv_toUChars(icu_converter, *buff_uchar, len_uchar, buff, nbytes, &status);
diff --git a/src/backend/utils/adt/ruleutils.c b/src/backend/utils/adt/ruleutils.c
index 43b1475035..9234bc2a97 100644
--- a/src/backend/utils/adt/ruleutils.c
+++ b/src/backend/utils/adt/ruleutils.c
@@ -1448,7 +1448,7 @@ pg_get_statisticsobjdef(PG_FUNCTION_ARGS)
 static char *
 pg_get_statisticsobj_worker(Oid statextid, bool missing_ok)
 {
-    Form_pg_statistic_ext statextrec;
+    Form_pg_statistic_ext statextrec;
     HeapTuple   statexttup;
     StringInfoData buf;
     int         colno;
@@ -1477,7 +1477,7 @@ pg_get_statisticsobj_worker(Oid statextid, bool missing_ok)
     nsp = get_namespace_name(statextrec->stxnamespace);
     appendStringInfo(&buf, "CREATE STATISTICS %s",
                      quote_qualified_identifier(nsp,
-                                        NameStr(statextrec->stxname)));
+                                                NameStr(statextrec->stxname)));
 
     /*
      * Decode the stxkind column so that we know which stats types to print.
@@ -1735,11 +1735,11 @@ pg_get_partkeydef_worker(Oid relid, int prettyFlags,
 Datum
 pg_get_partition_constraintdef(PG_FUNCTION_ARGS)
 {
-    Oid     relationId = PG_GETARG_OID(0);
-    Expr       *constr_expr;
-    int     prettyFlags;
-    List       *context;
-    char       *consrc;
+    Oid         relationId = PG_GETARG_OID(0);
+    Expr       *constr_expr;
+    int         prettyFlags;
+    List       *context;
+    char       *consrc;
 
     constr_expr = get_partition_qual_relid(relationId);
diff --git a/src/backend/utils/adt/selfuncs.c b/src/backend/utils/adt/selfuncs.c
index 7028d6387c..6e491bbc21 100644
--- a/src/backend/utils/adt/selfuncs.c
+++ b/src/backend/utils/adt/selfuncs.c
@@ -170,7 +170,7 @@ static double eqjoinsel_semi(Oid operator,
               VariableStatData *vardata1, VariableStatData *vardata2,
               RelOptInfo *inner_rel);
 static bool estimate_multivariate_ndistinct(PlannerInfo *root,
-                    RelOptInfo *rel, List **varinfos, double *ndistinct);
+                                RelOptInfo *rel, List **varinfos, double *ndistinct);
 static bool convert_to_scalar(Datum value, Oid valuetypid, double *scaledvalue,
                   Datum lobound, Datum hibound, Oid boundstypid,
                   double *scaledlobound, double *scaledhibound);
@@ -3364,8 +3364,8 @@ estimate_num_groups(PlannerInfo *root, List *groupExprs, double input_rows,
         List       *relvarinfos = NIL;
 
         /*
-         * Split the list of varinfos in two - one for the current rel,
-         * one for remaining Vars on other rels.
+         * Split the list of varinfos in two - one for the current rel, one
+         * for remaining Vars on other rels.
          */
         relvarinfos = lcons(varinfo1, relvarinfos);
         for_each_cell(l, lnext(list_head(varinfos)))
@@ -3388,9 +3388,9 @@ estimate_num_groups(PlannerInfo *root, List *groupExprs, double input_rows,
          * Get the numdistinct estimate for the Vars of this rel.  We
          * iteratively search for multivariate n-distinct with maximum number
          * of vars; assuming that each var group is independent of the others,
-         * we multiply them together.  Any remaining relvarinfos after
-         * no more multivariate matches are found are assumed independent too,
-         * so their individual ndistinct estimates are multiplied also.
+         * we multiply them together.  Any remaining relvarinfos after no more
+         * multivariate matches are found are assumed independent too, so
+         * their individual ndistinct estimates are multiplied also.
          *
          * While iterating, count how many separate numdistinct values we
          * apply.  We apply a fudge factor below, but only if we multiplied
@@ -3410,7 +3410,7 @@ estimate_num_groups(PlannerInfo *root, List *groupExprs, double input_rows,
             }
             else
             {
-                foreach (l, relvarinfos)
+                foreach(l, relvarinfos)
                 {
                     GroupVarInfo *varinfo2 = (GroupVarInfo *) lfirst(l);
 
@@ -3702,12 +3702,12 @@ estimate_multivariate_ndistinct(PlannerInfo *root, RelOptInfo *rel,
     }
 
     /* look for the ndistinct statistics matching the most vars */
-    nmatches = 1;   /* we require at least two matches */
+    nmatches = 1;               /* we require at least two matches */
     foreach(lc, rel->statlist)
     {
         StatisticExtInfo *info = (StatisticExtInfo *) lfirst(lc);
         Bitmapset  *shared;
-        int     nshared;
+        int         nshared;
 
         /* skip statistics of other kinds */
         if (info->kind != STATS_EXT_NDISTINCT)
@@ -3745,8 +3745,8 @@ estimate_multivariate_ndistinct(PlannerInfo *root, RelOptInfo *rel,
      */
     if (stats)
     {
-        int     i;
-        List       *newlist = NIL;
+        int         i;
+        List       *newlist = NIL;
         MVNDistinctItem *item = NULL;
 
         /* Find the specific item that exactly matches the combination */
@@ -7766,8 +7766,8 @@ brincostestimate(PlannerInfo *root, IndexPath *path, double loop_count,
      *
      * Because we can use all index quals equally when scanning, we can use
      * the largest correlation (in absolute value) among columns used by the
-     * query.  Start at zero, the worst possible case.  If we cannot find
-     * any correlation statistics, we will keep it as 0.
+     * query.  Start at zero, the worst possible case.  If we cannot find any
+     * correlation statistics, we will keep it as 0.
      */
     *indexCorrelation = 0;
 
@@ -7790,7 +7790,7 @@ brincostestimate(PlannerInfo *root, IndexPath *path, double loop_count,
          */
         if (HeapTupleIsValid(vardata.statsTuple) && !vardata.freefunc)
             elog(ERROR,
-                "no function provided to release variable stats with");
+                 "no function provided to release variable stats with");
     }
     else
     {
@@ -7813,11 +7813,11 @@ brincostestimate(PlannerInfo *root, IndexPath *path, double loop_count,
         attnum = qinfo->indexcol + 1;
 
         if (get_index_stats_hook &&
-            (*get_index_stats_hook) (root, index->indexoid, attnum, &vardata))
+        (*get_index_stats_hook) (root, index->indexoid, attnum, &vardata))
         {
             /*
-             * The hook took control of acquiring a stats tuple.  If it did
-             * supply a tuple, it'd better have supplied a freefunc.
+             * The hook took control of acquiring a stats tuple.  If it
+             * did supply a tuple, it'd better have supplied a freefunc.
              */
             if (HeapTupleIsValid(vardata.statsTuple) &&
                 !vardata.freefunc)
@@ -7826,7 +7826,7 @@ brincostestimate(PlannerInfo *root, IndexPath *path, double loop_count,
         else
         {
             vardata.statsTuple = SearchSysCache3(STATRELATTINH,
-                                        ObjectIdGetDatum(index->indexoid),
+                                               ObjectIdGetDatum(index->indexoid),
                                                  Int16GetDatum(attnum),
                                                  BoolGetDatum(false));
             vardata.freefunc = ReleaseSysCache;
@@ -7872,8 +7872,8 @@ brincostestimate(PlannerInfo *root, IndexPath *path, double loop_count,
     /*
      * Now estimate the number of ranges that we'll touch by using the
-     * indexCorrelation from the stats. Careful not to divide by zero
-     * (note we're using the absolute value of the correlation).
+     * indexCorrelation from the stats. Careful not to divide by zero (note
+     * we're using the absolute value of the correlation).
      */
     if (*indexCorrelation < 1.0e-10)
         estimatedRanges = indexRanges;
@@ -7888,8 +7888,8 @@ brincostestimate(PlannerInfo *root, IndexPath *path, double loop_count,
     *indexSelectivity = selec;
 
     /*
-     * Compute the index qual costs, much as in genericcostestimate, to add
-     * to the index costs.
+     * Compute the index qual costs, much as in genericcostestimate, to add to
+     * the index costs.
      */
     qual_arg_cost = other_operands_eval_cost(root, qinfos) +
         orderby_operands_eval_cost(root, path);
diff --git a/src/backend/utils/adt/txid.c b/src/backend/utils/adt/txid.c
index 5c64e32719..5dd996f62c 100644
--- a/src/backend/utils/adt/txid.c
+++ b/src/backend/utils/adt/txid.c
@@ -147,8 +147,8 @@ TransactionIdInRecentPast(uint64 xid_with_epoch, TransactionId *extracted_xid)
     /*
      * If the transaction ID has wrapped around, it's definitely too old to
      * determine the commit status.  Otherwise, we can compare it to
-     * ShmemVariableCache->oldestClogXid to determine whether the relevant CLOG
-     * entry is guaranteed to still exist.
+     * ShmemVariableCache->oldestClogXid to determine whether the relevant
+     * CLOG entry is guaranteed to still exist.
      */
     if (xid_epoch + 1 < now_epoch
         || (xid_epoch + 1 == now_epoch && xid < now_epoch_last_xid)
@@ -454,7 +454,7 @@ txid_current_if_assigned(PG_FUNCTION_ARGS)
 {
     txid        val;
     TxidEpoch   state;
-    TransactionId topxid = GetTopTransactionIdIfAny();
+    TransactionId topxid = GetTopTransactionIdIfAny();
 
     if (topxid == InvalidTransactionId)
         PG_RETURN_NULL();
@@ -741,9 +741,9 @@ txid_snapshot_xip(PG_FUNCTION_ARGS)
 Datum
 txid_status(PG_FUNCTION_ARGS)
 {
-    const char *status;
-    uint64 xid_with_epoch = PG_GETARG_INT64(0);
-    TransactionId xid;
+    const char *status;
+    uint64      xid_with_epoch = PG_GETARG_INT64(0);
+    TransactionId xid;
 
     /*
      * We must protect against concurrent truncation of clog entries to avoid
@@ -770,8 +770,8 @@ txid_status(PG_FUNCTION_ARGS)
              * it's aborted if it isn't committed and is older than our
              * snapshot xmin.
              *
-             * Otherwise it must be in-progress (or have been at the time
-             * we checked commit/abort status).
+             * Otherwise it must be in-progress (or have been at the time we
+             * checked commit/abort status).
              */
             if (TransactionIdPrecedes(xid, GetActiveSnapshot()->xmin))
                 status = gettext_noop("aborted");
diff --git a/src/backend/utils/adt/varlena.c b/src/backend/utils/adt/varlena.c
index 0b0032787b..be399f48f9 100644
--- a/src/backend/utils/adt/varlena.c
+++ b/src/backend/utils/adt/varlena.c
@@ -1557,8 +1557,10 @@ varstr_cmp(char *arg1, int len1, char *arg2, int len2, Oid collid)
         else
 #endif
         {
-            int32_t ulen1, ulen2;
-            UChar *uchar1, *uchar2;
+            int32_t     ulen1,
+                        ulen2;
+            UChar      *uchar1,
+                       *uchar2;
 
             ulen1 = icu_to_uchar(&uchar1, arg1, len1);
             ulen2 = icu_to_uchar(&uchar2, arg2, len2);
@@ -1567,10 +1569,10 @@ varstr_cmp(char *arg1, int len1, char *arg2, int len2, Oid collid)
                                             uchar1, ulen1, uchar2, ulen2);
         }
-#else           /* not USE_ICU */
+#else                           /* not USE_ICU */
         /* shouldn't happen */
         elog(ERROR, "unsupported collprovider: %c", mylocale->provider);
-#endif          /* not USE_ICU */
+#endif                          /* not USE_ICU */
     }
     else
     {
@@ -2136,13 +2138,15 @@ varstrfastcmp_locale(Datum x, Datum y, SortSupport ssup)
                                         &status);
             if (U_FAILURE(status))
                 ereport(ERROR,
-                    (errmsg("collation failed: %s", u_errorName(status))));
+                        (errmsg("collation failed: %s", u_errorName(status))));
         }
         else
 #endif
         {
-            int32_t ulen1, ulen2;
-            UChar *uchar1, *uchar2;
+            int32_t     ulen1,
+                        ulen2;
+            UChar      *uchar1,
+                       *uchar2;
 
             ulen1 = icu_to_uchar(&uchar1, a1p, len1);
             ulen2 = icu_to_uchar(&uchar2, a2p, len2);
@@ -2151,10 +2155,10 @@ varstrfastcmp_locale(Datum x, Datum y, SortSupport ssup)
                                             uchar1, ulen1, uchar2, ulen2);
         }
-#else           /* not USE_ICU */
+#else                           /* not USE_ICU */
         /* shouldn't happen */
        elog(ERROR, "unsupported collprovider: %c", sss->locale->provider);
-#endif          /* not USE_ICU */
+#endif                          /* not USE_ICU */
     }
     else
     {
@@ -2300,8 +2304,11 @@ varstr_abbrev_convert(Datum original, SortSupport ssup)
     }
 
     memcpy(sss->buf1, authoritative_data, len);
-    /* Just like strcoll(), strxfrm() expects a NUL-terminated string.
-     * Not necessary for ICU, but doesn't hurt. */
+
+    /*
+     * Just like strcoll(), strxfrm() expects a NUL-terminated string. Not
+     * necessary for ICU, but doesn't hurt.
+     */
     sss->buf1[len] = '\0';
     sss->last_len1 = len;
 
@@ -2336,13 +2343,13 @@ varstr_abbrev_convert(Datum original, SortSupport ssup)
             UErrorCode  status;
 
             uiter_setUTF8(&iter, sss->buf1, len);
-            state[0] = state[1] = 0;    /* won't need that again */
+            state[0] = state[1] = 0;    /* won't need that again */
             status = U_ZERO_ERROR;
             bsize = ucol_nextSortKeyPart(sss->locale->info.icu.ucol,
                                          &iter,
                                          state,
                                          (uint8_t *) sss->buf2,
-                                         Min(sizeof(Datum), sss->buflen2),
+                                     Min(sizeof(Datum), sss->buflen2),
                                          &status);
             if (U_FAILURE(status))
                 ereport(ERROR,
@@ -2351,7 +2358,7 @@ varstr_abbrev_convert(Datum original, SortSupport ssup)
             else
                 bsize = ucol_getSortKey(sss->locale->info.icu.ucol,
                                         uchar, ulen,
-                                    (uint8_t *) sss->buf2, sss->buflen2);
+                                        (uint8_t *) sss->buf2, sss->buflen2);
         }
         else
 #endif
diff --git a/src/backend/utils/adt/xml.c b/src/backend/utils/adt/xml.c
index 42cffbbdd3..cdcd45419a 100644
--- a/src/backend/utils/adt/xml.c
+++ b/src/backend/utils/adt/xml.c
@@ -2385,8 +2385,8 @@ database_get_xml_visible_tables(void)
                      CppAsString2(RELKIND_RELATION) ","
                      CppAsString2(RELKIND_MATVIEW) ","
                      CppAsString2(RELKIND_VIEW) ")"
-                     " AND pg_catalog.has_table_privilege(pg_class.oid, 'SELECT')"
-                     " AND relnamespace IN (" XML_VISIBLE_SCHEMAS ");");
+        " AND pg_catalog.has_table_privilege(pg_class.oid, 'SELECT')"
+                     " AND relnamespace IN (" XML_VISIBLE_SCHEMAS ");");
 }
 
 
@@ -4518,9 +4518,8 @@ XmlTableGetValue(TableFuncScanState *state, int colnum,
                      * This line ensure mapping of empty tags to PostgreSQL
                      * value. Usually we would to map a empty tag to empty
                      * string. But this mapping can create empty string when
-                     * user doesn't expect it - when empty tag is enforced
-                     * by libxml2 - when user uses a text() function for
-                     * example.
+                     * user doesn't expect it - when empty tag is enforced by
+                     * libxml2 - when user uses a text() function for example.
                      */
                     cstr = "";
                 }
```
