@@ -17,20 +17,16 @@
  * is kept in the type cache.
  *
  * Once created, a type cache entry lives as long as the backend does, so
- * there is no need for a call to release a cache entry.  (For present uses,
+ * there is no need for a call to release a cache entry.  If the type is
+ * dropped, the cache entry simply becomes wasted storage.  (For present uses,
  * it would be okay to flush type cache entries at the ends of transactions,
  * if we needed to reclaim space.)
  *
- * There is presently no provision for clearing out a cache entry if the
- * stored data becomes obsolete.  (The code will work if a type acquires
- * opclasses it didn't have before while a backend runs --- but not if the
- * definition of an existing opclass is altered.)  However, the relcache
- * doesn't cope with opclasses changing under it, either, so this seems
- * a low-priority problem.
- *
- * We do support clearing the tuple descriptor and operator/function parts
- * of a rowtype's cache entry, since those may need to change as a consequence
- * of ALTER TABLE.
+ * We have some provisions for updating cache entries if the stored data
+ * becomes obsolete.  Information dependent on opclasses is cleared if we
+ * detect updates to pg_opclass.  We also support clearing the tuple
+ * descriptor and operator/function parts of a rowtype's cache entry,
+ * since those may need to change as a consequence of ALTER TABLE.
  *
  *
  * Portions Copyright (c) 1996-2014, PostgreSQL Global Development Group
@@ -70,13 +66,20 @@
 static HTAB *TypeCacheHash = NULL;
 
 /* Private flag bits in the TypeCacheEntry.flags field */
-#define TCFLAGS_CHECKED_ELEM_PROPERTIES     0x0001
-#define TCFLAGS_HAVE_ELEM_EQUALITY          0x0002
-#define TCFLAGS_HAVE_ELEM_COMPARE           0x0004
-#define TCFLAGS_HAVE_ELEM_HASHING           0x0008
-#define TCFLAGS_CHECKED_FIELD_PROPERTIES    0x0010
-#define TCFLAGS_HAVE_FIELD_EQUALITY         0x0020
-#define TCFLAGS_HAVE_FIELD_COMPARE          0x0040
+#define TCFLAGS_CHECKED_BTREE_OPCLASS       0x0001
+#define TCFLAGS_CHECKED_HASH_OPCLASS        0x0002
+#define TCFLAGS_CHECKED_EQ_OPR              0x0004
+#define TCFLAGS_CHECKED_LT_OPR              0x0008
+#define TCFLAGS_CHECKED_GT_OPR              0x0010
+#define TCFLAGS_CHECKED_CMP_PROC            0x0020
+#define TCFLAGS_CHECKED_HASH_PROC           0x0040
+#define TCFLAGS_CHECKED_ELEM_PROPERTIES     0x0080
+#define TCFLAGS_HAVE_ELEM_EQUALITY          0x0100
+#define TCFLAGS_HAVE_ELEM_COMPARE           0x0200
+#define TCFLAGS_HAVE_ELEM_HASHING           0x0400
+#define TCFLAGS_CHECKED_FIELD_PROPERTIES    0x0800
+#define TCFLAGS_HAVE_FIELD_EQUALITY         0x1000
+#define TCFLAGS_HAVE_FIELD_COMPARE          0x2000
 
 /* Private information to support comparisons of enum values */
 typedef struct
@@ -132,6 +135,7 @@ static bool record_fields_have_equality(TypeCacheEntry *typentry);
 static bool record_fields_have_compare(TypeCacheEntry *typentry);
 static void cache_record_field_properties(TypeCacheEntry *typentry);
 static void TypeCacheRelCallback(Datum arg, Oid relid);
+static void TypeCacheOpcCallback(Datum arg, int cacheid, uint32 hashvalue);
 static void load_enum_cache_data(TypeCacheEntry *tcache);
 static EnumItem *find_enumitem(TypeCacheEnumData *enumdata, Oid arg);
 static int  enum_oid_cmp(const void *left, const void *right);
@@ -166,8 +170,9 @@ lookup_type_cache(Oid type_id, int flags)
         TypeCacheHash = hash_create("Type information cache", 64,
                                     &ctl, HASH_ELEM | HASH_FUNCTION);
 
-        /* Also set up a callback for relcache SI invalidations */
+        /* Also set up callbacks for SI invalidations */
         CacheRegisterRelcacheCallback(TypeCacheRelCallback, (Datum) 0);
+        CacheRegisterSyscacheCallback(CLAOID, TypeCacheOpcCallback, (Datum) 0);
 
         /* Also make sure CacheMemoryContext exists */
         if (!CacheMemoryContext)
@@ -217,13 +222,14 @@ lookup_type_cache(Oid type_id, int flags)
     }
 
     /*
-     * If we haven't already found the opclasses, try to do so
+     * Look up opclasses if we haven't already and any dependent info is
+     * requested.
      */
     if ((flags & (TYPECACHE_EQ_OPR | TYPECACHE_LT_OPR | TYPECACHE_GT_OPR |
                   TYPECACHE_CMP_PROC |
                   TYPECACHE_EQ_OPR_FINFO | TYPECACHE_CMP_PROC_FINFO |
                   TYPECACHE_BTREE_OPFAMILY)) &&
-        typentry->btree_opf == InvalidOid)
+        !(typentry->flags & TCFLAGS_CHECKED_BTREE_OPCLASS))
     {
         Oid         opclass;
 
@@ -233,38 +239,36 @@ lookup_type_cache(Oid type_id, int flags)
             typentry->btree_opf = get_opclass_family(opclass);
             typentry->btree_opintype = get_opclass_input_type(opclass);
         }
-        /* If no btree opclass, we force lookup of the hash opclass */
-        if (typentry->btree_opf == InvalidOid)
-        {
-            if (typentry->hash_opf == InvalidOid)
-            {
-                opclass = GetDefaultOpClass(type_id, HASH_AM_OID);
-                if (OidIsValid(opclass))
-                {
-                    typentry->hash_opf = get_opclass_family(opclass);
-                    typentry->hash_opintype = get_opclass_input_type(opclass);
-                }
-            }
-        }
         else
         {
-            /*
-             * In case we find a btree opclass where previously we only found
-             * a hash opclass, reset eq_opr and derived information so that we
-             * can fetch the btree equality operator instead of the hash
-             * equality operator.  (They're probably the same operator, but we
-             * don't assume that here.)
-             */
-            typentry->eq_opr = InvalidOid;
-            typentry->eq_opr_finfo.fn_oid = InvalidOid;
-            typentry->hash_proc = InvalidOid;
-            typentry->hash_proc_finfo.fn_oid = InvalidOid;
+            typentry->btree_opf = typentry->btree_opintype = InvalidOid;
         }
+
+        /*
+         * Reset information derived from btree opclass.  Note in particular
+         * that we'll redetermine the eq_opr even if we previously found one;
+         * this matters in case a btree opclass has been added to a type that
+         * previously had only a hash opclass.
+         */
+        typentry->flags &= ~(TCFLAGS_CHECKED_EQ_OPR |
+                             TCFLAGS_CHECKED_LT_OPR |
+                             TCFLAGS_CHECKED_GT_OPR |
+                             TCFLAGS_CHECKED_CMP_PROC);
+        typentry->flags |= TCFLAGS_CHECKED_BTREE_OPCLASS;
     }
 
+    /*
+     * If we need to look up equality operator, and there's no btree opclass,
+     * force lookup of hash opclass.
+     */
+    if ((flags & (TYPECACHE_EQ_OPR | TYPECACHE_EQ_OPR_FINFO)) &&
+        !(typentry->flags & TCFLAGS_CHECKED_EQ_OPR) &&
+        typentry->btree_opf == InvalidOid)
+        flags |= TYPECACHE_HASH_OPFAMILY;
+
     if ((flags & (TYPECACHE_HASH_PROC | TYPECACHE_HASH_PROC_FINFO |
                   TYPECACHE_HASH_OPFAMILY)) &&
-        typentry->hash_opf == InvalidOid)
+        !(typentry->flags & TCFLAGS_CHECKED_HASH_OPCLASS))
     {
         Oid         opclass;
 
@@ -274,11 +278,25 @@ lookup_type_cache(Oid type_id, int flags)
             typentry->hash_opf = get_opclass_family(opclass);
             typentry->hash_opintype = get_opclass_input_type(opclass);
         }
+        else
+        {
+            typentry->hash_opf = typentry->hash_opintype = InvalidOid;
+        }
+
+        /*
+         * Reset information derived from hash opclass.  We do *not* reset the
+         * eq_opr; if we already found one from the btree opclass, that
+         * decision is still good.
+         */
+        typentry->flags &= ~(TCFLAGS_CHECKED_HASH_PROC);
+        typentry->flags |= TCFLAGS_CHECKED_HASH_OPCLASS;
     }
 
-    /* Look for requested operators and functions */
+    /*
+     * Look for requested operators and functions, if we haven't already.
+     */
     if ((flags & (TYPECACHE_EQ_OPR | TYPECACHE_EQ_OPR_FINFO)) &&
-        typentry->eq_opr == InvalidOid)
+        !(typentry->flags & TCFLAGS_CHECKED_EQ_OPR))
     {
         Oid         eq_opr = InvalidOid;
 
@@ -307,17 +325,22 @@ lookup_type_cache(Oid type_id, int flags)
             !record_fields_have_equality(typentry))
             eq_opr = InvalidOid;
 
+        /* Force update of eq_opr_finfo only if we're changing state */
+        if (typentry->eq_opr != eq_opr)
+            typentry->eq_opr_finfo.fn_oid = InvalidOid;
+
         typentry->eq_opr = eq_opr;
 
         /*
         * Reset info about hash function whenever we pick up new info about
         * equality operator.  This is so we can ensure that the hash function
         * matches the operator.
         */
-        typentry->hash_proc = InvalidOid;
-        typentry->hash_proc_finfo.fn_oid = InvalidOid;
+        typentry->flags &= ~(TCFLAGS_CHECKED_HASH_PROC);
+        typentry->flags |= TCFLAGS_CHECKED_EQ_OPR;
     }
-    if ((flags & TYPECACHE_LT_OPR) && typentry->lt_opr == InvalidOid)
+    if ((flags & TYPECACHE_LT_OPR) &&
+        !(typentry->flags & TCFLAGS_CHECKED_LT_OPR))
     {
         Oid         lt_opr = InvalidOid;
 
@@ -336,8 +359,10 @@ lookup_type_cache(Oid type_id, int flags)
             lt_opr = InvalidOid;
 
         typentry->lt_opr = lt_opr;
+        typentry->flags |= TCFLAGS_CHECKED_LT_OPR;
     }
-    if ((flags & TYPECACHE_GT_OPR) && typentry->gt_opr == InvalidOid)
+    if ((flags & TYPECACHE_GT_OPR) &&
+        !(typentry->flags & TCFLAGS_CHECKED_GT_OPR))
     {
         Oid         gt_opr = InvalidOid;
 
@@ -356,9 +381,10 @@ lookup_type_cache(Oid type_id, int flags)
             gt_opr = InvalidOid;
 
         typentry->gt_opr = gt_opr;
+        typentry->flags |= TCFLAGS_CHECKED_GT_OPR;
     }
     if ((flags & (TYPECACHE_CMP_PROC | TYPECACHE_CMP_PROC_FINFO)) &&
-        typentry->cmp_proc == InvalidOid)
+        !(typentry->flags & TCFLAGS_CHECKED_CMP_PROC))
     {
         Oid         cmp_proc = InvalidOid;
 
@@ -376,10 +402,15 @@ lookup_type_cache(Oid type_id, int flags)
             !record_fields_have_compare(typentry))
             cmp_proc = InvalidOid;
 
+        /* Force update of cmp_proc_finfo only if we're changing state */
+        if (typentry->cmp_proc != cmp_proc)
+            typentry->cmp_proc_finfo.fn_oid = InvalidOid;
+
         typentry->cmp_proc = cmp_proc;
+        typentry->flags |= TCFLAGS_CHECKED_CMP_PROC;
     }
     if ((flags & (TYPECACHE_HASH_PROC | TYPECACHE_HASH_PROC_FINFO)) &&
-        typentry->hash_proc == InvalidOid)
+        !(typentry->flags & TCFLAGS_CHECKED_HASH_PROC))
     {
         Oid         hash_proc = InvalidOid;
 
@@ -407,7 +438,12 @@ lookup_type_cache(Oid type_id, int flags)
             !array_element_has_hashing(typentry))
             hash_proc = InvalidOid;
 
+        /* Force update of hash_proc_finfo only if we're changing state */
+        if (typentry->hash_proc != hash_proc)
+            typentry->hash_proc_finfo.fn_oid = InvalidOid;
+
         typentry->hash_proc = hash_proc;
+        typentry->flags |= TCFLAGS_CHECKED_HASH_PROC;
     }
 
     /*
@@ -416,6 +452,11 @@ lookup_type_cache(Oid type_id, int flags)
      * Note: we tell fmgr the finfo structures live in CacheMemoryContext,
      * which is not quite right (they're really in the hash table's private
      * memory context) but this will do for our purposes.
+     *
+     * Note: the code above avoids invalidating the finfo structs unless the
+     * referenced operator/function OID actually changes.  This is to prevent
+     * unnecessary leakage of any subsidiary data attached to an finfo, since
+     * that would cause session-lifespan memory leaks.
      */
     if ((flags & TYPECACHE_EQ_OPR_FINFO) &&
         typentry->eq_opr_finfo.fn_oid == InvalidOid &&
@@ -928,15 +969,38 @@ TypeCacheRelCallback(Datum arg, Oid relid)
             typentry->tupDesc = NULL;
         }
 
-        /* Reset equality/comparison/hashing information */
-        typentry->eq_opr = InvalidOid;
-        typentry->lt_opr = InvalidOid;
-        typentry->gt_opr = InvalidOid;
-        typentry->cmp_proc = InvalidOid;
-        typentry->hash_proc = InvalidOid;
-        typentry->eq_opr_finfo.fn_oid = InvalidOid;
-        typentry->cmp_proc_finfo.fn_oid = InvalidOid;
-        typentry->hash_proc_finfo.fn_oid = InvalidOid;
+        /* Reset equality/comparison/hashing validity information */
+        typentry->flags = 0;
+    }
+}
+
+/*
+ * TypeCacheOpcCallback
+ *      Syscache inval callback function
+ *
+ * This is called when a syscache invalidation event occurs for any pg_opclass
+ * row.  In principle we could probably just invalidate data dependent on the
+ * particular opclass, but since updates on pg_opclass are rare in production
+ * it doesn't seem worth a lot of complication: we just mark all cached data
+ * invalid.
+ *
+ * Note that we don't bother watching for updates on pg_amop or pg_amproc.
+ * This should be safe because ALTER OPERATOR FAMILY ADD/DROP OPERATOR/FUNCTION
+ * is not allowed to be used to add/drop the primary operators and functions
+ * of an opclass, only cross-type members of a family; and the latter sorts
+ * of members are not going to get cached here.
+ */
+static void
+TypeCacheOpcCallback(Datum arg, int cacheid, uint32 hashvalue)
+{
+    HASH_SEQ_STATUS status;
+    TypeCacheEntry *typentry;
+
+    /* TypeCacheHash must exist, else this callback wouldn't be registered */
+    hash_seq_init(&status, TypeCacheHash);
+    while ((typentry = (TypeCacheEntry *) hash_seq_search(&status)) != NULL)
+    {
+        /* Reset equality/comparison/hashing validity information */
         typentry->flags = 0;
     }
 }
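
For context, a minimal caller sketch follows (not part of this commit). It shows how backend code typically consumes the typcache API that the patch touches: lookup_type_cache, TYPECACHE_EQ_OPR_FINFO, and the cached FmgrInfo are the real PostgreSQL facilities exercised above, while the function name datums_are_equal and its error text are hypothetical, introduced only for illustration.

/*
 * Hypothetical caller sketch (not from this commit): test two datums of an
 * arbitrary type for equality via the cached equality-operator FmgrInfo.
 */
#include "postgres.h"
#include "fmgr.h"
#include "utils/typcache.h"

static bool
datums_are_equal(Oid type_id, Datum a, Datum b)
{
    TypeCacheEntry *typentry;

    /* Creates the cache entry on first use and fills in eq_opr_finfo */
    typentry = lookup_type_cache(type_id, TYPECACHE_EQ_OPR_FINFO);

    if (!OidIsValid(typentry->eq_opr_finfo.fn_oid))
        elog(ERROR, "could not identify an equality operator for type %u",
             type_id);

    /*
     * The entry lives for the rest of the session, so the FmgrInfo stays
     * usable.  After this commit, a pg_opclass invalidation only clears the
     * TCFLAGS_CHECKED_* bits; the next lookup_type_cache call rechecks the
     * catalogs and refreshes the finfo only if the operator OID changed.
     * (Collation-sensitive calls would use FunctionCall2Coll instead.)
     */
    return DatumGetBool(FunctionCall2(&typentry->eq_opr_finfo, a, b));
}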