@@ -943,7 +943,7 @@ rebuild_indexes(const repack_table *table)
             }
             CLEARPGRES(res);
         }
-
+
         /* We are only going to re-queue one worker, even
          * though more than one index build might be finished.
          * Any other jobs which may be finished will
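
The comment in this hunk refers to rebuild_indexes() handing CREATE INDEX statements to worker connections asynchronously and re-queuing a worker once its build finishes. As a rough illustration only (not pg_repack's actual loop), checking whether one worker connection has completed its asynchronous query can be done with plain libpq calls; index_build_finished() is a made-up helper name for this sketch:

    /* Sketch only: poll one worker connection to see whether its asynchronous
     * CREATE INDEX (started with PQsendQuery) has finished. Uses standard
     * libpq calls; index_build_finished() is a hypothetical helper name. */
    #include <stdbool.h>
    #include <libpq-fe.h>

    static bool
    index_build_finished(PGconn *conn)
    {
        PGresult *res;

        if (!PQconsumeInput(conn))      /* pull in any data the server sent */
            return false;               /* connection problem; caller decides */
        if (PQisBusy(conn))             /* result not fully available yet */
            return false;

        /* Drain results so the connection can be handed the next index. */
        while ((res = PQgetResult(conn)) != NULL)
            PQclear(res);
        return true;
    }
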
@@ -1051,7 +1051,7 @@ repack_one_table(repack_table *table, const char *orderby)

     if (dryrun)
         return;
-
+
     /* push repack_cleanup_callback() on stack to clean temporary objects */
     pgut_atexit_push(repack_cleanup_callback, &table->target_oid);

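pgut_atexit_push() here pairs with repack_cleanup_callback(), whose declarations appear in a later hunk. A minimal sketch of that pairing, with the pgut_atexit_push() prototype assumed from these call sites rather than copied from pgut's headers:

    /* Sketch of the registration/callback pairing; the prototype of
     * pgut_atexit_push() is an assumption based on how it is called here. */
    #include <stdbool.h>
    #include <libpq-fe.h>           /* brings in the Oid typedef */

    typedef void (*pgut_atexit_callback)(bool fatal, void *userdata);
    extern void pgut_atexit_push(pgut_atexit_callback callback, void *userdata);

    static void
    cleanup_callback(bool fatal, void *userdata)
    {
        Oid target_oid = *(Oid *) userdata;   /* pointer pushed at registration */

        (void) fatal;
        (void) target_oid;
        /* ... drop the temporary log table, triggers, etc. for target_oid ... */
    }

    static void
    register_cleanup(Oid *target_oid)
    {
        /* Mirrors pgut_atexit_push(repack_cleanup_callback, &table->target_oid). */
        pgut_atexit_push(cleanup_callback, target_oid);
    }
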
@@ -1328,7 +1328,7 @@ repack_one_table(repack_table *table, const char *orderby)
      * of APPLY_COUNT, until applying a batch of tuples
      * (via LIMIT) results in our having applied
      * MIN_TUPLES_BEFORE_SWITCH or fewer tuples. We don't want to
-     * get stuck repetitively applying some small number of tuples
+     * get stuck repetitively applying some small number of tuples
      * from the log table as inserts/updates/deletes may be
      * constantly coming into the original table.
      */
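
The comment touched here describes the switch condition for the log-apply loop. Below is a sketch of the loop shape it implies; apply_log(), the repack_table forward declaration, and both constant values are stand-ins for illustration, not pg_repack's exact definitions:

    /* Loop shape implied by the comment above; names and values are assumed. */
    #include <libpq-fe.h>

    #define APPLY_COUNT              1000   /* batch size applied via LIMIT */
    #define MIN_TUPLES_BEFORE_SWITCH 100    /* "small enough" backlog threshold */

    typedef struct repack_table repack_table;
    extern int apply_log(PGconn *conn, const repack_table *table, int count);

    static void
    drain_log_until_small(PGconn *conn, const repack_table *table)
    {
        for (;;)
        {
            int applied = apply_log(conn, table, APPLY_COUNT);

            if (applied <= MIN_TUPLES_BEFORE_SWITCH)
                break;      /* backlog is small; take the exclusive lock and swap */
            /* otherwise keep draining, since new log rows keep arriving */
        }
    }
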
@@ -1392,14 +1392,14 @@ repack_one_table(repack_table *table, const char *orderby)
     elog(DEBUG2, "---- drop ----");

     command("BEGIN ISOLATION LEVEL READ COMMITTED", 0, NULL);
-    if (!(lock_exclusive(connection, utoa(table->target_oid, buffer),
-                         table->lock_table, FALSE)))
-    {
-        elog(WARNING, "lock_exclusive() failed in connection for %s",
-             table->target_name);
-        goto cleanup;
-    }
-
+    if (!(lock_exclusive(connection, utoa(table->target_oid, buffer),
+                         table->lock_table, FALSE)))
+    {
+        elog(WARNING, "lock_exclusive() failed in connection for %s",
+             table->target_name);
+        goto cleanup;
+    }
+
     params[1] = utoa(temp_obj_num, indexbuffer);
     command("SELECT repack.repack_drop($1, $2)", 2, params);
     command("COMMIT", 0, NULL);
@@ -1711,7 +1711,7 @@ repack_cleanup_callback(bool fatal, void *userdata)
     Oid         target_table = *(Oid *) userdata;
     const char *params[2];
     char        buffer[12];
-    char        num_buff[12];
+    char        num_buff[12];

     if (fatal)
     {
@@ -1900,7 +1900,7 @@ repack_table_indexes(PGresult *index_details)
              table_name);
         if (!(lock_exclusive(connection, params[1], sql.data, TRUE)))
         {
-            elog(WARNING, "lock_exclusive() failed in connection for %s",
+            elog(WARNING, "lock_exclusive() failed in connection for %s",
                  table_name);
             goto drop_idx;
         }
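
lock_exclusive() appears to be handed a ready-made lock statement (sql.data here, table->lock_table in the earlier hunk) and reports failure so the caller can warn and bail out. A stripped-down sketch of a single lock attempt; pg_repack's real helper layers timeouts and retries on top, none of which is shown:

    /* Single guarded lock attempt; a simplification of what lock_exclusive()
     * is asked to do, without its timeout/retry behaviour. */
    #include <stdbool.h>
    #include <libpq-fe.h>

    static bool
    try_lock_statement(PGconn *conn, const char *lock_query)
    {
        /* lock_query is e.g. "LOCK TABLE ... IN ACCESS EXCLUSIVE MODE" */
        PGresult *res = PQexec(conn, lock_query);
        bool      ok = (PQresultStatus(res) == PGRES_COMMAND_OK);

        PQclear(res);
        return ok;      /* on false, the caller warns and jumps to its cleanup label */
    }
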
@@ -1974,7 +1974,7 @@ repack_all_indexes(char *errbuf, size_t errsize)

     if (r_index.head)
     {
-        appendStringInfoString(&sql,
+        appendStringInfoString(&sql,
             "SELECT i.relname, idx.indexrelid, idx.indisvalid, idx.indrelid, idx.indrelid::regclass, n.nspname"
             " FROM pg_index idx JOIN pg_class i ON i.oid = idx.indexrelid"
             " JOIN pg_namespace n ON n.oid = i.relnamespace"
@@ -2019,7 +2019,7 @@ repack_all_indexes(char *errbuf, size_t errsize)
         if (table_list.head)
             elog(INFO, "repacking indexes of \"%s\"", cell->val);

-        if (!repack_table_indexes(res))
+        if (!repack_table_indexes(res))
             elog(WARNING, "repack failed for \"%s\"", cell->val);

         CLEARPGRES(res);
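
cell->val in this hunk comes from walking the user-supplied table list, one repack attempt per entry, warning and moving on when one fails. A sketch of that walk under an assumed list shape modeled on PostgreSQL's SimpleStringList (pg_repack's own list type and per-table call may differ; repack_indexes_of() is hypothetical):

    /* List walk in the shape the hunk implies; the list type is assumed and
     * repack_indexes_of() is a hypothetical stand-in for the per-table work. */
    #include <stdbool.h>
    #include <stdio.h>

    typedef struct ListCell
    {
        struct ListCell *next;
        const char      *val;       /* table name as given on the command line */
    } ListCell;

    extern bool repack_indexes_of(const char *table_name);   /* hypothetical */

    static void
    repack_listed_tables(const ListCell *head)
    {
        const ListCell *cell;

        for (cell = head; cell != NULL; cell = cell->next)
        {
            printf("repacking indexes of \"%s\"\n", cell->val);
            if (!repack_indexes_of(cell->val))
                fprintf(stderr, "repack failed for \"%s\"\n", cell->val);
        }
    }
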