From d864fdf709113103300e5130d8c770ea7eb88e2d Mon Sep 17 00:00:00 2001 From: wangjingyuan8 <1577039175@qq.com> Date: Wed, 3 Apr 2024 17:34:12 +0800 Subject: [PATCH] write lsn --- .../backend/utils/adt/pgxlogstatfuncs.cpp | 4 + .../ddes/adapter/ss_dms_callback.cpp | 4 + .../process/threadpool/knl_instance.cpp | 3 + .../storage/access/transam/varsup.cpp | 11 +- .../storage/access/transam/xlog.cpp | 526 ++++++++++++++++-- src/gausskernel/storage/lmgr/lwlocknames.txt | 1 - src/include/access/transam.h | 5 +- src/include/access/xlog.h | 4 + src/include/access/xlog_basic.h | 6 +- src/include/access/xlog_internal.h | 4 + src/include/catalog/pg_control.h | 3 - src/include/knl/knl_instance.h | 3 + src/include/storage/buf/bufpage.h | 2 +- 13 files changed, 506 insertions(+), 70 deletions(-) diff --git a/src/common/backend/utils/adt/pgxlogstatfuncs.cpp b/src/common/backend/utils/adt/pgxlogstatfuncs.cpp index 1d90f358f..8daf9036a 100644 --- a/src/common/backend/utils/adt/pgxlogstatfuncs.cpp +++ b/src/common/backend/utils/adt/pgxlogstatfuncs.cpp @@ -188,7 +188,11 @@ static void ReadFlushLocation(TupleDesc *tupleDesc, Tuplestorestate *tupstore) values[ARR_1] = Int32GetDatum(g_instance.wal_cxt.lastLRCScanned); values[ARR_2] = Int32GetDatum(Insert->CurrLRC); values[ARR_3] = UInt64GetDatum(Insert->CurrBytePos); +#ifdef ENABLE_SS_MULTIMASTER + values[ARR_4] = UInt32GetDatum(Insert->LogicLSN); +#else values[ARR_4] = UInt32GetDatum(Insert->PrevByteSize); +#endif values[ARR_5] = UInt64GetDatum(g_instance.wal_cxt.flushResult); values[ARR_6] = UInt64GetDatum(g_instance.wal_cxt.sentResult); values[ARR_7] = UInt64GetDatum(LogwrtRqst->Write); diff --git a/src/gausskernel/ddes/adapter/ss_dms_callback.cpp b/src/gausskernel/ddes/adapter/ss_dms_callback.cpp index 34149adef..1c1f4df99 100644 --- a/src/gausskernel/ddes/adapter/ss_dms_callback.cpp +++ b/src/gausskernel/ddes/adapter/ss_dms_callback.cpp @@ -52,10 +52,12 @@ #include "storage/buf/bufmgr.h" #include "storage/ipc.h" +#ifdef ENABLE_SS_MULTIMASTER static void CBUpdateGlobalLSN(void *db_handle, unsigned long long lsn) { SetNewLSN(lsn); } +#endif static void CBUpdateGlobalXID(void *db_handle, unsigned long long xid) { @@ -2327,7 +2329,9 @@ void DmsInitCallback(dms_callback_t *callback) callback->mem_free = CBMemFree; callback->mem_reset = CBMemReset; +#ifdef ENABLE_SS_MULTIMASTER callback->update_global_lsn = CBUpdateGlobalLSN; +#endif callback->update_global_scn = CBUpdateGlobalXID; callback->get_global_scn = CBGetGlobalXID; callback->get_global_csn = CBGetGlobalCSN; diff --git a/src/gausskernel/process/threadpool/knl_instance.cpp b/src/gausskernel/process/threadpool/knl_instance.cpp index 05d905ce5..afa7cdbdf 100755 --- a/src/gausskernel/process/threadpool/knl_instance.cpp +++ b/src/gausskernel/process/threadpool/knl_instance.cpp @@ -128,6 +128,9 @@ static void knl_g_wal_init(knl_g_wal_context *const wal_cxt) wal_cxt->isWalWriterUp = false; wal_cxt->flushResult = InvalidXLogRecPtr; wal_cxt->sentResult = InvalidXLogRecPtr; +#ifdef ENABLE_SS_MULTIMASTER + wal_cxt->prevValidPtr = InvalidXLogRecPtr; +#endif wal_cxt->flushResultMutex = PTHREAD_MUTEX_INITIALIZER; wal_cxt->flushResultCV = (pthread_cond_t)PTHREAD_COND_INITIALIZER; wal_cxt->XLogFlusherCPU = 0; diff --git a/src/gausskernel/storage/access/transam/varsup.cpp b/src/gausskernel/storage/access/transam/varsup.cpp index c6d9a6de8..fba6a2751 100644 --- a/src/gausskernel/storage/access/transam/varsup.cpp +++ b/src/gausskernel/storage/access/transam/varsup.cpp @@ -300,16 +300,19 @@ void 
SetNewTransactionId(TransactionId xid) LWLockRelease(XidGenLock); } -void SetNewLSN(uint64 lsn) +#ifdef ENABLE_SS_MULTIMASTER +void SetNewLSN(uint32 lsn) { - uint64 g_lsn = 0; + uint32 g_lsn = 0; loop: - g_lsn = pg_atomic_read_u64(&t_thrd.xact_cxt.ShmemVariableCache->nextLSN); + volatile XLogCtlInsert *Insert = &t_thrd.shemem_ptr_cxt.XLogCtl->Insert; + g_lsn = pg_atomic_read_u32((uint32*)&Insert->LogicLSN); if (lsn > g_lsn) { - if (!pg_atomic_compare_exchange_u64(&t_thrd.xact_cxt.ShmemVariableCache->nextLSN, &g_lsn, lsn)) + if (!pg_atomic_compare_exchange_u32((uint32*)&Insert->LogicLSN, &g_lsn, lsn)) goto loop; } } +#endif /* * Determine the last safe XID to allocate given the currently oldest diff --git a/src/gausskernel/storage/access/transam/xlog.cpp b/src/gausskernel/storage/access/transam/xlog.cpp index e611d90c2..dddd6c7b1 100755 --- a/src/gausskernel/storage/access/transam/xlog.cpp +++ b/src/gausskernel/storage/access/transam/xlog.cpp @@ -215,7 +215,7 @@ static const int g_retryTimes = 3; static const uint64 REDO_SPEED_LOG_LEN = (XLogSegSize * 64); /* 64 segs */ static const int PG_TBLSPCS = 10; /* strlen(pg_tblspcs/) */ -thread_local uint64 g_curr_lsn = 0; +thread_local uint32 g_curr_lsn = 0; /* MAX_PAGE_FLUSH_LSN_FILE SIZE : 1024 pages, 8k each, file size 8M in total */ static const int MPFL_PAGE_NUM = NUM_MAX_PAGE_FLUSH_LSN_PARTITIONS; @@ -430,9 +430,15 @@ static void SetDummyStandbyEndRecPtr(XLogReaderState *xlogreader); /* XLOG scaling: start */ static void CopyXLogRecordToWAL(int write_len, bool isLogSwitch, XLogRecData *rdata, XLogRecPtr StartPos, XLogRecPtr EndPos, int32 *const currlrc_ptr); +#ifdef ENABLE_SS_MULTIMASTER +static void multiReserveXLogInsertLocation(uint32 size, XLogRecPtr *StartPos, XLogRecPtr *EndPos, uint32* lsn, + int32 *const currlrc_ptr); +static bool multiReserveXLogSwitch(XLogRecPtr *StartPos, XLogRecPtr *EndPos); +#else static void ReserveXLogInsertLocation(uint32 size, XLogRecPtr *StartPos, XLogRecPtr *EndPos, XLogRecPtr *PrevPtr, - int32 *const currlrc_ptr, uint64 *lsn = NULL); + int32 *const currlrc_ptr); static bool ReserveXLogSwitch(XLogRecPtr *StartPos, XLogRecPtr *EndPos, XLogRecPtr *PrevPtr); +#endif static void StartSuspendWalInsert(int32 *const lastlrc_ptr); static void StopSuspendWalInsert(int32 lastlrc); void ResetRecoveryDelayLatch(); @@ -461,8 +467,13 @@ static XLogRecPtr XLogInsertRecordGroup(XLogRecData *rdata, XLogRecPtr fpw_lsn); static void XLogInsertRecordNolock(XLogRecData *rdata, PGPROC *proc, XLogRecPtr StartPos, XLogRecPtr EndPos, XLogRecPtr PrevPos, int32 *const currlrc_ptr); +#ifdef ENABLE_SS_MULTIMASTER +static void multiReserveXLogInsertByteLocation(uint32 size, uint32 followersNum, uint64 *StartBytePos, uint64 *EndBytePos, + uint32* lsn, int32 *const currlrc_ptr); +#else static void ReserveXLogInsertByteLocation(uint32 size, uint32 lastRecordSize, uint64 *StartBytePos, uint64 *EndBytePos, - uint64 *PrevBytePos, int32 *const currlrc_ptr, uint64 *lsn = NULL); + uint64 *PrevBytePos, int32 *const currlrc_ptr); +#endif static void CopyXLogRecordToWALForGroup(int write_len, XLogRecData *rdata, XLogRecPtr StartPos, XLogRecPtr EndPos, PGPROC *proc, int32* const currlrc_ptr); @@ -496,6 +507,7 @@ static void XLogInsertRecordGroupLeader(PGPROC *leader, uint64 *end_byte_pos_ptr uint64 end_byte_pos = 0; int32 current_lrc = 0; uint64 dirty_page_queue_lsn = 0; + uint32 logic_lsn = 0; *leader->xlogGroupRedoRecPtr = t_thrd.shemem_ptr_cxt.XLogCtl->Insert.RedoRecPtr; @@ -523,8 +535,7 @@ static void XLogInsertRecordGroupLeader(PGPROC *leader, 
uint64 *end_byte_pos_ptr } else { if (likely(record_size != 0)) { #ifdef ENABLE_SS_MULTIMASTER - ReserveXLogInsertByteLocation(record_size, record_size, &start_byte_pos, &end_byte_pos, &prev_byte_pos, - ¤t_lrc, &(((XLogRecord *)(leader->xlogGrouprdata->data))->xl_lsn)); + multiReserveXLogInsertByteLocation(record_size, 1, &start_byte_pos, &end_byte_pos, &logic_lsn, ¤t_lrc); #else ReserveXLogInsertByteLocation(record_size, record_size, &start_byte_pos, &end_byte_pos, &prev_byte_pos, ¤t_lrc); @@ -536,6 +547,9 @@ static void XLogInsertRecordGroupLeader(PGPROC *leader, uint64 *end_byte_pos_ptr start_byte_pos + MAXALIGN(((XLogRecord *)(leader->xlogGrouprdata->data))->xl_tot_len)), XLogBytePosToRecPtr(prev_byte_pos), ¤t_lrc); +#ifdef ENABLE_SS_MULTIMASTER + ((XLogRecord *)leader->xlogGrouprdata->data)->logic_lsn = logic_lsn; +#endif } if (dirty_page_queue_lsn != 0) { @@ -563,6 +577,8 @@ static void XLogInsertRecordGroupFollowers(PGPROC *leader, const uint32 head, ui uint64 prev_byte_pos = 0; int32 current_lrc = 0; uint64 dirty_page_queue_lsn = 0; + uint32 logic_lsn = 0; + uint32 followersNum = 0; /* Walk the list and update the status of all xloginserts. */ nextidx = head; @@ -590,14 +606,14 @@ static void XLogInsertRecordGroupFollowers(PGPROC *leader, const uint32 head, ui Assert(record_size != 0); /* Calculate total size in the group. */ total_size += record_size; + followersNum++; /* Move to next proc in list. */ nextidx = pg_atomic_read_u32(&follower->xlogGroupNext); } if (likely(total_size != 0)) { #ifdef ENABLE_SS_MULTIMASTER - ReserveXLogInsertByteLocation(total_size, record_size, &start_byte_pos, &end_byte_pos, &prev_byte_pos, - ¤t_lrc, &(((XLogRecord *)(follower->xlogGrouprdata->data))->xl_lsn)); + multiReserveXLogInsertByteLocation(total_size, followersNum, &start_byte_pos, &end_byte_pos, &logic_lsn, ¤t_lrc); #else ReserveXLogInsertByteLocation(total_size, record_size, &start_byte_pos, &end_byte_pos, &prev_byte_pos, ¤t_lrc); @@ -620,6 +636,10 @@ static void XLogInsertRecordGroupFollowers(PGPROC *leader, const uint32 head, ui start_byte_pos + MAXALIGN(((XLogRecord *)(follower->xlogGrouprdata->data))->xl_tot_len)), XLogBytePosToRecPtr(prev_byte_pos), ¤t_lrc); +#ifdef ENABLE_SS_MULTIMASTER + ((XLogRecord *)follower->xlogGrouprdata->data)->logic_lsn = logic_lsn; + logic_lsn++; +#endif prev_byte_pos = start_byte_pos; start_byte_pos += MAXALIGN(((XLogRecord *)(follower->xlogGrouprdata->data))->xl_tot_len); /* Move to next proc in list. */ @@ -835,12 +855,14 @@ static void XLogInsertRecordNolock(XLogRecData *rdata, PGPROC *proc, XLogRecPtr START_CRIT_SECTION(); +#ifndef ENABLE_SS_MULTIMASTER /* Now that xl_prev has been filled in, calculate CRC of the record header. */ rdata_crc = ((XLogRecord *)rechdr)->xl_crc; COMP_CRC32C(rdata_crc, (XLogRecord *)rechdr, offsetof(XLogRecord, xl_crc)); FIN_CRC32C(rdata_crc); /* FIN_CRC32C as same as FIN_CRC32 */ ((XLogRecord *)rechdr)->xl_crc = rdata_crc; +#endif /* * All the record data, including the header, is now ready to be @@ -887,6 +909,91 @@ static void XLogInsertRecordNolock(XLogRecData *rdata, PGPROC *proc, XLogRecPtr return; } +#ifdef ENABLE_SS_MULTIMASTER +static void multiReserveXLogInsertByteLocation(uint32 size, uint32 followersNum, uint64 *StartBytePos, uint64 *EndBytePos, + uint32* lsn, int32 *const currlrc_ptr) +{ + volatile XLogCtlInsert *Insert = &t_thrd.shemem_ptr_cxt.XLogCtl->Insert; + + size = MAXALIGN(size); + + /* All (non xlog-switch) records should contain data. 
*/ + Assert(size > SizeOfXLogRecord); + + /* + * The duration the spinlock needs to be held is minimized by minimizing + * the calculations that have to be done while holding the lock. The + * current tip of reserved WAL is kept in CurrBytePos, as a byte position + * that only counts "usable" bytes in WAL, that is, it excludes all WAL + * page headers. The mapping between "usable" byte positions and physical + * positions (XLogRecPtrs) can be done outside the locked region, and + * because the usable byte position doesn't include any headers, reserving + * X bytes from WAL is almost as simple as "CurrBytePos += X". + */ +#if defined(__x86_64__) || defined(__aarch64__) && !defined(__USE_SPINLOCK) + union Union128 compare; + union Union128 exchange; + union Union128 current; + + compare.value = atomic_compare_and_swap_u128((volatile uint128_u *)&Insert->CurrBytePos); + + Assert(sizeof(Insert->CurrBytePos) == SIZE_OF_UINT64); + Assert(sizeof(Insert->LogicLSN) == SIZE_OF_UINT32); + Assert(sizeof(Insert->CurrLRC) == SIZE_OF_UINT32); + loop: + /* + * |CurrBytePos |LogicLSN | CurrLRC + * ------------------------+------------ +----------- + * |64 bits |32 bits |32 bits + */ + + /* + * Suspend WAL insert threads when currlrc equals WAL_COPY_SUSPEND + */ + if (unlikely(compare.struct128.LRC == WAL_COPY_SUSPEND)) { + compare.value = atomic_compare_and_swap_u128((volatile uint128_u *)&Insert->CurrBytePos); + goto loop; + } + + /* increment currlrc by 1 and store it back to the global LRC for the next record */ + + exchange.struct128.currentBytePos = compare.struct128.currentBytePos + size; + exchange.struct128.logicLSN = compare.struct128.logicLSN + followersNum; + exchange.struct128.LRC = (compare.struct128.LRC + 1) & 0x7FFFFFFF; + + current.value = atomic_compare_and_swap_u128((volatile uint128_u *)&Insert->CurrBytePos, compare.value, + exchange.value); + + if (!UINT128_IS_EQUAL(compare.value, current.value)) { + UINT128_COPY(compare.value, current.value); + goto loop; + } + + *currlrc_ptr = compare.struct128.LRC; + *StartBytePos = compare.struct128.currentBytePos; + *EndBytePos = exchange.struct128.currentBytePos; + *lsn = compare.struct128.logicLSN; +#else + loop1: + SpinLockAcquire(&Insert->insertpos_lck); + + if (unlikely(Insert->CurrLRC == WAL_COPY_SUSPEND)) { + SpinLockRelease(&Insert->insertpos_lck); + goto loop1; + } + + *currlrc_ptr = Insert->CurrLRC; + *StartBytePos = Insert->CurrBytePos; + *EndBytePos = Insert->CurrBytePos + size; + *lsn = Insert->logicLSN; + Insert->CurrLRC = (Insert->CurrLRC + 1) & 0x7FFFFFFF; + Insert->logicLSN += followersNum; + Insert->CurrBytePos = *EndBytePos; + + SpinLockRelease(&Insert->insertpos_lck); +#endif /* __x86_64__ || __aarch64__ */ +} +#else /* not ENABLE_SS_MULTI_WRITE */ /* * @Description: Reserves the right amount of space for a given size from the WAL. * already-reserved area in the WAL. The StartBytePos, EndBytePos and PrevBytePos @@ -907,9 +1014,6 @@ static void ReserveXLogInsertByteLocation(uint32 size, uint32 lastRecordSize, ui /* All (non xlog-switch) records should contain data. */ Assert(size > SizeOfXLogRecord); -#ifdef ENABLE_SS_MULTIMASTER - LWLockAcquire(LSNGenLock, LW_EXCLUSIVE); -#endif /* * The duration the spinlock needs to be held is minimized by minimizing * the calculations that have to be done while holding the lock. 
The @@ -959,13 +1063,6 @@ loop: goto loop; } -#ifdef ENABLE_SS_MULTIMASTER - Assert(t_thrd.xact_cxt.ShmemVariableCache->nextLSN); - *lsn = pg_atomic_fetch_add_u64(&t_thrd.xact_cxt.ShmemVariableCache->nextLSN, 1); - LWLockRelease(LSNGenLock); - g_curr_lsn = *lsn; -#endif - *currlrc_ptr = compare.struct128.LRC; *StartBytePos = compare.struct128.currentBytePos; *EndBytePos = exchange.struct128.currentBytePos; @@ -990,6 +1087,7 @@ loop1: SpinLockRelease(&Insert->insertpos_lck); #endif /* __x86_64__ || __aarch64__ */ } +#endif /* ENABLE_SS_MULTI_WRITE */ /* * @Description: In xlog group insert mode, copy a WAL record to an @@ -1207,18 +1305,23 @@ static XLogRecPtr XLogInsertRecordSingle(XLogRecData *rdata, XLogRecPtr fpw_lsn) * Reserve space for the record in the WAL. This also sets the xl_prev * pointer. */ +#ifdef ENABLE_SS_MULTIMASTER + if (isLogSwitch) { + inserted = multiReserveXLogSwitch(&StartPos, &EndPos); + } else { + multiReserveXLogInsertLocation(rechdr->xl_tot_len, &StartPos, &EndPos, &rechdr->logic_lsn, &currlrc); + inserted = true; + } +#else if (isLogSwitch) { XLogRecPtr tmp_xl_prev = InvalidXLogRecPtr; inserted = ReserveXLogSwitch(&StartPos, &EndPos, &tmp_xl_prev); rechdr->xl_prev = tmp_xl_prev; } else { -#ifdef ENABLE_SS_MULTIMASTER - ReserveXLogInsertLocation(rechdr->xl_tot_len, &StartPos, &EndPos, &rechdr->xl_prev, &currlrc, &rechdr->xl_lsn); -#else ReserveXLogInsertLocation(rechdr->xl_tot_len, &StartPos, &EndPos, &rechdr->xl_prev, &currlrc); -#endif inserted = true; } +#endif if (inserted) { uint32 current_entry = @@ -1227,6 +1330,7 @@ static XLogRecPtr XLogInsertRecordSingle(XLogRecData *rdata, XLogRecPtr fpw_lsn) volatile WALInsertStatusEntry *status_entry_ptr = &g_instance.wal_cxt.walInsertStatusTable[GET_STATUS_ENTRY_INDEX(current_entry)]; +#ifndef ENABLE_SS_MULTIMASTER /* Now that xl_prev has been filled in, calculate CRC of the record header. */ rdata_crc = rechdr->xl_crc; /* using CRC32C */ @@ -1234,6 +1338,7 @@ static XLogRecPtr XLogInsertRecordSingle(XLogRecData *rdata, XLogRecPtr fpw_lsn) FIN_CRC32C(rdata_crc); /* FIN_CRC32C as same as FIN_CRC32 */ rechdr->xl_crc = rdata_crc; +#endif /* * All the record data, including the header, is now ready to be @@ -1367,6 +1472,9 @@ static XLogRecPtr XLogInsertRecordSingle(XLogRecData *rdata, XLogRecPtr fpw_lsn) // Update our global variables t_thrd.xlog_cxt.ProcLastRecPtr = StartPos; t_thrd.xlog_cxt.XactLastRecEnd = EndPos; +#ifdef ENABLE_SS_MULTIMASTER + g_curr_lsn = rechdr->logic_lsn; +#endif return EndPos; } @@ -1401,7 +1509,9 @@ static XLogRecPtr XLogInsertRecordSingle(XLogRecData *rdata, XLogRecPtr fpw_lsn) */ XLogRecPtr XLogInsertRecord(XLogRecData *rdata, XLogRecPtr fpw_lsn) { - g_curr_lsn = 0; +#ifdef ENABLE_SS_MULTIMASTER + g_curr_lsn = 0; +#endif #ifdef __aarch64__ /* * In ARM architecture, insert an XLOG record represented by an already-constructed chain of data @@ -1420,6 +1530,185 @@ XLogRecPtr XLogInsertRecord(XLogRecData *rdata, XLogRecPtr fpw_lsn) #endif /* __aarch64__ */ } +#ifdef ENABLE_SS_MULTIMASTER +static void multiReserveXLogInsertLocation(uint32 size, XLogRecPtr *StartPos, XLogRecPtr *EndPos, uint32* lsn, + int32* const currlrc_ptr) +{ + volatile XLogCtlInsert *Insert = &t_thrd.shemem_ptr_cxt.XLogCtl->Insert; + + size = MAXALIGN(size); + + /* All (non xlog-switch) records should contain data. */ + Assert(size > SizeOfXLogRecord); + + /* + * The duration the spinlock needs to be held is minimized by minimizing + * the calculations that have to be done while holding the lock. 
The + * current tip of reserved WAL is kept in CurrBytePos, as a byte position + * that only counts "usable" bytes in WAL, that is, it excludes all WAL + * page headers. The mapping between "usable" byte positions and physical + * positions (XLogRecPtrs) can be done outside the locked region, and + * because the usable byte position doesn't include any headers, reserving + * X bytes from WAL is almost as simple as "CurrBytePos += X". + */ +#if defined(__x86_64__) || defined(__aarch64__) && !defined(__USE_SPINLOCK) + union Union128 compare; + union Union128 exchange; + union Union128 current; + + compare.value = atomic_compare_and_swap_u128((uint128_u *)&Insert->CurrBytePos); + Assert(sizeof(Insert->CurrBytePos) == SIZE_OF_UINT64); + Assert(sizeof(Insert->LogicLSN) == SIZE_OF_UINT32); + Assert(sizeof(Insert->CurrLRC) == SIZE_OF_UINT32); + +loop1: + /* + * |CurrBytePos |LogicLSN | CurrLRC + * ------------------------+------------ +----------- + * |64 bits |32 bits |32 bits + */ + + /* + * Suspend WAL insert threads when currlrc equals WAL_COPY_SUSPEND + */ + if (unlikely(compare.struct128.LRC == WAL_COPY_SUSPEND)) { + compare.value = atomic_compare_and_swap_u128((volatile uint128_u *)&Insert->CurrBytePos); + pg_usleep(1); + goto loop1; + } + + /* increment currlrc by 1 and store it back to the global LRC for the next record */ + + exchange.struct128.currentBytePos = compare.struct128.currentBytePos + size; + exchange.struct128.logicLSN = compare.struct128.logicLSN + 1; + exchange.struct128.LRC = (compare.struct128.LRC + 1) & 0x7FFFFFFF; + + current.value = atomic_compare_and_swap_u128((volatile uint128_u *)&Insert->CurrBytePos, compare.value, + exchange.value); + if (!UINT128_IS_EQUAL(compare.value, current.value)) { + UINT128_COPY(compare.value, current.value); + goto loop1; + } + + *currlrc_ptr = compare.struct128.LRC; + *StartPos = XLogBytePosToRecPtr(compare.struct128.currentBytePos); + *EndPos = XLogBytePosToEndRecPtr(exchange.struct128.currentBytePos); + *lsn = compare.struct128.logicLSN; + +#else +loop1: + SpinLockAcquire(&Insert->insertpos_lck); + if (unlikely(Insert->CurrLRC == WAL_COPY_SUSPEND)) { + SpinLockRelease(&Insert->insertpos_lck); + pg_usleep(1); + goto loop1; + } + *currlrc_ptr = Insert->CurrLRC; + *lsn = Insert->logicLSN; + *StartPos = XLogBytePosToRecPtr(Insert->CurrBytePos); + Insert->CurrBytePos = Insert->CurrBytePos + size; + *EndPos = XLogBytePosToEndRecPtr(Insert->CurrBytePos); + Insert->logicLSN++; + Insert->CurrLRC = (Insert->CurrLRC + 1) & 0x7FFFFFFF; + + SpinLockRelease(&Insert->insertpos_lck); +#endif /* __x86_64__|| __aarch64__ */ +} + +static bool multiReserveXLogSwitch(XLogRecPtr *StartPos, XLogRecPtr *EndPos) +{ + volatile XLogCtlInsert *Insert = &t_thrd.shemem_ptr_cxt.XLogCtl->Insert; + uint64 startbytepos; + uint64 endbytepos; + uint32 logicLSN; + int32 currlrc; + uint32 size = MAXALIGN(SizeOfXLogRecord); + XLogRecPtr ptr; + uint32 segleft; + + /* + * These calculations are a bit heavy-weight to be done while holding a + * spinlock, but since we're holding all the WAL insertion locks, there + * are no other inserters competing for it. GetXLogInsertRecPtr() does + * compete for it, but that's not called very frequently. 
+ */ +#if defined(__x86_64__) || defined(__aarch64__) && !defined(__USE_SPINLOCK) + uint128_u exchange; + uint128_u current; + uint128_u compare = atomic_compare_and_swap_u128((uint128_u *)&Insert->CurrBytePos); + +loop: + startbytepos = compare.u64[0]; + + ptr = XLogBytePosToEndRecPtr(startbytepos); + if (ptr % XLogSegSize == 0) { + *EndPos = *StartPos = ptr; + return false; + } + + endbytepos = startbytepos + size; + logicLSN = compare.u32[2]; + currlrc = (int32)compare.u32[3]; + + *StartPos = XLogBytePosToRecPtr(startbytepos); + *EndPos = XLogBytePosToEndRecPtr(endbytepos); + + segleft = XLogSegSize - ((*EndPos) % XLogSegSize); + if (segleft != XLogSegSize) { + /* consume the rest of the segment */ + *EndPos += segleft; + endbytepos = XLogRecPtrToBytePos(*EndPos); + } + + exchange.u64[0] = endbytepos; + exchange.u64[1] = logicLSN + 1; + exchange.u32[3] = currlrc; + + current = atomic_compare_and_swap_u128((uint128_u *)&Insert->CurrBytePos, compare, exchange); + if (!UINT128_IS_EQUAL(compare, current)) { + UINT128_COPY(compare, current); + goto loop; + } +#else + SpinLockAcquire(&Insert->insertpos_lck); + + startbytepos = Insert->CurrBytePos; + + ptr = XLogBytePosToEndRecPtr(startbytepos); + if (ptr % XLogSegSize == 0) { + SpinLockRelease(&Insert->insertpos_lck); + *EndPos = *StartPos = ptr; + return false; + } + + endbytepos = startbytepos + size; + logicLSN = Insert->LogicLSN; + currlrc = Insert->CurrLRC; + + *StartPos = XLogBytePosToRecPtr(startbytepos); + *EndPos = XLogBytePosToEndRecPtr(endbytepos); + + segleft = XLogSegSize - ((*EndPos) % XLogSegSize); + if (segleft != XLogSegSize) { + /* consume the rest of the segment */ + *EndPos += segleft; + endbytepos = XLogRecPtrToBytePos(*EndPos); + } + Insert->CurrBytePos = endbytepos; + Insert->LogicLSN = logicLSN + 1; + Insert->CurrLRC = currlrc; + + SpinLockRelease(&Insert->insertpos_lck); +#endif /* __x86_64__ || __aarch64__ */ + + Assert((*EndPos) % XLogSegSize == 0); + Assert(XLogRecPtrToBytePos(*EndPos) == endbytepos); + Assert(XLogRecPtrToBytePos(*StartPos) == startbytepos); + + return true; +} + +#else /* not ENABLE_SS_MULTIMASTER */ /* * Reserves the right amount of space for a record of given size from the WAL. * *StartPos is set to the beginning of the reserved section, *EndPos to @@ -1444,10 +1733,6 @@ static void ReserveXLogInsertLocation(uint32 size, XLogRecPtr *StartPos, XLogRec /* All (non xlog-switch) records should contain data. */ Assert(size > SizeOfXLogRecord); -#ifdef ENABLE_SS_MULTIMASTER - LWLockAcquire(LSNGenLock, LW_EXCLUSIVE); -#endif - /* * The duration the spinlock needs to be held is minimized by minimizing * the calculations that have to be done while holding the lock. 
The @@ -1497,13 +1782,6 @@ loop1: goto loop1; } -#ifdef ENABLE_SS_MULTIMASTER - Assert(t_thrd.xact_cxt.ShmemVariableCache->nextLSN); - *lsn = pg_atomic_fetch_add_u64(&t_thrd.xact_cxt.ShmemVariableCache->nextLSN, 1); - LWLockRelease(LSNGenLock); - g_curr_lsn = *lsn; -#endif - *currlrc_ptr = compare.struct128.LRC; *StartPos = XLogBytePosToRecPtr(compare.struct128.currentBytePos); *EndPos = XLogBytePosToEndRecPtr(exchange.struct128.currentBytePos); @@ -1634,6 +1912,7 @@ loop: return true; } +#endif /* not ENABLE_SS_MULTIMASTER */ static void StartSuspendWalInsert(int32 *const lastlrc_ptr) { @@ -1643,25 +1922,29 @@ static void StartSuspendWalInsert(int32 *const lastlrc_ptr) volatile WALInsertStatusEntry *entry; #if (defined(__x86_64__) || defined(__aarch64__)) && !defined(__USE_SPINLOCK) uint64 startbytepos; - uint32 prevbytesize; + uint32 tmp; // present prevbytesize or logicLSN uint128_u compare; uint128_u exchange; uint128_u current; compare = atomic_compare_and_swap_u128((uint128_u *)&Insert->CurrBytePos); Assert(sizeof(Insert->CurrBytePos) == 8); +#ifdef ENABLE_SS_MULTIMASTER + Assert(sizeof(Insert->LogicLSN) == 4); +#else Assert(sizeof(Insert->PrevByteSize) == 4); +#endif Assert(sizeof(Insert->CurrLRC) == 4); loop: /* - * |CurrBytePos |PrevByteSize | CurrLRC - * ------------------------+------------ +----------- - * |64 bits |32 bits |32 bits + * |CurrBytePos |PrevByteSize(LogicLSN) | CurrLRC + * ------------------------+---------------------- +----------- + * |64 bits |32 bits |32 bits */ startbytepos = compare.u64[0]; - prevbytesize = compare.u32[2]; + tmp = compare.u32[2]; currlrc = (int32)compare.u32[3]; /* @@ -1684,7 +1967,7 @@ loop: * as it is */ exchange.u64[0] = startbytepos; - exchange.u32[2] = prevbytesize; + exchange.u32[2] = tmp; exchange.u32[3] = currlrc; current = atomic_compare_and_swap_u128((uint128_u *)&Insert->CurrBytePos, compare, exchange); @@ -3595,6 +3878,120 @@ static void XLogFlushCore(XLogRecPtr writeRqstPtr) WakeupWalSemaphore(&g_instance.wal_cxt.walFlushWaitLock->l.sem); } +#ifdef ENABLE_SS_MULTIMASTER +XLogRecord *XLogReadRecordHeaderFromBuf(char* startpos, uint32 freespace, uint32 pageHeaderSize, bool* crosspage) +{ + XLogRecord * record = NULL; + if (freespace < SizeOfXLogRecord) { + *crosspage = true; + /* Need to reassemble XLogRecord */ + char *contdata = (char*)palloc(SizeOfXLogRecord); + errno_t errorno = EOK; + uint32 rest_len = SizeOfXLogRecord - freespace; + + if (freespace > 0) { + /* Copy the first fragment of the record from the first page. */ + errorno = memcpy_s(contdata, freespace, startpos, freespace); + securec_check_c(errorno, "\0", "\0"); + } + + errorno = memcpy_s(contdata + freespace, rest_len, startpos + freespace + pageHeaderSize, rest_len); + securec_check_c(errorno, "", ""); + record = (XLogRecord *)contdata; + } else { + record = (XLogRecord *)startpos; + } + return record; +} + +void XLogRecordFillPrevBeforeFlush(XLogRecPtr StartPos, XLogRecPtr prevStartPos, XLogRecPtr* switchSegEndPos) +{ + /* When StartPos is in the same segment as XLogSwitch and comes after XLogSwitch, + * switchSegEndPos which represents the endPos of currSegment is validPtr. + * Otherwise, switchSegEndPos is InvalidXLogRecPtr. 
*/ + if (XLByteLE(StartPos, *switchSegEndPos)) { + return; + } + char* startpos = GetXLogBuffer(StartPos); + uint32 freespace = INSERT_FREESPACE(StartPos); + uint32 xl_prev_Startoffset = offsetof(XLogRecord, xl_prev); + uint32 xl_prev_Endoffset = offsetof(XLogRecord, xl_info); + uint32 xl_crc_Startoffset = offsetof(XLogRecord, xl_crc); + uint32 xl_crc_Endoffset = SizeOfXLogRecord; + errno_t errorno = EOK; + pg_crc32c rdata_crc; + bool crosspage = false; + uint32 pageHeaderSize = XLogPageHeaderSize((XLogPageHeader)(startpos + freespace)); + XLogRecord *record = XLogReadRecordHeaderFromBuf(startpos, freespace, pageHeaderSize, &crosspage); + + /* fill xl_prev */ + record->xl_prev = prevStartPos; + if (freespace >= xl_prev_Endoffset) { + /* xl_prev on this page*/ + ((XLogRecord *)startpos)->xl_prev = prevStartPos; + } else if (freespace <= xl_prev_Startoffset){ + /* xl_prev on next page*/ + char* xl_prevpos = startpos + freespace + pageHeaderSize + (xl_prev_Startoffset - freespace); + errorno = memcpy_s(xl_prevpos, sizeof(XLogRecPtr), &prevStartPos, sizeof(XLogRecPtr)); + securec_check_c(errorno, "", ""); + } + + /* Now that xl_prev has been filled in, calculate CRC of the record header. */ + rdata_crc = record->xl_crc; + COMP_CRC32C(rdata_crc, (XLogRecord *)record, offsetof(XLogRecord, xl_crc)); + FIN_CRC32C(rdata_crc); /* FIN_CRC32C as same as FIN_CRC32 */ + record->xl_crc = rdata_crc; + + /* fill xl_crc */ + if (freespace >= xl_crc_Endoffset) { + /* xl_crc on this page*/ + ((XLogRecord *)startpos)->xl_crc = record->xl_crc; + } else if (freespace <= xl_crc_Startoffset){ + /* xl_crc on next page*/ + char* xl_crcpos = startpos + freespace + pageHeaderSize + (xl_crc_Startoffset - freespace); + errorno = memcpy_s(xl_crcpos, sizeof(pg_crc32c), &rdata_crc, sizeof(pg_crc32c)); + securec_check_c(errorno, "", ""); + } + + if (record->xl_info == XLOG_SWITCH) { + XLogRecPtr segmentEnd = StartPos + XLogSegSize - 1; + segmentEnd -= segmentEnd % XLogSegSize; + *switchSegEndPos = segmentEnd; + } + + if (record != NULL && crosspage) { + pfree_ext(record); + } +} + +/* calculate the startPtr of the last XLogRecord in this entry, + * which starts from entryStart and ends at entryEnd */ +XLogRecPtr calLastXLogRecordStartPtr(XLogRecPtr entryStart, XLogRecPtr entryEnd) +{ + XLogRecord *record = NULL; + XLogRecPtr lastPtr = NULL; + XLogRecPtr currStart = entryStart; + uint64 startBytePos; + uint32 total_len; + while (XLByteLT(currStart, entryEnd)) { + /* + * Read the record length. + * + * NB: Even though we use an XLogRecord pointer here, the whole record + * header might not fit on this page. xl_tot_len is the first field of the + * struct, so it must be on this page (the records are MAXALIGNed). + */ + record = (XLogRecord *)GetXLogBuffer(currStart); + total_len = MAXALIGN(record->xl_tot_len); + + startBytePos = XLogRecPtrToBytePos(currStart); + lastPtr = entryStart; + currStart = XLogBytePosToEndRecPtr(startBytePos + total_len); + } + return lastPtr; +} +#endif + /* * Flush xlog, but without specifying exactly where to flush to. 
* @@ -3651,6 +4048,12 @@ bool XLogBackgroundFlush(void) * - 1 +---------------+--------+-------+ * */ +#ifdef ENABLE_SS_MULTIMASTER + XLogRecPtr prevPos = g_instance.wal_cxt.prevValidPtr; + XLogRecPtr currStartPos = t_thrd.shemem_ptr_cxt.XLogCtl->LogwrtResult.Write; + XLogRecPtr switchSegEndPos = InvalidXLogRecPtr; + bool finishedFillPrev = false; +#endif start_entry_idx = GET_NEXT_STATUS_ENTRY(g_instance.attr.attr_storage.wal_insert_status_entries_power, g_instance.wal_cxt.lastWalStatusEntryFlushed); start_entry_ptr = &g_instance.wal_cxt.walInsertStatusTable[GET_STATUS_ENTRY_INDEX(start_entry_idx)]; @@ -3709,6 +4112,12 @@ bool XLogBackgroundFlush(void) break; } +#ifdef ENABLE_SS_MULTIMASTER + if (!finishedFillPrev) { + XLogRecordFillPrevBeforeFlush(currStartPos, prevPos, &switchSegEndPos); + } +#endif + /* * Flush if accumulate enough bytes or till the LSN in the entry before * an entry associated with the first uncopied record found in the current loop. @@ -3722,11 +4131,24 @@ bool XLogBackgroundFlush(void) next_entry_idx = curr_entry_idx; next_entry_ptr = curr_entry_ptr; entry_count--; +#ifdef ENABLE_SS_MULTIMASTER + finishedFillPrev = true; + } + else { + prevPos = calLastXLogRecordStartPtr(currStartPos, curr_entry_ptr->endLSN); + currStartPos = curr_entry_ptr->endLSN; + finishedFillPrev = false; } +#else + } +#endif } while (true); /* update continuous LRC entries that have been copied without a hole */ g_instance.wal_cxt.lastLRCScanned = curr_entry_ptr->LRC; +#ifdef ENABLE_SS_MULTIMASTER + g_instance.wal_cxt.prevValidPtr = calLastXLogRecordStartPtr(currStartPos, curr_entry_ptr->endLSN); +#endif /* * During REDO, we are reading not writing WAL. @@ -7050,11 +7472,6 @@ void BootStrapXLOG(void) checkPoint.oldestActiveXid = InvalidTransactionId; checkPoint.remove_seg = InvalidXLogSegPtr; -#ifdef ENABLE_SS_MULTIMASTER - t_thrd.xact_cxt.ShmemVariableCache->nextLSN = 1048576; - g_curr_lsn = t_thrd.xact_cxt.ShmemVariableCache->nextLSN; - -#endif t_thrd.xact_cxt.ShmemVariableCache->nextXid = checkPoint.nextXid; t_thrd.xact_cxt.ShmemVariableCache->nextOid = checkPoint.nextOid; t_thrd.xact_cxt.ShmemVariableCache->oidCount = 0; @@ -9879,10 +10296,6 @@ void StartupXLOG(void) checkPointPlus.next_csn, t_thrd.xact_cxt.ShmemVariableCache->nextXid))); } else if (recordLen == CHECKPOINTUNDO_LEN) { t_thrd.xact_cxt.ShmemVariableCache->nextCommitSeqNo = checkPointUndo.next_csn; -#ifdef ENABLE_SS_MULTIMASTER - t_thrd.xact_cxt.ShmemVariableCache->nextLSN = checkPointUndo.next_lsn; - g_curr_lsn = checkPointUndo.next_lsn; -#endif ereport(LOG, (errmsg("%s Mode: start local next csn from checkpoint %lu, next xid %lu", "GTM-Free", checkPointUndo.next_csn, t_thrd.xact_cxt.ShmemVariableCache->nextXid))); } else { @@ -10798,7 +11211,16 @@ void StartupXLOG(void) t_thrd.xlog_cxt.openLogOff = 0; Insert = &t_thrd.shemem_ptr_cxt.XLogCtl->Insert; Insert->CurrBytePos = XLogRecPtrToBytePos(EndOfLog); +#if ENABLE_SS_MULTIMASTER + if (IsBootstrapProcessingMode()) { + Insert->LogicLSN = 1048576; + } else { + Insert->LogicLSN = record->logic_lsn + 1; + } + g_instance.wal_cxt.prevValidPtr = t_thrd.xlog_cxt.LastRec; +#else Insert->PrevByteSize = XLogRecPtrToBytePos(EndOfLog) - XLogRecPtrToBytePos(t_thrd.xlog_cxt.LastRec); +#endif Insert->CurrLRC = 0; /* @@ -12595,19 +13017,11 @@ void CreateCheckPoint(int flags) checkPointUndo.length = (uint64)sizeof(CheckPointUndo); checkPointUndo.recent_global_xmin = InvalidTransactionId; checkPointUndo.globalRecycleXid = 0; -#ifdef ENABLE_SS_MULTIMASTER - checkPointUndo.next_lsn = 
t_thrd.xact_cxt.ShmemVariableCache->nextLSN; -#endif } else { checkPointUndo.next_csn = t_thrd.xact_cxt.ShmemVariableCache->nextCommitSeqNo; checkPointUndo.length = (uint64)sizeof(CheckPointUndo); checkPointUndo.recent_global_xmin = t_thrd.xact_cxt.ShmemVariableCache->recentGlobalXmin; checkPointUndo.globalRecycleXid = pg_atomic_read_u64(&g_instance.undo_cxt.globalRecycleXid); -#ifdef ENABLE_SS_MULTIMASTER - LWLockAcquire(LSNGenLock, LW_SHARED); - checkPointUndo.next_lsn = t_thrd.xact_cxt.ShmemVariableCache->nextLSN; - LWLockRelease(LSNGenLock); -#endif } XLogRegisterData((char *)(&checkPointUndo), sizeof(checkPointUndo)); } else if (t_thrd.proc->workingVersionNum >= GTMLITE_VERSION_NUM && u_sess->attr.attr_common.upgrade_mode != 1) { diff --git a/src/gausskernel/storage/lmgr/lwlocknames.txt b/src/gausskernel/storage/lmgr/lwlocknames.txt index 1219dd3d8..9934949c9 100755 --- a/src/gausskernel/storage/lmgr/lwlocknames.txt +++ b/src/gausskernel/storage/lmgr/lwlocknames.txt @@ -144,4 +144,3 @@ RedoTruncateLock 135 ExrtoRecycleResidualUndoLock 137 ShareInputScanLock 138 -LSNGenLock 139 \ No newline at end of file diff --git a/src/include/access/transam.h b/src/include/access/transam.h index a787fade9..ff634d117 100755 --- a/src/include/access/transam.h +++ b/src/include/access/transam.h @@ -130,7 +130,6 @@ typedef struct VariableCacheData { * These fields are protected by XidGenLock. */ TransactionId nextXid; /* next XID to assign */ - volatile uint64 nextLSN; TransactionId oldestXid; /* cluster-wide minimum datfrozenxid */ TransactionId xidVacLimit; /* start forcing autovacuums here */ @@ -250,7 +249,9 @@ extern TransactionId GetNewTransactionId(bool isSubXact); #endif /* PGXC */ extern TransactionId ReadNewTransactionId(void); extern void SetNewTransactionId(TransactionId xid); -extern void SetNewLSN(uint64 lsn); +#ifdef ENABLE_SS_MULTIMASTER +extern void SetNewLSN(uint32 lsn); +#endif extern void SetTransactionIdLimit(TransactionId oldest_datfrozenxid, Oid oldest_datoid); extern Oid GetNewObjectId(bool IsToastRel = false); extern TransactionId SubTransGetTopParentXidFromProcs(TransactionId xid); diff --git a/src/include/access/xlog.h b/src/include/access/xlog.h index 1eada1eed..c1e826521 100755 --- a/src/include/access/xlog.h +++ b/src/include/access/xlog.h @@ -421,7 +421,11 @@ typedef struct XLogCtlInsert { * rather than XLogRecPtrs (see XLogBytePosToRecPtr()). */ uint64 CurrBytePos; +#ifdef ENABLE_SS_MULTIMASTER + uint32 LogicLSN; +#else uint32 PrevByteSize; +#endif int32 CurrLRC; #if ((!defined __x86_64__) && (!defined __aarch64__)) || defined(__USE_SPINLOCK) diff --git a/src/include/access/xlog_basic.h b/src/include/access/xlog_basic.h index a92b743f2..771a4780e 100644 --- a/src/include/access/xlog_basic.h +++ b/src/include/access/xlog_basic.h @@ -217,9 +217,6 @@ typedef struct { * used to distinguish between block references, and the main data structs. 
*/ typedef struct XLogRecord { -#ifdef ENABLE_SS_MULTIMASTER - uint64 xl_lsn; -#endif uint32 xl_tot_len; /* total len of entire record */ uint32 xl_term; TransactionId xl_xid; /* xact id */ @@ -227,6 +224,9 @@ typedef struct XLogRecord { uint8 xl_info; /* flag bits, see below */ RmgrId xl_rmid; /* resource manager for this record */ uint2 xl_bucket_id; /* stores bucket id */ +#ifdef ENABLE_SS_MULTIMASTER + uint32 logic_lsn; +#endif pg_crc32c xl_crc; /* CRC for this record */ /* XLogRecordBlockHeaders and XLogRecordDataHeader follow, no padding */ diff --git a/src/include/access/xlog_internal.h b/src/include/access/xlog_internal.h index 776dba2a8..7d0cc0119 100755 --- a/src/include/access/xlog_internal.h +++ b/src/include/access/xlog_internal.h @@ -235,7 +235,11 @@ typedef struct RmgrData { */ struct Combined128 { uint64 currentBytePos; +#ifdef ENABLE_SS_MULTIMASTER + uint32 logicLSN; +#else uint32 byteSize; +#endif int32 LRC; }; union Union128 { diff --git a/src/include/catalog/pg_control.h b/src/include/catalog/pg_control.h index a0d09a096..1c8d47b31 100644 --- a/src/include/catalog/pg_control.h +++ b/src/include/catalog/pg_control.h @@ -78,9 +78,6 @@ typedef struct CheckPointUndo TransactionId recent_global_xmin; uint64 globalRecycleXid; /* you can add more attributes here */ -#ifdef ENABLE_SS_MULTIMASTER - uint64 next_lsn; -#endif } CheckPointUndo; /* XLOG info values for XLOG rmgr */ diff --git a/src/include/knl/knl_instance.h b/src/include/knl/knl_instance.h index aac9c7d7f..b5a6e917c 100755 --- a/src/include/knl/knl_instance.h +++ b/src/include/knl/knl_instance.h @@ -1038,6 +1038,9 @@ typedef struct knl_g_wal_context { volatile bool isWalWriterUp; XLogRecPtr flushResult; XLogRecPtr sentResult; +#ifdef ENABLE_SS_MULTIMASTER + XLogRecPtr prevValidPtr; /* startPosPtr of last valid WalStatusEntry */ +#endif pthread_mutex_t flushResultMutex; pthread_cond_t flushResultCV; int XLogFlusherCPU; diff --git a/src/include/storage/buf/bufpage.h b/src/include/storage/buf/bufpage.h index ac1444f75..ccebc6f3f 100644 --- a/src/include/storage/buf/bufpage.h +++ b/src/include/storage/buf/bufpage.h @@ -405,7 +405,7 @@ inline OffsetNumber PageGetMaxOffsetNumber(char* pghr) (((PageHeader)(page))->pd_lsn.xlogid = (uint32)((lsn) >> 32), ((PageHeader)(page))->pd_lsn.xrecoff = (uint32)(lsn)) #ifndef FRONTEND -extern thread_local uint64 g_curr_lsn; +extern thread_local uint32 g_curr_lsn; inline void PageSetLSN(Page page, XLogRecPtr LSN, bool check = true) { #ifdef ENABLE_SS_MULTIMASTER -- Gitee
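
Reviewer note (not part of the patch): the core of this change is that multiReserveXLogInsertLocation()/multiReserveXLogInsertByteLocation() pack CurrBytePos (64 bits), LogicLSN (32 bits) and CurrLRC (32 bits) into one 128-bit word and advance all three with a single compare-and-swap, so each record reserves its WAL byte range and receives its logical LSN atomically. The following is a minimal standalone sketch of that idea only; the struct, field names, initial values and the WAL_COPY_SUSPEND constant are simplified stand-ins, not the real XLogCtlInsert definitions. Build with something like gcc -std=c11 -mcx16 (link -latomic if the 16-byte CAS is not inlined).

/* sketch.c -- illustrative only, assumptions as noted above */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define WAL_COPY_SUSPEND (-1)            /* placeholder sentinel; the real constant differs */

typedef struct InsertPos {
    uint64_t curr_byte_pos;              /* tip of reserved WAL, in "usable" bytes */
    uint32_t logic_lsn;                  /* per-record logical LSN counter */
    int32_t  lrc;                        /* log record counter (LRC) */
} InsertPos;                             /* 16 bytes, kept 16-byte aligned for the CAS */

static _Alignas(16) InsertPos g_insert = { 0, 1048576, 0 };   /* 1048576 mirrors the bootstrap LogicLSN in the patch */

/* Reserve `size` already-MAXALIGNed bytes and hand back the byte range,
 * the logical LSN and the LRC assigned to this record. */
static void reserve_insert_location(uint64_t size, uint64_t *start_pos,
                                    uint64_t *end_pos, uint32_t *lsn, int32_t *lrc)
{
    InsertPos old_pos, new_pos;

    for (;;) {
        __atomic_load(&g_insert, &old_pos, __ATOMIC_SEQ_CST);
        if (old_pos.lrc == WAL_COPY_SUSPEND)
            continue;                    /* a flusher suspended inserts; re-read and retry */

        new_pos.curr_byte_pos = old_pos.curr_byte_pos + size;
        new_pos.logic_lsn     = old_pos.logic_lsn + 1;
        new_pos.lrc           = (old_pos.lrc + 1) & 0x7FFFFFFF;

        /* one 128-bit CAS publishes the new tip, LSN and LRC together */
        if (__atomic_compare_exchange(&g_insert, &old_pos, &new_pos, false,
                                      __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
            break;
    }

    *start_pos = old_pos.curr_byte_pos;
    *end_pos   = new_pos.curr_byte_pos;
    *lsn       = old_pos.logic_lsn;
    *lrc       = old_pos.lrc;
}

int main(void)
{
    uint64_t start, end;
    uint32_t lsn;
    int32_t lrc;

    reserve_insert_location(64, &start, &end, &lsn, &lrc);
    printf("reserved [%lu, %lu), logic LSN %u, LRC %d\n",
           (unsigned long)start, (unsigned long)end, lsn, lrc);
    return 0;
}

Because LogicLSN replaces PrevByteSize in the 128-bit word, xl_prev and xl_crc can no longer be finalized at reservation time; the patch instead fills them in just before flush (XLogRecordFillPrevBeforeFlush walks the copied entries via calLastXLogRecordStartPtr and patches xl_prev and the CRC in the WAL buffers, handling records whose header crosses a page boundary). The sketch above does not model that second phase.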