From c98b2098ee348ae6d9da58fbd7121101cfa4a2bf Mon Sep 17 00:00:00 2001
From: "arcoalien@qq.com" <arcoalien@qq.com>
Date: Wed, 21 Feb 2024 20:02:23 +0800
Subject: [PATCH] fix lsn gen error in arm OS

---
 src/gausskernel/storage/access/transam/xlog.cpp | 14 ++++++++++----
 1 file changed, 10 insertions(+), 4 deletions(-)

diff --git a/src/gausskernel/storage/access/transam/xlog.cpp b/src/gausskernel/storage/access/transam/xlog.cpp
index 3fac7d1fa..3c0836e06 100755
--- a/src/gausskernel/storage/access/transam/xlog.cpp
+++ b/src/gausskernel/storage/access/transam/xlog.cpp
@@ -440,7 +440,7 @@ int ParallelXLogPageRead(XLogReaderState *xlogreader, XLogRecPtr targetPagePtr,
 static XLogRecPtr XLogInsertRecordGroup(XLogRecData *rdata, XLogRecPtr fpw_lsn);
 static void XLogInsertRecordNolock(XLogRecData *rdata, PGPROC *proc, XLogRecPtr StartPos, XLogRecPtr EndPos,
-                                   XLogRecPtr PrevPos, int32 *const currlrc_ptr);
+                                   XLogRecPtr PrevPos, int32 *const currlrc_ptr, uint64 *lsn);
 static void ReserveXLogInsertByteLocation(uint32 size, uint32 lastRecordSize, uint64 *StartBytePos, uint64 *EndBytePos,
                                           uint64 *PrevBytePos, int32 *const currlrc_ptr);
 static void CopyXLogRecordToWALForGroup(int write_len, XLogRecData *rdata, XLogRecPtr StartPos, XLogRecPtr EndPos,
@@ -503,7 +503,7 @@ static void XLogInsertRecordGroupLeader(PGPROC *leader, uint64 *end_byte_pos_ptr
     } else {
         if (likely(record_size != 0)) {
             ReserveXLogInsertByteLocation(record_size, record_size, &start_byte_pos, &end_byte_pos, &prev_byte_pos,
-                                          &current_lrc);
+                                          &current_lrc, &(((XLogRecord *)(leader->xlogGrouprdata->data))->xl_lsn));
             dirty_page_queue_lsn = start_byte_pos;
         }
         XLogInsertRecordNolock(leader->xlogGrouprdata, leader, XLogBytePosToRecPtr(start_byte_pos),
@@ -571,7 +571,7 @@ static void XLogInsertRecordGroupFollowers(PGPROC *leader, const uint32 head, ui
 
     if (likely(total_size != 0)) {
         ReserveXLogInsertByteLocation(total_size, record_size, &start_byte_pos, &end_byte_pos, &prev_byte_pos,
-                                      &current_lrc);
+                                      &current_lrc, &(((XLogRecord *)(follower->xlogGrouprdata->data))->xl_lsn));
         dirty_page_queue_lsn = start_byte_pos;
     }
 
@@ -869,7 +869,7 @@ static void XLogInsertRecordNolock(XLogRecData *rdata, PGPROC *proc, XLogRecPtr
  * @out PrevBytePos: the previous position of the WAL.
  */
 static void ReserveXLogInsertByteLocation(uint32 size, uint32 lastRecordSize, uint64 *StartBytePos, uint64 *EndBytePos,
-                                          uint64 *PrevBytePos, int32* const currlrc_ptr)
+                                          uint64 *PrevBytePos, int32* const currlrc_ptr, uint64* lsn)
 {
     volatile XLogCtlInsert *Insert = &t_thrd.shemem_ptr_cxt.XLogCtl->Insert;
 
@@ -878,6 +878,7 @@ static void ReserveXLogInsertByteLocation(uint32 size, uint32 lastRecordSize, ui
     /* All (non xlog-switch) records should contain data. */
     Assert(size > SizeOfXLogRecord);
 
+    LWLockAcquire(LSNGenLock, LW_EXCLUSIVE);
     /*
      * The duration the spinlock needs to be held is minimized by minimizing
      * the calculations that have to be done while holding the lock. The
@@ -942,6 +943,11 @@ loop:
     SpinLockRelease(&Insert->insertpos_lck);
 #endif /* __x86_64__ || __aarch64__ */
 
+    Assert(t_thrd.xact_cxt.ShmemVariableCache->nextLSN);
+    *lsn = pg_atomic_fetch_add_u64(&t_thrd.xact_cxt.ShmemVariableCache->nextLSN, 1);
+    LWLockRelease(LSNGenLock);
+    g_curr_lsn = *lsn;
+
     *currlrc_ptr = compare.struct128.LRC;
     *StartBytePos = compare.struct128.currentBytePos;
     *EndBytePos = exchange.struct128.currentBytePos;
--
Gitee