
Commit 8203d6b

craig[bot], normanchenn, Alex Lunev, and stevendanna committed
142932: builtins,eval: fix `width_bucket()` and `power()` edge cases to match postgres r=normanchenn a=normanchenn

#### builtins: fix `width_bucket()` edge cases to match postgres

This commit fixes some edge cases in the `width_bucket()` function to match postgres behaviour:
- Return count + 1 for a positive infinity operand.
- Return 0 for a negative infinity operand.
- Update error codes and messages.

Informs #114872
Epic: None
Release note (sql change): Update edge cases in the `width_bucket()` function to return count + 1 for a positive infinity operand, and 0 for a negative infinity operand, instead of an error.

#### sql/sem/eval: implement postgres `power()` and `^` edge cases

This commit adds special-case handling for `power()` and `^` operations to match postgres behaviour. The internal apd package implements the General Decimal Arithmetic specification, while postgres follows the POSIX pow(3) spec. This commit adds a function to handle the special cases, and a wrapper around `apd.Context.Pow()` that calls the handler.

Special cases handled:
- NaN ^ 0 = 1. This used to return NaN.
- 1 ^ NaN = 1. This used to return NaN.
- 0 ^ negative = error. This used to return Infinity.
- negative ^ non-integer = error. This used to return NaN.
- any ^ 0 = 1 (including 0^0). 0^0 used to return NaN.
- |x| < 1 cases with (+/-) infinite y. These used to return 1.
- |x| > 1 cases with (+/-) infinite y. These used to return 1.
- -infinity ^ negative = 0.

Informs #114872
Epic: None
Release note (sql change): Implement various `power()` and `^` edge cases to match postgres behaviour.
Release note (backward-incompatible change): Implement various `power()` and `^` edge cases to match postgres behaviour. Some expressions that previously returned NaN now return specific numbers, some expressions that previously returned Infinity or NaN now return errors, and some expressions with infinite exponents now return different results.

143367: actions: auto-merge backport test-only PRs r=lunevalex a=lunevalex

Add a GitHub action to merge backport PRs that are approved and contain only test changes. This action relies on Blathers to label and approve PRs that meet the criteria for auto-merging. To prevent folks from simply tagging a PR to get it merged, this still relies on the PR being approved before merging, which is a key control against abuse of this function.

Epic: None
Release note: None

143411: kvclient: sort txnWriteBuffer by sequence number r=arul,yuzefovich a=stevendanna

The pipeliner interceptor depends on writes in the batch being sorted by sequence number.

Epic: none
Release note: None

Co-authored-by: Norman Chen <[email protected]>
Co-authored-by: Alex Lunev <[email protected]>
Co-authored-by: Steven Danna <[email protected]>
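For 142932, the special cases above amount to inspecting the operands before deferring to the decimal library. A minimal, hypothetical Go sketch of a few of these checks using the apd package (the function name and structure are illustrative, not the actual sql/sem/eval code):

```go
package main

import (
	"errors"
	"fmt"

	"github.com/cockroachdb/apd/v3"
)

// powSpecialCase sketches a few of the POSIX pow(3)-style cases listed
// above. Illustrative only: when handled is false, a real caller would
// fall through to apd.Context.Pow for the general case.
func powSpecialCase(x, y *apd.Decimal) (res *apd.Decimal, handled bool, err error) {
	one := apd.New(1, 0)
	switch {
	case y.Form == apd.Finite && y.IsZero():
		return one, true, nil // any ^ 0 = 1, including NaN ^ 0 and 0 ^ 0.
	case x.Form == apd.Finite && x.Cmp(one) == 0:
		return one, true, nil // 1 ^ anything = 1, including 1 ^ NaN.
	case x.Form == apd.Finite && x.IsZero() && y.Form == apd.Finite && y.Negative:
		// 0 ^ negative is an error under pow(3), rather than Infinity.
		return nil, true, errors.New("zero raised to a negative power is undefined")
	}
	return nil, false, nil
}

func main() {
	nan := &apd.Decimal{Form: apd.NaN}
	zero := apd.New(0, 0)
	res, handled, _ := powSpecialCase(nan, zero)
	fmt.Println(handled, res) // true 1
}
```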
4 parents: 8e44ad4 + fa49bbc + 7826429 + f98ecc7 · commit 8203d6b

File tree

15 files changed: +966 −229 lines
Lines changed: 74 additions & 0 deletions

@@ -0,0 +1,74 @@
+name: Auto-Merge Test Backport PRs
+
+on:
+  schedule:
+    - cron: "0 * * * *" # Every hour
+  workflow_dispatch:
+
+jobs:
+  auto-merge:
+    runs-on: ubuntu-latest
+
+    permissions:
+      contents: write
+      pull-requests: write
+
+    steps:
+      - name: Merge qualifying PRs
+        uses: actions/github-script@v7
+        with:
+          github-token: ${{ secrets.GITHUB_TOKEN }}
+          script: |
+            // Get the timestamp for 24 hours ago in ISO format.
+            const now = new Date();
+            const iso24HoursAgo = new Date(now.getTime() - 24 * 60 * 60 * 1000).toISOString();
+
+            // Find open backport PRs, labeled test-only by Blathers, that are
+            // at least 24 hours old.
+            const query = `repo:${context.repo.owner}/${context.repo.repo} is:pr is:open label:backport label:blathers-backport label:backport-test-only created:<${iso24HoursAgo}`;
+            const searchResults = await github.paginate(github.rest.search.issuesAndPullRequests, {
+              q: query,
+              per_page: 100,
+            });
+
+            for (const prItem of searchResults) {
+              const prNumber = prItem.number;
+
+              // Fetch full PR details.
+              const { data: pr } = await github.rest.pulls.get({
+                owner: context.repo.owner,
+                repo: context.repo.repo,
+                pull_number: prNumber,
+              });
+
+              // Check for an approval from the Blathers bot.
+              const { data: reviews } = await github.rest.pulls.listReviews({
+                owner: context.repo.owner,
+                repo: context.repo.repo,
+                pull_number: prNumber,
+              });
+
+              const approved = reviews.some(
+                r =>
+                  r.state === 'APPROVED' &&
+                  r.user?.login === 'blathers-crl[bot]'
+              );
+
+              if (!approved) {
+                console.log(`Skipping PR #${prNumber}: not approved`);
+                continue;
+              }
+
+              const labels = prItem.labels.map(l => l.name).join(', ');
+              console.log(`Merging PR #${prNumber}, Created at: ${pr.created_at}, Approved: ${approved}, Labels: ${labels}`);
+              // Merge the PR. The actual merge call is left commented out (dry run).
+              // try {
+              //   console.log(`Merging PR #${prNumber}`);
+              //   await github.rest.pulls.merge({
+              //     owner: context.repo.owner,
+              //     repo: context.repo.repo,
+              //     pull_number: prNumber,
+              //     merge_method: "merge",
+              //   });
+              // } catch (err) {
+              //   console.warn(`Failed to merge PR #${prNumber}: ${err.message}`);
+              // }
+            }

docs/generated/sql/functions.md

Lines changed: 1 addition & 1 deletion

@@ -1157,7 +1157,7 @@ available replica will error.</p>
 </span></td><td>Leakproof</td></tr>
 <tr><td><a name="fnv64a"></a><code>fnv64a(<a href="string.html">string</a>...) &rarr; <a href="int.html">int</a></code></td><td><span class="funcdesc"><p>Calculates the 64-bit FNV-1a hash value of a set of values.</p>
 </span></td><td>Leakproof</td></tr>
-<tr><td><a name="width_bucket"></a><code>width_bucket(operand: <a href="decimal.html">decimal</a>, b1: <a href="decimal.html">decimal</a>, b2: <a href="decimal.html">decimal</a>, count: <a href="int.html">int</a>) &rarr; <a href="int.html">int</a></code></td><td><span class="funcdesc"><p>return the bucket number to which operand would be assigned in a histogram having count equal-width buckets spanning the range b1 to b2.</p>
+<tr><td><a name="width_bucket"></a><code>width_bucket(operand: <a href="decimal.html">decimal</a>, b1: <a href="decimal.html">decimal</a>, b2: <a href="decimal.html">decimal</a>, count: <a href="int.html">int</a>) &rarr; <a href="int.html">int</a></code></td><td><span class="funcdesc"><p>return the bucket number to which operand would be assigned in a histogram having count equal-width buckets spanning the range b1 to b2. Returns 0 or count+1 for an input outside that range.</p>
 </span></td><td>Immutable</td></tr>
 <tr><td><a name="width_bucket"></a><code>width_bucket(operand: <a href="int.html">int</a>, b1: <a href="int.html">int</a>, b2: <a href="int.html">int</a>, count: <a href="int.html">int</a>) &rarr; <a href="int.html">int</a></code></td><td><span class="funcdesc"><p>return the bucket number to which operand would be assigned in a histogram having count equal-width buckets spanning the range b1 to b2.</p>
 </span></td><td>Immutable</td></tr>
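The doc change above captures the new out-of-range behaviour. A minimal sketch of what the infinity handling amounts to, using a hypothetical helper (the real logic lives in the `width_bucket` builtin):

```go
package main

import (
	"fmt"

	"github.com/cockroachdb/apd/v3"
)

// bucketForInfinity illustrates the documented behaviour: an infinite
// operand now maps to an out-of-range bucket instead of raising an error.
// Hypothetical helper, not the actual builtins code.
func bucketForInfinity(operand *apd.Decimal, count int) (int, bool) {
	if operand.Form != apd.Infinite {
		return 0, false // Finite operands use the normal bucket math.
	}
	if operand.Negative {
		return 0, true // -Infinity falls below the first bucket.
	}
	return count + 1, true // +Infinity falls past the last bucket.
}

func main() {
	inf := &apd.Decimal{Form: apd.Infinite}
	if b, ok := bucketForInfinity(inf, 5); ok {
		fmt.Println("width_bucket('Infinity', b1, b2, 5) =", b) // 6
	}
}
```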

pkg/kv/kvclient/kvcoord/txn_interceptor_write_buffer.go

Lines changed: 16 additions & 0 deletions

@@ -8,6 +8,7 @@ package kvcoord
 import (
 	"context"
 	"encoding/binary"
+	"slices"

 	"github.com/cockroachdb/cockroach/pkg/kv/kvpb"
 	"github.com/cockroachdb/cockroach/pkg/kv/kvserver/concurrency/lock"
@@ -953,6 +954,21 @@ func (twb *txnWriteBuffer) flushWithEndTxn(
 		reqs = append(reqs, it.Cur().toRequest())
 	}

+	// Layers below us expect that writes inside a batch are in sequence number
+	// order, but the iterator above returns data in key order. Here we re-sort
+	// it, which is unfortunate but required until we make a change to the
+	// pipeliner.
+	slices.SortFunc(reqs, func(a kvpb.RequestUnion, b kvpb.RequestUnion) int {
+		aHeader := a.GetInner().Header()
+		bHeader := b.GetInner().Header()
+		if aHeader.Sequence == bHeader.Sequence {
+			return aHeader.Key.Compare(bHeader.Key)
+		} else if aHeader.Sequence < bHeader.Sequence {
+			return -1
+		} else {
+			return 1
+		}
+	})
+
 	ba = ba.ShallowCopy()
 	reqs = append(reqs, ba.Requests...)
 	ba.Requests = reqs
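As a design note, the two-level ordering in the comparator above (sequence number first, key as tiebreaker) can also be written with Go's cmp package. A self-contained sketch using a plain stand-in type rather than the kvcoord types:

```go
package main

import (
	"cmp"
	"fmt"
	"slices"
)

// write stands in for a buffered request: a sequence number plus a key.
// Illustrative only; not the kvpb.RequestUnion type used above.
type write struct {
	seq int32
	key string
}

func main() {
	// Key order ("a", "b", "c") is the reverse of sequence order here,
	// mirroring the situation the flushWithEndTxn sort corrects.
	ws := []write{
		{seq: 12, key: "a"},
		{seq: 11, key: "b"},
		{seq: 10, key: "c"},
	}
	// Sort by sequence number first, breaking ties by key.
	slices.SortFunc(ws, func(a, b write) int {
		return cmp.Or(cmp.Compare(a.seq, b.seq), cmp.Compare(a.key, b.key))
	})
	fmt.Println(ws) // [{10 c} {11 b} {12 a}]
}
```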

pkg/kv/kvclient/kvcoord/txn_interceptor_write_buffer_test.go

Lines changed: 61 additions & 4 deletions

@@ -652,8 +652,8 @@ func TestTxnWriteBufferServesPointReadsLocally(t *testing.T) {
 	require.Len(t, ba.Requests, 3)

 	// We now expect the buffer to be flushed along with the commit.
-	require.IsType(t, &kvpb.PutRequest{}, ba.Requests[0].GetInner())
-	require.IsType(t, &kvpb.DeleteRequest{}, ba.Requests[1].GetInner())
+	require.IsType(t, &kvpb.DeleteRequest{}, ba.Requests[0].GetInner())
+	require.IsType(t, &kvpb.PutRequest{}, ba.Requests[1].GetInner())
 	require.IsType(t, &kvpb.EndTxnRequest{}, ba.Requests[2].GetInner())

 	br = ba.CreateReply()
@@ -936,8 +936,8 @@ func TestTxnWriteBufferServesOverlappingReadsCorrectly(t *testing.T) {
 	require.Len(t, ba.Requests, 3)

 	// We now expect the buffer to be flushed along with the commit.
-	require.IsType(t, &kvpb.PutRequest{}, ba.Requests[0].GetInner())
-	require.IsType(t, &kvpb.DeleteRequest{}, ba.Requests[1].GetInner())
+	require.IsType(t, &kvpb.DeleteRequest{}, ba.Requests[0].GetInner())
+	require.IsType(t, &kvpb.PutRequest{}, ba.Requests[1].GetInner())
 	require.IsType(t, &kvpb.EndTxnRequest{}, ba.Requests[2].GetInner())

 	br = ba.CreateReply()
@@ -1272,3 +1272,60 @@ func TestTxnWriteBufferRespectsMustAcquireExclusiveLock(t *testing.T) {
 	require.Len(t, br.Responses, 1)
 	require.IsType(t, &kvpb.EndTxnResponse{}, br.Responses[0].GetInner())
 }
+
+// TestTxnWriteBufferMustSortBatchesBySequenceNumber verifies that flushed
+// batches are sorted in sequence number order, as currently required by the
+// txn pipeliner interceptor.
+func TestTxnWriteBufferMustSortBatchesBySequenceNumber(t *testing.T) {
+	defer leaktest.AfterTest(t)()
+	defer log.Scope(t).Close(t)
+	ctx := context.Background()
+	twb, mockSender := makeMockTxnWriteBuffer()
+
+	txn := makeTxnProto()
+	txn.Sequence = 10
+	keyA := roachpb.Key("a")
+	keyB := roachpb.Key("b")
+	keyC := roachpb.Key("c")
+	val := "val"
+
+	// Send a batch that should be completely buffered.
+	ba := &kvpb.BatchRequest{}
+	ba.Header = kvpb.Header{Txn: &txn}
+	ba.Add(putArgs(keyC, val, txn.Sequence))
+	ba.Add(putArgs(keyB, val, txn.Sequence+1))
+	ba.Add(putArgs(keyA, val, txn.Sequence+2))
+
+	prevNumCalled := mockSender.numCalled
+	br, pErr := twb.SendLocked(ctx, ba)
+	require.Nil(t, pErr)
+	require.NotNil(t, br)
+	require.Equal(t, prevNumCalled, mockSender.numCalled, "batch sent unexpectedly")
+
+	// Commit the batch to flush the buffer.
+	ba = &kvpb.BatchRequest{}
+	ba.Header = kvpb.Header{Txn: &txn}
+	ba.Add(&kvpb.EndTxnRequest{
+		RequestHeader: kvpb.RequestHeader{Sequence: txn.Sequence + 4},
+		Commit:        true,
+	})
+
+	mockSender.MockSend(func(ba *kvpb.BatchRequest) (*kvpb.BatchResponse, *kvpb.Error) {
+		require.Len(t, ba.Requests, 3)
+		lastSeq := enginepb.TxnSeq(0)
+		for _, r := range ba.Requests {
+			seq := r.GetInner().Header().Sequence
+			if seq >= lastSeq {
+				lastSeq = seq
+			} else {
+				t.Fatal("expected batch to be sorted by sequence number")
+			}
+		}
+		br = ba.CreateReply()
+		return br, nil
+	})
+
+	br, pErr = twb.SendLocked(ctx, ba)
+	require.Nil(t, pErr)
+	require.NotNil(t, br)
+}
