Mercurial > hg > MonetDB
changeset 86169:51f88573403f smart-merge-jan22
Merge with Jan2022.
| author | Aris Koning <aris.koning@monetdbsolutions.com> |
|---|---|
| date | Wed, 27 Jul 2022 11:45:48 +0200 |
| parents | cb28be30354f (current diff) aa86c05de2ff (diff) |
| children | ec241b102d85 |
| files | sql/storage/objectset.c sql/storage/store.c |
| diffstat | 8 files changed, 30 insertions(+), 9 deletions(-) [+] |
line wrap: on
line diff
--- a/README.rst +++ b/README.rst @@ -31,8 +31,8 @@ once an hour. .. _MonetDB: https://dev.monetdb.org/hg/MonetDB/ __ MonetDB_ -.. _github: https://github.com/MonetDB/MonetDB -__ github_ +.. _mirror: https://github.com/MonetDB/MonetDB +__ mirror_ Building -------- @@ -92,8 +92,8 @@ WITH_XML2 Include xml2 support WITH_ZLIB Include zlib support ============== =============================================================================================== -Required packages -................. +Required and Optional Packages +.............................. On Fedora, the following packages are required: ``bison``, ``cmake``, ``gcc``, ``pkgconf``, ``python3``.
--- a/gdk/gdk_align.c +++ b/gdk/gdk_align.c @@ -140,6 +140,7 @@ VIEWcreate(oid seq, BAT *b) } if (bn->tvheap) { BBPunshare(bn->tvheap->parentid); + BBPunfix(bn->tvheap->parentid); HEAPdecref(bn->tvheap, false); } HEAPdecref(bn->theap, false);
--- a/gdk/gdk_batop.c +++ b/gdk/gdk_batop.c @@ -91,8 +91,10 @@ insert_string_bat(BAT *b, BAT *n, struct /* we can share the vheaps, so we then only need to * append the offsets */ MT_lock_set(&b->theaplock); - if (b->tvheap->parentid != b->batCacheid) + if (b->tvheap->parentid != b->batCacheid) { BBPunshare(b->tvheap->parentid); + BBPunfix(b->tvheap->parentid); + } HEAPdecref(b->tvheap, b->tvheap->parentid == b->batCacheid); HEAPincref(ni.vh); b->tvheap = ni.vh; @@ -374,8 +376,10 @@ append_varsized_bat(BAT *b, BAT *n, stru * is read-only, we replace b's vheap with a reference * to n's */ MT_lock_set(&b->theaplock); - if (b->tvheap->parentid != b->batCacheid) + if (b->tvheap->parentid != b->batCacheid) { BBPunshare(b->tvheap->parentid); + BBPunfix(b->tvheap->parentid); + } BBPshare(ni.vh->parentid); HEAPdecref(b->tvheap, true); HEAPincref(ni.vh); @@ -433,12 +437,14 @@ append_varsized_bat(BAT *b, BAT *n, stru GDKfree(h); return GDK_FAIL; } - BBPunshare(b->tvheap->parentid); + bat parid = b->tvheap->parentid; + BBPunshare(parid); MT_lock_set(&b->theaplock); HEAPdecref(b->tvheap, false); ATOMIC_INIT(&h->refs, 1); b->tvheap = h; MT_lock_unset(&b->theaplock); + BBPunfix(parid); } if (BATcount(b) == 0 && BATatoms[b->ttype].atomFix == NULL && ci->tpe == cand_dense && ci->ncand == ni.count) {
--- a/gdk/gdk_logger.c +++ b/gdk/gdk_logger.c @@ -2236,7 +2236,7 @@ logger_next_logfile(logger *lg, ulng ts) { if (!lg->pending || !lg->pending->next) return 0; - if (lg->pending->last_ts < ts) + if (lg->pending->last_ts <= ts) return lg->pending->id; return 0; }
--- a/sql/storage/objectset.c +++ b/sql/storage/objectset.c @@ -767,6 +767,7 @@ os_add_name_based(objectset *os, struct objectversion *co = name_based_node->ov; objectversion *oo = get_valid_object_name(tr, co); if (co != oo) { /* conflict ? */ + TRC_WARNING(SQL_STORE, "%s" "if (co != oo) { /* conflict ? */", __func__); return -3; } @@ -781,6 +782,7 @@ */ ATOMIC_BASE_TYPE expected_deleted = deleted; if (!ATOMIC_CAS(&oo->state, &expected_deleted, block_destruction)) { + TRC_WARNING(SQL_STORE, "%s: " "if (!ATOMIC_CAS(&oo->state, &expected_deleted, block_destruction)) { /*conflict with cleaner or write-write conflict*/ ", __func__); return -3; /*conflict with cleaner or write-write conflict*/ } } @@ -817,6 +819,7 @@ os_add_id_based(objectset *os, struct sq objectversion *co = id_based_node->ov; objectversion *oo = get_valid_object_id(tr, co); if (co != oo) { /* conflict ? */ + TRC_WARNING(SQL_STORE, "%s" "if (co != oo) { /* conflict ? */", __func__); return -3; } @@ -831,6 +834,7 @@ */ ATOMIC_BASE_TYPE expected_deleted = deleted; if (!ATOMIC_CAS(&oo->state, &expected_deleted, block_destruction)) { + TRC_WARNING(SQL_STORE, "%s" "if (!ATOMIC_CAS(&oo->state, &expected_deleted, block_destruction)) { /*conflict with cleaner or write-write conflict*/", __func__); return -3; /*conflict with cleaner or write-write conflict*/ } } @@ -871,6 +875,7 @@ os_add_(objectset *os, struct sql_trans if (os->destroy) os->destroy(os->store, ov->b); _DELETE(ov); + TRC_WARNING(SQL_STORE, "%s" "if (!os->concurrent && os_has_changes(os, tr)) { /* for object sets without concurrent support, conflict if concurrent changes are there */", __func__); return -3; /* conflict */ } @@ -913,6 +918,7 @@ os_del_name_based(objectset *os, struct objectversion *oo = get_valid_object_name(tr, co); ov->name_based_head = oo->name_based_head; if (co != oo) { /* conflict ? */ + TRC_WARNING(SQL_STORE, "%s: " "if (co != oo) { /* conflict ? */", __func__); return -3; } ov->name_based_older = oo; @@ -944,6 +950,7 @@ os_del_id_based(objectset *os, struct sq objectversion *oo = get_valid_object_id(tr, co); ov->id_based_head = oo->id_based_head; if (co != oo) { /* conflict ? */ + TRC_WARNING(SQL_STORE, "%s" "if (co != oo) { /* conflict ? */", __func__); return -3; } ov->id_based_older = oo;
--- a/sql/storage/store.c +++ b/sql/storage/store.c @@ -5848,6 +5848,12 @@ int sql_trans_drop_table(sql_trans *tr, sql_schema *s, const char *name, int drop_action) { sql_table *t = find_sql_table(tr, s, name), *gt = NULL; + + if (!t) { + TRC_ERROR(SQL_STORE, "sql_trans_drop_table: Table %s.%s does not exist\n", s->base.name, name); + return -1; + } + if (t && isTempTable(t)) { gt = find_sql_table_id(tr, s, t->base.id); if (gt)
new file mode 100644 --- /dev/null +++ b/sql/test/BugTracker-2018/Tests/truncate_geom_tables.Bug-6543.reqtests @@ -0,0 +1,1 @@ +truncate_add_user
--- a/sql/test/rename/Tests/rename00.SQL.py +++ b/sql/test/rename/Tests/rename00.SQL.py @@ -21,7 +21,7 @@ with tempfile.TemporaryDirectory() as fa tc.execute('insert into "newname" values (1);').assertSucceeded().assertRowCount(1) tc.execute('select "a" from "newname";').assertSucceeded().assertDataResultMatch([(1,)]) s.communicate() - with process.server(mapiport=s.dbport, dbname='db1', + with process.server(mapiport='0', dbname='db1', dbfarm=os.path.join(farm_dir, 'db1'), stdin=process.PIPE, stdout=process.PIPE, stderr=process.PIPE) as s:
