I defined a user-defined function "binarycopy". When I use my function,
there is an error:
sql>select binarycopy('starid','1111',1);
TypeException:user.s1_1[5]:'svom.binarycopy' undefined in: _9:any :=
svom.binarycopy(_6:str, _7:str, _8:int)
program contains errors
I want to debug the error with MAL, but there are also some errors there:
[svomhpc@localhost /home/data1]$>mclient -l mal -u monetdb -d mydb4
password:
Welcome to mclient, the MonetDB interactive terminal (unreleased)
Type \q to quit, \? for a list of available commands
mal>
mal>
mal>function test(i:int):str;
mal>io.print(i);
mal>i:=i*2;
mal>b:=bat.new(:int,:int);
mal>bat.insert(b,1,i);
mal>io.print(b);
mal>return test:= "ok";
mal>end test;
MAPI = (monetdb) /tmp/.s.monetdb.50000
QUERY = end test;
ERROR = !TypeException:user.test[3]:'bat.new' undefined in: b:any :=
bat.new(_5:int, _6:int)
!TypeException:user.test[4]:'bat.insert' undefined in: _7:any :=
bat.insert(b:any, _8:int, i:int)
mal>
Could anyone tell me how to debug my function, please?
Thanks!
Hello,
I'm a French user of MonetDB, and I have a SQL question,
I want to execute on MonetDB equivalent of this mysql request, and i don't
find any solution :
"SELECT field1, field2 FROM table GROUP BY field3 LIMIT 5;"
I want to do a GROUP BY of a field3 which is not in the SELECT to get a
limit of request results by this field3, like the "first" function of some
databases.
Does anyone have any idea on how I can do that ?
Thanks for your help,
Remi
--
<http://inbox.fr>
Rémi Daniel
Développeur Web rdaniel(a)inbox.fr
30, rue Vincent Moris
92240 Malakoff - France
Fax : +33 (0)1 55 48 02 79
I want to create a MonetDB function to help write my astronomical
binary processing result into the database. This function will load the
binary result from memory directly into MonetDB to save time, so I
defined a UDF binaryCopy():
svom.c
/*
 * binaryCopy -- load an in-memory binary column buffer directly into the
 * MonetDB table "testtable", bypassing the textual COPY INTO path.
 *
 * columnname: name of the destination column; "starid", "crossid" and
 *             "catid" are stored as lng, every other column as flt.
 * buffer:     raw bytes holding `count` consecutive values of the
 *             column's tail type (lng or flt, see above).
 * count:      number of values present in `buffer`.
 *
 * Returns 1 on success, 0 on failure (table not found or BAT allocation
 * failed).
 *
 * NOTE(review): `sql` is never initialised here, yet it is handed to
 * _bind_table() and _append_bat(); presumably a live mvc context must be
 * obtained from the MAL/SQL call environment -- TODO confirm how the
 * caller is meant to supply it.
 */
int binaryCopy(str columnname, str buffer, int count)
{
    mvc *sql = NULL;
    sql_table *tmatch = NULL;
    /* BAT in which the data of the destination column will be stored */
    BAT *tmatch_column = NULL;
    BUN nr_rows;
    /* pointers to the tail of the BAT, which is just a normal C array */
    lng *tmatch_column_lng_t = NULL;
    flt *tmatch_column_flt_t = NULL;
    /* does this column hold 64-bit integers (lng) rather than floats? */
    int is_lng;

    /* _bind_table is a wrapper that tries to locate the table under the
     * default schemas */
    if (!(tmatch = _bind_table(sql, NULL, "testtable")))
        return 0;               /* no such table */

    nr_rows = (BUN) count;
    /* decide the tail type once instead of repeating the strcmp chain */
    is_lng = strcmp(columnname, "starid") == 0 ||
             strcmp(columnname, "crossid") == 0 ||
             strcmp(columnname, "catid") == 0;

    /* create the BAT for the destination column: void head, typed tail */
    tmatch_column = BATnew(TYPE_void, is_lng ? TYPE_lng : TYPE_flt, nr_rows);
    if (tmatch_column == NULL)
        return 0;               /* BUG FIX: allocation failure was unchecked */

    /* bind a typed pointer to the first tail element and copy the raw
     * buffer straight into the BAT tail */
    if (is_lng) {
        tmatch_column_lng_t =
            (lng *) Tloc(tmatch_column, BUNfirst(tmatch_column));
        memcpy(tmatch_column_lng_t, buffer, nr_rows * sizeof(lng));
    } else {
        /* BUG FIX: the original memcpy'd into an uninitialised (NULL)
         * flt pointer; it must be bound to the tail first */
        tmatch_column_flt_t =
            (flt *) Tloc(tmatch_column, BUNfirst(tmatch_column));
        memcpy(tmatch_column_flt_t, buffer, nr_rows * sizeof(flt));
    }

    /* set proper properties for the BAT */
    BATsetcount(tmatch_column, nr_rows);
    BATseqbase(tmatch_column, 0);
    tmatch_column->tsorted = 0;     /* tail not known to be sorted */
    tmatch_column->trevsorted = 0;  /* tail not known to be reverse sorted */
    tmatch_column->T->nil = 0;      /* no NIL values observed ... */
    tmatch_column->T->nonil = 0;    /* ... but not guaranteed NIL-free either */

    /* BATkey only works on BAT heads, so BATmirror switches head and tail;
     * only the starid column is known to contain unique values */
    BATkey(BATmirror(tmatch_column), strcmp(columnname, "starid") == 0);

    /* finally, register the BAT as the data of the column in the table;
     * errors are ignored since we are finishing */
    _append_bat(sql, tmatch, columnname, tmatch_column);

    /* decrease the BAT reference counter to avoid a leak */
    BBPunfix(tmatch_column->batCacheid);
    return 1;                   /* success */
}
Thanks!
Hi,
I am trying to get MonetDB Server for Windows x64 to listen on something other than 127.0.0.1.
In the Getting started guide it mentions setting "mapi_open=yes" which I have added to M5Server.bat as:
"%MONETDB%\bin\mserver5.exe" --set "mapi_open=yes" --set "prefix=%MONETDB%" --set "exec_prefix=%MONETDB%" %MONETDBFARM% %*
However when I start the server I still see:
# MonetDB 5 server v11.15.7 "Feb2013-SP2"
# Serving database 'Data', using 4 threads
# Compiled for x86_64-pc-winnt/64bit with 64bit OIDs dynamically linked
# Found 11.989 GiB available main-memory.
# Copyright (c) 1993-July 2008 CWI.
# Copyright (c) August 2008-2013 MonetDB B.V., all rights reserved
# Visit http://www.monetdb.org/ for further information
# Listening for connection requests on mapi:monetdb://127.0.0.1:50000/
# MonetDB/GIS module loaded
# MonetDB/JAQL module loaded
# MonetDB/SQL module loaded
When I do a netstat -a I see port 50000 is listening only on 127.0.0.1 with nothing else using port 50000 on any other address.
When I perform a select * from environment(); I see:
mapi_port | 50000
mapi_open | yes
mapi_autosense | false
I have Windows Firewall disabled and have tried allowing m5server.exe through the firewall.
Could somebody please tell me how I get MonetDB to listen on anything other than 127.0.0.1 on Windows?
Thanks,
Neil.
IMPORTANT The information contained in this e-mail and any attachments is intended only for the named recipient and may be privileged or confidential. If you are not the intended recipient, please notify us immediately on +44 (0)1908 425000 and do not disclose, copy, distribute or take any action based on the contents of this e-mail. You should understand and accept that, when communicating with us by e-mail, it is not a totally secure communication medium. We accept no liability for any direct, indirect or consequential loss arising from any action taken in reliance on the information contained in this e-mail and give no warranty or representation as to its accuracy or reliability. DIGITALK has the facility to monitor and read both incoming and outgoing communications by e-mail. In line with industry efforts to reduce the proliferation of un-solicited SPAM, DIGITALK uses methods and ban-lists to prevent malicious content reaching our users. This message and any attachments has been scanned for known viruses. However, we would advise you to ensure the content is indeed virus free. We do not, to the extent permitted by law, accept any liability (whether in contract, negligence or otherwise) for any virus infection and/or external compromise of security and/or breach of confidentiality in relation to transmissions sent by e-mail. VAT No: GB 876 3287 81. Reg No: 3080801 Place of Registration: England Registered Office Address: 2 Radian Court, Knowlhill, Milton Keynes.
Hello everyone on the mailing list,
Please find some time to go through my queries.
I created a table with some fields out of which 1 is timestamp. I wanted to
have indexing on this table. While going through the available documentation
for monetdb, I found out that it creates its own indexing and might ignore
user provided indexing. I went ahead and started pushing in data into the
table with single inserts (10000 of them accumulated in a transaction). My
table is pretty big now and it contains about 250 million records (over 2
months of data). Even simple queries to find records between specified
timestamps takes a lot of time(12-13 mins in some cases)
We really feel the need of indexing on the table(don't know if that can
help). I happened to stumble upon an article on monetdb mailers:
<http://mail.monetdb.org/pipermail/users-list/2008-March/001726.html>
http://mail.monetdb.org/pipermail/users-list/2008-March/001726.html
According to this article if I insert data into the table using - INSERT
INTO table_name (...) ORDER BY X,Y.
The ORDER BY here will enforce indexing on X, Y. I would like to confirm
whether this really enforces indexing?
I have a few other questions pertaining to my case:
1) Is there a predefined way of speeding up such queries in monetdb?
2) If the insert with an order by clause is a work around then what if going
further, I start inserting with the insert query with the order by clause.
a)Will that enforce indexing on the whole table or on only those records
which were inserted using such inserts? b) Or should I just move the data
from my table with 250 million records into another table using the insert
with order by clause?
3)One more question regarding the setup of monetdb - I have initiated
monetdb using the M5server.bat on my windows 2008 R2 64 bit server. This
makes it run as a service and starts off at the server start-up. This is all
nice and acceptable. The problem comes when I need to restart the monetdb
server. The only way I was able to do this was to kill the process
(mServer5.exe) since I was not able to figure out the service (in the
windows services) which starts monetdb.
So my simple question is how to restart monetdb server if it gets installed
by M5server.bat?
Can somebody please help me with the queries?
Regards,
Anshuman
Anshuman
I do not have answer to your queries. We are trying to move monetdb and would be extremely helpful if you could let me know
1. How big is your database?
2. Are you running in production?
--- Original Message ---
From: "Anshuman Kumar" <anshuman.kumar(a)flowedge.in>
Sent: June 27, 2013 5:44 AM
To: users-list(a)monetdb.org
Subject: Indexing in monetdb
Hello everyone on the mailing list,
Please find some time to go through my queries.
I created a table with some fields out of which 1 is timestamp. I wanted to
have indexing on this table. While going through the available documentation
for monetdb, I found out that it creates its own indexing and might ignore
user provided indexing. I went ahead and started pushing in data into the
table with single inserts (10000 of them accumulated in a transaction). My
table is pretty big now and it contains about 250 million records (over 2
months of data). Even simple queries to find records between specified
timestamps takes a lot of time(12-13 mins in some cases)
We really feel the need of indexing on the table(don't know if that can
help). I happened to stumble upon an article on monetdb mailers:
<http://mail.monetdb.org/pipermail/users-list/2008-March/001726.html>
http://mail.monetdb.org/pipermail/users-list/2008-March/001726.html
According to this article if I insert data into the table using - INSERT
INTO table_name (...) ORDER BY X,Y.
The order by here will enforce indexing on X, Y. I would like to confirm
whether this really enforce indexing?
I have a few other questions pertaining to my case:
1) Is there a predefined way of speeding up such queries in monetdb?
2) If the insert with an order by clause is a work around then what if going
further, I start inserting with the insert query with the order by clause.
a)Will that enforce indexing on the whole table or on only those records
which were inserted using such inserts? b) Or should I just move the data
from my table with 250 million records into another table using the insert
with order by clause?
3)One more question regarding the setup of monetdb - I have initiated
monetdb using the M5server.bat on my windows 2008 R2 64 bit server. This
makes it run as a service and starts off at the server start-up. This is all
nice and acceptable. The problem comes when I need to restart the monetdb
server. The only way I was able to do this was to kill the process
(mServer5.exe) since I was not able to figure out the service (in the
windows services) which starts monetdb.
So my simple question is how to restart monetdb server if it gets installed
by M5server.bat?
Can somebody please help me with the queries?
Regards,
Anshuman
_______________________________________________
users-list mailing list
users-list(a)monetdb.org
http://mail.monetdb.org/mailman/listinfo/users-list
Hello everyone on the mailing list,
Please find some time to go through my queries.
I created a table with some fields out of which 1 is timestamp. I wanted to
have indexing on this table. While going through the available documentation
for monetdb, I found out that it creates its own indexing and might ignore
user provided indexing. I went ahead and started pushing in data into the
table with single inserts (10000 of them accumulated in a transaction). My
table is pretty big now and it contains about 250 million records (over 2
months of data). Even simple queries to find records between specified
timestamps takes a lot of time(12-13 mins in some cases)
We really feel the need of indexing on the table(don't know if that can
help). I happened to stumble upon an article on monetdb mailers:
http://mail.monetdb.org/pipermail/users-list/2008-March/001726.html
According to this article if I insert data into the table using - INSERT
INTO table_name (...) ORDER BY X,Y.
The order by here will enforce indexing on X, Y. I would like to confirm
whether this really enforce indexing?
I have a few other questions pertaining to my case:
1) Is there a predefined way of speeding up such queries in monetdb?
2) If the insert with an order by clause is a work around then what if going
further, I start inserting with the insert query with the order by clause.
a)Will that enforce indexing on the whole table or on only those records
which were inserted using such inserts? b) Or should I just move the data
from my table with 250 million records into another table using the insert
with order by clause?
3)One more question regarding the setup of monetdb - I have initiated
monetdb using the M5server.bat on my windows 2008 R2 64 bit server. This
makes it run as a service and starts off at the server start-up. This is all
nice and acceptable. The problem comes when I need to restart the monetdb
server. The only way I was able to do this was to kill the process
(mServer5.exe) since I was not able to figure out the service (in the
windows services) which starts monetdb.
So my simple question is how to restart monetdb server if it gets installed
by M5server.bat?
Can somebody please help me with the queries?
Regards,
Anshuman
Hi all,
I'm using monetdb on two servers and meet some strange behavior.
The two servers are of the same configuration as the following:
CPU: Intel(R) Xeon(R) CPU E5-2430L, 6 Cores, 12 Threads, 2 GHz, 64
bits.
Memory: 64GB
OS: Ubuntu Server 13.04 with 3.8.0-25-generic x86_64 kernel
MonetDB: v11.15.7
And they are both bought for less than 4 months.
The problem is that the performance of MonetDB is very unstable, even
for very simple select * queries.
The experiment setting is as the following:
Data:
Server1: Table1 loaded from a 19GB CSV file (CSV1).
Server2: Table1 loaded from another 19GB CSV file (CSV2).
CSV1 and CSV2 have the same schema but have different contents.
But they both have size of about 19GB and both have 155000000 tuples.
The schema is:
Field1 VARCHAR(16),
Field2 VARCHAR(100),
Field3 DATE,
Field4 FLOAT,
Field5 VARCHAR(64),
Field6 CHAR(3),
Field7 CHAR(6),
Field8 VARCHAR(32),
Field9 INT
In addition, both the tables have indices on Field1, Field2 and Field3.
Experiment Query:
echo -e '\\w-1 \n \\> /dev/null \n select * from Table1;' |
mclient -d expdatabase --interactive
The problem is that the performance varies a lot between the servers and
also
between different runs on the same server.
Here is the result:
Server1: 4m 12s
Server2: 305m 29s
The system conditions during the experiment:
Disk speed:
I run the command: cat CSV > /dev/null. The speed
is normal, about 2m ~ 2m 30s .
System load:
The machine load during the query executions are both very low.
The CPUs are almost completely idle and free memories are abundant.
Each monetdb server took about 50G.
To eliminate possible system configuration problems. I did two more
experiments.
1) I move CSV2 to server1, stop the server1
monetdb and build and start a new monetdb database using CSV2.
Then I run the same query again on the new database. The time
elapse is 134m 8s.
2) After 1) is done I stop the new monetdb and restart the old monetdb
on server1, and run the same query again. This time the time
elapse becomes 388m 37s (Comparing 4m 12s in the first run).
Further disk observations:
During the above two experiments, I monitor the disk read speed of
monetdb using iotop. The speed keeps about only 500 ~ 550 k/s. While the
read speed of cat Table1.csv > /dev/null keeps around 135m/s.
I did not do any configuration about monetdb. Is it possible that I
misconfigured something?
Thanks.
Victor
Hi all,
I am evaluating MonetDB in the context of genomic data. Therefore I'm
trying to implement a custom aggregation function.
To get a feeling for the system I set up a UDF that just returns
identical values. Now I'm trying to implement this simple function as
aggregate function for groupings but always run into an error when
executing this:
select grpident(base) from test.bases group by pos;
"TypeException:user.s1_1[18]:'udf.subident' undefined in:
_30:bat[:any,:str] := udf.subident(_15:bat[:oid,:str],
_27:bat[:oid,:oid], r1_27:bat[:oid,:oid], _1:bit)
program contains errors"
Here is the MAL plan:
+-----------------------------------------------------------------+
| mal |
+=================================================================+
| function user.s1_1{autoCommit=true}():void; |
| X_2 := sql.mvc(); |
| X_3:bat[:oid,:oid] := sql.tid(X_2,"test","bases"); |
| X_6 := sql.bind(X_2,"test","bases","base",0); |
| (X_9,r1_9) := sql.bind(X_2,"test","bases","base",2); |
| X_12 := sql.bind(X_2,"test","bases","base",1); |
| X_14 := sql.delta(X_6,X_9,r1_9,X_12); |
| X_15 := algebra.leftfetchjoin(X_3,X_14); |
| calc.str("bases"); |
| calc.str("base"); |
| X_18 := sql.bind(X_2,"test","bases","pos",0); |
| (X_20,r1_20) := sql.bind(X_2,"test","bases","pos",2); |
| X_22 := sql.bind(X_2,"test","bases","pos",1); |
| X_23 := sql.delta(X_18,X_20,r1_20,X_22); |
| X_24 := algebra.leftfetchjoin(X_3,X_23); |
| calc.str("bases"); |
| calc.str("pos"); |
| (X_27,r1_27,r2_27) := group.subgroupdone(X_24); |
| X_30:bat[:any,:str] := udf.subident(X_15,X_27,r1_27,true); |
| calc.str("bases"); |
| calc.str("L1"); |
| calc.str("bases"); |
| calc.str("L1"); |
| X_36 := sql.resultSet(1,1,X_30); |
| sql.rsColumn(X_36,"test.bases","L1","clob",1,0,X_30); |
| X_40 := io.stdout(); |
| sql.exportResult(X_40,X_36); |
| end s1_1; |
+-----------------------------------------------------------------+
I'm using following setup:
MonetDB 5 server v11.15.7 "Feb2013-SP2" (64-bit, 64-bit oids)
Found 1.9GiB available memory, 2 available cpu cores
Libraries:
libpcre: 8.31 2012-07-06 (compiled with 8.31)
openssl: OpenSSL 1.0.1c 10 May 2012 (compiled with OpenSSL 1.0.1c 10
May 2012)
libxml2: 2.9.0 (compiled with 2.9.0)
Here's what my related code looks like:
udf.c
#####
str GRPident(int *ret, int *bid, int *eid){ ... }
udf.h
#####
udf_export str GRPident(int *ret, int *bid, int *eid);
udf.mal
#######
module udf
...
command ident(b:bat[:oid,:str], e:bat[:oid,:str]) :bat[:oid,:str]
address GRPident
comment "identity of groups";
udf.sql
#######
create aggregate grpident(src string) returns string external name
udf.ident;
I already renamed the MAL command to "subident" and changed the SQL
expression accordingly without success.
Furthermore I already had a look at "batxml.c" as suggested here:
http://old.nabble.com/users-list-Digest,-Vol-3,-Issue-30-td34736774.html
but don't see what I'm doing wrong.
What do I have to do to implement a custom aggregate function?
Thanks,
Sebastian