diff --git a/.gitignore b/.gitignore index 4491eaf5..27b76e0f 100644 --- a/.gitignore +++ b/.gitignore @@ -158,4 +158,7 @@ setup.py docs *.swp *~ -.env \ No newline at end of file +.env +taos/*.c +taos/*.cpp +poetry.lock diff --git a/build.py b/build.py new file mode 100644 index 00000000..00c2af00 --- /dev/null +++ b/build.py @@ -0,0 +1,58 @@ +#!/usr/bin/env python +# -*- coding: UTF-8 -*- +""" +@Project :taos-connector-python +@File :build.py +@Author :hadrianl +@Date :2023/8/10 14:05 +""" + +from Cython.Build import cythonize, build_ext +from setuptools import Extension +# from setuptools.command.build_ext import build_ext +import platform + +compiler_directives = {"language_level": 3, "embedsignature": True} + + +def build(setup_kwargs): + if platform.system() == "Linux": + extensions = [ + Extension("taos._cinterface", ["taos/_cinterface.pyx"], + libraries=["taos"], + ), + Extension("taos._parser", ["taos/_parser.pyx"]), + Extension("taos._objects", ["taos/_objects.pyx"], + libraries=["taos"], + ), + Extension("taos._constants", ["taos/_constants.pyx"], + libraries=["taos"], + ), + ] + elif platform.system() == "Windows": + extensions = [ + Extension("taos._cinterface", ["taos/_cinterface.pyx"], + libraries=["taos"], + include_dirs=[r"C:\TDengine\include"], + library_dirs=[r"C:\TDengine\driver"], + ), + Extension("taos._parser", ["taos/_parser.pyx"]), + Extension("taos._objects", ["taos/_objects.pyx"], + libraries=["taos"], + include_dirs=[r"C:\TDengine\include"], + library_dirs=[r"C:\TDengine\driver"], + ), + Extension("taos._constants", ["taos/_constants.pyx"], + libraries=["taos"], + include_dirs=[r"C:\TDengine\include"], + library_dirs=[r"C:\TDengine\driver"], + ), + ] + else: + raise Exception("unsupported platform") + + setup_kwargs.update({ + "ext_modules": cythonize(extensions, compiler_directives=compiler_directives, force=True), + "cmdclass": {"build_ext": build_ext}, + }) + diff --git a/examples/cython/bind-multi.py b/examples/cython/bind-multi.py new file mode 100644 index 00000000..7a320ce4 --- /dev/null +++ b/examples/cython/bind-multi.py @@ -0,0 +1,48 @@ +from taos._objects import TaosConnection, TaosMultiBinds +import datetime as dt +import pytz + +conn = TaosConnection(host="localhost", timezone="Asia/Shanghai") +dbname = "pytest_taos_stmt_multi" +conn.execute("drop database if exists %s" % dbname) +conn.execute("create database if not exists %s" % dbname) +conn.select_db(dbname) + +conn.execute( + "create table if not exists log(ts timestamp, bo bool, nil tinyint, \ + ti tinyint, si smallint, ii int, bi bigint, tu tinyint unsigned, \ + su smallint unsigned, iu int unsigned, bu bigint unsigned, \ + ff float, dd double, bb binary(100), nn nchar(100), tt timestamp)", +) + +stmt = conn.statement("insert into log values(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)") + + +params = TaosMultiBinds(16) +params[0].timestamp((1626861392589, 1626861392590, 1626861392591)) +params[1].bool((True, None, False)) +params[2].tinyint([-128, -128, None]) # -128 is tinyint null +params[3].tinyint([0, 127, None]) +params[4].smallint([3, None, 2]) +params[5].int([3, 4, None]) +params[6].bigint([3, 4, None]) +params[7].tinyint_unsigned([3, 4, None]) +params[8].smallint_unsigned([3, 4, None]) +params[9].int_unsigned([3, 4, None]) +params[10].bigint_unsigned([3, 4, None]) +params[11].float([3, None, 1]) +params[12].double([3, None, 1.2]) +params[13].binary(["abc", "dddafadfadfadfadfa", None]) +params[14].nchar(["涛思数据", None, "a long string with 中文字符"]) 
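+# Note (editor's addition, not part of the original diff): each bind call takes
+# one value per row, given as a list or a tuple, with None marking SQL NULL for
+# every type except tinyint, where the sentinel -128 is used as shown above.
+# The timestamp bind accepts either epoch integers (params[0]) or datetime
+# objects (params[15] below); naive datetimes should be interpreted in the
+# connection's configured timezone, "Asia/Shanghai" here.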
+params[15].timestamp([dt.datetime.now(tz=pytz.timezone("Asia/Shanghai")), dt.datetime.now(tz=pytz.timezone("UTC")), dt.datetime.now()]) +stmt.bind_param_batch(params) +stmt.execute() + +assert stmt.affected_rows == 3 + +result = conn.query("select * from log") +for row in result: + print(row) + +conn.execute("drop database if exists %s" % dbname) +conn.close() \ No newline at end of file diff --git a/examples/cython/connection_usage_native_reference.py b/examples/cython/connection_usage_native_reference.py new file mode 100644 index 00000000..c6e41487 --- /dev/null +++ b/examples/cython/connection_usage_native_reference.py @@ -0,0 +1,45 @@ +from taos._objects import TaosConnection + +# ANCHOR: insert +conn = TaosConnection(host="localhost") +# Execute a sql, ignore the result set, just get affected rows. It's useful for DDL and DML statement. +conn.execute("DROP DATABASE IF EXISTS test") +conn.execute("CREATE DATABASE test") +# change database. same as execute "USE db" +conn.select_db("test") +conn.execute("CREATE STABLE weather(ts TIMESTAMP, temperature FLOAT) TAGS (location INT)") +affected_row = conn.execute("INSERT INTO t1 USING weather TAGS(1) VALUES (now, 23.5) (now+1m, 23.5) (now+2m, 24.4)") +print("affected_row", affected_row) +# output: +# affected_row 3 +# ANCHOR_END: insert + +# ANCHOR: query +# Execute a sql and get its result set. It's useful for SELECT statement +result = conn.query("SELECT * from weather") + +# Get fields from result +fields = result.fields +for field in fields: + print(field) # {name: ts, type: 9, bytes: 8} + +# output: +# {name: ts, type: 9, bytes: 8} +# {name: temperature, type: 6, bytes: 4} +# {name: location, type: 4, bytes: 4} + +# Get data from result as list of tuple +data = result.fetch_all() +print(data) +# output: +# [(datetime.datetime(2022, 4, 27, 9, 4, 25, 367000), 23.5, 1), (datetime.datetime(2022, 4, 27, 9, 5, 25, 367000), 23.5, 1), (datetime.datetime(2022, 4, 27, 9, 6, 25, 367000), 24.399999618530273, 1)] + +# Or get data from result as a list of dict +# map_data = result.fetch_all_into_dict() +# print(map_data) +# output: +# [{'ts': datetime.datetime(2022, 4, 27, 9, 1, 15, 343000), 'temperature': 23.5, 'location': 1}, {'ts': datetime.datetime(2022, 4, 27, 9, 2, 15, 343000), 'temperature': 23.5, 'location': 1}, {'ts': datetime.datetime(2022, 4, 27, 9, 3, 15, 343000), 'temperature': 24.399999618530273, 'location': 1}] +# ANCHOR_END: query + +conn.execute("DROP DATABASE IF EXISTS test") +conn.close() \ No newline at end of file diff --git a/examples/cython/connection_usage_native_reference_with_req_id.py b/examples/cython/connection_usage_native_reference_with_req_id.py new file mode 100644 index 00000000..938b3d70 --- /dev/null +++ b/examples/cython/connection_usage_native_reference_with_req_id.py @@ -0,0 +1,45 @@ +from taos._objects import TaosConnection + +# ANCHOR: insert +conn = TaosConnection(host="localhost") +# Execute a sql, ignore the result set, just get affected rows. It's useful for DDL and DML statement. +conn.execute("DROP DATABASE IF EXISTS test", req_id=1) +conn.execute("CREATE DATABASE test", req_id=2) +# change database. 
same as execute "USE db" +conn.select_db("test") +conn.execute("CREATE STABLE weather(ts TIMESTAMP, temperature FLOAT) TAGS (location INT)", req_id=3) +affected_row = conn.execute("INSERT INTO t1 USING weather TAGS(1) VALUES (now, 23.5) (now+1m, 23.5) (now+2m, 24.4)", req_id=4) +print("affected_row", affected_row) +# output: +# affected_row 3 +# ANCHOR_END: insert + +# ANCHOR: query +# Execute a sql and get its result set. It's useful for SELECT statement +result = conn.query("SELECT * from weather", req_id=5) + +# Get fields from result +fields = result.fields +for field in fields: + print(field) # {name: ts, type: 9, bytes: 8} + +# output: +# {name: ts, type: 9, bytes: 8} +# {name: temperature, type: 6, bytes: 4} +# {name: location, type: 4, bytes: 4} + +# Get data from result as list of tuple +data = result.fetch_all() +print(data) +# output: +# [(datetime.datetime(2022, 4, 27, 9, 4, 25, 367000), 23.5, 1), (datetime.datetime(2022, 4, 27, 9, 5, 25, 367000), 23.5, 1), (datetime.datetime(2022, 4, 27, 9, 6, 25, 367000), 24.399999618530273, 1)] + +# Or get data from result as a list of dict +# map_data = result.fetch_all_into_dict() +# print(map_data) +# output: +# [{'ts': datetime.datetime(2022, 4, 27, 9, 1, 15, 343000), 'temperature': 23.5, 'location': 1}, {'ts': datetime.datetime(2022, 4, 27, 9, 2, 15, 343000), 'temperature': 23.5, 'location': 1}, {'ts': datetime.datetime(2022, 4, 27, 9, 3, 15, 343000), 'temperature': 24.399999618530273, 'location': 1}] +# ANCHOR_END: query + +conn.execute("DROP DATABASE IF EXISTS test") +conn.close() \ No newline at end of file diff --git a/examples/cython/cursor_execute_many.py b/examples/cython/cursor_execute_many.py new file mode 100644 index 00000000..3883b934 --- /dev/null +++ b/examples/cython/cursor_execute_many.py @@ -0,0 +1,91 @@ +from taos._objects import TaosConnection + +env = { + 'user': "root", + 'password': "taosdata", + 'host': "localhost", + 'port': 6030, +} + + +def make_context(config): + db_protocol = config.get('db_protocol', 'taos') + db_user = config['user'] + db_pass = config['password'] + db_host = config['host'] + db_port = config['port'] + + db_url = f"{db_protocol}://{db_user}:{db_pass}@{db_host}:{db_port}" + print('dsn: ', db_url) + + conn = TaosConnection(**config) + + db_name = config.get('database', 'c_cursor') + + return conn, db_name + + +def test_cursor(): + conn, db_name = make_context(env) + + cur = conn.cursor() + + cur.execute(f"DROP DATABASE IF EXISTS {db_name}") + cur.execute(f"CREATE DATABASE {db_name}") + cur.execute(f"USE {db_name}") + + cur.execute("create stable stb (ts timestamp, v1 int) tags(t1 int)") + + create_table_data = [ + { + "name": "tb1", + "t1": 1, + }, + { + "name": "tb2", + "t1": 2, + }, + { + "name": "tb3", + "t1": 3, + } + ] + + res = cur.executemany( + "create table {name} using stb tags({t1})", + create_table_data, + ) + print(f"affected_rows: {res}") + assert res == 0 + + data = [ + ('2018-10-03 14:38:05.100', 219), + ('2018-10-03 14:38:15.300', 218), + ('2018-10-03 14:38:16.800', 221), + ] + + for table in create_table_data: + table_name = table['name'] + + res = cur.executemany( + f"insert into {table_name} values", + data, + ) + print(f"affected_rows: {res}") + assert res == 3 + + cur.execute('select * from stb') + + data = cur.fetchall() + column_names = [meta[0] for meta in cur.description] + print(column_names) + for r in data: + print(r) + + cur.execute(f"DROP DATABASE IF EXISTS {db_name}") + cur.close() + conn.close() + + +if __name__ == "__main__": + test_cursor() \ No newline at 
end of file diff --git a/examples/cython/demo.py b/examples/cython/demo.py new file mode 100644 index 00000000..7606f018 --- /dev/null +++ b/examples/cython/demo.py @@ -0,0 +1,24 @@ +from taos._objects import TaosConnection + +conn = TaosConnection(host="localhost") +cursor = conn.cursor() + +sql = "drop database if exists db" +cursor.execute(sql) +sql = "create database if not exists db" +cursor.execute(sql) +sql = "create table db.tb(ts timestamp, n int, bin binary(10), nc nchar(10))" +cursor.execute(sql) +sql = "insert into db.tb values (1650000000000, 1, 'abc', '北京')" +cursor.execute(sql) +sql = "insert into db.tb values (1650000000001, null, null, null)" +cursor.execute(sql) +sql = "select * from db.tb" +cursor.execute(sql) + +for row in cursor: + print(row) + +sql = "drop database if exists db" +cursor.execute(sql) +conn.close() diff --git a/examples/cython/import-json.py b/examples/cython/import-json.py new file mode 100644 index 00000000..9c83f951 --- /dev/null +++ b/examples/cython/import-json.py @@ -0,0 +1,34 @@ +import json +from taos._objects import TaosConnection +import requests + +j = requests.get("http://api.coindesk.com/v1/bpi/currentprice.json").json() + +# json to sql +ts = j["time"]["updatedISO"] +sql = "insert into" +for id in j["bpi"]: + bpi = j["bpi"][id] + sql += ' %s using price tags("%s","%s","%s") values("%s", %lf) ' % ( + id, + bpi["code"], + bpi["symbol"], + bpi["description"], + ts, + bpi["rate_float"], + ) + +# sql to TDengine +conn = TaosConnection(host="localhost") +conn.execute("drop database if exists bpi") +conn.execute("create database if not exists bpi") +conn.execute("use bpi") +conn.execute( + "create stable if not exists price (ts timestamp, rate double)" + + " tags (code binary(10), symbol binary(10), description binary(100))" +) +conn.execute(sql) +result = conn.query("select * from bpi.price") +print(result.fetch_all()) +conn.execute("drop database if exists bpi") +conn.close() \ No newline at end of file diff --git a/examples/cython/insert-lines.py b/examples/cython/insert-lines.py new file mode 100644 index 00000000..cbb7f096 --- /dev/null +++ b/examples/cython/insert-lines.py @@ -0,0 +1,25 @@ +from taos._objects import TaosConnection +from taos._constants import SmlProtocol, SmlPrecision + +conn = TaosConnection(host="localhost") +dbname = "pytest_line" +conn.execute("drop database if exists %s" % dbname) +conn.execute("create database if not exists %s precision 'us'" % dbname) +conn.select_db(dbname) + +lines = [ + 'st,t1=3i64,t2=4f64,t3="t3" c1=3i64,c3=L"pass",c2=false,c4=4f64 1626006833639000000', +] +conn.schemaless_insert(lines, SmlProtocol.LINE_PROTOCOL, SmlPrecision.NOT_CONFIGURED) +print("inserted") + +conn.schemaless_insert(lines, SmlProtocol.LINE_PROTOCOL, SmlPrecision.NOT_CONFIGURED) + +tb = conn.query("show tables").fetch_all()[0][0] +print(tb) +result = conn.query("select * from %s" % tb) +for row in result: + print(row) + +conn.execute("drop database if exists %s" % dbname) +conn.close() diff --git a/examples/cython/json-tag.py b/examples/cython/json-tag.py new file mode 100644 index 00000000..69358d2c --- /dev/null +++ b/examples/cython/json-tag.py @@ -0,0 +1,19 @@ +# encoding:UTF-8 +from taos._objects import TaosConnection + +conn = TaosConnection(host="localhost") +conn.execute("drop database if exists py_test_json_type") +conn.execute("create database if not exists py_test_json_type") +conn.execute("use py_test_json_type") + +conn.execute("create stable s1 (ts timestamp, v1 int) tags (info json)") +conn.execute("create table 
s1_1 using s1 tags ('{\"k1\": \"v1\"}')")
+tags = conn.query("select info, tbname from s1").fetch_all_into_dict()
+print(tags)
+
+k1 = conn.query("select info->'k1' as k1 from s1").fetch_all_into_dict()
+print(k1)
+
+conn.execute("drop database py_test_json_type")
+
+conn.close()
diff --git a/examples/cython/pep-249.py b/examples/cython/pep-249.py
new file mode 100644
index 00000000..754a5e46
--- /dev/null
+++ b/examples/cython/pep-249.py
@@ -0,0 +1,11 @@
+from taos._objects import TaosConnection
+
+conn = TaosConnection(host="localhost")
+cursor = conn.cursor()
+
+cursor.execute("show databases")
+results = cursor.fetchall()
+for row in results:
+    print(row)
+
+conn.close()
diff --git a/examples/cython/query-async.py b/examples/cython/query-async.py
new file mode 100644
index 00000000..33da0644
--- /dev/null
+++ b/examples/cython/query-async.py
@@ -0,0 +1,43 @@
+import asyncio
+import time
+from taos._objects import TaosConnection
+
+
+def print_all(fut: asyncio.Future):
+    result = fut.result()
+    print("result:", result)
+    print("fields:", result.fields)
+    # print("data:", result.fetch_all())  # do NOT call fetch_all directly here; await fetch_all_a on the result instead
+
+def callback_print_row(conn, cb=None):
+    query_task = asyncio.create_task(conn.query_a("select * from test.meters"))
+    if cb:
+        query_task.add_done_callback(cb)
+
+    return query_task
+
+async def test_query(conn):
+    conn.execute('create database if not exists test')
+    conn.execute(
+        'CREATE STABLE if not exists test.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) '
+        'TAGS (location BINARY(64), groupId INT)')
+
+    # iterate over rows asynchronously
+    result = await conn.query_a("select * from test.meters limit 1000")
+    print("result:", result)
+    async for row in result:
+        print("row:", row)
+
+
+    # callback, then fetch all data asynchronously
+    task = callback_print_row(conn, print_all)
+    res = await task
+    data = await res.fetch_all_a()
+    print(data)
+
+
+if __name__ == "__main__":
+    conn = TaosConnection(host="localhost")
+    loop = asyncio.get_event_loop()
+    loop.run_until_complete(test_query(conn))
+    conn.close()
diff --git a/examples/cython/query-objectively.py b/examples/cython/query-objectively.py
new file mode 100644
index 00000000..9e6ad8ca
--- /dev/null
+++ b/examples/cython/query-objectively.py
@@ -0,0 +1,14 @@
+from taos._objects import TaosConnection
+
+conn = TaosConnection(host="localhost")
+conn.execute("drop database if exists pytest")
+conn.execute("create database if not exists pytest")
+
+result = conn.query("show databases")
+num_of_fields = result.field_count
+for field in result.fields:
+    print(field)
+for row in result:
+    print(row)
+conn.execute("drop database if exists pytest")
+conn.close()
diff --git a/examples/cython/result_set_examples.py b/examples/cython/result_set_examples.py
new file mode 100644
index 00000000..6bd9ad18
--- /dev/null
+++ b/examples/cython/result_set_examples.py
@@ -0,0 +1,33 @@
+from taos._objects import TaosConnection
+
+conn = TaosConnection(host="localhost", timezone="Asia/Shanghai")
+conn.execute("DROP DATABASE IF EXISTS test")
+conn.execute("CREATE DATABASE test")
+conn.select_db("test")
+conn.execute("CREATE STABLE weather(ts TIMESTAMP, temperature FLOAT) TAGS (location INT)")
+# prepare data
+for i in range(2000):
+    location = str(i % 10)
+    tb = "t" + location
+    conn.execute(f"INSERT INTO {tb} USING weather TAGS({location}) VALUES (now+{i}a, 23.5) (now+{i + 1}a, 23.5)")
+
+result = conn.query("SELECT * FROM weather")
+
+block_index = 0
+blocks = result.blocks_iter()
+for rows, length in blocks:
+    print("block ",
block_index, " length", length) + print("first row in this block:", rows[0]) + block_index += 1 + +conn.close() + +# possible output: +# block 0 length 1200 +# first row in this block: (datetime.datetime(2022, 4, 27, 15, 14, 52, 46000), 23.5, 0) +# block 1 length 1200 +# first row in this block: (datetime.datetime(2022, 4, 27, 15, 14, 52, 76000), 23.5, 3) +# block 2 length 1200 +# first row in this block: (datetime.datetime(2022, 4, 27, 15, 14, 52, 99000), 23.5, 6) +# block 3 length 400 +# first row in this block: (datetime.datetime(2022, 4, 27, 15, 14, 52, 122000), 23.5, 9) diff --git a/examples/cython/result_set_with_req_id_examples.py b/examples/cython/result_set_with_req_id_examples.py new file mode 100644 index 00000000..9d237e4b --- /dev/null +++ b/examples/cython/result_set_with_req_id_examples.py @@ -0,0 +1,33 @@ +from taos._objects import TaosConnection + +conn = TaosConnection(host="localhost") +conn.execute("DROP DATABASE IF EXISTS test", req_id=1) +conn.execute("CREATE DATABASE test", req_id=2) +conn.select_db("test") +conn.execute("CREATE STABLE weather(ts TIMESTAMP, temperature FLOAT) TAGS (location INT)", req_id=3) +# prepare data +for i in range(2000): + location = str(i % 10) + tb = "t" + location + conn.execute(f"INSERT INTO {tb} USING weather TAGS({location}) VALUES (now+{i}a, 23.5) (now+{i + 1}a, 23.5)", req_id=4+i) + +result = conn.query("SELECT * FROM weather", req_id=2004) + +block_index = 0 +blocks = result.blocks_iter() +for rows, length in blocks: + print("block ", block_index, " length", length) + print("first row in this block:", rows[0]) + block_index += 1 + +conn.close() + +# possible output: +# block 0 length 1200 +# first row in this block: (datetime.datetime(2022, 4, 27, 15, 14, 52, 46000), 23.5, 0) +# block 1 length 1200 +# first row in this block: (datetime.datetime(2022, 4, 27, 15, 14, 52, 76000), 23.5, 3) +# block 2 length 1200 +# first row in this block: (datetime.datetime(2022, 4, 27, 15, 14, 52, 99000), 23.5, 6) +# block 3 length 400 +# first row in this block: (datetime.datetime(2022, 4, 27, 15, 14, 52, 122000), 23.5, 9) diff --git a/examples/cython/schemaless_insert.py b/examples/cython/schemaless_insert.py new file mode 100644 index 00000000..56d3b6e5 --- /dev/null +++ b/examples/cython/schemaless_insert.py @@ -0,0 +1,565 @@ +import taos +from taos._objects import TaosConnection +from taos import utils +from taos.error import OperationalError, SchemalessError, InterfaceError + + +def schemaless_insert(conn: TaosConnection) -> None: + dbname = "schemaless_insert" + try: + conn.execute("drop database if exists %s" % dbname) + if taos.IS_V3: + conn.execute("create database if not exists %s schemaless 1 precision 'ns'" % dbname) + else: + conn.execute("create database if not exists %s update 2 precision 'ns'" % dbname) + + conn.select_db(dbname) + + lines = [ + 'st,t1=3i64,t2=4f64,t3="t3" c1=3i64,c3=L"passit",c2=false,c4=4f64 1626006833639000000', + 'st,t1=4i64,t3="t4",t2=5f64,t4=5f64 c1=3i64,c3=L"passitagin, abc",c2=true,c4=5f64,c5=5f64,c6=7u64 1626006933640000000', + 'stf,t1=4i64,t3="t4",t2=5f64,t4=5f64 c1=3i64,c3=L"passitagin_stf",c2=false,c5=5f64,c6=7u64 1626006933641000000', + ] + res = conn.schemaless_insert(lines, 1, 0) + print("affected rows: ", res) + assert (res == 3) + + lines = [ + 'stf,t1=5i64,t3="t4",t2=5f64,t4=5f64 c1=3i64,c3=L"passitagin_stf",c2=false,c5=5f64,c6=7u64 1626006933641000000', + ] + res = conn.schemaless_insert(lines, 1, 0) + print("affected rows: ", res) + assert (res == 1) + result = conn.query("select * from st") + + 
dict2 = result.fetch_all_into_dict() + print(dict2) + print(result.row_count) + all = result.rows_iter() + for row in all: + print(row) + result.close() + assert (result.row_count == 2) + + # error test + lines = [ + ',t1=3i64,t2=4f64,t3="t3" c1=3i64,c3=L"passit",c2=false,c4=4f64 1626006833639000000', + ] + try: + res = conn.schemaless_insert(lines, 1, 0) + print(res) + # assert(False) + except SchemalessError as err: + print('**** error: ', err) + # assert (err.msg == 'Invalid data format') + + result = conn.query("select * from st") + print(result.row_count) + all = result.rows_iter() + for row in all: + print(row) + result.close() + + conn.execute("drop database if exists %s" % dbname) + except InterfaceError as err: + conn.execute("drop database if exists %s" % dbname) + print(err) + except Exception as err: + conn.execute("drop database if exists %s" % dbname) + conn.close() + print(err) + raise err + + +def schemaless_insert_with_req_id(conn: TaosConnection) -> None: + dbname = "taos_schemaless_insert" + try: + conn.execute("drop database if exists %s" % dbname) + if taos.IS_V3: + conn.execute("create database if not exists %s schemaless 1 precision 'ns'" % dbname) + else: + conn.execute("create database if not exists %s update 2 precision 'ns'" % dbname) + + conn.select_db(dbname) + + lines = [ + 'st,t1=3i64,t2=4f64,t3="t3" c1=3i64,c3=L"passit",c2=false,c4=4f64 1626006833639000000', + 'st,t1=4i64,t3="t4",t2=5f64,t4=5f64 c1=3i64,c3=L"passitagin, abc",c2=true,c4=5f64,c5=5f64,c6=7u64 1626006933640000000', + 'stf,t1=4i64,t3="t4",t2=5f64,t4=5f64 c1=3i64,c3=L"passitagin_stf",c2=false,c5=5f64,c6=7u64 1626006933641000000', + ] + req_id = utils.gen_req_id() + res = conn.schemaless_insert(lines, 1, 0, req_id) + print("affected rows: ", res) + assert (res == 3) + + lines = [ + 'stf,t1=5i64,t3="t4",t2=5f64,t4=5f64 c1=3i64,c3=L"passitagin_stf",c2=false,c5=5f64,c6=7u64 1626006933641000000', + ] + req_id = utils.gen_req_id() + res = conn.schemaless_insert(lines, 1, 0, req_id) + print("affected rows: ", res) + assert (res == 1) + result = conn.query("select * from st") + + dict2 = result.fetch_all_into_dict() + print(dict2) + result.row_count + all = result.rows_iter() + for row in all: + print(row) + result.close() + assert (result.row_count == 2) + + # error test + lines = [ + ',t1=3i64,t2=4f64,t3="t3" c1=3i64,c3=L"passit",c2=false,c4=4f64 1626006833639000000', + ] + try: + req_id = utils.gen_req_id() + res = conn.schemaless_insert(lines, 1, 0, req_id) + print(res) + # assert(False) + except SchemalessError as e: + print(e) + + req_id = utils.gen_req_id() + result = conn.query("select * from st", req_id) + all = result.rows_iter() + for row in all: + print(row) + result.close() + + conn.execute("drop database if exists %s" % dbname) + except InterfaceError as err: + conn.execute("drop database if exists %s" % dbname) + print(err) + except Exception as err: + conn.execute("drop database if exists %s" % dbname) + conn.close() + print(err) + raise err + + +def schemaless_insert_ttl(conn: TaosConnection) -> None: + dbname = "taos_schemaless_insert" + try: + conn.execute("drop database if exists %s" % dbname) + if taos.IS_V3: + conn.execute("create database if not exists %s schemaless 1 precision 'ns'" % dbname) + else: + conn.execute("create database if not exists %s update 2 precision 'ns'" % dbname) + + conn.select_db(dbname) + + lines = [ + 'st,t1=3i64,t2=4f64,t3="t3" c1=3i64,c3=L"passit",c2=false,c4=4f64 1626006833639000000', + 'st,t1=4i64,t3="t4",t2=5f64,t4=5f64 c1=3i64,c3=L"passitagin, 
abc",c2=true,c4=5f64,c5=5f64,c6=7u64 1626006933640000000', + 'stf,t1=4i64,t3="t4",t2=5f64,t4=5f64 c1=3i64,c3=L"passitagin_stf",c2=false,c5=5f64,c6=7u64 1626006933641000000', + ] + ttl = 1000 + res = conn.schemaless_insert(lines, 1, 0, ttl=ttl) + print("affected rows: ", res) + assert (res == 3) + + lines = [ + 'stf,t1=5i64,t3="t4",t2=5f64,t4=5f64 c1=3i64,c3=L"passitagin_stf",c2=false,c5=5f64,c6=7u64 1626006933641000000', + ] + ttl = 1000 + res = conn.schemaless_insert(lines, 1, 0, ttl=ttl) + print("affected rows: ", res) + assert (res == 1) + result = conn.query("select * from st") + + dict2 = result.fetch_all_into_dict() + print(dict2) + print(result.row_count) + all = result.rows_iter() + for row in all: + print(row) + result.close() + assert (result.row_count == 2) + + # error test + lines = [ + ',t1=3i64,t2=4f64,t3="t3" c1=3i64,c3=L"passit",c2=false,c4=4f64 1626006833639000000', + ] + try: + ttl = 1000 + res = conn.schemaless_insert(lines, 1, 0, ttl=ttl) + print(res) + # assert(False) + except SchemalessError as err: + print('**** error: ', err) + # assert (err.msg == 'Invalid data format') + + result = conn.query("select * from st") + print(result.row_count) + all = result.rows_iter() + for row in all: + print(row) + result.close() + + conn.execute("drop database if exists %s" % dbname) + except InterfaceError as err: + conn.execute("drop database if exists %s" % dbname) + print(err) + except Exception as err: + conn.execute("drop database if exists %s" % dbname) + conn.close() + print(err) + raise err + + +def schemaless_insert_ttl_with_req_id(conn: TaosConnection) -> None: + dbname = "taos_schemaless_insert" + try: + conn.execute("drop database if exists %s" % dbname) + if taos.IS_V3: + conn.execute("create database if not exists %s schemaless 1 precision 'ns'" % dbname) + else: + conn.execute("create database if not exists %s update 2 precision 'ns'" % dbname) + + conn.select_db(dbname) + + lines = [ + 'st,t1=3i64,t2=4f64,t3="t3" c1=3i64,c3=L"passit",c2=false,c4=4f64 1626006833639000000', + 'st,t1=4i64,t3="t4",t2=5f64,t4=5f64 c1=3i64,c3=L"passitagin, abc",c2=true,c4=5f64,c5=5f64,c6=7u64 1626006933640000000', + 'stf,t1=4i64,t3="t4",t2=5f64,t4=5f64 c1=3i64,c3=L"passitagin_stf",c2=false,c5=5f64,c6=7u64 1626006933641000000', + ] + ttl = 1000 + req_id = utils.gen_req_id() + res = conn.schemaless_insert(lines, 1, 0, ttl=ttl, req_id=req_id) + print("affected rows: ", res) + assert (res == 3) + + lines = [ + 'stf,t1=5i64,t3="t4",t2=5f64,t4=5f64 c1=3i64,c3=L"passitagin_stf",c2=false,c5=5f64,c6=7u64 1626006933641000000', + ] + ttl = 1000 + req_id = utils.gen_req_id() + res = conn.schemaless_insert(lines, 1, 0, ttl=ttl, req_id=req_id) + print("affected rows: ", res) + assert (res == 1) + result = conn.query("select * from st") + + dict2 = result.fetch_all_into_dict() + print(dict2) + print(result.row_count) + all = result.rows_iter() + for row in all: + print(row) + result.close() + assert (result.row_count == 2) + + # error test + lines = [ + ',t1=3i64,t2=4f64,t3="t3" c1=3i64,c3=L"passit",c2=false,c4=4f64 1626006833639000000', + ] + try: + ttl = 1000 + req_id = utils.gen_req_id() + res = conn.schemaless_insert(lines, 1, 0, ttl=ttl, req_id=req_id) + print(res) + # assert(False) + except SchemalessError as err: + print('**** error: ', err) + # assert (err.msg == 'Invalid data format') + + result = conn.query("select * from st") + print(result.row_count) + all = result.rows_iter() + for row in all: + print(row) + result.close() + + conn.execute("drop database if exists %s" % dbname) + except 
InterfaceError as err: + conn.execute("drop database if exists %s" % dbname) + print(err) + except Exception as err: + conn.execute("drop database if exists %s" % dbname) + conn.close() + print(err) + raise err + + +def schemaless_insert_raw(conn: TaosConnection) -> None: + dbname = "taos_schemaless_insert" + try: + conn.execute("drop database if exists %s" % dbname) + + if taos.IS_V3: + conn.execute("create database if not exists %s schemaless 1 precision 'ns'" % dbname) + else: + conn.execute("create database if not exists %s update 2 precision 'ns'" % dbname) + + conn.select_db(dbname) + + lines = '''st,t1=3i64,t2=4f64,t3="t3" c1=3i64,c3=L"passit",c2=false,c4=4f64 1626006833639000000 + st,t1=4i64,t3="t4",t2=5f64,t4=5f64 c1=3i64,c3=L"passitagin, abc",c2=true,c4=5f64,c5=5f64,c6=7u64 1626006933640000000 + stf,t1=4i64,t3="t4",t2=5f64,t4=5f64 c1=3i64,c3=L"passitagin_stf",c2=false,c5=5f64,c6=7u64 1626006933641000000''' + + res = conn.schemaless_insert_raw(lines, 1, 0) + print("affected rows: ", res) + assert (res == 3) + + lines = '''stf,t1=5i64,t3="t4",t2=5f64,t4=5f64 c1=3i64,c3=L"passitagin_stf",c2=false,c5=5f64,c6=7u64 1626006933641000000''' + res = conn.schemaless_insert_raw(lines, 1, 0) + print("affected rows: ", res) + assert (res == 1) + + result = conn.query("select * from st") + dict2 = result.fetch_all_into_dict() + print(dict2) + print(result.row_count) + + all = result.rows_iter() + for row in all: + print(row) + result.close() + assert (result.row_count == 2) + + # error test + lines = ''',t1=3i64,t2=4f64,t3="t3" c1=3i64,c3=L"passit",c2=false,c4=4f64 1626006833639000000''' + try: + res = conn.schemaless_insert_raw(lines, 1, 0) + print(res) + # assert(False) + except SchemalessError as err: + print('**** error: ', err) + # assert (err.msg == 'Invalid data format') + + result = conn.query("select * from st") + print(result.row_count) + all = result.rows_iter() + for row in all: + print(row) + result.close() + + conn.execute("drop database if exists %s" % dbname) + except InterfaceError as err: + conn.execute("drop database if exists %s" % dbname) + conn.close() + raise err + except SchemalessError as err: + conn.execute("drop database if exists %s" % dbname) + print(err) + except Exception as err: + conn.execute("drop database if exists %s" % dbname) + conn.close() + print(err) + raise err + + +def schemaless_insert_raw_with_req_id(conn: TaosConnection) -> None: + dbname = "taos_schemaless_insert" + try: + conn.execute("drop database if exists %s" % dbname) + + if taos.IS_V3: + conn.execute("create database if not exists %s schemaless 1 precision 'ns'" % dbname) + else: + conn.execute("create database if not exists %s update 2 precision 'ns'" % dbname) + + conn.select_db(dbname) + + lines = '''st,t1=3i64,t2=4f64,t3="t3" c1=3i64,c3=L"passit",c2=false,c4=4f64 1626006833639000000 + st,t1=4i64,t3="t4",t2=5f64,t4=5f64 c1=3i64,c3=L"passitagin, abc",c2=true,c4=5f64,c5=5f64,c6=7u64 1626006933640000000 + stf,t1=4i64,t3="t4",t2=5f64,t4=5f64 c1=3i64,c3=L"passitagin_stf",c2=false,c5=5f64,c6=7u64 1626006933641000000''' + + req_id = utils.gen_req_id() + res = conn.schemaless_insert_raw(lines, 1, 0, req_id=req_id) + print("affected rows: ", res) + assert (res == 3) + + lines = '''stf,t1=5i64,t3="t4",t2=5f64,t4=5f64 c1=3i64,c3=L"passitagin_stf",c2=false,c5=5f64,c6=7u64 1626006933641000000''' + req_id = utils.gen_req_id() + res = conn.schemaless_insert_raw(lines, 1, 0, req_id=req_id) + print("affected rows: ", res) + assert (res == 1) + + result = conn.query("select * from st") + dict2 = 
result.fetch_all_into_dict() + print(dict2) + print(result.row_count) + + all = result.rows_iter() + for row in all: + print(row) + result.close() + assert (result.row_count == 2) + + # error test + lines = ''',t1=3i64,t2=4f64,t3="t3" c1=3i64,c3=L"passit",c2=false,c4=4f64 1626006833639000000''' + try: + req_id = utils.gen_req_id() + res = conn.schemaless_insert_raw(lines, 1, 0, req_id=req_id) + print(res) + # assert(False) + except SchemalessError as err: + print('**** error: ', err) + # assert (err.msg == 'Invalid data format') + + result = conn.query("select * from st") + print(result.row_count) + all = result.rows_iter() + for row in all: + print(row) + result.close() + + conn.execute("drop database if exists %s" % dbname) + except InterfaceError as err: + conn.execute("drop database if exists %s" % dbname) + print(err) + except Exception as err: + conn.execute("drop database if exists %s" % dbname) + conn.close() + print(err) + raise err + + +def schemaless_insert_raw_ttl(conn: TaosConnection) -> None: + dbname = "taos_schemaless_insert" + try: + conn.execute("drop database if exists %s" % dbname) + + if taos.IS_V3: + conn.execute("create database if not exists %s schemaless 1 precision 'ns'" % dbname) + else: + conn.execute("create database if not exists %s update 2 precision 'ns'" % dbname) + + conn.select_db(dbname) + + lines = '''st,t1=3i64,t2=4f64,t3="t3" c1=3i64,c3=L"passit",c2=false,c4=4f64 1626006833639000000 + st,t1=4i64,t3="t4",t2=5f64,t4=5f64 c1=3i64,c3=L"passitagin, abc",c2=true,c4=5f64,c5=5f64,c6=7u64 1626006933640000000 + stf,t1=4i64,t3="t4",t2=5f64,t4=5f64 c1=3i64,c3=L"passitagin_stf",c2=false,c5=5f64,c6=7u64 1626006933641000000''' + + ttl = 1000 + res = conn.schemaless_insert_raw(lines, 1, 0, ttl=ttl) + print("affected rows: ", res) + assert (res == 3) + + lines = '''stf,t1=5i64,t3="t4",t2=5f64,t4=5f64 c1=3i64,c3=L"passitagin_stf",c2=false,c5=5f64,c6=7u64 1626006933641000000''' + ttl = 1000 + res = conn.schemaless_insert_raw(lines, 1, 0, ttl=ttl) + print("affected rows: ", res) + assert (res == 1) + + result = conn.query("select * from st") + dict2 = result.fetch_all_into_dict() + print(dict2) + print(result.row_count) + + all = result.rows_iter() + for row in all: + print(row) + result.close() + assert (result.row_count == 2) + + # error test + lines = ''',t1=3i64,t2=4f64,t3="t3" c1=3i64,c3=L"passit",c2=false,c4=4f64 1626006833639000000''' + try: + ttl = 1000 + res = conn.schemaless_insert_raw(lines, 1, 0, ttl=ttl) + print(res) + # assert(False) + except SchemalessError as err: + print('**** error: ', err) + # assert (err.msg == 'Invalid data format') + + result = conn.query("select * from st") + print(result.row_count) + all = result.rows_iter() + for row in all: + print(row) + result.close() + + conn.execute("drop database if exists %s" % dbname) + except InterfaceError as err: + conn.execute("drop database if exists %s" % dbname) + print(err) + except Exception as err: + conn.execute("drop database if exists %s" % dbname) + conn.close() + print(err) + raise err + + +def schemaless_insert_raw_ttl_with_req_id(conn: TaosConnection) -> None: + dbname = "taos_schemaless_insert" + try: + conn.execute("drop database if exists %s" % dbname) + + if taos.IS_V3: + conn.execute("create database if not exists %s schemaless 1 precision 'ns'" % dbname) + else: + conn.execute("create database if not exists %s update 2 precision 'ns'" % dbname) + + conn.select_db(dbname) + + lines = '''st,t1=3i64,t2=4f64,t3="t3" c1=3i64,c3=L"passit",c2=false,c4=4f64 1626006833639000000 + 
st,t1=4i64,t3="t4",t2=5f64,t4=5f64 c1=3i64,c3=L"passitagin, abc",c2=true,c4=5f64,c5=5f64,c6=7u64 1626006933640000000 + stf,t1=4i64,t3="t4",t2=5f64,t4=5f64 c1=3i64,c3=L"passitagin_stf",c2=false,c5=5f64,c6=7u64 1626006933641000000''' + + ttl = 1000 + req_id = utils.gen_req_id() + res = conn.schemaless_insert_raw(lines, 1, 0, ttl=ttl, req_id=req_id) + print("affected rows: ", res) + assert (res == 3) + + lines = '''stf,t1=5i64,t3="t4",t2=5f64,t4=5f64 c1=3i64,c3=L"passitagin_stf",c2=false,c5=5f64,c6=7u64 1626006933641000000''' + ttl = 1000 + req_id = utils.gen_req_id() + res = conn.schemaless_insert_raw(lines, 1, 0, ttl=ttl, req_id=req_id) + print("affected rows: ", res) + assert (res == 1) + + result = conn.query("select * from st") + dict2 = result.fetch_all_into_dict() + print(dict2) + print(result.row_count) + + all = result.rows_iter() + for row in all: + print(row) + result.close() + assert (result.row_count == 2) + + # error test + lines = ''',t1=3i64,t2=4f64,t3="t3" c1=3i64,c3=L"passit",c2=false,c4=4f64 1626006833639000000''' + try: + ttl = 1000 + req_id = utils.gen_req_id() + res = conn.schemaless_insert_raw(lines, 1, 0, ttl=ttl, req_id=req_id) + print(res) + # assert(False) + except SchemalessError as err: + print('**** error: ', err) + # assert (err.msg == 'Invalid data format') + + result = conn.query("select * from st") + print(result.row_count) + all = result.rows_iter() + for row in all: + print(row) + result.close() + + conn.execute("drop database if exists %s" % dbname) + except InterfaceError as err: + conn.execute("drop database if exists %s" % dbname) + print(err) + except Exception as err: + conn.execute("drop database if exists %s" % dbname) + conn.close() + print(err) + raise err + + +if __name__ == '__main__': + conn = TaosConnection(host="localhost") + schemaless_insert(conn) + schemaless_insert_with_req_id(conn) + schemaless_insert_raw(conn) + schemaless_insert_raw_with_req_id(conn) + schemaless_insert_raw_ttl(conn) + schemaless_insert_raw_ttl_with_req_id(conn) diff --git a/examples/cython/tmq_assignment.py b/examples/cython/tmq_assignment.py new file mode 100644 index 00000000..50cea07a --- /dev/null +++ b/examples/cython/tmq_assignment.py @@ -0,0 +1,67 @@ +#! 
+from taos._objects import TaosConnection, TaosConsumer
+import taos
+from taos.tmq import Consumer
+import time
+
+
+def prepare(conn):
+    conn.execute("drop topic if exists tmq_assignment_demo_topic")
+    conn.execute("drop database if exists tmq_assignment_demo_db")
+    conn.execute("create database if not exists tmq_assignment_demo_db wal_retention_period 3600")
+    conn.select_db("tmq_assignment_demo_db")
+    conn.execute(
+        "create table if not exists tmq_assignment_demo_table (ts timestamp, c1 int, c2 float, c3 binary(10)) tags(t1 int)")
+    conn.execute(
+        "create topic if not exists tmq_assignment_demo_topic as select ts, c1, c2, c3 from tmq_assignment_demo_table")
+    conn.execute("insert into d0 using tmq_assignment_demo_table tags (0) values (now-2s, 1, 1.0, 'tmq test')")
+    conn.execute("insert into d0 using tmq_assignment_demo_table tags (0) values (now-1s, 2, 2.0, 'tmq test')")
+    conn.execute("insert into d0 using tmq_assignment_demo_table tags (0) values (now, 3, 3.0, 'tmq test')")
+
+def cleanup(conn):
+    conn.execute("drop topic if exists tmq_assignment_demo_topic")
+
+def taos_get_assignment_and_seek_demo():
+    consumer = TaosConsumer(
+        {
+            "group.id": "0",
+            # snapshot must be disabled,
+            # otherwise seeking will fail with an invalid-params error
+            'td.connect.ip': "localhost",
+            'td.connect.user': "root",
+            'td.connect.pass': "taosdata",
+            'td.connect.port': "6030",
+            # 'td.connect.db',
+            "experimental.snapshot.enable": "false",
+        }
+    )
+    consumer.subscribe(["tmq_assignment_demo_topic"])
+
+    # get the topic assignment
+    assignments = consumer.assignment()
+    for assignment in assignments:
+        print(assignment)
+
+    # poll twice so the current offsets advance
+    consumer.poll(1)
+    consumer.poll(1)
+
+    # get the topic assignment again
+    after_poll_assignments = consumer.assignment()
+    for assignment in after_poll_assignments:
+        print(assignment)
+
+    # seek back to the beginning
+    for assignment in assignments:
+        consumer.seek(assignment)
+
+    # now the assignment should match the one taken before polling
+    assignments = consumer.assignment()
+    for assignment in assignments:
+        print(assignment)
+
+if __name__ == "__main__":
+    conn = TaosConnection(host="localhost")
+    prepare(conn)
+    taos_get_assignment_and_seek_demo()
+    cleanup(conn)
diff --git a/examples/cython/tmq_consumer.py b/examples/cython/tmq_consumer.py
new file mode 100644
index 00000000..531ac681
--- /dev/null
+++ b/examples/cython/tmq_consumer.py
@@ -0,0 +1,47 @@
+from taos._objects import TaosConnection, TaosConsumer
+
+def init_tmq_env(db, topic):
+    conn = TaosConnection(host="localhost")
+    conn.execute("drop topic if exists {}".format(topic))
+    conn.execute("drop database if exists {}".format(db))
+    conn.execute("create database if not exists {} WAL_RETENTION_PERIOD 3600000 ".format(db))
+    conn.select_db(db)
+    conn.execute("create stable if not exists stb1 (ts timestamp, c1 int, c2 float, c3 binary(10)) tags(t1 int)")
+    conn.execute("create table if not exists tb1 using stb1 tags(1)")
+    conn.execute("create table if not exists tb2 using stb1 tags(2)")
+    conn.execute("create table if not exists tb3 using stb1 tags(3)")
+    conn.execute("create topic if not exists {} as select ts, c1, c2, c3 from stb1".format(topic))
+    conn.execute("insert into tb1 values (now-2s, 1, 1.0, 'tmq test')")
+    conn.execute("insert into tb2 values (now-1s, 2, 2.0, 'tmq test')")
+    conn.execute("insert into tb3 values (now, 3, 3.0, 'tmq test')")
+
+
+if __name__ == '__main__':
+    # init env
+    init_tmq_env("tmq_test", "tmq_test_topic")
+    consumer = TaosConsumer(
+        {
+            "group.id": "test",
+            'td.connect.ip': "localhost",
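+            # Editor's note (added): the td.connect.* keys mirror the native
+            # connection parameters (host/user/password/port) and are
+            # presumably forwarded to the tmq_conf_set call declared in
+            # _cinterface.pxd.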
"td.connect.user": "root", + "td.connect.pass": "taosdata", + "msg.with.table.name": "true", + "enable.auto.commit": "true", + }, + timezone="Asia/Shanghai" + ) + consumer.subscribe(["tmq_test_topic"]) + consumer.set_auto_commit_cb(print, print) + + while True: + res = consumer.poll(10) + if not res: + consumer.close() + break + err = res.error() + if err is not None: + raise err + val = res.value() + + for block in val: + print(block.fetchall()) diff --git a/pyproject.toml b/pyproject.toml index b93b9502..cbbc2f14 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -10,6 +10,10 @@ packages = [ { include = "taosrest" }, ] +[tool.poetry.build] +generate-setup-file = true +script = "build.py" + [tool.poetry.plugins] # Optional super table [tool.poetry.plugins."sqlalchemy.dialects"] @@ -38,6 +42,7 @@ pytest = [ { version = "^4.6", python = ">=2.7,<3.0" }, { version = "^6.2", python = ">=3.7,<4.0" }, ] +pytest-asyncio = { version = "^0.16.0" } mypy = { version = "^0.910", python = "^3.6" } black = [{ version = ">=21.0", python = ">=3.6.2,<4.0" }] sqlalchemy = { version = "^1.4", python = ">=3.6,<4.0" } @@ -45,7 +50,7 @@ pandas = { version = ">=1.0", python = ">=3.8,<4.0" } python-dotenv = { version = "0.20.0" } [build-system] -requires = ["poetry-core>=1.0.5"] +requires = ["poetry-core>=1.6.1", "Cython>=3.0.0", "setuptools>=62.0.0"] build-backend = "poetry.core.masonry.api" [tool.black] diff --git a/taos/_cinterface.pxd b/taos/_cinterface.pxd new file mode 100644 index 00000000..39fe166f --- /dev/null +++ b/taos/_cinterface.pxd @@ -0,0 +1,180 @@ +from libc.stdint cimport int8_t, int16_t, int32_t, int64_t, uint8_t, uint16_t, uint32_t, uint64_t, uintptr_t + +ctypedef bint bool + +cdef extern from "taos.h": + ctypedef void TAOS + ctypedef void TAOS_STMT + ctypedef void TAOS_RES + ctypedef void **TAOS_ROW + ctypedef struct TAOS_FIELD: + char name[65] + int8_t type + int32_t bytes + ctypedef enum TSDB_OPTION: + TSDB_OPTION_LOCALE + TSDB_OPTION_CHARSET + TSDB_OPTION_TIMEZONE + TSDB_OPTION_CONFIGDIR + TSDB_OPTION_SHELL_ACTIVITY_TIMER + TSDB_OPTION_USE_ADAPTER + TSDB_MAX_OPTIONS + ctypedef enum TSDB_SML_PROTOCOL_TYPE: + TSDB_SML_UNKNOWN_PROTOCOL + TSDB_SML_LINE_PROTOCOL + TSDB_SML_TELNET_PROTOCOL + TSDB_SML_JSON_PROTOCOL + ctypedef enum TSDB_SML_TIMESTAMP_TYPE: + TSDB_SML_TIMESTAMP_NOT_CONFIGURED + TSDB_SML_TIMESTAMP_HOURS + TSDB_SML_TIMESTAMP_MINUTES + TSDB_SML_TIMESTAMP_SECONDS + TSDB_SML_TIMESTAMP_MILLI_SECONDS + TSDB_SML_TIMESTAMP_MICRO_SECONDS + TSDB_SML_TIMESTAMP_NANO_SECONDS + ctypedef struct TAOS_MULTI_BIND: + int buffer_type + void *buffer + uintptr_t buffer_length + int32_t *length + char *is_null + int num + + ctypedef void (*__taos_async_fn_t)(void *param, TAOS_RES *res, int num_of_rows) except * + int TSDB_DATA_TYPE_NULL + int TSDB_DATA_TYPE_BOOL + int TSDB_DATA_TYPE_TINYINT + int TSDB_DATA_TYPE_SMALLINT + int TSDB_DATA_TYPE_INT + int TSDB_DATA_TYPE_BIGINT + int TSDB_DATA_TYPE_FLOAT + int TSDB_DATA_TYPE_DOUBLE + int TSDB_DATA_TYPE_VARCHAR + int TSDB_DATA_TYPE_TIMESTAMP + int TSDB_DATA_TYPE_NCHAR + int TSDB_DATA_TYPE_UTINYINT + int TSDB_DATA_TYPE_USMALLINT + int TSDB_DATA_TYPE_UINT + int TSDB_DATA_TYPE_UBIGINT + int TSDB_DATA_TYPE_JSON + int TSDB_DATA_TYPE_VARBINARY + int TSDB_DATA_TYPE_DECIMAL + int TSDB_DATA_TYPE_BLOB + int TSDB_DATA_TYPE_MEDIUMBLOB + int TSDB_DATA_TYPE_BINARY + int TSDB_DATA_TYPE_GEOMETRY + int TSDB_DATA_TYPE_MAX + int taos_init() + bool taos_is_null(TAOS_RES *res, int32_t row, int32_t col) + TAOS_FIELD *taos_fetch_fields(TAOS_RES *res) + void taos_stop_query(TAOS_RES 
*res) + int taos_field_count(TAOS_RES *res) + int taos_fetch_block(TAOS_RES *res, TAOS_ROW *rows) + int taos_result_precision(TAOS_RES *res) + int *taos_get_column_data_offset(TAOS_RES *res, int columnIndex) + int taos_validate_sql(TAOS *taos, const char *sql) + int taos_errno(TAOS_RES *res) + char *taos_errstr(TAOS_RES *res) + TAOS *taos_connect(const char *ip, const char *user, const char *password, const char *db, uint16_t port) + void taos_close(TAOS *taos) + int taos_options(TSDB_OPTION option, const void *arg, ...) + const char *taos_get_client_info() + const char *taos_get_server_info(TAOS *taos) + int taos_get_current_db(TAOS *taos, char *database, int len, int *required) + int taos_select_db(TAOS *taos, const char *db) + TAOS_RES *taos_query(TAOS *taos, const char *sql) + TAOS_RES *taos_query_with_reqid(TAOS *taos, const char *sql, int64_t reqId) + int taos_affected_rows(TAOS_RES *res) + void taos_free_result(TAOS_RES *res) + TAOS_ROW taos_fetch_row(TAOS_RES *res) + void taos_fetch_rows_a(TAOS_RES *res, __taos_async_fn_t fp, void *param) + void taos_query_a(TAOS *taos, const char *sql, __taos_async_fn_t fp, void *param) + void taos_query_a_with_reqid(TAOS *taos, const char *sql, __taos_async_fn_t fp, void *param, int64_t reqid) + void taos_fetch_raw_block_a(TAOS_RES *res, __taos_async_fn_t fp, void *param) + const void *taos_get_raw_block(TAOS_RES *res) + + TAOS_STMT *taos_stmt_init(TAOS *taos) + int taos_stmt_prepare(TAOS_STMT *stmt, const char *sql, unsigned long length) + int taos_stmt_set_tbname(TAOS_STMT *stmt, const char *name) + int taos_stmt_set_tbname_tags(TAOS_STMT *stmt, const char *name, TAOS_MULTI_BIND *tags) + int taos_stmt_affected_rows(TAOS_STMT *stmt) + TAOS_RES *taos_stmt_use_result(TAOS_STMT *stmt) + int taos_stmt_close(TAOS_STMT *stmt) + int taos_stmt_execute(TAOS_STMT *stmt) + int taos_stmt_add_batch(TAOS_STMT *stmt) + int taos_stmt_bind_param(TAOS_STMT *stmt, TAOS_MULTI_BIND *bind) + int taos_stmt_bind_param_batch(TAOS_STMT *stmt, TAOS_MULTI_BIND *bind) + int taos_get_table_vgId(TAOS *taos, const char *db, const char *table, int *vgId) + int taos_load_table_info(TAOS *taos, const char *tableNameList) + char *taos_stmt_errstr(TAOS_STMT *stmt) + + TAOS_RES *taos_schemaless_insert(TAOS *taos, char *lines[], int numLines, int protocol, int precision) + TAOS_RES *taos_schemaless_insert_with_reqid(TAOS *taos, char *lines[], int numLines, int protocol, int precision, int64_t reqid) + TAOS_RES *taos_schemaless_insert_raw(TAOS *taos, char *lines, int len, int32_t *totalRows, int protocol, int precision) + TAOS_RES *taos_schemaless_insert_raw_with_reqid(TAOS *taos, char *lines, int len, int32_t *totalRows, int protocol, int precision, int64_t reqid) + TAOS_RES *taos_schemaless_insert_ttl(TAOS *taos, char *lines[], int numLines, int protocol, int precision, int32_t ttl) + TAOS_RES *taos_schemaless_insert_ttl_with_reqid(TAOS *taos, char *lines[], int numLines, int protocol, int precision, int32_t ttl, int64_t reqid) + TAOS_RES *taos_schemaless_insert_raw_ttl(TAOS *taos, char *lines, int len, int32_t *totalRows, int protocol, int precision, int32_t ttl) + TAOS_RES *taos_schemaless_insert_raw_ttl_with_reqid(TAOS *taos, char *lines, int len, int32_t *totalRows, int protocol, int precision, int32_t ttl, int64_t reqid) + ctypedef struct tmq_t: + pass + ctypedef struct tmq_conf_t: + pass + ctypedef struct tmq_list_t: + pass + ctypedef void tmq_commit_cb(tmq_t *tmq, int32_t code, void *param) except * + ctypedef enum tmq_conf_res_t: + TMQ_CONF_UNKNOWN + TMQ_CONF_INVALID + 
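+        # Editor's note (added): tmq_conf_set returns one of these codes per
+        # key/value pair; anything other than TMQ_CONF_OK is presumably
+        # surfaced as a TmqError by the consumer wrapper in _objects.pyx.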
        TMQ_CONF_OK
+
+    ctypedef struct tmq_topic_assignment:
+        int32_t vgId
+        int64_t currentOffset
+        int64_t begin
+        int64_t end
+
+    tmq_conf_t *tmq_conf_new()
+    tmq_conf_res_t tmq_conf_set(tmq_conf_t *conf, const char *key, const char *value)
+    void tmq_conf_destroy(tmq_conf_t *conf)
+    void tmq_conf_set_auto_commit_cb(tmq_conf_t *conf, tmq_commit_cb *cb, void *param)
+
+    tmq_list_t *tmq_list_new()
+    int32_t tmq_list_append(tmq_list_t *, const char *)
+    void tmq_list_destroy(tmq_list_t *)
+    int32_t tmq_list_get_size(const tmq_list_t *)
+    char **tmq_list_to_c_array(const tmq_list_t *)
+
+    tmq_t *tmq_consumer_new(tmq_conf_t *conf, char *errstr, int32_t errstrLen)
+    int32_t tmq_subscribe(tmq_t *tmq, const tmq_list_t *topic_list)
+    int32_t tmq_unsubscribe(tmq_t *tmq)
+    int32_t tmq_subscription(tmq_t *tmq, tmq_list_t **topics)
+    TAOS_RES *tmq_consumer_poll(tmq_t *tmq, int64_t timeout)
+    int32_t tmq_consumer_close(tmq_t *tmq)
+    int32_t tmq_commit_sync(tmq_t *tmq, const TAOS_RES *msg)
+    int32_t tmq_commit_offset_sync(tmq_t *tmq, const char *pTopicName, int32_t vgId, int64_t offset)
+    void tmq_commit_async(tmq_t *tmq, const TAOS_RES *msg, tmq_commit_cb *cb, void *param)
+    void tmq_commit_offset_async(tmq_t *tmq, const char *pTopicName, int32_t vgId, int64_t offset, tmq_commit_cb *cb, void *param)
+    int32_t tmq_get_topic_assignment(tmq_t *tmq, const char *pTopicName, tmq_topic_assignment **assignment, int32_t *numOfAssignment)
+    void tmq_free_assignment(tmq_topic_assignment *pAssignment)
+    int32_t tmq_offset_seek(tmq_t *tmq, const char *pTopicName, int32_t vgId, int64_t offset)
+    int64_t tmq_position(tmq_t *tmq, const char *pTopicName, int32_t vgId)
+    int64_t tmq_committed(tmq_t *tmq, const char *pTopicName, int32_t vgId)
+
+    const char *tmq_get_topic_name(TAOS_RES *res)
+    const char *tmq_get_db_name(TAOS_RES *res)
+    int32_t tmq_get_vgroup_id(TAOS_RES *res)
+    int64_t tmq_get_vgroup_offset(TAOS_RES *res)
+    const char *tmq_err2str(int32_t code)
+
+    ctypedef enum tmq_res_t:
+        TMQ_RES_INVALID
+        TMQ_RES_DATA
+        TMQ_RES_TABLE_META
+        TMQ_RES_METADATA
+
+    const char *tmq_get_table_name(TAOS_RES *res)
+    tmq_res_t tmq_get_res_type(TAOS_RES *res)
+
+cdef bool *taos_get_column_data_is_null(TAOS_RES *res, int field, int rows)
+cdef taos_fetch_block_v3(TAOS_RES *res, TAOS_FIELD *fields, int field_count, object dt_epoch)
diff --git a/taos/_cinterface.pyx b/taos/_cinterface.pyx
new file mode 100644
index 00000000..56b26078
--- /dev/null
+++ b/taos/_cinterface.pyx
@@ -0,0 +1,127 @@
+# cython: profile=True

+from libc.stdlib cimport malloc, free
+import cython
+import datetime as dt
+import pytz
+from collections import namedtuple
+from taos.error import ProgrammingError, OperationalError, ConnectionError, DatabaseError, StatementError, InternalError
+from taos._parser cimport (_parse_bool, _parse_int8_t, _parse_int16_t, _parse_int, _parse_int64_t, _parse_float, _parse_double, _parse_timestamp,
+                           _parse_uint8_t, _parse_uint16_t, _parse_uint, _parse_uint64_t, _parse_string, _parse_bytes, _parse_datetime, _convert_timestamp_to_datetime)
+from taos._constants import PrecisionEnum
+
+cdef bool *taos_get_column_data_is_null(TAOS_RES *res, int field, int rows):
+    is_null = <bool *> malloc(rows * sizeof(bool))
+    cdef int r
+    for r in range(rows):
+        is_null[r] = taos_is_null(res, r, field)
+
+    return is_null
+
+# --------------------------------------------- parser ---------------------------------------------------------------v
+
+# --------------------------------------------- parser ---------------------------------------------------------------^
+SIZED_TYPE = {
+    TSDB_DATA_TYPE_BOOL,
+    TSDB_DATA_TYPE_TINYINT,
+    TSDB_DATA_TYPE_SMALLINT,
+    TSDB_DATA_TYPE_INT,
+    TSDB_DATA_TYPE_BIGINT,
+    TSDB_DATA_TYPE_FLOAT,
+    TSDB_DATA_TYPE_DOUBLE,
+    TSDB_DATA_TYPE_UTINYINT,
+    TSDB_DATA_TYPE_USMALLINT,
+    TSDB_DATA_TYPE_UINT,
+    TSDB_DATA_TYPE_UBIGINT,
+    TSDB_DATA_TYPE_TIMESTAMP,
+}
+
+UNSIZED_TYPE = {
+    TSDB_DATA_TYPE_VARCHAR,
+    TSDB_DATA_TYPE_NCHAR,
+    TSDB_DATA_TYPE_JSON,
+    TSDB_DATA_TYPE_VARBINARY,
+    TSDB_DATA_TYPE_BINARY,
+}
+
+CONVERT_FUNC = {
+    TSDB_DATA_TYPE_BOOL: _parse_bool,
+    TSDB_DATA_TYPE_TINYINT: _parse_int8_t,
+    TSDB_DATA_TYPE_SMALLINT: _parse_int16_t,
+    TSDB_DATA_TYPE_INT: _parse_int,
+    TSDB_DATA_TYPE_BIGINT: _parse_int64_t,
+    TSDB_DATA_TYPE_FLOAT: _parse_float,
+    TSDB_DATA_TYPE_DOUBLE: _parse_double,
+    TSDB_DATA_TYPE_VARCHAR: _parse_string,
+    TSDB_DATA_TYPE_TIMESTAMP: _parse_timestamp,
+    TSDB_DATA_TYPE_NCHAR: _parse_string,
+    TSDB_DATA_TYPE_UTINYINT: _parse_uint8_t,
+    TSDB_DATA_TYPE_USMALLINT: _parse_uint16_t,
+    TSDB_DATA_TYPE_UINT: _parse_uint,
+    TSDB_DATA_TYPE_UBIGINT: _parse_uint64_t,
+    TSDB_DATA_TYPE_JSON: _parse_string,
+    TSDB_DATA_TYPE_VARBINARY: _parse_bytes,
+    TSDB_DATA_TYPE_DECIMAL: None,
+    TSDB_DATA_TYPE_BLOB: None,
+    TSDB_DATA_TYPE_MEDIUMBLOB: None,
+    TSDB_DATA_TYPE_BINARY: _parse_string,
+    TSDB_DATA_TYPE_GEOMETRY: None,
+}
+
+
+cdef taos_fetch_block_v3(TAOS_RES *res, TAOS_FIELD *fields, int field_count, dt_epoch):
+    cdef TAOS_ROW pblock
+    num_of_rows = taos_fetch_block(res, &pblock)
+    if num_of_rows == 0:
+        return [], 0
+
+    precision = taos_result_precision(res)
+    blocks = [None] * field_count
+
+    cdef int i
+    for i in range(field_count):
+        data = pblock[i]
+        field = fields[i]
+
+        if field.type in SIZED_TYPE:
+            is_null = taos_get_column_data_is_null(res, i, num_of_rows)
+            blocks[i] = CONVERT_FUNC[field.type](<size_t> data, num_of_rows, <size_t> is_null)
+            free(is_null)
+        elif field.type in UNSIZED_TYPE:
+            offsets = taos_get_column_data_offset(res, i)
+            blocks[i] = CONVERT_FUNC[field.type](<size_t> data, num_of_rows, <size_t> offsets)
+        else:
+            pass
+
+        if field.type == TSDB_DATA_TYPE_TIMESTAMP and precision in (PrecisionEnum.Milliseconds, PrecisionEnum.Microseconds):
+            blocks[i] = _convert_timestamp_to_datetime(blocks[i], precision, dt_epoch)
+
+    return blocks, abs(num_of_rows)
+
+cpdef fetch_all(size_t ptr, dt_epoch):
+    res = <TAOS_RES *> ptr
+    fields = taos_fetch_fields(res)
+    field_count = taos_field_count(res)
+
+    chunks = []
+    cdef int row_count = 0
+    while True:
+        block, num_of_rows = taos_fetch_block_v3(res, fields, field_count, dt_epoch)
+        errno = taos_errno(res)
+
+        if errno != 0:
+            raise ProgrammingError(taos_errstr(res).decode("utf-8"), errno)
+
+        if num_of_rows == 0:
+            break
+
+        row_count += num_of_rows
+        chunks.append(block)
+
+    return [row for chunk in chunks for row in map(tuple, zip(*chunk))]
+
+
+# ---------------------------------------- Taos Object --------------------------------------------------------------- v
+
+
+# ---------------------------------------- Taos Object --------------------------------------------------------------- ^
diff --git a/taos/_constants.pxd b/taos/_constants.pxd
new file mode 100644
index 00000000..17ba6876
--- /dev/null
+++ b/taos/_constants.pxd
@@ -0,0 +1,8 @@
+cdef class TaosOption:
+    """taos option"""
+
+cdef class SmlPrecision:
+    """Schemaless timestamp precision constants"""
+
+cdef class SmlProtocol:
+    """Schemaless protocol constants"""
diff --git a/taos/_constants.pyx b/taos/_constants.pyx
new file mode 100644
index 00000000..56435f6a
--- /dev/null
+++
b/taos/_constants.pyx @@ -0,0 +1,82 @@ +from taos._cinterface cimport TSDB_OPTION, TSDB_SML_PROTOCOL_TYPE, TSDB_SML_TIMESTAMP_TYPE, tmq_conf_res_t, tmq_res_t + +cdef class TaosOption: + """taos option""" + Locale = TSDB_OPTION.TSDB_OPTION_LOCALE + Charset = TSDB_OPTION.TSDB_OPTION_CHARSET + Timezone = TSDB_OPTION.TSDB_OPTION_TIMEZONE + ConfigDir = TSDB_OPTION.TSDB_OPTION_CONFIGDIR + ShellActivityTimer = TSDB_OPTION.TSDB_OPTION_SHELL_ACTIVITY_TIMER + UseAdapter = TSDB_OPTION.TSDB_OPTION_USE_ADAPTER + MaxOptions = TSDB_OPTION.TSDB_MAX_OPTIONS + +cdef class SmlPrecision: + """Schemaless timestamp precision constants""" + NOT_CONFIGURED = TSDB_SML_TIMESTAMP_TYPE.TSDB_SML_TIMESTAMP_NOT_CONFIGURED + HOURS = TSDB_SML_TIMESTAMP_TYPE.TSDB_SML_TIMESTAMP_HOURS + MINUTES = TSDB_SML_TIMESTAMP_TYPE.TSDB_SML_TIMESTAMP_MINUTES + SECONDS = TSDB_SML_TIMESTAMP_TYPE.TSDB_SML_TIMESTAMP_SECONDS + MILLI_SECONDS = TSDB_SML_TIMESTAMP_TYPE.TSDB_SML_TIMESTAMP_MILLI_SECONDS + MICRO_SECONDS = TSDB_SML_TIMESTAMP_TYPE.TSDB_SML_TIMESTAMP_MICRO_SECONDS + NANO_SECONDS = TSDB_SML_TIMESTAMP_TYPE.TSDB_SML_TIMESTAMP_NANO_SECONDS + +cdef class SmlProtocol: + """Schemaless protocol constants""" + UNKNOWN_PROTOCOL = TSDB_SML_PROTOCOL_TYPE.TSDB_SML_UNKNOWN_PROTOCOL + LINE_PROTOCOL = TSDB_SML_PROTOCOL_TYPE.TSDB_SML_LINE_PROTOCOL + TELNET_PROTOCOL = TSDB_SML_PROTOCOL_TYPE.TSDB_SML_TELNET_PROTOCOL + JSON_PROTOCOL = TSDB_SML_PROTOCOL_TYPE.TSDB_SML_JSON_PROTOCOL + +cdef class TmqResultType: + INVALID = tmq_res_t.TMQ_RES_INVALID + DATA = tmq_res_t.TMQ_RES_DATA + TABLE_META = tmq_res_t.TMQ_RES_TABLE_META + METADATA = tmq_res_t.TMQ_RES_METADATA + +class PrecisionEnum: + """Precision enums""" + Milliseconds = 0 + Microseconds = 1 + Nanoseconds = 2 + +class FieldType: + """TDengine Field Types""" + + # type_code + C_NULL = 0 + C_BOOL = 1 + C_TINYINT = 2 + C_SMALLINT = 3 + C_INT = 4 + C_BIGINT = 5 + C_FLOAT = 6 + C_DOUBLE = 7 + C_VARCHAR = 8 + C_BINARY = 8 + C_TIMESTAMP = 9 + C_NCHAR = 10 + C_TINYINT_UNSIGNED = 11 + C_SMALLINT_UNSIGNED = 12 + C_INT_UNSIGNED = 13 + C_BIGINT_UNSIGNED = 14 + C_JSON = 15 + C_VARBINARY = 16 + # NULL value definition + # NOTE: These values should change according to C definition in tsdb.h + C_BOOL_NULL = 0x02 + C_TINYINT_NULL = -128 + C_TINYINT_UNSIGNED_NULL = 255 + C_SMALLINT_NULL = -32768 + C_SMALLINT_UNSIGNED_NULL = 65535 + C_INT_NULL = -2147483648 + C_INT_UNSIGNED_NULL = 4294967295 + C_BIGINT_NULL = -9223372036854775808 + C_BIGINT_UNSIGNED_NULL = 18446744073709551615 + C_FLOAT_NULL = float('nan') + C_DOUBLE_NULL = float('nan') + C_BINARY_NULL = bytearray([int("0xff", 16)]) + # Timestamp precision definition + C_TIMESTAMP_MILLI = 0 + C_TIMESTAMP_MICRO = 1 + C_TIMESTAMP_NANO = 2 + C_TIMESTAMP_UNKNOWN = 3 \ No newline at end of file diff --git a/taos/_objects.pyx b/taos/_objects.pyx new file mode 100644 index 00000000..0611593c --- /dev/null +++ b/taos/_objects.pyx @@ -0,0 +1,2177 @@ +# cython: profile=True + +from libc.stdlib cimport malloc, free +from libc.string cimport memset, strcpy, memcpy +import asyncio +import datetime as dt +import pytz +from typing import Optional, List, Tuple, Dict, Iterator, AsyncIterator, Callable, Union +from taos._cinterface cimport * +from taos._cinterface import SIZED_TYPE, UNSIZED_TYPE, CONVERT_FUNC +from taos._parser cimport _convert_timestamp_to_datetime +from taos._constants import TaosOption, SmlPrecision, SmlProtocol, TmqResultType, PrecisionEnum +from taos.error import ProgrammingError, OperationalError, ConnectionError, DatabaseError, StatementError, InternalError, 
TmqError, SchemalessError +from taos.constants import FieldType + +DB_MAX_LEN = 64 +DEFAULT_TZ = None +DEFAULT_DT_EPOCH = dt.datetime.fromtimestamp(0, tz=DEFAULT_TZ) + + +cdef _check_malloc(void *ptr): + if ptr is NULL: + raise MemoryError() + + +# ---------------------------------------------- TAOS -------------------------------------------------------------- v +cdef void async_result_future_wrapper(void *param, TAOS_RES *res, int code) with gil: + fut = param + if code != 0: + errstr = taos_errstr(res).decode("utf-8") + e = ProgrammingError(errstr, code) + fut.get_loop().call_soon_threadsafe(fut.set_exception, e) + else: + taos_result = TaosResult(res) + fut.get_loop().call_soon_threadsafe(fut.set_result, taos_result) + + +cdef void async_rows_future_wrapper(void *param, TAOS_RES *res, int num_of_rows) with gil: + cdef int i = 0 + taos_result, fut = param + rows = [] + if num_of_rows > 0: + while True: + row = taos_result._fetch_row() + rows.append(row) + i += 1 + if i >= num_of_rows: + break + + fut.get_loop().call_soon_threadsafe(fut.set_result, rows) + + +cdef void async_block_future_wrapper(void *param, TAOS_RES *res, int num_of_rows) with gil: + cdef int i = 0 + taos_result, fut = param + if num_of_rows > 0: + block, n = taos_result._fetch_block() + else: + block, n = [], 0 + + fut.get_loop().call_soon_threadsafe(fut.set_result, (block, n)) + + +cdef class TaosConnection: + cdef char *_host + cdef char *_user + cdef char *_password + cdef char *_database + cdef uint16_t _port + # cdef char *_raw_tz # can not name as _timezone + cdef char *_config + cdef TAOS *_raw_conn + cdef char *_current_db + cdef object _tz + cdef object _dt_epoch + + def __cinit__(self, *args, **kwargs): + self._current_db = malloc(DB_MAX_LEN * sizeof(char)) + self._tz = DEFAULT_TZ + self._dt_epoch = DEFAULT_DT_EPOCH + _check_malloc(self._current_db) + self._init_options(**kwargs) + self._init_conn(**kwargs) + self._check_conn_error() + + def _init_options(self, **kwargs): + if "timezone" in kwargs: + _timezone = kwargs["timezone"].encode("utf-8") + taos_options(TaosOption.Timezone, _timezone) + self.tz = kwargs["timezone"] + + if "config" in kwargs: + _config = kwargs["config"].encode("utf-8") + self._config = _config + taos_options(TaosOption.ConfigDir, self._config) + + def _init_conn(self, **kwargs): + if "host" in kwargs: + _host = kwargs["host"].encode("utf-8") + self._host = _host + + if "user" in kwargs: + _user = kwargs["user"].encode("utf-8") + self._user = _user + + if "password" in kwargs: + _password = kwargs["password"].encode("utf-8") + self._password = _password + + if "database" in kwargs: + _database = kwargs["database"].encode("utf-8") + self._database = _database + + if "port" in kwargs: + self._port = int(kwargs.get("port", 0)) + + self._raw_conn = taos_connect(self._host, self._user, self._password, self._database, self._port) + + def _check_conn_error(self): + errno = taos_errno(self._raw_conn) + if errno != 0: + errstr = taos_errstr(self._raw_conn).decode("utf-8") + raise ConnectionError(errstr, errno) + + def __dealloc__(self): + if self._current_db is not NULL: + free(self._current_db) + + self.close() + + @property + def client_info(self) -> str: + return taos_get_client_info().decode("utf-8") + + @property + def server_info(self) -> Optional[str]: + if self._raw_conn is NULL: + return + + return taos_get_server_info(self._raw_conn).decode("utf-8") + + @property + def tz(self) -> Optional[dt.tzinfo]: + return self._tz + + @tz.setter + def tz(self, timezone: Optional[Union[str, 
dt.tzinfo]]):
+        if isinstance(timezone, str):
+            timezone = pytz.timezone(timezone)
+
+        assert timezone is None or isinstance(timezone, dt.tzinfo)
+
+        self._tz = timezone
+        self._dt_epoch = dt.datetime.fromtimestamp(0, tz=self._tz)
+
+    @property
+    def current_db(self) -> Optional[str]:
+        if self._raw_conn is NULL:
+            return
+
+        memset(self._current_db, 0, DB_MAX_LEN * sizeof(char))
+        cdef int required
+        errno = taos_get_current_db(self._raw_conn, self._current_db, DB_MAX_LEN * sizeof(char), &required)
+        if errno != 0:
+            errstr = taos_errstr(NULL).decode("utf-8")
+            raise ProgrammingError(errstr, errno)
+
+        return self._current_db.decode("utf-8")
+
+    def validate_sql(self, str sql) -> bool:
+        _sql = sql.encode("utf-8")
+        errno = taos_validate_sql(self._raw_conn, _sql)
+        return errno == 0
+
+    def select_db(self, str database):
+        _database = database.encode("utf-8")
+        errno = taos_select_db(self._raw_conn, _database)
+        if errno != 0:
+            raise DatabaseError("select database error", errno)
+
+    def execute(self, str sql, req_id: Optional[int] = None) -> int:
+        return self.query(sql, req_id).affected_rows
+
+    def query(self, str sql, req_id: Optional[int] = None) -> TaosResult:
+        _sql = sql.encode("utf-8")
+        if req_id is None:
+            res = taos_query(self._raw_conn, _sql)
+        else:
+            res = taos_query_with_reqid(self._raw_conn, _sql, req_id)
+
+        errno = taos_errno(res)
+        if errno != 0:
+            errstr = taos_errstr(res).decode("utf-8")
+            taos_free_result(res)
+            raise ProgrammingError(errstr, errno)
+
+        taos_res = TaosResult(<size_t>res)
+        taos_res._set_dt_epoch(self._dt_epoch)
+        return taos_res
+
+    async def query_a(self, str sql, req_id: Optional[int] = None) -> TaosResult:
+        loop = asyncio.get_event_loop()
+        _sql = sql.encode("utf-8")
+        fut = loop.create_future()
+        if req_id is None:
+            taos_query_a(self._raw_conn, _sql, async_result_future_wrapper, <void*>fut)
+        else:
+            taos_query_a_with_reqid(self._raw_conn, _sql, async_result_future_wrapper, <void*>fut, req_id)
+
+        taos_res = await fut
+        taos_res._set_dt_epoch(self._dt_epoch)
+        taos_res._mark_async()
+        return taos_res
+
+    def statement(self, sql: Optional[str]=None) -> Optional[TaosStmt]:
+        if self._raw_conn is NULL:
+            return None
+
+        stmt = taos_stmt_init(self._raw_conn)
+        if stmt is NULL:
+            raise StatementError("init taos statement failed!")
+
+        if sql:
+            _sql = sql.encode("utf-8")
+            errno = taos_stmt_prepare(stmt, _sql, len(_sql))
+            if errno != 0:
+                stmt_errstr = taos_stmt_errstr(stmt).decode("utf-8")
+                raise StatementError(stmt_errstr, errno)
+
+        return TaosStmt(<size_t>stmt)
+
+    def load_table_info(self, tables: List[str]):
+        _tables = ",".join(tables).encode("utf-8")
+        errno = taos_load_table_info(self._raw_conn, _tables)
+        if errno != 0:
+            errstr = taos_errstr(NULL).decode("utf-8")
+            raise OperationalError(errstr, errno)
+
+    def close(self):
+        if self._raw_conn is not NULL:
+            taos_close(self._raw_conn)
+            self._raw_conn = NULL
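
The async callbacks earlier in this file run on a libtaos worker thread, so they hand results back with `call_soon_threadsafe`; `query_a` simply awaits the future they resolve. A usage sketch (not part of the change itself), assuming a reachable server on localhost and the `test.weather` table from the earlier examples:

```python
import asyncio
from taos._objects import TaosConnection

async def main():
    conn = TaosConnection(host="localhost")
    result = await conn.query_a("SELECT * FROM test.weather")
    rows = await result.fetch_all_a()  # async results must use the *_a fetchers
    print(rows)
    conn.close()

asyncio.run(main())
```
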
+
+    def schemaless_insert(
+        self,
+        lines: List[str],
+        protocol: int,
+        precision: int,
+        req_id: Optional[int] = None,
+        ttl: Optional[int] = None,
+    ) -> int:
+        """
+        1. Line protocol and schemaless support
+
+        ## Example
+
+        ```python
+        import taos
+        conn = taos.connect()
+        conn.execute("drop database if exists test")
+        conn.execute("create database if not exists test")
+        conn.select_db("test")
+        lines = [
+            'ste,t2=5,t3=L"ste" c1=true,c2=4,c3="string" 1626056811855516532',
+        ]
+        conn.schemaless_insert(lines, taos.SmlProtocol.LINE_PROTOCOL, taos.SmlPrecision.NANO_SECONDS)
+        ```
+
+        2. OpenTSDB telnet style API format support
+
+        ## Example
+
+        ```python
+        import taos
+        conn = taos.connect()
+        conn.execute("drop database if exists test")
+        conn.execute("create database if not exists test")
+        conn.select_db("test")
+        lines = [
+            'cpu_load 1626056811855516532ns 2.0f32 id="tb1",host="host0",interface="eth0"',
+        ]
+        conn.schemaless_insert(lines, taos.SmlProtocol.TELNET_PROTOCOL, taos.SmlPrecision.NOT_CONFIGURED)
+        ```
+
+        3. OpenTSDB HTTP JSON format support
+
+        ## Example
+
+        ```python
+        import taos
+        conn = taos.connect()
+        conn.execute("drop database if exists test")
+        conn.execute("create database if not exists test")
+        conn.select_db("test")
+        payload = ['''
+        {
+            "metric": "cpu_load_0",
+            "timestamp": 1626006833610123,
+            "value": 55.5,
+            "tags":
+                {
+                    "host": "ubuntu",
+                    "interface": "eth0",
+                    "Id": "tb0"
+                }
+        }
+        ''']
+        conn.schemaless_insert(payload, taos.SmlProtocol.JSON_PROTOCOL, taos.SmlPrecision.NOT_CONFIGURED)
+        ```
+        """
+        num_of_lines = len(lines)
+        _lines = <char**>malloc(num_of_lines * sizeof(char*))
+        if _lines is NULL:
+            raise MemoryError()
+
+        try:
+            for i in range(num_of_lines):
+                _line = lines[i].encode("utf-8")
+                _lines[i] = _line
+
+            if ttl is None:
+                if req_id is None:
+                    res = taos_schemaless_insert(
+                        self._raw_conn,
+                        _lines,
+                        num_of_lines,
+                        protocol,
+                        precision,
+                    )
+                else:
+                    res = taos_schemaless_insert_with_reqid(
+                        self._raw_conn,
+                        _lines,
+                        num_of_lines,
+                        protocol,
+                        precision,
+                        req_id,
+                    )
+            else:
+                if req_id is None:
+                    res = taos_schemaless_insert_ttl(
+                        self._raw_conn,
+                        _lines,
+                        num_of_lines,
+                        protocol,
+                        precision,
+                        ttl,
+                    )
+                else:
+                    res = taos_schemaless_insert_ttl_with_reqid(
+                        self._raw_conn,
+                        _lines,
+                        num_of_lines,
+                        protocol,
+                        precision,
+                        ttl,
+                        req_id,
+                    )
+        finally:
+            free(_lines)
+
+        errno = taos_errno(res)
+        affected_rows = taos_affected_rows(res)
+        if errno != 0:
+            errstr = taos_errstr(res).decode("utf-8")
+            taos_free_result(res)
+            raise SchemalessError(errstr, errno, affected_rows)
+
+        return affected_rows
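
The optional arguments route to the matching `_ttl` / `_with_reqid` C entry points, as the branches above show. A sketch combining them on the list-based API (an illustration, not part of the change; `ttl` sets the created subtables' time-to-live and `req_id` tags the request for tracing):

```python
import taos

conn = taos.connect()
conn.execute("create database if not exists test")
conn.select_db("test")
lines = [
    'ste,t2=5,t3=L"ste" c1=true,c2=4,c3="string" 1626056811855516532',
    'ste,t2=6,t3=L"ste" c1=false,c2=5,c3="string" 1626056811855516533',
]
affected = conn.schemaless_insert(
    lines,
    taos.SmlProtocol.LINE_PROTOCOL,
    taos.SmlPrecision.NANO_SECONDS,
    req_id=1,
    ttl=1000,
)
print(affected)  # 2
```
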
+
+    def schemaless_insert_raw(
+        self,
+        lines: str,
+        protocol: int,
+        precision: int,
+        req_id: Optional[int] = None,
+        ttl: Optional[int] = None,
+    ) -> int:
+        """
+        1. Line protocol and schemaless support
+
+        ## Example
+
+        ```python
+        import taos
+        conn = taos.connect()
+        conn.execute("drop database if exists test")
+        conn.execute("create database if not exists test")
+        conn.select_db("test")
+        lines = 'ste,t2=5,t3=L"ste" c1=true,c2=4,c3="string" 1626056811855516532'
+        conn.schemaless_insert_raw(lines, taos.SmlProtocol.LINE_PROTOCOL, taos.SmlPrecision.NANO_SECONDS)
+        ```
+
+        2. OpenTSDB telnet style API format support
+
+        ## Example
+
+        ```python
+        import taos
+        conn = taos.connect()
+        conn.execute("drop database if exists test")
+        conn.execute("create database if not exists test")
+        conn.select_db("test")
+        lines = 'cpu_load 1626056811855516532ns 2.0f32 id="tb1",host="host0",interface="eth0"'
+        conn.schemaless_insert_raw(lines, taos.SmlProtocol.TELNET_PROTOCOL, taos.SmlPrecision.NOT_CONFIGURED)
+        ```
+
+        3. OpenTSDB HTTP JSON format support
+
+        ## Example
+
+        ```python
+        import taos
+        conn = taos.connect()
+        conn.execute("drop database if exists test")
+        conn.execute("create database if not exists test")
+        conn.select_db("test")
+        payload = '''
+        {
+            "metric": "cpu_load_0",
+            "timestamp": 1626006833610123,
+            "value": 55.5,
+            "tags":
+                {
+                    "host": "ubuntu",
+                    "interface": "eth0",
+                    "Id": "tb0"
+                }
+        }
+        '''
+        conn.schemaless_insert_raw(payload, taos.SmlProtocol.JSON_PROTOCOL, taos.SmlPrecision.NOT_CONFIGURED)
+        ```
+        """
+        cdef int32_t total_rows
+        _lines = lines.encode("utf-8")
+        _length = len(_lines)
+
+        if ttl is None:
+            if req_id is None:
+                res = taos_schemaless_insert_raw(
+                    self._raw_conn,
+                    _lines,
+                    _length,
+                    &total_rows,
+                    protocol,
+                    precision,
+                )
+            else:
+                res = taos_schemaless_insert_raw_with_reqid(
+                    self._raw_conn,
+                    _lines,
+                    _length,
+                    &total_rows,
+                    protocol,
+                    precision,
+                    req_id,
+                )
+        else:
+            if req_id is None:
+                res = taos_schemaless_insert_raw_ttl(
+                    self._raw_conn,
+                    _lines,
+                    _length,
+                    &total_rows,
+                    protocol,
+                    precision,
+                    ttl,
+                )
+            else:
+                res = taos_schemaless_insert_raw_ttl_with_reqid(
+                    self._raw_conn,
+                    _lines,
+                    _length,
+                    &total_rows,
+                    protocol,
+                    precision,
+                    ttl,
+                    req_id,
+                )
+
+        errno = taos_errno(res)
+        affected_rows = taos_affected_rows(res)
+        if errno != 0:
+            errstr = taos_errstr(res).decode("utf-8")
+            taos_free_result(res)
+            raise SchemalessError(errstr, errno, affected_rows)
+
+        return affected_rows
+
+    def commit(self):
+        """
+        Commit any pending transaction to the database.
+        Since TDengine does not support transactions, this is a no-op.
+        """
+        pass
+
+    def rollback(self):
+        """No-op; TDengine does not support transactions."""
+        pass
+
+    def cursor(self) -> TaosCursor:
+        return TaosCursor(self)
+
+    def clear_result_set(self):
+        """Clear unused result set on this connection."""
+        pass
+
+    def get_table_vgroup_id(self, str db, str table) -> int:
+        """
+        Get the table's vgroup id. Requires a database name and a table name; returns the vgroup id as an int.
+        """
+        cdef int vg_id
+        _db = db.encode("utf-8")
+        _table = table.encode("utf-8")
+        errno = taos_get_table_vgId(self._raw_conn, _db, _table, &vg_id)
+        if errno != 0:
+            errstr = taos_errstr(NULL).decode("utf-8")
+            raise InternalError(errstr, errno)
+        return vg_id
+
+
+cdef class TaosField:
+    cdef str _name
+    cdef int8_t _type
+    cdef int32_t _bytes
+
+    def __cinit__(self, str name, int8_t type_, int32_t bytes):
+        self._name = name
+        self._type = type_
+        self._bytes = bytes
+
+    @property
+    def name(self):
+        return self._name
+
+    @property
+    def type(self):
+        return self._type
+
+    @property
+    def bytes(self):
+        return self._bytes
+
+    @property
+    def length(self):
+        return self._bytes
+
+    def __str__(self):
+        return "TaosField{name: %s, type: %d, bytes: %d}" % (self.name, self.type, self.bytes)
+
+    def __repr__(self):
+        return "TaosField{name: %s, type: %d, bytes: %d}" % (self.name, self.type, self.bytes)
+
+    def __getitem__(self, item):
+        return getattr(self, item)
+
+
+cdef class TaosResult:
+    """TDengine result interface"""
+    cdef TAOS_RES *_res
+    cdef TAOS_FIELD *_fields
+    cdef list _taos_fields
+    cdef int _field_count
+    cdef int _precision
+    cdef int _row_count
+    cdef int _affected_rows
+    cdef object _dt_epoch
+    cdef bool _is_async
+
+    def __cinit__(self, size_t res):
+        self._res = <TAOS_RES*>res
+        self._check_result_error()
+        self._field_count = taos_field_count(self._res)
+        self._fields = taos_fetch_fields(self._res)
+        self._taos_fields = [TaosField(f.name.decode("utf-8"), f.type, f.bytes) for f in self._fields[:self._field_count]]
+        self._precision = taos_result_precision(self._res)
+        self._affected_rows = taos_affected_rows(self._res)
+        self._row_count = self._affected_rows if self._field_count == 0 else 0
+        self._dt_epoch = DEFAULT_DT_EPOCH
+        self._is_async = False
+
+    def __str__(self):
+        return "TaosResult(field_count=%d, precision=%d, affected_rows=%d, row_count=%d)" % (self._field_count, self._precision, self._affected_rows, self._row_count)
+
+    def __repr__(self):
+        return "TaosResult(field_count=%d, precision=%d, affected_rows=%d, row_count=%d)" % (self._field_count, self._precision, self._affected_rows, self._row_count)
+
+    def __iter__(self):
+        return self.rows_iter()
+
+    def __aiter__(self):
+        return self.rows_iter_a()
+
+    @property
+    def fields(self) -> List[TaosField]:
+        """fields definitions of the current result"""
+        return self._taos_fields
+
+    @property
+    def field_count(self):
+        """the fields count of the current result"""
+        return self._field_count
+
+    @property
+    def precision(self):
+        """the precision of the current result"""
+        return self._precision
+
+    @property
+    def affected_rows(self):
+        """the affected_rows of the current result"""
+        return self._affected_rows
+
+    @property
+    def row_count(self):
+        """the row_count of the object"""
+        return 
self._row_count + + def _set_dt_epoch(self, dt_epoch): + self._dt_epoch = dt_epoch + + def _mark_async(self): + self._is_async = True + + def _check_result_error(self): + errno = taos_errno(self._res) + if errno != 0: + errstr = taos_errstr(self._res).decode("utf-8") + raise ProgrammingError(errstr, errno) + + def _fetch_block(self) -> Tuple[List, int]: + cdef TAOS_ROW pblock + num_of_rows = taos_fetch_block(self._res, &pblock) + if num_of_rows == 0: + return [], 0 + + block = [None] * self._field_count + cdef int i + for i in range(self._field_count): + data = pblock[i] + field = self._fields[i] + + if field.type in SIZED_TYPE: + is_null = taos_get_column_data_is_null(self._res, i, num_of_rows) + block[i] = CONVERT_FUNC[field.type](data, num_of_rows, is_null) + free(is_null) + elif field.type in UNSIZED_TYPE: + offsets = taos_get_column_data_offset(self._res, i) + block[i] = CONVERT_FUNC[field.type](data, num_of_rows, offsets) + else: + pass + + if field.type == TSDB_DATA_TYPE_TIMESTAMP and self._precision in (PrecisionEnum.Milliseconds, PrecisionEnum.Microseconds): + block[i] = _convert_timestamp_to_datetime(block[i], self._precision, self._dt_epoch) + + self._row_count += num_of_rows + return block, abs(num_of_rows) + + def _fetch_row(self) -> Optional[Tuple]: + cdef TAOS_ROW taos_row + cdef int i + cdef int[1] offsets = [-2] + cdef bool[1] is_null = [False] + + taos_row = taos_fetch_row(self._res) + if taos_row is NULL: + return None + + row = [None] * self._field_count + for i in range(self._field_count): + data = taos_row[i] + field = self._fields[i] + if field.type in SIZED_TYPE: + row[i] = CONVERT_FUNC[field.type](data, 1, is_null)[0] if data is not NULL else None + elif field.type in UNSIZED_TYPE: + row[i] = CONVERT_FUNC[field.type](data, 1, offsets)[0] if data is not NULL else None # FIXME: is it ok to set offsets = [-2] here + else: + pass + + if field.type == TSDB_DATA_TYPE_TIMESTAMP and self._precision in (PrecisionEnum.Milliseconds, PrecisionEnum.Microseconds): + row[i] = _convert_timestamp_to_datetime([row[i]], self._precision, self._dt_epoch)[0] + + self._row_count += 1 + return tuple(row) # TODO: list -> tuple, too much object is create here + + async def _fetch_block_a(self) -> Tuple[List, int]: + loop = asyncio.get_event_loop() + fut = loop.create_future() + param = (self, fut) + taos_fetch_rows_a(self._res, async_block_future_wrapper, param) + # taos_fetch_raw_block_a(self._res, async_block_future_wrapper, param) # FIXME: have some problem when parsing nchar + + block, num_of_rows = await fut + return block, num_of_rows + + async def _fetch_rows_a(self) -> List[Tuple]: + loop = asyncio.get_event_loop() + fut = loop.create_future() + param = (self, fut) + taos_fetch_rows_a(self._res, async_rows_future_wrapper, param) + rows = await fut + return rows + + def fetch_block(self) -> Tuple[List, int]: + if self._res is NULL: + raise OperationalError("Invalid use of fetch_block") + + if self._is_async: + raise OperationalError("Invalid use of fetch_block, async result can not use sync func") + + block, num_of_rows = self._fetch_block() + self._check_result_error() + + return [r for r in map(tuple, zip(*block))], num_of_rows + + async def fetch_block_a(self) -> Tuple[List, int]: + if self._res is NULL: + raise OperationalError("Invalid use of fetch_block_a") + + if not self._is_async: + raise OperationalError("Invalid use of fetch_block_a, sync result can not use async func") + + block, num_of_rows = await self._fetch_block_a() + self._check_result_error() + + return [r for r in 
map(tuple, zip(*block))], num_of_rows + + def fetch_all(self) -> List[Tuple]: + """ + fetch all data from taos result into python list + using `taos_fetch_block` + """ + if self._res is NULL: + raise OperationalError("Invalid use of fetchall") + + if self._is_async: + raise OperationalError("Invalid use of fetch_all, async result can not use sync func") + + blocks = [] + while True: + block, num_of_rows = self._fetch_block() + + if num_of_rows == 0: + break + + blocks.append(block) + self._check_result_error() + + return [r for b in blocks for r in map(tuple, zip(*b))] + + async def fetch_all_a(self) -> List[Tuple]: + """ + async fetch all data from taos result into python list + using `taos_fetch_block` + """ + if self._res is NULL: + raise OperationalError("Invalid use of fetch_all_a") + + if not self._is_async: + raise OperationalError("Invalid use of fetch_all_a, sync result can not use async func") + + blocks = [] + while True: + block, num_of_rows = await self._fetch_block_a() + + if num_of_rows == 0: + break + + blocks.append(block) + self._check_result_error() + + return [r for b in blocks for r in map(tuple, zip(*b))] + + def fetch_all_into_dict(self) -> List[Dict]: + """ + fetch all data from taos result into python dict + using `taos_fetch_block` + """ + if self._res is NULL: + raise OperationalError("Invalid use of fetch_all_into_dict") + + if self._is_async: + raise OperationalError("Invalid use of fetch_all_into_dict, async result can not use sync func") + + field_names = [field.name for field in self.fields] + blocks = [] + while True: + block, num_of_rows = self._fetch_block() + + if num_of_rows == 0: + break + + blocks.append(block) + self._check_result_error() + return [dict((f, v) for f, v in zip(field_names, r)) for b in blocks for r in map(tuple, zip(*b))] + + async def fetch_all_into_dict_a(self) -> List[Dict]: + """ + async fetch all data from taos result into python dict + using `taos_fetch_block` + """ + if self._res is NULL: + raise OperationalError("Invalid use of fetch_all_into_dict_a") + + if not self._is_async: + raise OperationalError("Invalid use of fetch_all_into_dict_a, sync result can not use async func") + + field_names = [field.name for field in self.fields] + blocks = [] + while True: + block, num_of_rows = await self._fetch_block_a() + + if num_of_rows == 0: + break + + blocks.append(block) + self._check_result_error() + + return [dict((f, v) for f, v in zip(field_names, r)) for b in blocks for r in map(tuple, zip(*b))] + + def rows_iter(self) -> Iterator[Tuple]: + """ + Iterate row from taos result into python list + """ + if self._res is NULL: + raise OperationalError("Invalid use of rows_iter") + + if self._is_async: + raise OperationalError("Invalid use of rows_iter, async result can not use sync func") + + while True: + row = self._fetch_row() + + if row is None: + break + + yield row + self._check_result_error() + + def blocks_iter(self) -> Iterator[Tuple[List[Tuple], int]]: + if self._res is NULL: + raise OperationalError("Invalid use of rows_iter") + + if self._is_async: + raise OperationalError("Invalid use of rows_iter, async result can not use sync func") + + while True: + block, num_of_rows = self._fetch_block() + + if num_of_rows == 0: + break + + yield [r for r in map(tuple, zip(*block))], num_of_rows + self._check_result_error() + + async def rows_iter_a(self) -> AsyncIterator[Tuple]: + if self._res is NULL: + raise OperationalError("Invalid use of rows_iter_a") + + if not self._is_async: + raise OperationalError("Invalid use of 
rows_iter_a, sync result can not use async func")
+
+        while True:
+            rows = await self._fetch_rows_a()
+
+            if not rows:
+                break
+
+            for row in rows:
+                yield row
+        self._check_result_error()
+
+    async def blocks_iter_a(self) -> AsyncIterator[Tuple[List[Tuple], int]]:
+        if self._res is NULL:
+            raise OperationalError("Invalid use of blocks_iter_a")
+
+        if not self._is_async:
+            raise OperationalError("Invalid use of blocks_iter_a, sync result can not use async func")
+
+        while True:
+            block, num_of_rows = await self._fetch_block_a()
+
+            if num_of_rows == 0:
+                break
+
+            yield [r for r in map(tuple, zip(*block))], num_of_rows
+        self._check_result_error()
+
+    def stop_query(self):
+        if self._res is not NULL:
+            taos_stop_query(self._res)
+
+    def close(self):
+        if self._res is not NULL:
+            taos_free_result(self._res)
+
+        self._res = NULL
+        self._field_count = 0
+        self._fields = NULL
+        self._taos_fields = []
+
+    def __dealloc__(self):
+        self.close()
+
+
+cdef class TaosCursor:
+    """Database cursor which is used to manage the context of a fetch operation.
+
+    Attributes:
+    .description: Read-only attribute consists of 7-item sequences:
+
+        > name (mandatory)
+        > type_code (mandatory)
+        > display_size
+        > internal_size
+        > precision
+        > scale
+        > null_ok
+
+        This attribute will be None for operations that do not return rows or
+        if the cursor has not had an operation invoked via the .execute*() method yet.
+
+    .rowcount: This read-only attribute specifies the number of rows that the last
+        .execute*() produced (for DQL statements like SELECT) or affected (for DML statements like INSERT).
+    """
+    cdef list _description
+    cdef TaosConnection _connection
+    cdef TaosResult _result
+
+    def __init__(self, connection: Optional[TaosConnection]=None):
+        self._description = []
+        self._connection = connection
+        self._result = None
+
+    def __iter__(self) -> Iterator[Tuple]:
+        for block, _ in self._result.blocks_iter():
+            for row in block:
+                yield row
+
+    @property
+    def description(self) -> List[Tuple]:
+        return self._description
+
+    @property
+    def rowcount(self) -> int:
+        """
+        For INSERT statements, rowcount is assigned immediately after the statement is executed.
+        For SELECT statements, rowcount does not hold the correct value until all data has been fetched.
+        """
+        return self._result.row_count
+
+    @property
+    def affected_rows(self) -> int:
+        """Return the rowcount of insertion"""
+        return self._result.affected_rows
+
+    def callproc(self, procname, *args):
+        """
+        Call a stored database procedure with the given name.
+        Void functionality since no stored procedures.
+        """
+        pass
+
+    def close(self):
+        """Close the cursor."""
+        if self._connection is None:
+            return False
+
+        self._reset_result()
+        self._connection = None
+
+        return True
+
+    def execute(self, operation, params=None, req_id: Optional[int] = None):
+        """Prepare and execute a database operation (query or command)."""
+        if not operation:
+            return
+
+        if not self._connection:
+            raise ProgrammingError("Cursor is not connected")
+
+        self._reset_result()
+        sql = operation
+        self._result = self._connection.query(sql, req_id)
+        self._description = [(f.name, f.type, None, None, None, None, False) for f in self._result.fields]
+
+        if self._result.field_count == 0:
+            return self.affected_rows
+        else:
+            return self._result
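
A DB-API-style walkthrough of the cursor above, as a sketch only (assumes a local server and the `test.weather` table from the earlier examples):

```python
from taos._objects import TaosConnection

conn = TaosConnection(host="localhost")
cursor = conn.cursor()
cursor.execute("SELECT ts, temperature FROM test.weather")
print(cursor.description)   # [('ts', 9, None, None, None, None, False), ...]
print(cursor.fetchmany(2))  # first two rows
print(cursor.fetchall())    # remaining rows
cursor.close()
conn.close()
```
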
+ """ + sql = operation + flag = True + affected_rows = 0 + for line in data_list: + if isinstance(line, dict): + flag = False + affected_rows += self.execute(sql.format(**line), req_id=req_id) + elif isinstance(line, list): + sql += f' {tuple(line)} ' + elif isinstance(line, tuple): + sql += f' {line} ' + if flag: + affected_rows += self.execute(sql, req_id=req_id) + return affected_rows + + def fetchone(self) -> Optional[Tuple]: + try: + row = next(self) + except StopIteration: + row = None + return row + + def fetchmany(self, size=None) -> List[Tuple]: + cdef int i = 0 + size = size or 1 + rows = [] + for row in self: + rows.append(row) + i += 1 + if i >= size: + break + + return rows + + def istype(self, col: int, data_type: str): + ft_name = "".join(["C ", data_type.upper()]).replace(" ", "_") + return self._description[col][1] == getattr(FieldType, ft_name) + + def fetchall(self) -> List[Tuple]: + return [r for r in self] + + def stop_query(self): + self._result.stop_query() + + def nextset(self): + pass + + def setinputsize(self, sizes): + pass + + def setutputsize(self, size, column=None): + pass + + def _reset_result(self): + """Reset the result to unused version.""" + self._description = [] + self._result = None + + def __del__(self): + self.close() + +# ---------------------------------------------- TAOS -------------------------------------------------------------- v + + +# ---------------------------------------------- TMQ --------------------------------------------------------------- v +cdef void async_commit_future_wrapper(tmq_t *tmq, int32_t code, void *param) with gil: + fut = param + fut.get_loop().call_soon_threadsafe(fut.set_result, code) + + +cdef void tmq_auto_commit_wrapper(tmq_t *tmq, int32_t code, void *param) with gil: + consumer = param + if code == 0: + if consumer.callback is not None: + consumer.callback(consumer) # TODO: param is consumer itself only, seem pretty weird + else: + errstr = tmq_err2str(code).decode("utf-8") + tmq_err = TmqError(errstr, code) + if consumer.error_callback is not None: + consumer.error_callback(tmq_err) + + +cdef class TopicPartition: + cdef str _topic + cdef int32_t _partition + cdef int64_t _offset + cdef int64_t _begin + cdef int64_t _end + + def __cinit__(self, str topic, int32_t partition, int64_t offset, int64_t begin=0, int64_t end=0): + self._topic = topic + self._partition = partition + self._offset = offset + self._begin = begin + self._end = end + + @property + def topic(self) -> str: + return self._topic + + @property + def partition(self) -> int: + return self._partition + + @property + def offset(self) -> int: + return self._offset + + @offset.setter + def offset(self, int64_t value): + self._offset = value + + @property + def begin(self) -> int: + return self._begin + + @property + def end(self) -> int: + return self._end + + def __str__(self): + return "TopicPartition(topic=%s, partition=%s, offset=%s)" % (self.topic, self.partition, self.offset) + + def __repr__(self): + return "TopicPartition(topic=%s, partition=%s, offset=%s)" % (self.topic, self.partition, self.offset) + + +cdef class MessageBlock: + cdef list _block + cdef list _fields + cdef int _nrows + cdef int _ncols + cdef str _table + + def __init__(self, + block: Optional[List[Optional[List]]]=None, + fields: Optional[List[TaosField]]=None, + nrows: int=0, ncols: int=0, table: str=""): + self._block = block or [] + self._fields = fields or [] + self._nrows = nrows + self._ncols = ncols + self._table = table + + def fields(self) -> List[TaosField]: + """ 
+
+
+cdef class MessageBlock:
+    cdef list _block
+    cdef list _fields
+    cdef int _nrows
+    cdef int _ncols
+    cdef str _table
+
+    def __init__(self,
+                 block: Optional[List[Optional[List]]]=None,
+                 fields: Optional[List[TaosField]]=None,
+                 nrows: int=0, ncols: int=0, table: str=""):
+        self._block = block or []
+        self._fields = fields or []
+        self._nrows = nrows
+        self._ncols = ncols
+        self._table = table
+
+    def fields(self) -> List[TaosField]:
+        """
+        Get fields in message block
+        """
+        return self._fields
+
+    def nrows(self) -> int:
+        """
+        get total count of rows of message block
+        """
+        return self._nrows
+
+    def ncols(self) -> int:
+        """
+        get total count of columns of message block
+        """
+        return self._ncols
+
+    def table(self) -> str:
+        """
+        get table name of message block
+        """
+        return self._table
+
+    def fetchall(self) -> List[Tuple]:
+        """
+        get all data in message block
+        """
+        return [r for r in self]
+
+    def __iter__(self) -> Iterator[Tuple]:
+        return zip(*self._block)
+
+    def __str__(self):
+        return "MessageBlock(table=%s, nrows=%s, ncols=%s)" % (self.table(), self.nrows(), self.ncols())
+
+    def __repr__(self):
+        return "MessageBlock(table=%s, nrows=%s, ncols=%s)" % (self.table(), self.nrows(), self.ncols())
+
+cdef class Message:
+    cdef TAOS_RES *_res
+    cdef int _err_no
+    cdef char *_err_str
+    cdef object _dt_epoch
+
+    def __cinit__(self, size_t res):
+        self._res = <TAOS_RES*>res
+        self._err_no = taos_errno(self._res)
+        self._err_str = taos_errstr(self._res)
+        self._dt_epoch = DEFAULT_DT_EPOCH
+
+    def __str__(self):
+        return "Message(topic=%s, database=%s, vgroup=%s, offset=%s)" % (self.topic(), self.database(), self.vgroup(), self.offset())
+
+    def __repr__(self):
+        return "Message(topic=%s, database=%s, vgroup=%s, offset=%s)" % (self.topic(), self.database(), self.vgroup(), self.offset())
+
+    def error(self) -> Optional[TmqError]:
+        """
+        The message object is also used to propagate errors and events; an application must check error() to determine
+        if the Message is a proper message (error() returns None) or an error or event (error() returns a TmqError
+        object).
+
+        :rtype: None or :py:class:`TmqError`
+        """
+        return TmqError(self._err_str.decode("utf-8"), self._err_no) if self._err_no else None
+
+    def topic(self) -> Optional[str]:
+        """
+        :returns: topic name.
+        :rtype: str
+        """
+        _topic = tmq_get_topic_name(self._res)
+        topic = None if _topic is NULL else _topic.decode("utf-8")
+        return topic
+
+    def database(self) -> Optional[str]:
+        """
+        :returns: database name.
+        :rtype: str
+        """
+        _db = tmq_get_db_name(self._res)
+        db = None if _db is NULL else _db.decode("utf-8")
+        return db
+
+    def vgroup(self) -> int:
+        return tmq_get_vgroup_id(self._res)
+
+    def offset(self) -> int:
+        """
+        :returns: message offset.
+ :rtype: int + """ + return tmq_get_vgroup_offset(self._res) + + def _set_dt_epoch(self, dt_epoch): + self._dt_epoch = dt_epoch + + def _fetch_message_block(self) -> Optional[MessageBlock]: + cdef TAOS_ROW pblock + num_of_rows = taos_fetch_block(self._res, &pblock) + if num_of_rows == 0: + return None + + field_count = taos_field_count(self._res) + _fields = taos_fetch_fields(self._res) + fields = [TaosField(f.name.decode("utf-8"), f.type, f.bytes) for f in _fields[:field_count]] + precision = taos_result_precision(self._res) + + _table = tmq_get_table_name(self._res) + table = "" if _table is NULL else _table.decode("utf-8") + + block = [None] * field_count + cdef int i + for i in range(field_count): + data = pblock[i] + field = _fields[i] + + if field.type in SIZED_TYPE: + is_null = taos_get_column_data_is_null(self._res, i, num_of_rows) + block[i] = CONVERT_FUNC[field.type](data, num_of_rows, is_null) + free(is_null) + elif field.type in UNSIZED_TYPE: + offsets = taos_get_column_data_offset(self._res, i) + block[i] = CONVERT_FUNC[field.type](data, num_of_rows, offsets) + else: + pass + + if field.type == TSDB_DATA_TYPE_TIMESTAMP and precision in (PrecisionEnum.Milliseconds, PrecisionEnum.Microseconds): + block[i] = _convert_timestamp_to_datetime(block[i], precision, self._dt_epoch) + + return MessageBlock(block, fields, num_of_rows, field_count, table) + + def value(self) -> List[MessageBlock]: + """ + + :returns: message value (payload). + :rtype: list[MessageBlock] + """ + res_type = tmq_get_res_type(self._res) + if res_type in (TmqResultType.TABLE_META, TmqResultType.INVALID): + return None # TODO: deal with meta data + + message_blocks = [] + while True: + mb = self._fetch_message_block() + if not mb: + break + + message_blocks.append(mb) + + return message_blocks + + def __dealloc__(self): + if self._res is not NULL: + taos_free_result(self._res) + + self._res = NULL + self._err_no = 0 + self._err_str = NULL + + +cdef class TaosConsumer: + cdef object __cb + cdef object __err_cb + cdef dict _configs + cdef tmq_conf_t *_tmq_conf + cdef tmq_t *_tmq + cdef bool _subscribed + cdef object _tz + cdef object _dt_epoch + default_config = { + 'group.id', + 'client.id', + 'msg.with.table.name', + 'enable.auto.commit', + 'auto.commit.interval.ms', + 'auto.offset.reset', + 'experimental.snapshot.enable', + 'enable.heartbeat.background', + 'experimental.snapshot.batch.size', + 'td.connect.ip', + 'td.connect.user', + 'td.connect.pass', + 'td.connect.port', + 'td.connect.db', + } + + def __cinit__(self, dict configs, **kwargs): + self.__cb = None + self.__err_cb = None + self._init_config(configs) + self._init_consumer() + self._subscribed = False + self._tz = DEFAULT_TZ + self._dt_epoch = DEFAULT_DT_EPOCH + if "timezone" in kwargs: + self.tz = kwargs["timezone"] + + def _init_config(self, dict configs): + if 'group.id' not in configs: + raise TmqError('missing group.id in consumer config setting') + + self._configs = configs + self._tmq_conf = tmq_conf_new() + if self._tmq_conf is NULL: + raise TmqError("new tmq conf failed") + + for k, v in self._configs.items(): + _k = k.encode("utf-8") + _v = v.encode("utf-8") + tmq_conf_res = tmq_conf_set(self._tmq_conf, _k, _v) + if tmq_conf_res != tmq_conf_res_t.TMQ_CONF_OK: + tmq_conf_destroy(self._tmq_conf) + self._tmq_conf = NULL + raise TmqError("set tmq conf failed!") + + def _init_consumer(self): + if self._tmq_conf is NULL: + raise TmqError('tmq_conf is NULL') + + if self._configs.get("enable.auto.commit", "false") == "true": + param = self + 
tmq_conf_set_auto_commit_cb(self._tmq_conf, tmq_auto_commit_wrapper, <void*>param)
+
+        self._tmq = tmq_consumer_new(self._tmq_conf, NULL, 0)
+        if self._tmq is NULL:
+            raise TmqError("new tmq consumer failed")
+
+    def _check_tmq_error(self, tmq_errno):
+        if tmq_errno != 0:
+            tmq_errstr = tmq_err2str(tmq_errno).decode("utf-8")
+            raise TmqError(tmq_errstr, tmq_errno)
+
+    @property
+    def tz(self) -> Optional[dt.tzinfo]:
+        return self._tz
+
+    @tz.setter
+    def tz(self, timezone: Optional[Union[str, dt.tzinfo]]):
+        if isinstance(timezone, str):
+            timezone = pytz.timezone(timezone)
+
+        assert timezone is None or isinstance(timezone, dt.tzinfo)
+
+        self._tz = timezone
+        self._dt_epoch = dt.datetime.fromtimestamp(0, tz=self._tz)
+
+    @property
+    def callback(self):
+        return self.__cb
+
+    @callback.setter
+    def callback(self, cb):
+        assert callable(cb)
+        self.__cb = cb
+
+    @property
+    def error_callback(self):
+        return self.__err_cb
+
+    @error_callback.setter
+    def error_callback(self, err_cb):
+        assert callable(err_cb)
+        self.__err_cb = err_cb
+
+    def subscribe(self, topics: List[str]):
+        """
+        Set subscription to supplied list of topics.
+        :param list(str) topics: List of topics (strings) to subscribe to.
+        """
+        tmq_list = tmq_list_new()
+        if tmq_list is NULL:
+            raise TmqError("new tmq list failed!")
+
+        try:
+            for tp in topics:
+                _tp = tp.encode("utf-8")
+                tmq_errno = tmq_list_append(tmq_list, _tp)
+                if tmq_errno != 0:
+                    tmq_errstr = tmq_err2str(tmq_errno).decode("utf-8")
+                    raise TmqError(tmq_errstr, tmq_errno)
+
+            tmq_errno = tmq_subscribe(self._tmq, tmq_list)
+            if tmq_errno != 0:
+                tmq_errstr = tmq_err2str(tmq_errno).decode("utf-8")
+                raise TmqError(tmq_errstr, tmq_errno)
+        finally:
+            tmq_list_destroy(tmq_list)
+
+        self._subscribed = True
+
+    def unsubscribe(self):
+        """
+        Remove current subscription.
+        """
+        tmq_errno = tmq_unsubscribe(self._tmq)
+        self._check_tmq_error(tmq_errno)
+
+        self._subscribed = False
+
+    def close(self):
+        """
+        Close down and terminate the consumer.
+        """
+        if self._tmq is not NULL:
+            tmq_errno = tmq_consumer_close(self._tmq)
+            self._check_tmq_error(tmq_errno)
+            self._tmq = NULL
+
+    def poll(self, float timeout=1.0) -> Optional[Message]:
+        """
+        Consumes a single message and returns events.
+
+        The application must check the returned `Message` object's `Message.error()` method to distinguish between
+        proper messages (error() returns None) and errors or events.
+
+        :param float timeout: Maximum time in seconds to block waiting for message, event or callback (default: 1).
+        :returns: A Message object or None on timeout
+        :rtype: `Message` or None
+        """
+        if not self._subscribed:
+            raise TmqError("consumer is not subscribed to any topic")
+
+        timeout_ms = int(timeout * 1000)
+        res = tmq_consumer_poll(self._tmq, timeout_ms)
+        if res is NULL:
+            return None
+
+        msg = Message(<size_t>res)
+        msg._set_dt_epoch(self._dt_epoch)
+        return msg
+
+    def set_auto_commit_cb(self, callback: Callable[[TaosConsumer], None]=None, error_callback: Callable[[TmqError], None]=None):
+        """
+        Set callbacks for auto commit.
+
+        :param callback: a Callable[[TaosConsumer], None] which is called when a message is committed
+        :param error_callback: a Callable[[TmqError], None] which is called when something goes wrong (code != 0)
+        """
+        self.callback = callback
+        self.error_callback = error_callback
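
Wiring the auto-commit callbacks together with the config-driven setup in `_init_consumer`; a usage sketch (assumes a server with an existing topic `topic1`):

```python
from taos._objects import TaosConsumer

consumer = TaosConsumer({
    "group.id": "g1",
    "enable.auto.commit": "true",      # required for the auto-commit callback path
    "auto.commit.interval.ms": "1000",
})
consumer.set_auto_commit_cb(
    callback=lambda c: print("auto-committed"),
    error_callback=lambda err: print("commit failed:", err),
)
consumer.subscribe(["topic1"])
msg = consumer.poll(timeout=2.0)
if msg is not None and msg.error() is None:
    for block in msg.value() or []:   # value() is None for metadata messages
        print(block.fetchall())
consumer.close()
```
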
+    def commit(self, message: Message=None, offsets: List[TopicPartition]=None):
+        """
+        Commit a message.
+
+        The `message` and `offsets` parameters are mutually exclusive. If neither is set, the current partition
+        assignment's offsets are used instead. Use this method to commit offsets if you have 'enable.auto.commit' set
+        to False.
+
+        :param Message message: Commit the message's offset+1. Note: By convention, committed offsets reflect the next
+            message to be consumed, **not** the last message consumed.
+        :param list(TopicPartition) offsets: List of topic+partitions+offsets to commit.
+        """
+        if message:
+            self.message_commit(message)
+            return
+
+        if offsets:
+            self.offsets_commit(offsets)
+            return
+
+        tmq_errno = tmq_commit_sync(self._tmq, NULL)
+        self._check_tmq_error(tmq_errno)
+
+    async def commit_a(self, message: Message=None, offsets: List[TopicPartition]=None):
+        """
+        Async commit a message.
+
+        The `message` and `offsets` parameters are mutually exclusive. If neither is set, the current partition
+        assignment's offsets are used instead. Use this method to commit offsets if you have 'enable.auto.commit' set
+        to False.
+
+        :param Message message: Commit the message's offset+1. Note: By convention, committed offsets reflect the next
+            message to be consumed, **not** the last message consumed.
+        :param list(TopicPartition) offsets: List of topic+partitions+offsets to commit.
+        """
+        if message:
+            await self.message_commit_a(message)
+            return
+
+        if offsets:
+            await self.offsets_commit_a(offsets)
+            return
+
+        loop = asyncio.get_event_loop()
+        fut = loop.create_future()
+        tmq_commit_async(self._tmq, NULL, async_commit_future_wrapper, <void*>fut)
+        tmq_errno = await fut
+        self._check_tmq_error(tmq_errno)
+
+    def message_commit(self, message: Message):
+        """ Commit with message """
+        tmq_errno = tmq_commit_sync(self._tmq, message._res)
+        self._check_tmq_error(tmq_errno)
+
+    async def message_commit_a(self, message: Message):
+        """ Async commit with message """
+        loop = asyncio.get_event_loop()
+        fut = loop.create_future()
+        tmq_commit_async(self._tmq, message._res, async_commit_future_wrapper, <void*>fut)
+        tmq_errno = await fut
+        self._check_tmq_error(tmq_errno)
+
+    def offsets_commit(self, partitions: List[TopicPartition]):
+        """ Commit with topic partitions """
+        for tp in partitions:
+            _tp = tp._topic.encode("utf-8")
+            tmq_errno = tmq_commit_offset_sync(self._tmq, _tp, tp._partition, tp._offset)
+            self._check_tmq_error(tmq_errno)
+
+    async def offsets_commit_a(self, partitions: List[TopicPartition]):
+        """ Async commit with topic partitions """
+        loop = asyncio.get_event_loop()
+        futs = []
+        for tp in partitions:
+            _tp = tp._topic.encode("utf-8")
+            fut = loop.create_future()
+            tmq_commit_offset_async(self._tmq, _tp, tp._partition, tp._offset, async_commit_future_wrapper, <void*>fut)
+            futs.append(fut)
+
+        tmq_errnos = await asyncio.gather(*futs, return_exceptions=True)
+        for tmq_errno in tmq_errnos:
+            self._check_tmq_error(tmq_errno)
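
Manual commits when 'enable.auto.commit' is "false", following the offset+1 convention from the docstring above; a sketch that reuses `consumer` from the previous example:

```python
from taos._objects import TopicPartition

msg = consumer.poll(timeout=1.0)
if msg is not None and msg.error() is None:
    # Committed offsets point at the *next* message to consume, hence offset() + 1.
    consumer.commit(offsets=[
        TopicPartition(msg.topic(), msg.vgroup(), msg.offset() + 1)
    ])
```
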
+ """ + cdef int32_t i + cdef int32_t num_of_assignment + cdef tmq_topic_assignment *p_assignment = NULL + + topics = self.list_topics() + topic_partitions = [] + try: + for topic in topics: + _topic = topic.encode("utf-8") + tmq_errno = tmq_get_topic_assignment(self._tmq, _topic, &p_assignment, &num_of_assignment) + if tmq_errno != 0: + tmq_errstr = tmq_err2str(tmq_errno).decode("utf-8") + raise TmqError(tmq_errstr, tmq_errno) + + for i in range(num_of_assignment): + assignment = p_assignment[i] + tp = TopicPartition(topic, assignment.vgId, assignment.currentOffset, assignment.begin, assignment.end) + topic_partitions.append(tp) + finally: + if p_assignment is not NULL: + tmq_free_assignment(p_assignment) + + return topic_partitions + + def seek(self, partition: TopicPartition): + """ + Set consume position for partition to offset. + """ + _tp = partition._topic.encode("utf-8") + tmq_errno = tmq_offset_seek(self._tmq, _tp, partition._partition, partition._offset) + self._check_tmq_error(tmq_errno) + + def committed(self, partitions: List[TopicPartition]) -> List[TopicPartition]: + """ + Retrieve committed offsets for the specified partitions. + + :param list(TopicPartition) partitions: List of topic+partitions to query for stored offsets. + :returns: List of topic+partitions with offset and possibly error set. + :rtype: list(TopicPartition) + """ + for partition in partitions: + if not isinstance(partition, TopicPartition): + raise TmqError("Invalid partition type") + + _tp = partition._topic.encode("utf-8") + tmq_errno = offset = tmq_committed(self._tmq, _tp, partition._partition) + self._check_tmq_error(tmq_errno) + + partition.offset = offset + + return partitions + + def position(self, partitions: List[TopicPartition]) -> List[TopicPartition]: + """ + Retrieve current positions (offsets) for the specified partitions. + + :param list(TopicPartition) partitions: List of topic+partitions to return current offsets for. + :returns: List of topic+partitions with offset and possibly error set. + :rtype: list(TopicPartition) + """ + for partition in partitions: + if not isinstance(partition, TopicPartition): + raise TmqError("Invalid partition type") + + _tp = partition._topic.encode("utf-8") + tmq_errno = offset = tmq_position(self._tmq, _tp, partition._partition) + self._check_tmq_error(tmq_errno) + + partition.offset = offset + + return partitions + + def list_topics(self) -> List[str]: + """ + Request subscription topics from the tmq. 
+ + :rtype: topics list + """ + cdef int i + tmq_list = tmq_list_new() + if tmq_list is NULL: + raise TmqError("new tmq list failed!") + + try: + tmq_errno = tmq_subscription(self._tmq, &tmq_list) + self._check_tmq_error(tmq_errno) + + ca = tmq_list_to_c_array(tmq_list) + n = tmq_list_get_size(tmq_list) + + tp_list = [] + for i in range(n): + tp_list.append(ca[i].decode("utf-8")) + finally: + tmq_list_destroy(tmq_list) + + return tp_list + + def __iter__(self) -> Iterator[Message]: + while self._tmq: + message = self.poll() + if message: + yield message + + def __dealloc__(self): + if self._tmq_conf is not NULL: + tmq_conf_destroy(self._tmq_conf) + self._tmq_conf = NULL + self.close() + +# ---------------------------------------------- TMQ --------------------------------------------------------------- ^ + + +# ---------------------------------------- statement --------------------------------------------------------------- v + +cdef class TaosStmt: + cdef TAOS_STMT *_stmt + + def __cinit__(self, size_t stmt): + self._stmt = stmt + + def _check_stmt_error(self, int errno): + if errno != 0: + stmt_errstr = taos_stmt_errstr(self._stmt).decode("utf-8") + raise StatementError(stmt_errstr, errno) + + def set_tbname(self, str name): + if self._stmt is NULL: + raise StatementError("Invalid use of set_tbname") + + _name = name.encode("utf-8") + errno = taos_stmt_set_tbname(self._stmt, _name) + self._check_stmt_error(errno) + + def prepare(self, str sql): + if self._stmt is NULL: + raise StatementError("Invalid use of prepare") + + _sql = sql.encode("utf-8") + errno = taos_stmt_prepare(self._stmt, _sql, len(_sql)) + self._check_stmt_error(errno) + + def set_tbname_tags(self, str name, TaosMultiBinds tags): + """Set table name with tags, tags is array of BindParams""" + if self._stmt is NULL: + raise StatementError("Invalid use of set_tbname_tags") + + _name = name.encode("utf-8") + errno = taos_stmt_set_tbname_tags(self._stmt, _name, tags._raw_binds) + self._check_stmt_error(errno) + + def bind_param(self, TaosMultiBinds binds, bool add_batch=True): + if self._stmt is NULL: + raise StatementError("Invalid use of bind_param") + + errno = taos_stmt_bind_param(self._stmt, binds._raw_binds) + self._check_stmt_error(errno) + + if add_batch: + self.add_batch() + + def bind_param_batch(self, TaosMultiBinds binds, bool add_batch=True): + if self._stmt is NULL: + raise StatementError("Invalid use of bind_param_batch") + + errno = taos_stmt_bind_param_batch(self._stmt, binds._raw_binds) + self._check_stmt_error(errno) + + if add_batch: + self.add_batch() + + def add_batch(self): + if self._stmt is NULL: + raise StatementError("Invalid use of add_batch") + + errno = taos_stmt_add_batch(self._stmt) + self._check_stmt_error(errno) + + def execute(self): + if self._stmt is NULL: + raise StatementError("Invalid use of execute") + + errno = taos_stmt_execute(self._stmt) + self._check_stmt_error(errno) + + def use_result(self) -> TaosResult: + if self._stmt is NULL: + raise StatementError("Invalid use of use_result") + + res = taos_stmt_use_result(self._stmt) + if res is NULL: + raise StatementError(taos_stmt_errstr(self._stmt).decode("utf-8")) + + return TaosResult(res) + + @property + def affected_rows(self) -> int: + # type: () -> int + return taos_stmt_affected_rows(self._stmt) + + def close(self): + """Close stmt.""" + if self._stmt is NULL: + return + + errno = taos_stmt_close(self._stmt) + self._check_stmt_error(errno) + self._stmt = NULL + + def __dealloc__(self): + self.close() + + +cdef class 
TaosMultiBinds: + cdef size_t _size + cdef TAOS_MULTI_BIND *_raw_binds + cdef list _binds + + def __cinit__(self, size_t size): + self._size = size + self._init_binds(size) + + def _init_binds(self, size_t size): + self._raw_binds = malloc(size * sizeof(TAOS_MULTI_BIND)) + _check_malloc(self._raw_binds) + memset(self._raw_binds, 0, size * sizeof(TAOS_MULTI_BIND)) + self._binds = [] + for i in range(size): + _pbind = self._raw_binds + (i * sizeof(TAOS_MULTI_BIND)) + self._binds.append(TaosMultiBind(_pbind)) + + def __str__(self): + return "TaosMultiBinds(size=%d)" % (self._size, ) + + def __repr__(self): + return "TaosMultiBinds(size=%d)" % (self._size, ) + + def __getitem__(self, item): + return self._binds[item] + + def __dealloc__(self): + if self._raw_binds is not NULL: + self._binds = [] + free(self._raw_binds) + self._raw_binds = NULL + + self._size = 0 + + +cdef class TaosMultiBind: + cdef TAOS_MULTI_BIND *_inner + + def __cinit__(self, size_t pbind): + self._inner = pbind + + def __dealloc__(self): + if self._inner.buffer is not NULL: + free(self._inner.buffer) + self._inner.buffer = NULL + + if self._inner.is_null is not NULL: + free(self._inner.is_null) + self._inner.is_null = NULL + + if self._inner.length is not NULL: + free(self._inner.length) + self._inner.length = NULL + + cdef _init_buffer(self, size_t size): + if self._inner.buffer is not NULL: + free(self._inner.buffer) + self._inner.buffer = NULL + + _buffer = malloc(size) + _check_malloc(_buffer) + self._inner.buffer = _buffer + + cdef _init_is_null(self, size_t size): + if self._inner.is_null is not NULL: + free(self._inner.is_null) + self._inner.is_null = NULL + + _is_null = malloc(size) + _check_malloc(_is_null) + self._inner.is_null = _is_null + + cdef _init_length(self, size_t size): + if self._inner.length is not NULL: + free(self._inner.length) + self._inner.length = NULL + + _length = malloc(size) + _check_malloc(_length) + self._inner.length = _length + + def __str__(self): + return "TaosMultiBind(buffer_type=%s, buffer_length=%d, num=%d)" % (self._inner.buffer_type, self._inner.buffer_length, self._inner.num) + + def __repr__(self): + return "TaosMultiBind(buffer_type=%s, buffer_length=%d, num=%d)" % (self._inner.buffer_type, self._inner.buffer_length, self._inner.num) + + def bool(self, values): + self._inner.buffer_type = FieldType.C_BOOL + self._inner.buffer_length = sizeof(bool) + self._inner.num = len(values) + self._init_buffer(self._inner.num * sizeof(bool)) + self._init_is_null(self._inner.num * sizeof(char)) + _buffer = self._inner.buffer + _is_null = self._inner.is_null + + for i in range(self._inner.num): + v = values[i] + if v is None: + _buffer[i] = FieldType.C_BOOL_NULL + _is_null[i] = 1 + else: + _buffer[i] = v + _is_null[i] = 0 + + def tinyint(self, values): + self._inner.buffer_type = FieldType.C_TINYINT + self._inner.buffer_length = sizeof(int8_t) + self._inner.num = len(values) + self._init_buffer(self._inner.num * sizeof(int8_t)) + self._init_is_null(self._inner.num * sizeof(char)) + _buffer = self._inner.buffer + _is_null = self._inner.is_null + + for i in range(self._inner.num): + v = values[i] + if v is None: + _buffer[i] = FieldType.C_TINYINT_NULL + _is_null[i] = 1 + else: + _buffer[i] = v + _is_null[i] = 0 + + def smallint(self, values): + self._inner.buffer_type = FieldType.C_SMALLINT + self._inner.buffer_length = sizeof(int16_t) + self._inner.num = len(values) + self._init_buffer(self._inner.num * sizeof(int16_t)) + self._init_is_null(self._inner.num * sizeof(char)) + _buffer = 
self._inner.buffer + _is_null = self._inner.is_null + + for i in range(self._inner.num): + v = values[i] + if v is None: + _buffer[i] = FieldType.C_SMALLINT_NULL + _is_null[i] = 1 + else: + _buffer[i] = v + _is_null[i] = 0 + + def int(self, values): + self._inner.buffer_type = FieldType.C_INT + self._inner.buffer_length = sizeof(int32_t) + self._inner.num = len(values) + self._init_buffer(self._inner.num * sizeof(int32_t)) + self._init_is_null(self._inner.num * sizeof(char)) + _buffer = self._inner.buffer + _is_null = self._inner.is_null + + for i in range(self._inner.num): + v = values[i] + if v is None: + _buffer[i] = FieldType.C_INT_NULL + _is_null[i] = 1 + else: + _buffer[i] = v + _is_null[i] = 0 + + def bigint(self, values): + self._inner.buffer_type = FieldType.C_BIGINT + self._inner.buffer_length = sizeof(int64_t) + self._inner.num = len(values) + self._init_buffer(self._inner.num * sizeof(int64_t)) + self._init_is_null(self._inner.num * sizeof(char)) + _buffer = self._inner.buffer + _is_null = self._inner.is_null + + for i in range(self._inner.num): + v = values[i] + if v is None: + _buffer[i] = FieldType.C_BIGINT_NULL + _is_null[i] = 1 + else: + _buffer[i] = v + _is_null[i] = 0 + + def float(self, values): + self._inner.buffer_type = FieldType.C_FLOAT + self._inner.buffer_length = sizeof(float) + self._inner.num = len(values) + self._init_buffer(self._inner.num * sizeof(float)) + self._init_is_null(self._inner.num * sizeof(char)) + _buffer = self._inner.buffer + _is_null = self._inner.is_null + + for i in range(self._inner.num): + v = values[i] + if v is None: + # _buffer[i] = FieldType.C_FLOAT_NULL + _buffer[i] = float('nan') + _is_null[i] = 1 + else: + _buffer[i] = v + _is_null[i] = 0 + + def double(self, values): + self._inner.buffer_type = FieldType.C_DOUBLE + self._inner.buffer_length = sizeof(double) + self._inner.num = len(values) + self._init_buffer(self._inner.num * sizeof(double)) + self._init_is_null(self._inner.num * sizeof(char)) + _buffer = self._inner.buffer + _is_null = self._inner.is_null + + for i in range(self._inner.num): + v = values[i] + if v is None: + # _buffer[i] = FieldType.C_DOUBLE_NULL + _buffer[i] = float('nan') + _is_null[i] = 1 + else: + _buffer[i] = v + _is_null[i] = 0 + + def tinyint_unsigned(self, values): + self._inner.buffer_type = FieldType.C_TINYINT_UNSIGNED + self._inner.buffer_length = sizeof(uint8_t) + self._inner.num = len(values) + self._init_buffer(self._inner.num * sizeof(uint8_t)) + self._init_is_null(self._inner.num * sizeof(char)) + _buffer = self._inner.buffer + _is_null = self._inner.is_null + + for i in range(self._inner.num): + v = values[i] + if v is None: + _buffer[i] = FieldType.C_TINYINT_UNSIGNED_NULL + _is_null[i] = 1 + else: + _buffer[i] = v + _is_null[i] = 0 + + def smallint_unsigned(self, values): + self._inner.buffer_type = FieldType.C_SMALLINT_UNSIGNED + self._inner.buffer_length = sizeof(uint16_t) + self._inner.num = len(values) + self._init_buffer(self._inner.num * sizeof(uint16_t)) + self._init_is_null(self._inner.num * sizeof(char)) + _buffer = self._inner.buffer + _is_null = self._inner.is_null + + for i in range(self._inner.num): + v = values[i] + if v is None: + _buffer[i] = FieldType.C_SMALLINT_UNSIGNED_NULL + _is_null[i] = 1 + else: + _buffer[i] = v + _is_null[i] = 0 + + def int_unsigned(self, values): + self._inner.buffer_type = FieldType.C_INT_UNSIGNED + self._inner.buffer_length = sizeof(uint32_t) + self._inner.num = len(values) + self._init_buffer(self._inner.num * sizeof(uint32_t)) + 
self._init_is_null(self._inner.num * sizeof(char)) + _buffer = self._inner.buffer + _is_null = self._inner.is_null + + for i in range(self._inner.num): + v = values[i] + if v is None: + _buffer[i] = FieldType.C_INT_UNSIGNED_NULL + _is_null[i] = 1 + else: + _buffer[i] = v + _is_null[i] = 0 + + def bigint_unsigned(self, values): + self._inner.buffer_type = FieldType.C_BIGINT_UNSIGNED + self._inner.buffer_length = sizeof(uint64_t) + self._inner.num = len(values) + self._init_buffer(self._inner.num * sizeof(uint64_t)) + self._init_is_null(self._inner.num * sizeof(char)) + _buffer = self._inner.buffer + _is_null = self._inner.is_null + + for i in range(self._inner.num): + v = values[i] + if v is None: + _buffer[i] = FieldType.C_BIGINT_UNSIGNED_NULL + _is_null[i] = 1 + else: + _buffer[i] = v + _is_null[i] = 0 + + def timestamp(self, values, precision=PrecisionEnum.Milliseconds): # BUG: program just crash if one of the values is None in the first timestamp column + self._inner.buffer_type = FieldType.C_TIMESTAMP + self._inner.buffer_length = sizeof(int64_t) + self._inner.num = len(values) + self._init_buffer(self._inner.num * sizeof(int64_t)) + self._init_is_null(self._inner.num * sizeof(char)) + _buffer = self._inner.buffer + _is_null = self._inner.is_null + + m = 10**(3*(precision+1)) + for i in range(self._inner.num): + v = values[i] + if v is None: + _buffer[i] = FieldType.C_BIGINT_NULL + _is_null[i] = 1 + else: + if isinstance(v, dt.datetime): + v = int(round(v.timestamp() * m)) + elif isinstance(v, str): + v = int(round(dt.datetime.fromisoformat(v).timestamp() * m)) + elif isinstance(v, float): + v = int(round(v * m)) + else: + pass + + _buffer[i] = v + _is_null[i] = 0 + + def binary(self, values): + _bytes = [v if v is None else v.encode("utf-8") for v in values] + self._inner.buffer_type = FieldType.C_BINARY + self._inner.buffer_length = max(len(b) for b in _bytes if b is not None) + self._inner.num = len(values) + self._init_buffer(self._inner.num * self._inner.buffer_length) + self._init_is_null(self._inner.num * sizeof(char)) + self._init_length(self._inner.num * sizeof(int32_t)) + _buffer = self._inner.buffer + _is_null = self._inner.is_null + _length = self._inner.length + + _buf = bytearray(self._inner.num * self._inner.buffer_length) + for i in range(self._inner.num): + offset = i * self._inner.buffer_length + v = _bytes[i] + if v is None: + # _buf[offset:offset+self._inner.buffer_length] = b"\x00" * self._inner.buffer_length + _is_null[i] = 1 + _length[i] = 0 + else: + _buf[offset:offset+len(v)] = v + _is_null[i] = 0 + _length[i] = len(v) + + memcpy(_buffer, _buf, self._inner.num * self._inner.buffer_length) + + def nchar(self, values): + _bytes = [v if v is None else v.encode("utf-8") for v in values] + self._inner.buffer_type = FieldType.C_NCHAR + self._inner.buffer_length = max(len(b) for b in _bytes if b is not None) + self._inner.num = len(values) + self._init_buffer(self._inner.num * self._inner.buffer_length) + self._init_is_null(self._inner.num * sizeof(char)) + self._init_length(self._inner.num * sizeof(int32_t)) + _buffer = self._inner.buffer + _is_null = self._inner.is_null + _length = self._inner.length + + _buf = bytearray(self._inner.num * self._inner.buffer_length) + for i in range(self._inner.num): + offset = i * self._inner.buffer_length + v = _bytes[i] + if v is None: + # _buf[offset:offset+self._inner.buffer_length] = b"\x00" * self._inner.buffer_length + _is_null[i] = 1 + _length[i] = 0 + else: + _buf[offset:offset+len(v)] = v + _is_null[i] = 0 + _length[i] = 
len(v) + + memcpy(_buffer, _buf, self._inner.num * self._inner.buffer_length) + + def json(self, values): + _bytes = [v if v is None else v.encode("utf-8") for v in values] + self._inner.buffer_type = FieldType.C_JSON + self._inner.buffer_length = max(len(b) for b in _bytes if b is not None) + self._inner.num = len(values) + self._init_buffer(self._inner.num * self._inner.buffer_length) + self._init_is_null(self._inner.num * sizeof(char)) + self._init_length(self._inner.num * sizeof(int32_t)) + _buffer = self._inner.buffer + _is_null = self._inner.is_null + _length = self._inner.length + + _buf = bytearray(self._inner.num * self._inner.buffer_length) + for i in range(self._inner.num): + offset = i * self._inner.buffer_length + v = _bytes[i] + if v is None: + # _buf[offset:offset+self._inner.buffer_length] = b"\x00" * self._inner.buffer_length + _is_null[i] = 1 + _length[i] = 0 + else: + _buf[offset:offset+len(v)] = v + _is_null[i] = 0 + _length[i] = len(v) + + memcpy(_buffer, _buf, self._inner.num * self._inner.buffer_length) + +# ---------------------------------------- statement --------------------------------------------------------------- ^ diff --git a/taos/_parser.pxd b/taos/_parser.pxd new file mode 100644 index 00000000..54921561 --- /dev/null +++ b/taos/_parser.pxd @@ -0,0 +1,43 @@ +from libc.stdint cimport int8_t, int16_t, int32_t, int64_t, uint8_t, uint16_t, uint32_t, uint64_t, uintptr_t + +ctypedef bint bool + +cdef list _parse_binary_string(size_t ptr, int num_of_rows, int field_length) + +cdef list _parse_nchar_string(size_t ptr, int num_of_rows, int field_length) + +cdef list _parse_bytes(size_t ptr, int num_of_rows, size_t offsets) + +cdef list _parse_string(size_t ptr, int num_of_rows, size_t offsets) + +cdef list _parse_bool(size_t ptr, int num_of_rows, size_t is_null) + +cdef list _parse_int8_t(size_t ptr, int num_of_rows, size_t is_null) + +cdef list _parse_int16_t(size_t ptr, int num_of_rows, size_t is_null) + +cdef list _parse_int32_t(size_t ptr, int num_of_rows, size_t is_null) + +cdef list _parse_int64_t(size_t ptr, int num_of_rows, size_t is_null) + +cdef list _parse_uint8_t(size_t ptr, int num_of_rows, size_t is_null) + +cdef list _parse_uint16_t(size_t ptr, int num_of_rows, size_t is_null) + +cdef list _parse_uint32_t(size_t ptr, int num_of_rows, size_t is_null) + +cdef list _parse_uint64_t(size_t ptr, int num_of_rows, size_t is_null) + +cdef list _parse_int(size_t ptr, int num_of_rows, size_t is_null) + +cdef list _parse_uint(size_t ptr, int num_of_rows, size_t is_null) + +cdef list _parse_float(size_t ptr, int num_of_rows, size_t is_null) + +cdef list _parse_double(size_t ptr, int num_of_rows, size_t is_null) + +cdef list _parse_datetime(size_t ptr, int num_of_rows, size_t is_null, int precision, object dt_epoch) + +cdef list _parse_timestamp(size_t ptr, int num_of_rows, size_t is_null) + +cdef list _convert_timestamp_to_datetime(list ts, int precision, object dt_epoch) \ No newline at end of file diff --git a/taos/_parser.pyx b/taos/_parser.pyx new file mode 100644 index 00000000..5cdd0d91 --- /dev/null +++ b/taos/_parser.pyx @@ -0,0 +1,267 @@ +# cython: profile=True + +import datetime as dt + +cdef list _parse_binary_string(size_t ptr, int num_of_rows, int field_length): + cdef list res = [] + cdef int i + for i in range(abs(num_of_rows)): + nchar_ptr = ptr + field_length * i + py_string = (nchar_ptr).decode("utf-8") + res.append(py_string) + + return res + +cdef list _parse_nchar_string(size_t ptr, int num_of_rows, int field_length): + cdef list res = [] + 
cdef int i + for i in range(abs(num_of_rows)): + c_char_ptr = ptr + field_length * i + py_string = (c_char_ptr)[:field_length].decode("utf-8") + res.append(py_string) + + return res + +cdef list _parse_bytes(size_t ptr, int num_of_rows, size_t offsets): + cdef list res = [] + cdef int i + cdef size_t rbyte_ptr + cdef size_t c_char_ptr + cdef int *_offset = offsets + for i in range(abs(num_of_rows)): + if _offset[i] == -1: + res.append(None) + else: + rbyte_ptr = ptr + _offset[i] + rbyte = (rbyte_ptr)[0] + c_char_ptr = rbyte_ptr + sizeof(uint16_t) + py_bytes = (c_char_ptr)[:rbyte] + res.append(py_bytes) + + return res + +cdef list _parse_string(size_t ptr, int num_of_rows, size_t offsets): + cdef list res = [] + cdef int i + cdef size_t rbyte_ptr + cdef size_t c_char_ptr + cdef int *_offset = offsets + for i in range(abs(num_of_rows)): + if _offset[i] == -1: + res.append(None) + else: + rbyte_ptr = ptr + _offset[i] + rbyte = (rbyte_ptr)[0] + c_char_ptr = rbyte_ptr + sizeof(uint16_t) + py_string = (c_char_ptr)[:rbyte].decode("utf-8") + res.append(py_string) + + return res + +cdef list _parse_bool(size_t ptr, int num_of_rows, size_t is_null): + cdef list res = [] + cdef int i + cdef bool *_is_null = is_null + v_ptr = ptr + for i in range(abs(num_of_rows)): + if _is_null[i]: + res.append(None) + else: + res.append(v_ptr[i]) + + return res + +cdef list _parse_int8_t(size_t ptr, int num_of_rows, size_t is_null): + cdef list res = [] + cdef int i + cdef bool *_is_null = is_null + v_ptr = ptr + for i in range(abs(num_of_rows)): + if _is_null[i]: + res.append(None) + else: + res.append(v_ptr[i]) + + return res + +cdef list _parse_int16_t(size_t ptr, int num_of_rows, size_t is_null): + cdef list res = [] + cdef int i + cdef bool *_is_null = is_null + v_ptr = ptr + for i in range(abs(num_of_rows)): + if _is_null[i]: + res.append(None) + else: + res.append(v_ptr[i]) + + return res + +cdef list _parse_int32_t(size_t ptr, int num_of_rows, size_t is_null): + cdef list res = [] + cdef int i + cdef bool *_is_null = is_null + v_ptr = ptr + for i in range(abs(num_of_rows)): + if _is_null[i]: + res.append(None) + else: + res.append(v_ptr[i]) + + return res + +cdef list _parse_int64_t(size_t ptr, int num_of_rows, size_t is_null): + cdef list res = [] + cdef int i + cdef bool *_is_null = is_null + v_ptr = ptr + for i in range(abs(num_of_rows)): + if _is_null[i]: + res.append(None) + else: + res.append(v_ptr[i]) + + return res + +cdef list _parse_uint8_t(size_t ptr, int num_of_rows, size_t is_null): + cdef list res = [] + cdef int i + cdef bool *_is_null = is_null + v_ptr = ptr + for i in range(abs(num_of_rows)): + if _is_null[i]: + res.append(None) + else: + res.append(v_ptr[i]) + + return res + +cdef list _parse_uint16_t(size_t ptr, int num_of_rows, size_t is_null): + cdef list res = [] + cdef int i + cdef bool *_is_null = is_null + v_ptr = ptr + for i in range(abs(num_of_rows)): + if _is_null[i]: + res.append(None) + else: + res.append(v_ptr[i]) + + return res + +cdef list _parse_uint32_t(size_t ptr, int num_of_rows, size_t is_null): + cdef list res = [] + cdef int i + cdef bool *_is_null = is_null + v_ptr = ptr + for i in range(abs(num_of_rows)): + if _is_null[i]: + res.append(None) + else: + res.append(v_ptr[i]) + + return res + +cdef list _parse_uint64_t(size_t ptr, int num_of_rows, size_t is_null): + cdef list res = [] + cdef int i + cdef bool *_is_null = is_null + v_ptr = ptr + for i in range(abs(num_of_rows)): + if _is_null[i]: + res.append(None) + else: + res.append(v_ptr[i]) + + return res + +cdef 
list _parse_int(size_t ptr, int num_of_rows, size_t is_null): + cdef list res = [] + cdef int i + cdef bool *_is_null = is_null + v_ptr = ptr + for i in range(abs(num_of_rows)): + if _is_null[i]: + res.append(None) + else: + res.append(v_ptr[i]) + + return res + +cdef list _parse_uint(size_t ptr, int num_of_rows, size_t is_null): + cdef list res = [] + cdef int i + cdef bool *_is_null = is_null + v_ptr = ptr + for i in range(abs(num_of_rows)): + if _is_null[i]: + res.append(None) + else: + res.append(v_ptr[i]) + + return res + +cdef list _parse_float(size_t ptr, int num_of_rows, size_t is_null): + cdef list res = [] + cdef int i + cdef bool *_is_null = is_null + v_ptr = ptr + for i in range(abs(num_of_rows)): + if _is_null[i]: + res.append(None) + else: + res.append(v_ptr[i]) + + return res + +cdef list _parse_double(size_t ptr, int num_of_rows, size_t is_null): + cdef list res = [] + cdef int i + cdef bool *_is_null = is_null + v_ptr = ptr + for i in range(abs(num_of_rows)): + if _is_null[i]: + res.append(None) + else: + res.append(v_ptr[i]) + + return res + +cdef list _parse_datetime(size_t ptr, int num_of_rows, size_t is_null, int precision, object dt_epoch): + cdef list res = [] + cdef int i + cdef bool *_is_null = is_null + cdef double denom = 10**((precision + 1) * 3) + v_ptr = ptr + for i in range(abs(num_of_rows)): + if _is_null[i]: + res.append(None) + else: + raw_value = v_ptr[i] + if precision <= 1: + _dt = dt_epoch + dt.timedelta(seconds=raw_value / denom) + else: + _dt = raw_value # nanosecond precision cannot be represented by Python datetime; keep the raw integer + res.append(_dt) + return res + +cdef list _parse_timestamp(size_t ptr, int num_of_rows, size_t is_null): + cdef list res = [] + cdef int i + cdef bool *_is_null = is_null + v_ptr = ptr + for i in range(abs(num_of_rows)): + if _is_null[i]: + res.append(None) + else: + raw_value = v_ptr[i] + res.append(raw_value) + return res + +cdef list _convert_timestamp_to_datetime(list ts, int precision, object dt_epoch): + cdef double denom = 10**((precision + 1) * 3) + for i, t in enumerate(ts): + if t is not None: + ts[i] = dt_epoch + dt.timedelta(seconds=t / denom) + + return ts \ No newline at end of file diff --git a/taos/error.py b/taos/error.py index bc57a977..01caeba4 100644 --- a/taos/error.py +++ b/taos/error.py @@ -109,3 +109,8 @@ class TmqError(DatabaseError): """Exception raise in TMQ API""" pass + +class PrecisionError(Exception): + """Raised when a timestamp requires nanosecond precision, which Python datetime cannot represent""" + + pass \ No newline at end of file diff --git a/taos/sqlalchemy.py b/taos/sqlalchemy.py index ffb2d395..6baadbc7 100644 --- a/taos/sqlalchemy.py +++ b/taos/sqlalchemy.py @@ -76,6 +76,7 @@ def _resolve_type(self, type_): class AlchemyTaosConnection: paramstyle = "pyformat" + from taos.error import Warning, Error, InterfaceError, DatabaseError, DataError, OperationalError, IntegrityError, InternalError, ProgrammingError, NotSupportedError def connect(self, **kwargs): host = kwargs["host"] if "host" in kwargs else "localhost" diff --git a/tests/test_taos_cython/test_connect_args.py b/tests/test_taos_cython/test_connect_args.py new file mode 100644 index 00000000..860b80a5 --- /dev/null +++ b/tests/test_taos_cython/test_connect_args.py @@ -0,0 +1,14 @@ +from taos._objects import TaosConnection + + +def test_connect_args(): + """ + DO NOT DELETE THIS TEST CASE! + + These args look unused, but they guard against positional connect args being removed from the connection init by mistake, + because some CI cases from earlier versions may still rely on them. 
+ """ + host = 'localhost:6030' + conn = TaosConnection(host) + assert conn is not None + conn.close() diff --git a/tests/test_taos_cython/test_info.py b/tests/test_taos_cython/test_info.py new file mode 100644 index 00000000..01bae155 --- /dev/null +++ b/tests/test_taos_cython/test_info.py @@ -0,0 +1,19 @@ +from taos._objects import TaosConnection +import pytest + + +@pytest.fixture +def conn(): + return TaosConnection(host="localhost") + + +def test_client_info(conn): + print("client info: %s" % conn.client_info) + pass + + +def test_server_info(conn): + # type: (TaosConnection) -> None + print("conn client info: %s" % conn.client_info) + print("conn server info: %s" % conn.server_info) + pass diff --git a/tests/test_taos_cython/test_lines.py b/tests/test_taos_cython/test_lines.py new file mode 100644 index 00000000..77fdd55f --- /dev/null +++ b/tests/test_taos_cython/test_lines.py @@ -0,0 +1,628 @@ +from taos.error import OperationalError, SchemalessError, InterfaceError +from taos._objects import TaosConnection +from taos._constants import PrecisionEnum +import taos.utils as utils + +import taos +import pytest + + +@pytest.fixture +def conn(): + # type: () -> taos.TaosConnection + return TaosConnection(host="localhost") + + +def test_schemaless_insert_update_2(conn): + # type: (TaosConnection) -> None + + dbname = "test_schemaless_insert_update_2" + try: + conn.execute("drop database if exists %s" % dbname) + if taos.IS_V3: + conn.execute("create database if not exists %s schemaless 1 precision 'ns'" % dbname) + else: + conn.execute("create database if not exists %s update 2 precision 'ns'" % dbname) + conn.select_db(dbname) + + lines = [ + 'st,t1=4i64,t3="t4",t2=5f64,t4=5f64 c1=3i64,c3=L"passitagin, abc",c2=true,c4=5f64,c5=5f64,c6=7u64 1626006933640000000', + ] + res = conn.schemaless_insert(lines, 1, 0) + print("affected rows: ", res) + assert (res == 1) + + result = conn.query("select * from st") + [before] = result.fetch_all_into_dict() + assert (before["c3"] == "passitagin, abc") + + lines = [ + 'st,t1=4i64,t3="t4",t2=5f64,t4=5f64 c1=3i64,c3=L"passitagin",c2=true,c4=5f64,c5=5f64,c6=7u64 1626006933640000000', + ] + res = conn.schemaless_insert(lines, 1, 0) + result = conn.query("select * from st") + [after] = result.fetch_all_into_dict() + assert (after["c3"] == "passitagin") + + conn.execute("drop database if exists %s" % dbname) + conn.close() + except InterfaceError as err: + conn.execute("drop database if exists %s" % dbname) + conn.close() + print(err) + except Exception as err: + conn.execute("drop database if exists %s" % dbname) + conn.close() + print(err) + raise err + + +def test_schemaless_insert(conn): + # type: (TaosConnection) -> None + + dbname = "pytest_taos_schemaless_insert" + try: + conn.execute("drop database if exists %s" % dbname) + if taos.IS_V3: + conn.execute("create database if not exists %s schemaless 1 precision 'ns'" % dbname) + else: + conn.execute("create database if not exists %s update 2 precision 'ns'" % dbname) + + conn.select_db(dbname) + + lines = [ + 'st,t1=3i64,t2=4f64,t3="t3" c1=3i64,c3=L"passit",c2=false,c4=4f64 1626006833639000000', + 'st,t1=4i64,t3="t4",t2=5f64,t4=5f64 c1=3i64,c3=L"passitagin, abc",c2=true,c4=5f64,c5=5f64,c6=7u64 1626006933640000000', + 'stf,t1=4i64,t3="t4",t2=5f64,t4=5f64 c1=3i64,c3=L"passitagin_stf",c2=false,c5=5f64,c6=7u64 1626006933641000000', + ] + res = conn.schemaless_insert(lines, 1, 0) + print("affected rows: ", res) + assert (res == 3) + + lines = [ + 'stf,t1=5i64,t3="t4",t2=5f64,t4=5f64 
c1=3i64,c3=L"passitagin_stf",c2=false,c5=5f64,c6=7u64 1626006933641000000', + ] + res = conn.schemaless_insert(lines, 1, 0) + print("affected rows: ", res) + assert (res == 1) + result = conn.query("select * from st") + + dict2 = result.fetch_all_into_dict() + print(dict2) + print(result.row_count) + all = result.rows_iter() + for row in all: + print(row) + result.close() + assert (result.row_count == 2) + + # error test + lines = [ + ',t1=3i64,t2=4f64,t3="t3" c1=3i64,c3=L"passit",c2=false,c4=4f64 1626006833639000000', + ] + try: + res = conn.schemaless_insert(lines, 1, 0) + print(res) + # assert(False) + except SchemalessError as err: + print('**** error: ', err) + # assert (err.msg == 'Invalid data format') + + result = conn.query("select * from st") + print(result.row_count) + all = result.rows_iter() + for row in all: + print(row) + result.close() + + conn.execute("drop database if exists %s" % dbname) + conn.close() + except InterfaceError as err: + conn.execute("drop database if exists %s" % dbname) + conn.close() + print(err) + except Exception as err: + conn.execute("drop database if exists %s" % dbname) + conn.close() + print(err) + raise err + + +def test_schemaless_insert_ttl(conn: TaosConnection) -> None: + dbname = "pytest_taos_schemaless_insert" + try: + conn.execute("drop database if exists %s" % dbname) + if taos.IS_V3: + conn.execute("create database if not exists %s schemaless 1 precision 'ns'" % dbname) + else: + conn.execute("create database if not exists %s update 2 precision 'ns'" % dbname) + + conn.select_db(dbname) + + lines = [ + 'st,t1=3i64,t2=4f64,t3="t3" c1=3i64,c3=L"passit",c2=false,c4=4f64 1626006833639000000', + 'st,t1=4i64,t3="t4",t2=5f64,t4=5f64 c1=3i64,c3=L"passitagin, abc",c2=true,c4=5f64,c5=5f64,c6=7u64 1626006933640000000', + 'stf,t1=4i64,t3="t4",t2=5f64,t4=5f64 c1=3i64,c3=L"passitagin_stf",c2=false,c5=5f64,c6=7u64 1626006933641000000', + ] + ttl = 1000 + res = conn.schemaless_insert(lines, 1, 0, ttl=ttl) + print("affected rows: ", res) + assert (res == 3) + + lines = [ + 'stf,t1=5i64,t3="t4",t2=5f64,t4=5f64 c1=3i64,c3=L"passitagin_stf",c2=false,c5=5f64,c6=7u64 1626006933641000000', + ] + ttl = 1000 + res = conn.schemaless_insert(lines, 1, 0, ttl=ttl) + print("affected rows: ", res) + assert (res == 1) + result = conn.query("select * from st") + + dict2 = result.fetch_all_into_dict() + print(dict2) + print(result.row_count) + all = result.rows_iter() + for row in all: + print(row) + result.close() + assert (result.row_count == 2) + + # error test + lines = [ + ',t1=3i64,t2=4f64,t3="t3" c1=3i64,c3=L"passit",c2=false,c4=4f64 1626006833639000000', + ] + try: + ttl = 1000 + res = conn.schemaless_insert(lines, 1, 0, ttl=ttl) + print(res) + # assert(False) + except SchemalessError as err: + print('**** error: ', err) + # assert (err.msg == 'Invalid data format') + + result = conn.query("select * from st") + print(result.row_count) + all = result.rows_iter() + for row in all: + print(row) + result.close() + + conn.execute("drop database if exists %s" % dbname) + conn.close() + except InterfaceError as err: + conn.execute("drop database if exists %s" % dbname) + conn.close() + print(err) + except Exception as err: + conn.execute("drop database if exists %s" % dbname) + conn.close() + print(err) + raise err + + +def test_schemaless_insert_ttl_with_req_id(conn: TaosConnection) -> None: + dbname = "pytest_taos_schemaless_insert" + try: + conn.execute("drop database if exists %s" % dbname) + if taos.IS_V3: + conn.execute("create database if not exists %s schemaless 
1 precision 'ns'" % dbname) + else: + conn.execute("create database if not exists %s update 2 precision 'ns'" % dbname) + + conn.select_db(dbname) + + lines = [ + 'st,t1=3i64,t2=4f64,t3="t3" c1=3i64,c3=L"passit",c2=false,c4=4f64 1626006833639000000', + 'st,t1=4i64,t3="t4",t2=5f64,t4=5f64 c1=3i64,c3=L"passitagin, abc",c2=true,c4=5f64,c5=5f64,c6=7u64 1626006933640000000', + 'stf,t1=4i64,t3="t4",t2=5f64,t4=5f64 c1=3i64,c3=L"passitagin_stf",c2=false,c5=5f64,c6=7u64 1626006933641000000', + ] + ttl = 1000 + req_id = utils.gen_req_id() + res = conn.schemaless_insert(lines, 1, 0, ttl=ttl, req_id=req_id) + print("affected rows: ", res) + assert (res == 3) + + lines = [ + 'stf,t1=5i64,t3="t4",t2=5f64,t4=5f64 c1=3i64,c3=L"passitagin_stf",c2=false,c5=5f64,c6=7u64 1626006933641000000', + ] + ttl = 1000 + req_id = utils.gen_req_id() + res = conn.schemaless_insert(lines, 1, 0, ttl=ttl, req_id=req_id) + print("affected rows: ", res) + assert (res == 1) + result = conn.query("select * from st") + + dict2 = result.fetch_all_into_dict() + print(dict2) + print(result.row_count) + all = result.rows_iter() + for row in all: + print(row) + result.close() + assert (result.row_count == 2) + + # error test + lines = [ + ',t1=3i64,t2=4f64,t3="t3" c1=3i64,c3=L"passit",c2=false,c4=4f64 1626006833639000000', + ] + try: + ttl = 1000 + req_id = utils.gen_req_id() + res = conn.schemaless_insert(lines, 1, 0, ttl=ttl, req_id=req_id) + print(res) + # assert(False) + except SchemalessError as err: + print('**** error: ', err) + # assert (err.msg == 'Invalid data format') + + result = conn.query("select * from st") + print(result.row_count) + all = result.rows_iter() + for row in all: + print(row) + result.close() + + conn.execute("drop database if exists %s" % dbname) + conn.close() + except InterfaceError as err: + conn.execute("drop database if exists %s" % dbname) + conn.close() + print(err) + except Exception as err: + conn.execute("drop database if exists %s" % dbname) + conn.close() + print(err) + raise err + + +def test_schemaless_insert_raw(conn: TaosConnection) -> None: + dbname = "pytest_taos_schemaless_insert" + try: + conn.execute("drop database if exists %s" % dbname) + + if taos.IS_V3: + conn.execute("create database if not exists %s schemaless 1 precision 'ns'" % dbname) + else: + conn.execute("create database if not exists %s update 2 precision 'ns'" % dbname) + + conn.select_db(dbname) + + lines = '''st,t1=3i64,t2=4f64,t3="t3" c1=3i64,c3=L"passit",c2=false,c4=4f64 1626006833639000000 + st,t1=4i64,t3="t4",t2=5f64,t4=5f64 c1=3i64,c3=L"passitagin, abc",c2=true,c4=5f64,c5=5f64,c6=7u64 1626006933640000000 + stf,t1=4i64,t3="t4",t2=5f64,t4=5f64 c1=3i64,c3=L"passitagin_stf",c2=false,c5=5f64,c6=7u64 1626006933641000000''' + + res = conn.schemaless_insert_raw(lines, 1, 0) + print("affected rows: ", res) + assert (res == 3) + + lines = '''stf,t1=5i64,t3="t4",t2=5f64,t4=5f64 c1=3i64,c3=L"passitagin_stf",c2=false,c5=5f64,c6=7u64 1626006933641000000''' + res = conn.schemaless_insert_raw(lines, 1, 0) + print("affected rows: ", res) + assert (res == 1) + + result = conn.query("select * from st") + dict2 = result.fetch_all_into_dict() + print(dict2) + print(result.row_count) + + all = result.rows_iter() + for row in all: + print(row) + result.close() + assert (result.row_count == 2) + + # error test + lines = ''',t1=3i64,t2=4f64,t3="t3" c1=3i64,c3=L"passit",c2=false,c4=4f64 1626006833639000000''' + try: + res = conn.schemaless_insert_raw(lines, 1, 0) + print(res) + # assert(False) + except SchemalessError as err: + 
print('**** error: ', err) + # assert (err.msg == 'Invalid data format') + + result = conn.query("select * from st") + print(result.row_count) + all = result.rows_iter() + for row in all: + print(row) + result.close() + + conn.execute("drop database if exists %s" % dbname) + conn.close() + except InterfaceError as err: + conn.execute("drop database if exists %s" % dbname) + conn.close() + print(err) + except SchemalessError as err: + conn.execute("drop database if exists %s" % dbname) + conn.close() + print(err) + except Exception as err: + conn.execute("drop database if exists %s" % dbname) + conn.close() + print(err) + raise err + + +def test_schemaless_insert_raw_with_req_id(conn: TaosConnection) -> None: + dbname = "pytest_taos_schemaless_insert" + try: + conn.execute("drop database if exists %s" % dbname) + + if taos.IS_V3: + conn.execute("create database if not exists %s schemaless 1 precision 'ns'" % dbname) + else: + conn.execute("create database if not exists %s update 2 precision 'ns'" % dbname) + + conn.select_db(dbname) + + lines = '''st,t1=3i64,t2=4f64,t3="t3" c1=3i64,c3=L"passit",c2=false,c4=4f64 1626006833639000000 + st,t1=4i64,t3="t4",t2=5f64,t4=5f64 c1=3i64,c3=L"passitagin, abc",c2=true,c4=5f64,c5=5f64,c6=7u64 1626006933640000000 + stf,t1=4i64,t3="t4",t2=5f64,t4=5f64 c1=3i64,c3=L"passitagin_stf",c2=false,c5=5f64,c6=7u64 1626006933641000000''' + + req_id = utils.gen_req_id() + res = conn.schemaless_insert_raw(lines, 1, 0, req_id=req_id) + print("affected rows: ", res) + assert (res == 3) + + lines = '''stf,t1=5i64,t3="t4",t2=5f64,t4=5f64 c1=3i64,c3=L"passitagin_stf",c2=false,c5=5f64,c6=7u64 1626006933641000000''' + req_id = utils.gen_req_id() + res = conn.schemaless_insert_raw(lines, 1, 0, req_id=req_id) + print("affected rows: ", res) + assert (res == 1) + + result = conn.query("select * from st") + dict2 = result.fetch_all_into_dict() + print(dict2) + print(result.row_count) + + all = result.rows_iter() + for row in all: + print(row) + result.close() + assert (result.row_count == 2) + + # error test + lines = ''',t1=3i64,t2=4f64,t3="t3" c1=3i64,c3=L"passit",c2=false,c4=4f64 1626006833639000000''' + try: + req_id = utils.gen_req_id() + res = conn.schemaless_insert_raw(lines, 1, 0, req_id=req_id) + print(res) + # assert(False) + except SchemalessError as err: + print('**** error: ', err) + # assert (err.msg == 'Invalid data format') + + result = conn.query("select * from st") + print(result.row_count) + all = result.rows_iter() + for row in all: + print(row) + result.close() + + conn.execute("drop database if exists %s" % dbname) + conn.close() + except InterfaceError as err: + conn.execute("drop database if exists %s" % dbname) + conn.close() + print(err) + except Exception as err: + conn.execute("drop database if exists %s" % dbname) + conn.close() + print(err) + raise err + + +def test_schemaless_insert_raw_ttl(conn: TaosConnection) -> None: + dbname = "pytest_taos_schemaless_insert" + try: + conn.execute("drop database if exists %s" % dbname) + + if taos.IS_V3: + conn.execute("create database if not exists %s schemaless 1 precision 'ns'" % dbname) + else: + conn.execute("create database if not exists %s update 2 precision 'ns'" % dbname) + + conn.select_db(dbname) + + lines = '''st,t1=3i64,t2=4f64,t3="t3" c1=3i64,c3=L"passit",c2=false,c4=4f64 1626006833639000000 + st,t1=4i64,t3="t4",t2=5f64,t4=5f64 c1=3i64,c3=L"passitagin, abc",c2=true,c4=5f64,c5=5f64,c6=7u64 1626006933640000000 + stf,t1=4i64,t3="t4",t2=5f64,t4=5f64 c1=3i64,c3=L"passitagin_stf",c2=false,c5=5f64,c6=7u64 
1626006933641000000''' + + ttl = 1000 + res = conn.schemaless_insert_raw(lines, 1, 0, ttl=ttl) + print("affected rows: ", res) + assert (res == 3) + + lines = '''stf,t1=5i64,t3="t4",t2=5f64,t4=5f64 c1=3i64,c3=L"passitagin_stf",c2=false,c5=5f64,c6=7u64 1626006933641000000''' + ttl = 1000 + res = conn.schemaless_insert_raw(lines, 1, 0, ttl=ttl) + print("affected rows: ", res) + assert (res == 1) + + result = conn.query("select * from st") + dict2 = result.fetch_all_into_dict() + print(dict2) + print(result.row_count) + + all = result.rows_iter() + for row in all: + print(row) + result.close() + assert (result.row_count == 2) + + # error test + lines = ''',t1=3i64,t2=4f64,t3="t3" c1=3i64,c3=L"passit",c2=false,c4=4f64 1626006833639000000''' + try: + ttl = 1000 + res = conn.schemaless_insert_raw(lines, 1, 0, ttl=ttl) + print(res) + # assert(False) + except SchemalessError as err: + print('**** error: ', err) + # assert (err.msg == 'Invalid data format') + + result = conn.query("select * from st") + print(result.row_count) + all = result.rows_iter() + for row in all: + print(row) + result.close() + + conn.execute("drop database if exists %s" % dbname) + conn.close() + except InterfaceError as err: + conn.execute("drop database if exists %s" % dbname) + conn.close() + print(err) + except Exception as err: + conn.execute("drop database if exists %s" % dbname) + conn.close() + print(err) + raise err + + +def test_schemaless_insert_raw_ttl_with_req_id(conn: TaosConnection) -> None: + dbname = "pytest_taos_schemaless_insert" + try: + conn.execute("drop database if exists %s" % dbname) + + if taos.IS_V3: + conn.execute("create database if not exists %s schemaless 1 precision 'ns'" % dbname) + else: + conn.execute("create database if not exists %s update 2 precision 'ns'" % dbname) + + conn.select_db(dbname) + + lines = '''st,t1=3i64,t2=4f64,t3="t3" c1=3i64,c3=L"passit",c2=false,c4=4f64 1626006833639000000 + st,t1=4i64,t3="t4",t2=5f64,t4=5f64 c1=3i64,c3=L"passitagin, abc",c2=true,c4=5f64,c5=5f64,c6=7u64 1626006933640000000 + stf,t1=4i64,t3="t4",t2=5f64,t4=5f64 c1=3i64,c3=L"passitagin_stf",c2=false,c5=5f64,c6=7u64 1626006933641000000''' + + ttl = 1000 + req_id = utils.gen_req_id() + res = conn.schemaless_insert_raw(lines, 1, 0, ttl=ttl, req_id=req_id) + print("affected rows: ", res) + assert (res == 3) + + lines = '''stf,t1=5i64,t3="t4",t2=5f64,t4=5f64 c1=3i64,c3=L"passitagin_stf",c2=false,c5=5f64,c6=7u64 1626006933641000000''' + ttl = 1000 + req_id = utils.gen_req_id() + res = conn.schemaless_insert_raw(lines, 1, 0, ttl=ttl, req_id=req_id) + print("affected rows: ", res) + assert (res == 1) + + result = conn.query("select * from st") + dict2 = result.fetch_all_into_dict() + print(dict2) + print(result.row_count) + + all = result.rows_iter() + for row in all: + print(row) + result.close() + assert (result.row_count == 2) + + # error test + lines = ''',t1=3i64,t2=4f64,t3="t3" c1=3i64,c3=L"passit",c2=false,c4=4f64 1626006833639000000''' + try: + ttl = 1000 + req_id = utils.gen_req_id() + res = conn.schemaless_insert_raw(lines, 1, 0, ttl=ttl, req_id=req_id) + print(res) + # assert(False) + except SchemalessError as err: + print('**** error: ', err) + # assert (err.msg == 'Invalid data format') + + result = conn.query("select * from st") + print(result.row_count) + all = result.rows_iter() + for row in all: + print(row) + result.close() + + conn.execute("drop database if exists %s" % dbname) + conn.close() + except InterfaceError as err: + conn.execute("drop database if exists %s" % dbname) + conn.close() + 
print(err) + except Exception as err: + conn.execute("drop database if exists %s" % dbname) + conn.close() + print(err) + raise err + + +def test_schemaless_insert_with_req_id(conn): + # type: (TaosConnection) -> None + + dbname = "pytest_taos_schemaless_insert" + try: + conn.execute("drop database if exists %s" % dbname) + if taos.IS_V3: + conn.execute("create database if not exists %s schemaless 1 precision 'ns'" % dbname) + else: + conn.execute("create database if not exists %s update 2 precision 'ns'" % dbname) + + conn.select_db(dbname) + + lines = [ + 'st,t1=3i64,t2=4f64,t3="t3" c1=3i64,c3=L"passit",c2=false,c4=4f64 1626006833639000000', + 'st,t1=4i64,t3="t4",t2=5f64,t4=5f64 c1=3i64,c3=L"passitagin, abc",c2=true,c4=5f64,c5=5f64,c6=7u64 1626006933640000000', + 'stf,t1=4i64,t3="t4",t2=5f64,t4=5f64 c1=3i64,c3=L"passitagin_stf",c2=false,c5=5f64,c6=7u64 1626006933641000000', + ] + req_id = utils.gen_req_id() + res = conn.schemaless_insert(lines, 1, 0, req_id) + print("affected rows: ", res) + assert (res == 3) + + lines = [ + 'stf,t1=5i64,t3="t4",t2=5f64,t4=5f64 c1=3i64,c3=L"passitagin_stf",c2=false,c5=5f64,c6=7u64 1626006933641000000', + ] + req_id = utils.gen_req_id() + res = conn.schemaless_insert(lines, 1, 0, req_id) + print("affected rows: ", res) + assert (res == 1) + result = conn.query("select * from st") + + dict2 = result.fetch_all_into_dict() + print(dict2) + result.row_count + all = result.rows_iter() + for row in all: + print(row) + result.close() + assert (result.row_count == 2) + + # error test + lines = [ + ',t1=3i64,t2=4f64,t3="t3" c1=3i64,c3=L"passit",c2=false,c4=4f64 1626006833639000000', + ] + try: + req_id = utils.gen_req_id() + res = conn.schemaless_insert(lines, 1, 0, req_id) + print(res) + # assert(False) + except SchemalessError as e: + print(e) + + req_id = utils.gen_req_id() + result = conn.query("select * from st", req_id) + all = result.rows_iter() + for row in all: + print(row) + result.close() + + conn.execute("drop database if exists %s" % dbname) + conn.close() + except InterfaceError as err: + conn.execute("drop database if exists %s" % dbname) + conn.close() + print(err) + except Exception as err: + conn.execute("drop database if exists %s" % dbname) + conn.close() + print(err) + raise err diff --git a/tests/test_taos_cython/test_native_cursor.py b/tests/test_taos_cython/test_native_cursor.py new file mode 100644 index 00000000..3e5a109d --- /dev/null +++ b/tests/test_taos_cython/test_native_cursor.py @@ -0,0 +1,52 @@ +from taos._objects import TaosConnection + +env = { + 'user': "root", + 'password': "taosdata", + 'host': "localhost", + 'port': 6030, +} + + +def make_context(config): + db_protocol = config.get('db_protocol', 'taos') + db_user = config['user'] + db_pass = config['password'] + db_host = config['host'] + db_port = config['port'] + + db_url = f"{db_protocol}://{db_user}:{db_pass}@{db_host}:{db_port}" + print('dsn: ', db_url) + + conn = TaosConnection(**config) + + db_name = config.get('database', 'c_cursor') + + return conn, db_name + + +def test_cursor(): + conn, db_name = make_context(env) + + cursor = conn.cursor() + + cursor.execute(f"DROP DATABASE IF EXISTS {db_name}") + cursor.execute(f"CREATE DATABASE {db_name}") + cursor.execute(f"USE {db_name}") + + cursor.execute("CREATE STABLE weather(ts TIMESTAMP, temperature FLOAT) TAGS (location INT)") + + cursor.execute(f"INSERT INTO t1 USING weather TAGS(1) VALUES (now, 23.5) (now+100a, 23.5)") + + assert cursor.rowcount == 2 + + cursor.execute("SELECT tbname, ts, temperature, location FROM 
weather") + # rowcount can only get correct value after fetching all data + all_data = cursor.fetchall() + print(f"{'*' * 20} row count: {cursor.rowcount} {'*' * 20}") + for row in all_data: + print(row) + assert cursor.rowcount == 2 + conn.execute("DROP DATABASE IF EXISTS %s" % db_name) + cursor.close() + conn.close() diff --git a/tests/test_taos_cython/test_native_many.py b/tests/test_taos_cython/test_native_many.py new file mode 100644 index 00000000..80204674 --- /dev/null +++ b/tests/test_taos_cython/test_native_many.py @@ -0,0 +1,87 @@ +from taos._objects import TaosConnection + +env = { + 'user': "root", + 'password': "taosdata", + 'host': "localhost", + 'port': 6030, +} + + +def make_context(config): + db_protocol = config.get('db_protocol', 'taos') + db_user = config['user'] + db_pass = config['password'] + db_host = config['host'] + db_port = config['port'] + + db_url = f"{db_protocol}://{db_user}:{db_pass}@{db_host}:{db_port}" + print('dsn: ', db_url) + + conn = TaosConnection(**config) + + db_name = config.get('database', 'c_cursor') + + return conn, db_name + + +def test_cursor(): + conn, db_name = make_context(env) + + cur = conn.cursor() + + cur.execute(f"DROP DATABASE IF EXISTS {db_name}") + cur.execute(f"CREATE DATABASE {db_name}") + cur.execute(f"USE {db_name}") + + cur.execute("create stable stb (ts timestamp, v1 int) tags(t1 int)") + + create_table_data = [ + { + "name": "tb1", + "t1": 1, + }, + { + "name": "tb2", + "t1": 2, + }, + { + "name": "tb3", + "t1": 3, + } + ] + + res = cur.executemany( + "create table {name} using stb tags({t1})", + create_table_data, + ) + print(f"r: {res}") + assert res == 0 + + data = [ + ('2018-10-03 14:38:05.100', 219), + ('2018-10-03 14:38:15.300', 218), + ('2018-10-03 14:38:16.800', 221), + ] + + for table in create_table_data: + table_name = table['name'] + + res = cur.executemany( + f"insert into {table_name} values", + data, + ) + print(f"r: {res}") + assert res == 3 + + res = cur.execute('select * from stb') + print(f'res: {res}') + data = cur.fetchall() + column_names = [meta[0] for meta in cur.description] + print(column_names) + for r in data: + print(r) + + conn.execute("DROP DATABASE IF EXISTS %s" % db_name) + cur.close() + conn.close() diff --git a/tests/test_taos_cython/test_query.py b/tests/test_taos_cython/test_query.py new file mode 100644 index 00000000..255872c8 --- /dev/null +++ b/tests/test_taos_cython/test_query.py @@ -0,0 +1,87 @@ +from datetime import datetime +from taos import utils +from taos.error import InterfaceError +from taos._objects import TaosConnection +import taos +import pytest + +@pytest.fixture +def conn(): + return TaosConnection(host="localhost") + +def test_query(conn): + conn.execute("drop database if exists test_query_py") + conn.execute("create database if not exists test_query_py") + conn.execute("use test_query_py") + conn.execute("create table if not exists tb1 (ts timestamp, v int) tags(jt json)") + n = conn.execute("insert into tn1 using tb1 tags('{\"name\":\"value\"}') values(now, null) (now + 10s, 1)") + n = conn.execute("insert into tn1 using tb1 tags('{\"name\":\"value\"}') values(now, null) (now + 10s, 1)") + print("inserted %d rows" % n) + result = conn.query("select * from tb1") + fields = result.fields + for field in fields: + print("field: %s" % field) + + # test re-consume fields + flag = 0 + for _ in fields: + flag += 1 + assert flag == 3 + + start = datetime.now() + for row in result: + print(row) + None + + for row in result.rows_iter(): + print(row) + + result = 
conn.query("select * from tb1 limit 1") + results = result.fetch_all_into_dict() + print(results) + + end = datetime.now() + elapsed = end - start + print("elapsed time: ", elapsed) + result.close() + conn.execute("drop database if exists test_query_py") + conn.close() + + +def test_query_with_req_id(conn): + conn.execute("drop database if exists test_query_py") + conn.execute("create database if not exists test_query_py") + conn.execute("use test_query_py") + conn.execute("create table if not exists tb1 (ts timestamp, v int) tags(jt json)") + n = conn.execute("insert into tn1 using tb1 tags('{\"name\":\"value\"}') values(now, null) (now + 10s, 1)") + n = conn.execute("insert into tn1 using tb1 tags('{\"name\":\"value\"}') values(now, null) (now + 10s, 1)") + print("inserted %d rows" % n) + result = conn.query("select * from tb1") + fields = result.fields + for field in fields: + print("field: %s" % field) + + # test re-consume fields + flag = 0 + for _ in fields: + flag += 1 + assert flag == 3 + + start = datetime.now() + for row in result: + print(row) + None + + for row in result.rows_iter(): + print(row) + + result = conn.query("select * from tb1 limit 1", req_id=utils.gen_req_id()) + results = result.fetch_all_into_dict() + print(results) + + end = datetime.now() + elapsed = end - start + print("elapsed time: ", elapsed) + result.close() + conn.execute("drop database if exists test_query_py") + conn.close() diff --git a/tests/test_taos_cython/test_query_a.py b/tests/test_taos_cython/test_query_a.py new file mode 100644 index 00000000..bcf38596 --- /dev/null +++ b/tests/test_taos_cython/test_query_a.py @@ -0,0 +1,63 @@ +import asyncio +import time +from taos._objects import TaosConnection +from taos import utils +import pytest + +pytest_plugins = ('pytest_asyncio',) + + +@pytest.fixture +def conn(): + return TaosConnection(host="localhost") + +def print_all(fut: asyncio.Future): + result = fut.result() + print("result:", result) + print("fields:", result.fields) + # print("data:", result.fetch_all()) # NOT USE fetch_all directly right here + +@pytest.mark.asyncio +async def test_query_a(conn): + conn.execute("drop database if exists pytestquerya") + conn.execute("create database pytestquerya") + conn.execute("use pytestquerya") + cols = ["bool", "tinyint", "smallint", "int", "bigint", "tinyint unsigned", "smallint unsigned", "int unsigned", + "bigint unsigned", "float", "double", "binary(100)", "nchar(100)"] + s = ','.join("c%d %s" % (i, t) for i, t in enumerate(cols)) + print(s) + conn.execute("create table tb1(ts timestamp, %s)" % s) + for _ in range(100): + s = ','.join('null' for c in cols) + conn.execute("insert into tb1 values(now, %s)" % s) + + # async query, async iter row + result = await conn.query_a("select * from tb1") + print("result:", result) + async for row in result: + print("row:", row) + + # async query, add callback, async fetch all data + query_task = asyncio.create_task(conn.query_a("select * from tb1")) + query_task.add_done_callback(print_all) + res = await query_task + data = await res.fetch_all_a() + print(data) + + # async query with req_id, async iter row + result = await conn.query_a("select * from tb1", req_id=utils.gen_req_id()) + print("result:", result) + async for row in result: + print("row:", row) + + + # async query with req_id, add callback, async fetch all data + query_task = asyncio.create_task(conn.query_a("select * from tb1", req_id=utils.gen_req_id())) + query_task.add_done_callback(print_all) + res = await query_task + data = await 
res.fetch_all_a() + print(data) + + conn.execute("drop database if exists pytestquerya") + conn.close() +
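
For reference, a minimal usage sketch of the async query API exercised by test_query_a.py above. This is illustrative only, not part of the diff; it assumes a TDengine server is reachable on localhost and uses the query_a / fetch_all_a coroutines that the tests call on TaosConnection and its result object.

import asyncio

from taos._objects import TaosConnection


async def main():
    conn = TaosConnection(host="localhost")
    # query_a awaits query completion without blocking the event loop
    result = await conn.query_a("show databases")
    # fetch all rows asynchronously; iterating with `async for row in result` also works
    data = await result.fetch_all_a()
    print(data)
    conn.close()


asyncio.run(main())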