Benchmark: ClickHouse vs. InfluxDB vs. PostgreSQL vs. Parquet


How to use:

  • Rename the file "properties-model.ini" to "properties.ini"
  • Fill it in with your own credentials (see the sketch after this list)
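
A minimal sketch of what properties.ini is expected to contain, inferred from the keys read later in the notebook; all values are placeholders:

[CLICKHOUSE]
user = <user>
key = <password>
url = <host>

[INFLUXDB]
user = <org>
key = <token>
url = <url>
bucket = <bucket>

[POSTGRESQL]
user = <user>
key = <password>
url = <host>
database = <database>

[S3MINIO]
user = <access key>
key = <secret key>
url = <host:port>
region = <region>

[MONGODB]
user = <user>
key = <password>
url = <host>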

The goal of this work is to compare read/write speed for a medium-sized dataset (9 columns and 50,000 rows) across four different databases:

  • ClickHouse
  • InfluxDB
  • PostgreSQL
  • Parquet (in an S3 MinIO storage)

ToDo:
  • DuckDB with Polars
  • MongoDB
  • Kdb+

One point to consider: is this "cold storage" or "freeze storage"?
InfluxDB: high read throughput, with the advantage of indexing for visualizing data in charts.

Notes:

  • compare the CSV file size with the Parquet file size (see the sketch after this list)
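
A quick way to make that comparison once both files exist (a sketch; it assumes the paths out.csv and data/data.parquet used later in the notebook):

import os

csv_bytes = os.path.getsize("out.csv")
parquet_bytes = os.path.getsize("data/data.parquet")
print(f"CSV: {csv_bytes / 1024:.1f} KiB | Parquet: {parquet_bytes / 1024:.1f} KiB "
      f"(ratio: {csv_bytes / parquet_bytes:.1f}x)")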

Imports

In [68]:
import configparser
import io  # used later for the PostgreSQL COPY-based write
from datetime import datetime

import duckdb
import influxdb_client
import pandas as pd

# import pymongo
from clickhouse_driver import Client
from dotenv import load_dotenv
from minio import Minio
from minio.error import S3Error  # used later in the S3 Parquet upload
from pymongo import MongoClient
from pytz import timezone
from sqlalchemy import create_engine

load_dotenv()
Out[68]:
False
In [ ]:
# Variables
dbname = "EURUSDtest"
In [ ]:
arq = configparser.RawConfigParser()
arq.read("properties.ini")
ClickHouseUser = arq.get("CLICKHOUSE", "user")
ClickHouseKey = arq.get("CLICKHOUSE", "key")
ClickHouseUrl = arq.get("CLICKHOUSE", "url")

InfluxDBUser = arq.get("INFLUXDB", "user")
InfluxDBKey = arq.get("INFLUXDB", "key")
InfluxDBUrl = arq.get("INFLUXDB", "url")
InfluxDBBucket = arq.get("INFLUXDB", "bucket")

PostgresqlUser = arq.get("POSTGRESQL", "user")
PostgresqlKey = arq.get("POSTGRESQL", "key")
PostgresqlUrl = arq.get("POSTGRESQL", "url")
PostgresqlDB = arq.get("POSTGRESQL", "database")

S3MinioUser = arq.get("S3MINIO", "user")
S3MinioKey = arq.get("S3MINIO", "key")
S3MinioUrl = arq.get("S3MINIO", "url")
S3MinioRegion = arq.get("S3MINIO", "region")

MongoUser = arq.get("MONGODB", "user")
MongoKey = arq.get("MONGODB", "key")
MongoUrl = arq.get("MONGODB", "url")
In [ ]:
%%time
# Load Dataset
df = pd.read_csv("out.csv", index_col=0)
In [ ]:
# df.head()
In [ ]:
df["from"] = pd.to_datetime(df["from"], unit="s")
df["to"] = pd.to_datetime(df["to"], unit="s")
# Transform the Unix timestamps into datetimes
# (optional: only needed when the columns are not transformed yet)

Functions

-> Class

In [ ]:
def timestamp2dataHora(x, timezone_="America/Sao_Paulo"):
    # Convert a Unix timestamp (in seconds) to a timezone-aware datetime
    d = datetime.fromtimestamp(x, tz=timezone(timezone_))
    return d
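
A usage sketch (the timestamp below is the `at` value of the first row shown in the Kdb+ output, converted from nanoseconds to seconds):

timestamp2dataHora(1672675140)
# should yield 2023-01-02 12:59:00-03:00 (America/Sao_Paulo)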

ClickHouse

In [ ]:
# !! The official client uses an HTTP driver; in this example we use the
# recommended third-party clickhouse_driver library, which uses TCP instead.
client = Client(
    host=ClickHouseUrl,
    user=ClickHouseUser,
    password=ClickHouseKey,
    settings={"use_numpy": True},
)
In [ ]:
# Create Tables in ClickHouse
# !! ADJUST TYPES !!
# ENGINE: 'Memory' data disappears when the server is restarted
client.execute(
    "CREATE TABLE IF NOT EXISTS {} (id UInt32,"
    "from DateTime, at UInt64, to DateTime, open Float64,"
    "close Float64, min Float64, max  Float64, volume UInt32)"
    "ENGINE MergeTree ORDER BY to".format(dbname)
)
In [ ]:
%%time
# Write dataframe to db
client.insert_dataframe("INSERT INTO {} VALUES".format(dbname), df)
In [ ]:
%%time
client.query_dataframe("SELECT * FROM default.{}".format(dbname))  # LIMIT 10000
In [ ]:
# %%time
# df = pd.DataFrame(client.query_dataframe("SELECT * FROM default.{}".format(dbname)))

InfluxDB

In [ ]:
client = influxdb_client.InfluxDBClient(
    url=InfluxDBUrl, token=InfluxDBKey, org=InfluxDBUser
)
In [ ]:
# Read data from CSV without an index and parse the 'from' column as dates.
df = pd.read_csv("out.csv", sep=",", index_col=False, parse_dates=["from"])
# Set the 'from' column as the dataframe index  # TODO: test other indexes
df.set_index("from", inplace=True)
In [ ]:
df.head()
In [ ]:
%%time
# writing... it took a while, but it worked
with client.write_api() as writer:
    writer.write(
        bucket=InfluxDBBucket,
        record=df,
        data_frame_measurement_name="id",
        data_frame_tag_columns=["volume"],
    )
In [ ]:
# data
#   |> pivot(
#     rowKey:["_time"],
#     columnKey: ["_field"],
#     valueColumn: "_value"
#   )
In [ ]:
# Read
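
A minimal read-back sketch (an assumption, not part of the original notebook: it queries the same bucket and pivots fields back into columns, mirroring the commented Flux snippet above):

query = (
    'from(bucket: "{}") |> range(start: 0) '
    '|> pivot(rowKey: ["_time"], columnKey: ["_field"], valueColumn: "_value")'
).format(InfluxDBBucket)
df_influx = client.query_api().query_data_frame(org=InfluxDBUser, query=query)
df_influx.head()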

Postgresql

In [ ]:
# Connect / Create Tables
engine = create_engine(
    "postgresql+psycopg2://{}:{}@{}:5432/{}".format(
        PostgresqlUser, PostgresqlKey, PostgresqlUrl, PostgresqlDB
    )
)
In [ ]:
# Drop old table and create new empty table
df.head(0).to_sql("comparedbs", engine, if_exists="replace", index=False)
In [ ]:
%%time
# Write
conn = engine.raw_connection()
cur = conn.cursor()
output = io.StringIO()
df.to_csv(output, sep="\t", header=False, index=False)
output.seek(0)

cur.copy_from(output, "comparedbs")  # optionally: null="" so NULL values become ''
conn.commit()
cur.close()
conn.close()
In [ ]:
# Read
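
A read-back sketch (an assumption, not in the original notebook; it reuses the SQLAlchemy engine created above):

df_pg = pd.read_sql("SELECT * FROM comparedbs", engine)
df_pg.head()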

S3 Parquet

In [ ]:
# try without a function to see if performance improves
# make sure the files in the git folder are on the SSD
def main():
    client = Minio(
        S3MinioUrl,
        secure=False,
        region=S3MinioRegion,
        access_key=S3MinioUser,
        secret_key=S3MinioKey,
    )

    # Make the bucket if it does not exist.
    found = client.bucket_exists("data")
    if not found:
        client.make_bucket("data")
    else:
        print("Bucket 'data' already exists")

    # Upload
    client.fput_object(
        "data",
        "data.parquet",
        "data/data.parquet",
    )
    # print(
    #     "'data/data.parquet' is successfully uploaded as "
    #     "object 'data.parquet' to bucket 'data'."
    # )
In [ ]:
%%time
df.to_parquet("data/data.parquet")
if __name__ == "__main__":
    try:
        main()
    except S3Error as exc:
        print("error occurred.", exc)
In [ ]:
pq = pd.read_parquet("data/data.parquet", engine="pyarrow")
pq.head()
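
Reading the object back from MinIO rather than from the local file would look roughly like this (a sketch, not in the original notebook; it assumes the same client configuration and a hypothetical local path data/downloaded.parquet):

client = Minio(
    S3MinioUrl,
    secure=False,
    region=S3MinioRegion,
    access_key=S3MinioUser,
    secret_key=S3MinioKey,
)
# Download the object written by main() and load it with pandas
client.fget_object("data", "data.parquet", "data/downloaded.parquet")
pd.read_parquet("data/downloaded.parquet", engine="pyarrow").head()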

MongoDB

In [ ]:
# Load csv dataset
data = pd.read_csv("out.csv")
In [ ]:
# Connect to MongoDB
client = MongoClient(
    # "mongodb://192.168.1.133:27017"
    "mongodb://{}:{}@{}/EURUSDtest?retryWrites=true&w=majority".format(
        MongoUser, MongoKey, MongoUrl
    ),
    authSource="admin",
)
In [ ]:
db = client["EURUSDtest"]
collection = db["finance"]
# data.reset_index(inplace=True)
data_dict = data.to_dict("records")
In [ ]:
%%time
# Insert collection
collection.insert_many(data_dict)
In [ ]:
# read
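
A read-back sketch (an assumption, not in the original notebook; the projection drops Mongo's _id field so the frame matches the original columns):

df_mongo = pd.DataFrame(list(collection.find({}, {"_id": 0})))
df_mongo.head()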

DuckDB

In [ ]:
cursor = duckdb.connect()
print(cursor.execute("SELECT 42").fetchall())
In [ ]:
%%time
conn = duckdb.connect()
data = pd.read_csv("out.csv")
conn.register("EURUSDtest", data)
In [ ]:
display(conn.execute("SHOW TABLES").df())
In [ ]:
%%time
df = conn.execute("SELECT * FROM EURUSDtest").df()
df
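
An alternative worth timing (a sketch, not in the original notebook): let DuckDB scan the CSV directly with its built-in read_csv_auto instead of registering a pandas frame first.

conn2 = duckdb.connect()
# DuckDB reads and type-infers the CSV itself, skipping the pandas round trip
df_duck = conn2.execute("SELECT * FROM read_csv_auto('out.csv')").df()
df_duck.head()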

Kdb+

In [69]:
import numpy as np

# Compatibility shim: qpython still references np.bool, which newer NumPy removed
np.bool = np.bool_
from qpython import qconnection
In [70]:
# read csv
data = pd.read_csv("out.csv")
In [71]:
# open connection
q = qconnection.QConnection(host="localhost", port=5001)
q.open()
In [75]:
%%time
# send df to kdb+ as the in-memory table t
q.sendSync("{t::x}", data)
CPU times: user 925 ms, sys: 40 ms, total: 965 ms
Wall time: 1.43 s
In [76]:
# write t to an on-disk table
q.sendSync("`:/home/sandman/q/tab1 set t")
Out[76]:
b':/home/sandman/q/tab1'
In [77]:
%%time
# read the on-disk table back (into tab2 on the server)
df2 = q.sendSync("tab2: get `:/home/sandman/q/tab1")
CPU times: user 1.94 ms, sys: 1 µs, total: 1.94 ms
Wall time: 426 ms
In [78]:
# print(df2)
In [79]:
%%time
# load to variable df2
df2 = q.sendSync("tab2")
CPU times: user 1.08 s, sys: 116 ms, total: 1.2 s
Wall time: 1.27 s
In [80]:
# df2(type)
In [82]:
%%time
# convert to dataframe
df = pd.DataFrame(q("t"))  # , pandas=True))
df.head()
CPU times: user 1.25 s, sys: 132 ms, total: 1.39 s
Wall time: 1.46 s
Out[82]:
Unnamed: 0 id from at to open close min max volume
0 0 7730801 b'2023-01-02 15:58:45' 1672675140000000000 b'2023-01-02 15:59:00' 1.065995 1.066035 1.065930 1.066070 57
1 1 7730802 b'2023-01-02 15:59:00' 1672675155000000000 b'2023-01-02 15:59:15' 1.066055 1.066085 1.066005 1.066115 52
2 2 7730803 b'2023-01-02 15:59:15' 1672675170000000000 b'2023-01-02 15:59:30' 1.066080 1.066025 1.066025 1.066110 57
3 3 7730804 b'2023-01-02 15:59:30' 1672675185000000000 b'2023-01-02 15:59:45' 1.065980 1.065985 1.065885 1.066045 64
4 4 7730805 b'2023-01-02 15:59:45' 1672675200000000000 b'2023-01-02 16:00:00' 1.065975 1.066055 1.065830 1.066055 50
In [83]:
%%time
# select
df3 = q.sendSync("select from t")
CPU times: user 1.11 s, sys: 116 ms, total: 1.23 s
Wall time: 1.3 s
In [84]:
q.close()