Benchmark: ClickHouse Vs. InfluxDB Vs. Postgresql Vs. Parquet


How to use:

  • Rename the file "properties-model.ini" to "properties.ini"
  • Fill with your own credentials
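
For reference, a skeleton of properties.ini matching the keys read in the configuration cell below (all values are placeholders):

[CLICKHOUSE]
user = ...
key = ...
url = ...

[INFLUXDB]
user = ...
key = ...
url = ...
bucket = ...

[POSTGRESQL]
user = ...
key = ...
url = ...
database = ...

[S3MINIO]
user = ...
key = ...
url = ...
region = ...

[MONGODB]
user = ...
key = ...
url = ...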

The goal of this work is to compare read/write speed for a medium-sized dataset (9 columns, 50,000 rows) across four different databases:

  • ClickHouse
  • InfluxDB
  • Postgresql
  • Parquet (in an S3 MinIO storage)

ToDo:
  • DuckDB with Polars
  • MongoDB
  • Kdb+

Worth weighing: is this a "cold storage" or "freeze storage" use case?
InfluxDB: fast reads, plus the advantage of indexing for visualizing data in charts.

Notes:

  • compare the CSV size with the Parquet size (see the sketch below)
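
A quick sketch for that note, assuming the file paths used elsewhere in this notebook (out.csv and data/data.parquet):

In [ ]:
import os

# Compare on-disk sizes of the CSV source and the Parquet copy
print("csv:    ", os.path.getsize("out.csv"), "bytes")
print("parquet:", os.path.getsize("data/data.parquet"), "bytes")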

Imports

In [1]:
import configparser
import io
import timeit
from datetime import datetime

import duckdb
import influxdb_client
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from clickhouse_driver import Client
from dotenv import load_dotenv
from minio import Minio
from pymongo import MongoClient
from pytz import timezone
from sqlalchemy import create_engine

load_dotenv()
Out[1]:
False
In [2]:
# Variables
dbname = "EURUSDtest"
In [3]:
arq = configparser.RawConfigParser()
arq.read("properties.ini")
ClickHouseUser = arq.get("CLICKHOUSE", "user")
ClickHouseKey = arq.get("CLICKHOUSE", "key")
ClickHouseUrl = arq.get("CLICKHOUSE", "url")

InfluxDBUser = arq.get("INFLUXDB", "user")
InfluxDBKey = arq.get("INFLUXDB", "key")
InfluxDBUrl = arq.get("INFLUXDB", "url")
InfluxDBBucket = arq.get("INFLUXDB", "bucket")

PostgresqlUser = arq.get("POSTGRESQL", "user")
PostgresqlKey = arq.get("POSTGRESQL", "key")
PostgresqlUrl = arq.get("POSTGRESQL", "url")
PostgresqlDB = arq.get("POSTGRESQL", "database")

S3MinioUser = arq.get("S3MINIO", "user")
S3MinioKey = arq.get("S3MINIO", "key")
S3MinioUrl = arq.get("S3MINIO", "url")
S3MinioRegion = arq.get("S3MINIO", "region")

MongoUser = arq.get("MONGODB", "user")
MongoKey = arq.get("MONGODB", "key")
MongoUrl = arq.get("MONGODB", "url")
In [5]:
# %%time
# Load Dataset
df = pd.read_csv("out.csv", index_col=0)
df.tail()
Out[5]:
id from at to open close min max volume
999995 7984748 2023-03-03 18:13:30 1677867225000000000 2023-03-03 18:13:45 1.062695 1.062635 1.062630 1.062700 64
999996 7984749 2023-03-03 18:13:45 1677867240000000000 2023-03-03 18:14:00 1.062645 1.062650 1.062625 1.062650 43
999997 7984750 2023-03-03 18:14:00 1677867255000000000 2023-03-03 18:14:15 1.062640 1.062625 1.062620 1.062665 47
999998 7984751 2023-03-03 18:14:15 1677867270000000000 2023-03-03 18:14:30 1.062625 1.062535 1.062535 1.062645 43
999999 7984752 2023-03-03 18:14:30 1677867285000000000 2023-03-03 18:14:45 1.062535 1.062520 1.062520 1.062580 59
In [ ]:
df["from"] = pd.to_datetime(df["from"], unit="s")
df["to"] = pd.to_datetime(df["to"], unit="s")
# Optional use when not transoformed yet
# Transform Datetime

Functions

-> Class

In [6]:
def timestamp2dataHora(x, timezone_="America/Sao_Paulo"):
    d = datetime.fromtimestamp(x, tz=timezone(timezone_))
    return d
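
Hypothetical usage: the sample 'at' values are nanosecond epochs, so divide by 1e9 to get the seconds that datetime.fromtimestamp expects:

In [ ]:
# convert one ns-epoch 'at' value to São Paulo local time
timestamp2dataHora(1677867225000000000 / 1e9)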

ClickHouse

In [20]:
# !! Uses the native TCP driver.
def cHouseConnect():
    client = Client(
        host=ClickHouseUrl,
        user=ClickHouseUser,
        password=ClickHouseKey,
        settings={"use_numpy": True},
    )
    return client


# Create table in ClickHouse
# !! ADJUST TYPES !!
# Note: the 'Memory' ENGINE is lost when the server restarts; MergeTree persists
def cHouseCreateDb(databasename):
    client = cHouseConnect()
    client.execute(
        "CREATE TABLE IF NOT EXISTS {} (id UInt32,"
        "from DateTime, at UInt64, to DateTime, open Float64,"
        "close Float64, min Float64, max  Float64, volume UInt32)"
        "ENGINE MergeTree ORDER BY to".format(databasename)
    )
    client.disconnect()
    return "Database created"


# Write dataframe to db
def cHouseInsertDf(dbName, dataframe):
    client = cHouseConnect()
    client.insert_dataframe("INSERT INTO {} VALUES".format(dbName), dataframe)
    client.disconnect()
    return " dataframe {} inserted in clickhouse database".format(dataframe)


def cHouseQueryDf(databaseName):
    client = cHouseConnect()
    dfQuery = client.query_dataframe(
        "SELECT * FROM default.{}".format(databaseName)
    )  # LIMIT 10000
    client.disconnect()
    return dfQuery


cHouseCreateDb(dbname)
Out[20]:
'Database created'
In [21]:
# Insert to db and benchmark time
start = timeit.default_timer()
cHouseInsertDf(dbname, df)
stop = timeit.default_timer()
cHouse_write_execution_time = stop - start
In [22]:
# read from db and benchmark time
start = timeit.default_timer()
dfCh = cHouseQueryDf(dbname)
stop = timeit.default_timer()
cHouse_read_execution_time = stop - start
In [23]:
dfCh.tail()
Out[23]:
id from at to open close min max volume
2999995 8230798 2023-05-03 10:35:00 1683110115000000000 2023-05-03 10:35:15 1.103340 1.10330 1.103275 1.103340 61
2999996 8230799 2023-05-03 10:35:15 1683110130000000000 2023-05-03 10:35:30 1.103300 1.10341 1.103300 1.103410 44
2999997 8230799 2023-05-03 10:35:15 1683110130000000000 2023-05-03 10:35:30 1.103300 1.10341 1.103300 1.103410 44
2999998 8230800 2023-05-03 10:35:30 1683110145000000000 2023-05-03 10:35:45 1.103415 1.10351 1.103385 1.103515 51
2999999 8230800 2023-05-03 10:35:30 1683110145000000000 2023-05-03 10:35:45 1.103415 1.10351 1.103385 1.103515 51
In [26]:
print(cHouse_read_execution_time)
10.64297915700081
In [27]:
print(cHouse_write_execution_time)
6.190685558998666
In [28]:
%%time
dfCh = cHouseQueryDf(dbname)
CPU times: user 2.21 s, sys: 383 ms, total: 2.6 s
Wall time: 10.7 s

InfluxDB

In [ ]:
client = influxdb_client.InfluxDBClient(
    url=InfluxDBUrl, token=InfluxDBKey, org=InfluxDBUser
)
In [ ]:
# Read data from CSV without index and parse 'from' as a date
df = pd.read_csv("out.csv", sep=",", index_col=False, parse_dates=["from"])
# Set the 'from' field as the dataframe index  # TODO: test other indexes
df.set_index("from", inplace=True)
In [ ]:
df.head()
In [ ]:
%%time
# writing... it took a while, but it worked
with client.write_api() as writer:
    writer.write(
        bucket=InfluxDBBucket,
        record=df,
        data_frame_measurement_name="id",
        data_frame_tag_columns=["volume"],
    )
In [ ]:
# data
#   |> pivot(
#     rowKey:["_time"],
#     columnKey: ["_field"],
#     valueColumn: "_value"
#   )
In [ ]:
# Read: see the sketch below
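
A minimal read sketch, assuming the same client, bucket, and org as the write above. The range(start: 0) window, the pivot (taken from the commented Flux above), and the variable names are assumptions, mirroring the other benchmarks in this notebook:

In [ ]:
# Time a full read of the bucket back into a dataframe
query_api = client.query_api()
flux = (
    'from(bucket: "{}")'
    " |> range(start: 0)"
    ' |> pivot(rowKey: ["_time"], columnKey: ["_field"], valueColumn: "_value")'
).format(InfluxDBBucket)
start = timeit.default_timer()
dfInflux = query_api.query_data_frame(flux)
stop = timeit.default_timer()
influx_read_execution_time = stop - start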

Postgresql

In [24]:
# Connect / Create Tables
def psqlConnect():
    engine = create_engine(
        "postgresql+psycopg2://{}:{}@{}:5432/{}".format(
            PostgresqlUser, PostgresqlKey, PostgresqlUrl, PostgresqlDB
        )
    )
    return engine


psqlConnect()
# test the function
Out[24]:
Engine(postgresql+psycopg2://postgres:***@192.168.1.133:5432/postgres)
In [25]:
# Drop the old table, create a new empty one, then bulk-load via COPY
def psqlCreateTables(databaseName):
    engine = psqlConnect()
    df.head(0).to_sql(databaseName, engine, if_exists="replace", index=False)
    # Write
    conn = engine.raw_connection()
    cur = conn.cursor()
    output = io.StringIO()
    df.to_csv(output, sep="\t", header=False, index=False)
    output.seek(0)
    # copy_expert quotes the table name, so mixed-case names like EURUSDtest work
    cur.copy_expert('COPY "{}" FROM STDIN'.format(databaseName), output)
    conn.commit()
    cur.close()
    conn.close()
    return 0


# Read-SQL function
def psqlReadTables(databaseName):
    engine = psqlConnect()
    df = pd.read_sql_query('select * from "{}"'.format(databaseName), con=engine)
    return df


# test the function
In [26]:
# Insert to db and benchmark time
start = timeit.default_timer()
psqlCreateTables(dbname)
stop = timeit.default_timer()
psql_write_execution_time = stop - start
In [27]:
start = timeit.default_timer()
psqlReadTables(dbname)
stop = timeit.default_timer()
psql_read_execution_time = stop - start
In [29]:
print(psql_read_execution_time)
126.40752380799677
In [ ]:
# df.head()

S3 Parquet

In [15]:
# try without the function wrapper to see if it improves
# make sure the files in the git folder are on the SSD
def s3Connect():
    client = Minio(
        S3MinioUrl,
        secure=False,
        region=S3MinioRegion,
        access_key=S3MinioUser,
        secret_key=S3MinioKey,
    )
    return client


def s3CreateBucket(bucketName="data"):
    client = s3Connect()
    found = client.bucket_exists(bucketName)
    if not found:
        return client.make_bucket(bucketName)
    else:
        return "Bucket '{}' already exists".format(bucketName)


def s3UploadParquet():
    client = s3Connect()
    client.fput_object(
        "data",
        "data.parquet",
        "data/data.parquet",
    )
    return (
        "'data/data.parquet' is successfully uploaded as "
        "object 'data.parquet' to bucket 'data'."
    )
In [13]:
# Write Parquet locally, then upload to S3 and benchmark the time
df.to_parquet("data/data.parquet")
s3CreateBucket()
start = timeit.default_timer()
s3UploadParquet()
stop = timeit.default_timer()
s3_write_execution_time = stop - start
In [ ]:
# read (S3 object back to df) still missing: see the sketch below
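
A sketch for that missing step, assuming the bucket and object names used in s3UploadParquet above: it downloads the object from MinIO and parses it in memory (the benchmark cell below this one times a read of the local Parquet file instead):

In [ ]:
def s3ReadParquet():
    client = s3Connect()
    response = client.get_object("data", "data.parquet")
    try:
        # parse the downloaded bytes without writing a temp file
        return pd.read_parquet(io.BytesIO(response.read()))
    finally:
        response.close()
        response.release_conn()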
In [14]:
print(s3_write_execution_time)
3.8389689489995362
In [16]:
start = timeit.default_timer()
pq = pd.read_parquet("data/data.parquet", engine="pyarrow")
stop = timeit.default_timer()
s3_read_execution_time = stop - start
In [17]:
pq.head()
Out[17]:
id from at to open close min max volume
0 7730801 2023-01-02 15:58:45 1672675140000000000 2023-01-02 15:59:00 1.065995 1.066035 1.065930 1.066070 57
1 7730802 2023-01-02 15:59:00 1672675155000000000 2023-01-02 15:59:15 1.066055 1.066085 1.066005 1.066115 52
2 7730803 2023-01-02 15:59:15 1672675170000000000 2023-01-02 15:59:30 1.066080 1.066025 1.066025 1.066110 57
3 7730804 2023-01-02 15:59:30 1672675185000000000 2023-01-02 15:59:45 1.065980 1.065985 1.065885 1.066045 64
4 7730805 2023-01-02 15:59:45 1672675200000000000 2023-01-02 16:00:00 1.065975 1.066055 1.065830 1.066055 50
In [18]:
print(s3_read_execution_time)
0.5066086639999412

MongoDB

In [ ]:
# Load csv dataset
data = pd.read_csv("out.csv")
In [ ]:
# Connect to MongoDB
client = MongoClient(
    # "mongodb://192.168.1.133:27017"
    "mongodb://{}:{}@{}/EURUSDtest?retryWrites=true&w=majority".format(
        MongoUser, MongoKey, MongoUrl
    ),
    authSource="admin",
)
In [ ]:
db = client["EUROUSDtest"]
collection = db["finance"]
# data.reset_index(inplace=True)
data_dict = data.to_dict("records")
In [ ]:
%%time
# Insert collection
collection.insert_many(data_dict)
In [ ]:
# read: see the sketch below
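
A minimal read sketch for the same collection as above, projecting out Mongo's internal _id field so the dataframe matches the inserted records; the timing variable name is an assumption, mirroring the others in this notebook:

In [ ]:
# pull all documents and rebuild a dataframe, benchmarking the time
start = timeit.default_timer()
dfMongo = pd.DataFrame(list(collection.find({}, {"_id": 0})))
stop = timeit.default_timer()
mongo_read_execution_time = stop - start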

DuckDB

In [ ]:
cursor = duckdb.connect()
print(cursor.execute("SELECT 42").fetchall())
In [ ]:
%%time
conn = duckdb.connect()
data = pd.read_csv("out.csv")
conn.register("EURUSDtest", data)
In [ ]:
display(conn.execute("SHOW TABLES").df())
In [ ]:
%%time
df = conn.execute("SELECT * FROM EURUSDtest").df()
df

Kdb+

In [ ]:
# qpython still references the removed np.bool alias; restore it before importing
np.bool = np.bool_
from qpython import qconnection
In [ ]:
# read csv
data = pd.read_csv("out.csv")
In [ ]:
# open connection
q = qconnection.QConnection(host="localhost", port=5001)
q.open()
In [ ]:
%%time
# send df to kdb+ as the in-memory table t
q.sendSync("{t::x}", data)
In [ ]:
# write t to an on-disk table
q.sendSync("`:/home/sandman/q/tab1 set t")
In [ ]:
%%time
# read the on-disk table back into tab2
df2 = q.sendSync("tab2: get `:/home/sandman/q/tab1")
In [ ]:
# print(df2)
In [ ]:
%%time
# load table tab2 into variable df2
df2 = q.sendSync("tab2")
In [ ]:
# df2(type)
In [ ]:
%%time
# convert to dataframe
df = pd.DataFrame(q("t"))  # , pandas=True))
df.head()
In [ ]:
%%time
# select
df3 = q.sendSync("select from t")
In [ ]:
q.close()

Graph

In [30]:
x = np.arange(3)  # change here
width = 0.40
y1 = [
    cHouse_read_execution_time,
    psql_read_execution_time,
    s3_read_execution_time,
]  # change here
y2 = [
    cHouse_write_execution_time,
    psql_write_execution_time,
    s3_write_execution_time,
]  # change here
plt.bar(x - 0.2, y1, width)
plt.bar(x + 0.2, y2, width)
plt.xticks(x, ["Click House", "Postgresql", "S3 Parquet"])
plt.xlabel("Databases")
plt.ylabel("Seconds")
plt.legend(["Read", "Write"])  # check
plt.show()