Coproc performance tests #8

Open · wants to merge 9 commits into base: new-dev
6 changes: 6 additions & 0 deletions coproc-test/cluster/redpanda-node-0/prepare.sh
@@ -0,0 +1,6 @@
#!/bin/bash

rm -rf data coredump
mkdir data coredump

sed -i'' "s@STORAGE_PATH@$(pwd)@" redpanda.yaml
42 changes: 42 additions & 0 deletions coproc-test/cluster/redpanda-node-0/redpanda.yaml
@@ -0,0 +1,42 @@
organization: "vectorized"
cluster_id: "cluster0"

redpanda:
  developer_mode: true
  data_directory: "STORAGE_PATH/data"
  node_id: 0
  rpc_server:
    address: "0.0.0.0"
    port: 33145
  kafka_api:
    address: "0.0.0.0"
    port: 9092
  admin:
    address: "0.0.0.0"
    port: 9644
  coproc_supervisor_server:
    address: "0.0.0.0"
    port: 43189

  auto_create_topics_enabled: true
  default_topic_partitions: 1
  default_topic_replications: 3
  enable_idempotence: true
  enable_transactions: true
  enable_coproc: true


rpk:
  enable_usage_stats: false
  tune_network: false
  tune_disk_scheduler: false
  tune_disk_nomerges: false
  tune_disk_irq: false
  tune_fstrim: false
  tune_cpu: false
  tune_aio_events: false
  tune_clocksource: false
  tune_swappiness: false
  enable_memory_locking: false
  tune_coredump: false
  coredump_dir: "STORAGE_PATH/coredump"
3 changes: 3 additions & 0 deletions coproc-test/cluster/redpanda-node-0/start.sh
@@ -0,0 +1,3 @@
#!/bin/bash

../../../vbuild/release/clang/bin/redpanda --redpanda-cfg redpanda.yaml
6 changes: 6 additions & 0 deletions coproc-test/cluster/redpanda-node-1/prepare.sh
@@ -0,0 +1,6 @@
#!/bin/bash

rm -rf data coredump
mkdir data coredump

sed -i'' "s@STORAGE_PATH@$(pwd)@" redpanda.yaml
48 changes: 48 additions & 0 deletions coproc-test/cluster/redpanda-node-1/redpanda.yaml
@@ -0,0 +1,48 @@
organization: "vectorized"
cluster_id: "cluster0"

redpanda:
  developer_mode: true
  data_directory: "STORAGE_PATH/data"
  node_id: 1
  rpc_server:
    address: "0.0.0.0"
    port: 33146
  kafka_api:
    address: "0.0.0.0"
    port: 9093
  admin:
    address: "0.0.0.0"
    port: 9645
  coproc_supervisor_server:
    address: "0.0.0.0"
    port: 43190


  auto_create_topics_enabled: true
  default_topic_partitions: 1
  default_topic_replications: 3
  enable_idempotence: true
  enable_transactions: true
  enable_coproc: true

  seed_servers:
    - host:
        address: "127.0.0.1"
        port: 33145
      node_id: 0

rpk:
  enable_usage_stats: false
  tune_network: false
  tune_disk_scheduler: false
  tune_disk_nomerges: false
  tune_disk_irq: false
  tune_fstrim: false
  tune_cpu: false
  tune_aio_events: false
  tune_clocksource: false
  tune_swappiness: false
  enable_memory_locking: false
  tune_coredump: false
  coredump_dir: "STORAGE_PATH/coredump"
3 changes: 3 additions & 0 deletions coproc-test/cluster/redpanda-node-1/start.sh
@@ -0,0 +1,3 @@
#!/bin/bash

../../../vbuild/release/clang/bin/redpanda --redpanda-cfg redpanda.yaml
6 changes: 6 additions & 0 deletions coproc-test/cluster/redpanda-node-2/prepare.sh
@@ -0,0 +1,6 @@
#!/bin/bash

rm -rf data coredump
mkdir data coredump

sed -i'' "s@STORAGE_PATH@$(pwd)@" redpanda.yaml
47 changes: 47 additions & 0 deletions coproc-test/cluster/redpanda-node-2/redpanda.yaml
@@ -0,0 +1,47 @@
organization: "vectorized"
cluster_id: "cluster0"

redpanda:
  developer_mode: true
  data_directory: "STORAGE_PATH/data"
  node_id: 2
  rpc_server:
    address: "0.0.0.0"
    port: 33147
  kafka_api:
    address: "0.0.0.0"
    port: 9094
  admin:
    address: "0.0.0.0"
    port: 9646
  coproc_supervisor_server:
    address: "0.0.0.0"
    port: 43191

  auto_create_topics_enabled: true
  default_topic_partitions: 1
  default_topic_replications: 3
  enable_idempotence: true
  enable_transactions: true
  enable_coproc: true

  seed_servers:
    - host:
        address: "127.0.0.1"
        port: 33145
      node_id: 0

rpk:
  enable_usage_stats: false
  tune_network: false
  tune_disk_scheduler: false
  tune_disk_nomerges: false
  tune_disk_irq: false
  tune_fstrim: false
  tune_cpu: false
  tune_aio_events: false
  tune_clocksource: false
  tune_swappiness: false
  enable_memory_locking: false
  tune_coredump: false
  coredump_dir: "STORAGE_PATH/coredump"
3 changes: 3 additions & 0 deletions coproc-test/cluster/redpanda-node-2/start.sh
@@ -0,0 +1,3 @@
#!/bin/bash

../../../vbuild/release/clang/bin/redpanda --redpanda-cfg redpanda.yaml
11 changes: 11 additions & 0 deletions coproc-test/config.sh
@@ -0,0 +1,11 @@
#!/bin/bash

# Number of coprocessors (and input topics) used by the benchmark
export COPROC_COUNT=5
# Producer record size in bytes
export RECORD_SIZE=500
# -1 disables producer throttling
export THROUGHPUT=-1
export PARTITIONS=32
# Consumer timeout in milliseconds
export TIMEOUT=120000

export MESSAGES_COUNT=500000

export BROKERS=""
36 changes: 36 additions & 0 deletions coproc-test/start.sh
@@ -0,0 +1,36 @@
#!/bin/bash

IPs='127.0.0.1:9092,127.0.0.1:9093,127.0.0.1:9094';

NODE_PATH='../../../build/node/output/modules/rpc/service.js'

cd ./cluster


# Start redpanda cluster
for dir in *
do
    cd "$dir"
    sh prepare.sh
    cd ..
done

for dir in *
do
    cd "$dir"
    rm -rf log.txt
    touch log.txt
    sh start.sh &> log.txt &
    # Wait until this node reports a successful start before moving on
    count="0"
    while true
    do
        count=$(grep "Successfully started Redpanda" log.txt -c)
        if [ "$count" == "1" ]
        then
            break;
        fi
        sleep 1
    done
    rm -rf node_log.txt
    # Start the node.js coprocessor engine for this node
    node $NODE_PATH redpanda.yaml &> node_log.txt &
    cd ..
done
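After start.sh returns, each node directory contains a log.txt for the broker and a node_log.txt for the coprocessor engine. A quick way to confirm all three brokers came up is to grep for the same log line the script itself waits for:

    # run from coproc-test/; each file should report a count of 1
    grep -c "Successfully started Redpanda" cluster/redpanda-node-*/log.txt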
18 changes: 18 additions & 0 deletions coproc-test/stop.sh
@@ -0,0 +1,18 @@
#!/bin/bash

# Kill the brokers, the coprocessor engines and any leftover perf-test / kafka tooling.
# pgrep is used so the matching process list does not include the grep itself.
pids=$(pgrep -f redpanda)
sudo kill -9 $pids
pids=$(pgrep -f perf-test)
sudo kill -9 $pids
pids=$(pgrep -f kafka)
sudo kill -9 $pids

. ./config.sh

RPK_PATH="../vbuild/go/linux/bin/rpk"

for (( i=0; i < ${COPROC_COUNT}; i++ ))
do
    $RPK_PATH wasm remove "one_to_one_${i}" --brokers ${BROKERS}
    $RPK_PATH topic delete one_to_one_${i} --brokers ${BROKERS}
done
38 changes: 38 additions & 0 deletions coproc-test/template_code.js
@@ -0,0 +1,38 @@
const {
  SimpleTransform,
  PolicyError,
  PolicyInjection
} = require("@vectorizedio/wasm-api");
const transform = new SimpleTransform();
/* Topics that fire the transform function */
transform.subscribe([["_input", PolicyInjection.Stored]]);
/* The strategy the transform engine will use when handling errors */
transform.errorHandler(PolicyError.SkipOnFailure);
/* Auxiliary transform function for records: uppercase ASCII letters in the value */
const uppercase = (record) => {
  const newRecord = {
    ...record,
    value: record.value.map((char) => {
      if (char >= 97 && char <= 122) {
        return char - 32;
      } else {
        return char;
      }
    }),
  };
  return newRecord;
};
/* Transform function */
transform.processRecord((recordBatch) => {
  const result = new Map();
  const transformedRecord = recordBatch.map(({ header, records }) => {
    return {
      header,
      records: records.map(uppercase),
    };
  });
  result.set("output", transformedRecord);
  // processRecord function returns a Promise
  return Promise.resolve(result);
});
exports["default"] = transform;
58 changes: 58 additions & 0 deletions coproc-test/test.sh
@@ -0,0 +1,58 @@
#!/bin/bash

. ./config.sh

RPK_PATH="../vbuild/go/linux/bin/rpk"
GENERATE_DIR_PATH="./test"
SCRIPTS_DIR_PATH="./scripts"

PRODUCER_DIR="./producer_log"
CONSUMER_DIR="./consumer_log"

producer="./kafka_2.12-3.0.0/bin/kafka-producer-perf-test.sh"
consumer="./kafka_2.12-3.0.0/bin/kafka-consumer-perf-test.sh"

rm -rf $SCRIPTS_DIR_PATH
mkdir $SCRIPTS_DIR_PATH

rm -rf $GENERATE_DIR_PATH
mkdir $GENERATE_DIR_PATH
$RPK_PATH wasm generate $GENERATE_DIR_PATH
cp template_code.js $GENERATE_DIR_PATH/src/main.js

cd $GENERATE_DIR_PATH
npm install
npm run build
cd ..

cp $GENERATE_DIR_PATH/dist/main.js $SCRIPTS_DIR_PATH/template.js

for (( i=0; i < ${COPROC_COUNT}; i++ ))
do
template=`cat ${SCRIPTS_DIR_PATH}/template.js`

new_code=${template//_input/one_to_one_${i}}

echo "$new_code" > $SCRIPTS_DIR_PATH/script_${i}.js

# Deploy
$RPK_PATH topic create one_to_one_${i} -p ${PARTITIONS} -r 3 --brokers ${BROKERS}
$RPK_PATH wasm deploy --name "one_to_one_${i}" $SCRIPTS_DIR_PATH/script_${i}.js --brokers ${BROKERS}
done

rm -rf ${PRODUCER_DIR}
mkdir ${PRODUCER_DIR}

for (( i=0; i < ${COPROC_COUNT}; i++ ))
do
$producer --topic "one_to_one_${i}" --record-size ${RECORD_SIZE} --producer-props asks=-1 bootstrap.servers=${BROKERS} --throughput ${THROUGHPUT} --num-records $MESSAGES_COUNT &> ${PRODUCER_DIR}/producer_${i}.txt &
done

rm -rf ${CONSUMER_DIR}
mkdir ${CONSUMER_DIR}
for (( i=0; i < ${COPROC_COUNT}; i++ ))
do
$consumer --topic "one_to_one_${i}."'$output$' --bootstrap-server ${BROKERS} --messages ${MESSAGES_COUNT} &> ${CONSUMER_DIR}/consumer_${i}.txt --timeout ${TIMEOUT} &
done
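The scripts above are presumably run from the coproc-test directory in this order: bring up the cluster, run the benchmark, read the throughput numbers from the per-script log files, then tear everything down. A rough outline (the result lines assume the standard summary output of the Kafka perf-test tools):

    # assumed end-to-end flow; BROKERS must be set in config.sh first
    ./start.sh
    ./test.sh
    # once the producers/consumers finish, pull the summaries
    grep "records sent" producer_log/*.txt
    tail -n 1 consumer_log/*.txt
    ./stop.sh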