Compare commits

..

10 Commits

Author SHA1 Message Date
722e00b1e9
-fix typos 2023-07-26 17:53:24 +02:00
d208041240
[age-withdraw] reveal works, tested.
Tests for age-withdraw and the reveal now work for both, RSA and CS.
2023-07-26 17:36:13 +02:00
0f17931b10
Merge branch 'master' of ssh://git.taler.net/exchange 2023-07-26 04:05:39 +02:00
1ce9312d02
[age-withdraw] WIP - database transaction during reveal works now
The test for age-restriction still fail, but the database transactions,
including passing arrays in/out the PQ-helpers works.
2023-07-26 04:04:02 +02:00
ac462b2753
[pq] Added support for some Taler-specific array types
Added array support for DB-query and -result for
- denomination signatures
- blinded envelope hashes
- denomination hashes
2023-07-26 04:02:09 +02:00
Christian Grothoff
eae030fc4f
-misc fixes: 2023-07-25 22:17:59 +02:00
Christian Grothoff
28fcff7c1e
-move to postgres 2023-07-25 17:52:10 +02:00
Christian Grothoff
153b685bac
actually take kyc_off flag into consideration 2023-07-25 17:34:39 +02:00
Christian Grothoff
4c6ab83925
-more script fixes 2023-07-25 14:34:03 +02:00
Christian Grothoff
987878469d
-more script fixes 2023-07-25 14:10:48 +02:00
23 changed files with 2172 additions and 449 deletions

View File

@ -207,12 +207,12 @@ taler_auditor_sync_CPPFLAGS = \
check_SCRIPTS = \
test-auditor.sh \
test-kyc.sh \
test-revocation.sh \
test-sync.sh
.NOTPARALLEL:
# revocation test disabled for now: need working wallet first!
TESTS = $(check_SCRIPTS)
# TESTS = $(check_SCRIPTS)
EXTRA_DIST = \
taler-auditor.in \
@ -223,5 +223,6 @@ EXTRA_DIST = \
test-sync-out.conf \
generate-auditor-basedb.sh \
generate-auditor-basedb.conf \
generate-kyc-basedb.conf \
generate-revoke-basedb.sh \
$(check_SCRIPTS)

View File

@ -12,16 +12,41 @@
#
set -eu
# Where do we write the result?
BASEDB="$1"
. setup.sh
# Default configuration; may be overridden with -c below.
CONF="generate-auditor-basedb.conf"
# Parse command-line options
while getopts ':c:d:h' OPTION; do
case "$OPTION" in
c)
CONF="$OPTARG"
;;
d)
# Target basedb name/path; checked for presence after the loop.
BASEDB="$OPTARG"
;;
h)
# NOTE(review): -h prints usage but does not exit; the script
# continues running afterwards -- confirm this is intended.
echo 'Supported options:'
# shellcheck disable=SC2016
echo ' -c $CONF -- set configuration'
# shellcheck disable=SC2016
echo ' -d $DB -- set database name'
;;
?)
exit_fail "Unrecognized command line option"
;;
esac
done
# Where do we write the result?
# Fail early when no database name was given via -d.
if [ ! -v BASEDB ]
then
exit_fail "-d option required"
fi
echo -n "Testing for curl ..."
curl --help >/dev/null </dev/null || exit_skip " MISSING"
echo " FOUND"
# NOTE(review): removed a second CONF="generate-auditor-basedb.conf"
# assignment that appeared here; it ran AFTER option parsing and thus
# overrode any configuration supplied via -c, silently turning the -c
# option into a no-op (the same default is already set before getopts).
# reset database
echo -n "Reset 'auditor-basedb' database at $PGHOST ..."

View File

@ -0,0 +1,4 @@
# This file is in the public domain.
@INLINE@ generate-auditor-basedb.conf
# FIXME: add options for KYC here!

View File

@ -1,4 +1,4 @@
#!/bin/sh
#!/bin/bash
# This file is in the public domain
# Script to be inlined into the main test scripts. Defines function 'setup()'
@ -70,3 +70,78 @@ function get_bankaccount_transactions() {
export LIBEUFIN_SANDBOX_URL="http://localhost:18082"
libeufin-cli sandbox demobank list-transactions --bank-account $1
}
# Stop the libeufin sandbox and nexus services, if their PID files exist.
# Uses ${MY_TMP_DIR:-/} so an unset MY_TMP_DIR degrades to a path that
# cannot exist, making the function a safe no-op.
function stop_libeufin()
{
    echo -n "Stopping libeufin... "
    local component
    for component in sandbox nexus
    do
        local pidfile="${MY_TMP_DIR:-/}/libeufin-${component}.pid"
        if [ -f "$pidfile" ]
        then
            local pid
            pid=$(cat "$pidfile" 2> /dev/null)
            echo "Killing libeufin ${component} $pid"
            rm "$pidfile"
            # Process may already be gone; 'wait' fails for non-children.
            kill "$pid" 2> /dev/null || true
            wait "$pid" || true
        fi
    done
    echo "DONE"
}
# Launch the libeufin sandbox (bank) and nexus services in the background.
function launch_libeufin () {
# Connect libeufin (JDBC) to our private Postgres via its unix socket.
# shellcheck disable=SC2016
export LIBEUFIN_SANDBOX_DB_CONNECTION='jdbc:postgresql://localhost/'"${DB}"'?socketFactory=org.newsclub.net.unix.AFUNIXSocketFactory$FactoryArg&socketFactoryArg='"$SOCKETDIR"'/.s.PGSQL.5432'
# Bank sandbox on port 18082; PID recorded so stop_libeufin can kill it.
libeufin-sandbox serve \
--no-auth \
--port 18082 \
> "${MY_TMP_DIR}/libeufin-sandbox-stdout.log" \
2> "${MY_TMP_DIR}/libeufin-sandbox-stderr.log" &
echo $! > "${MY_TMP_DIR}/libeufin-sandbox.pid"
# shellcheck disable=SC2016
export LIBEUFIN_NEXUS_DB_CONNECTION='jdbc:postgresql://localhost/'"${DB}"'?socketFactory=org.newsclub.net.unix.AFUNIXSocketFactory$FactoryArg&socketFactoryArg='"$SOCKETDIR"'/.s.PGSQL.5432'
# Nexus on port 8082; PID recorded so stop_libeufin can kill it.
libeufin-nexus serve \
--port 8082 \
2> "${MY_TMP_DIR}/libeufin-nexus-stderr.log" \
> "${MY_TMP_DIR}/libeufin-nexus-stdout.log" &
echo $! > "${MY_TMP_DIR}/libeufin-nexus.pid"
}
# Downloads new transactions from the bank.
function nexus_fetch_transactions () {
# Credentials/URL for the exchange's nexus account; cleared afterwards
# so they do not leak into subsequent commands.
export LIBEUFIN_NEXUS_USERNAME="exchange"
export LIBEUFIN_NEXUS_PASSWORD="x"
export LIBEUFIN_NEXUS_URL="http://localhost:8082/"
# Fetch only what arrived since the last fetch, at 'report' level.
libeufin-cli accounts \
fetch-transactions \
--range-type since-last \
--level report \
exchange-nexus > /dev/null
unset LIBEUFIN_NEXUS_USERNAME
unset LIBEUFIN_NEXUS_PASSWORD
unset LIBEUFIN_NEXUS_URL
}
# Instruct Nexus to submit all the prepared payments (= those
# POSTed to /transfer by the exchange) to the sandbox bank.
function nexus_submit_to_sandbox () {
    # Credentials/URL for the exchange's nexus account; cleared afterwards.
    export LIBEUFIN_NEXUS_USERNAME="exchange"
    export LIBEUFIN_NEXUS_PASSWORD="x"
    export LIBEUFIN_NEXUS_URL="http://localhost:8082/"
    libeufin-cli accounts \
        submit-payments \
        exchange-nexus
    unset LIBEUFIN_NEXUS_USERNAME
    unset LIBEUFIN_NEXUS_PASSWORD
    unset LIBEUFIN_NEXUS_URL
}

View File

@ -55,28 +55,6 @@ LIBEUFIN_SETTLE_TIME=1
. setup.sh
# Stop libeufin sandbox and nexus (if running)
function stop_libeufin()
{
echo -n "Stopping libeufin... "
if test -f ${MY_TMP_DIR:-/}/libeufin-sandbox.pid
then
PID=$(cat ${MY_TMP_DIR}/libeufin-sandbox.pid 2> /dev/null)
echo "Killing libeufin sandbox $PID"
rm "${MY_TMP_DIR}/libeufin-sandbox.pid"
kill "$PID" 2> /dev/null || true
wait "$PID" || true
fi
if test -f ${MY_TMP_DIR:-/}/libeufin-nexus.pid
then
PID=$(cat ${MY_TMP_DIR}/libeufin-nexus.pid 2> /dev/null)
echo "Killing libeufin nexus $PID"
rm "${MY_TMP_DIR}/libeufin-nexus.pid"
kill "$PID" 2> /dev/null || true
wait "$PID" || true
fi
echo "DONE"
}
# Cleanup exchange and libeufin between runs.
function cleanup()
@ -118,52 +96,6 @@ function exit_cleanup()
# Install cleanup handler (except for kill -9)
trap exit_cleanup EXIT
function launch_libeufin () {
# shellcheck disable=SC2016
export LIBEUFIN_SANDBOX_DB_CONNECTION='jdbc:postgresql://localhost/'"${DB}"'?socketFactory=org.newsclub.net.unix.AFUNIXSocketFactory$FactoryArg&socketFactoryArg='"$SOCKETDIR"'/.s.PGSQL.5432'
export MY_TMP_DIR
libeufin-sandbox serve --no-auth --port 18082 \
> "${MY_TMP_DIR}/libeufin-sandbox-stdout.log" \
2> "${MY_TMP_DIR}/libeufin-sandbox-stderr.log" &
echo $! > "${MY_TMP_DIR}/libeufin-sandbox.pid"
# shellcheck disable=SC2016
export LIBEUFIN_NEXUS_DB_CONNECTION='jdbc:postgresql://localhost/'"${DB}"'?socketFactory=org.newsclub.net.unix.AFUNIXSocketFactory$FactoryArg&socketFactoryArg='"$SOCKETDIR"'/.s.PGSQL.5432'
libeufin-nexus serve --port 8082 \
2> "${MY_TMP_DIR}/libeufin-nexus-stderr.log" \
> "${MY_TMP_DIR}/libeufin-nexus-stdout.log" &
echo $! > "${MY_TMP_DIR}/libeufin-nexus.pid"
}
# Downloads new transactions from the bank.
function nexus_fetch_transactions () {
export LIBEUFIN_NEXUS_USERNAME="exchange"
export LIBEUFIN_NEXUS_PASSWORD="x"
export LIBEUFIN_NEXUS_URL="http://localhost:8082/"
libeufin-cli accounts \
fetch-transactions \
--range-type since-last \
--level report \
exchange-nexus > /dev/null
unset LIBEUFIN_NEXUS_USERNAME
unset LIBEUFIN_NEXUS_PASSWORD
unset LIBEUFIN_NEXUS_URL
}
# Instruct Nexus to all the prepared payments (= those
# POSTed to /transfer by the exchange).
function nexus_submit_to_sandbox () {
export LIBEUFIN_NEXUS_USERNAME="exchange"
export LIBEUFIN_NEXUS_PASSWORD="x"
export LIBEUFIN_NEXUS_URL="http://localhost:8082/"
libeufin-cli accounts \
submit-payments\
exchange-nexus
unset LIBEUFIN_NEXUS_USERNAME
unset LIBEUFIN_NEXUS_PASSWORD
unset LIBEUFIN_NEXUS_URL
}
# Operations to run before the actual audit
function pre_audit () {
@ -2238,7 +2170,6 @@ function check_with_database()
{
BASEDB="$1"
CONF="$1.conf"
ORIGIN=$(pwd)
echo "Running test suite with database $BASEDB using configuration $CONF"
MASTER_PRIV_FILE="${BASEDB}.mpriv"
taler-config \
@ -2344,7 +2275,7 @@ export PGHOST
MYDIR="${MY_TMP_DIR}/basedb"
mkdir -p "${MYDIR}"
echo "Generating fresh database at $MYDIR"
if faketime -f '-1 d' ./generate-auditor-basedb.sh "$MYDIR/$DB"
if faketime -f '-1 d' ./generate-auditor-basedb.sh -d "$MYDIR/$DB"
then
echo -n "Reset 'auditor-basedb' database at $PGHOST ..."
dropdb "auditor-basedb" >/dev/null 2>/dev/null || true

784
src/auditor/test-kyc.sh Executable file
View File

@ -0,0 +1,784 @@
#!/bin/bash
#
# This file is part of TALER
# Copyright (C) 2014-2023 Taler Systems SA
#
# TALER is free software; you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation; either version 3, or (at your option) any later version.
#
# TALER is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# TALER; see the file COPYING. If not, see <http://www.gnu.org/licenses/>
#
#
# shellcheck disable=SC2317
# shellcheck disable=SC1091
#
#
# Setup database which was generated from a perfectly normal
# exchange-wallet interaction with KYC enabled and transactions
# blocked due to KYC and run the auditor against it.
#
# Check that the auditor report is as expected.
#
# Requires 'jq' tool and Postgres superuser rights!
#
set -eu
#set -x
# Set of numbers for all the testcases.
# When adding new tests, increase the last number:
ALL_TESTS=$(seq 0 1)
# $TESTS determines which tests we should run.
# This construction is used to make it easy to
# only run a subset of the tests. To only run a subset,
# pass the numbers of the tests to run as the FIRST
# argument to test-kyc.sh, i.e.:
#
# $ test-kyc.sh "1 3"
#
# to run tests 1 and 3 only. By default, all tests are run.
#
TESTS=${1:-$ALL_TESTS}
# Global variable to run the auditor processes under valgrind
# VALGRIND=valgrind
VALGRIND=""
# Number of seconds to let libeufin background
# tasks apply a cycle of payment submission and
# history request.
LIBEUFIN_SETTLE_TIME=1
# Pull in shared helpers (stop_libeufin, launch_libeufin,
# nexus_fetch_transactions, nexus_submit_to_sandbox, ...) from setup.sh.
. setup.sh
# Cleanup exchange and libeufin between test runs.
function cleanup()
{
    # Terminate the exchange HTTP daemon, if one was started.
    if [ -n "${EPID:-}" ]
    then
        echo -n "Stopping exchange $EPID..."
        kill -TERM "$EPID"
        wait "$EPID" || true
        echo "DONE"
        unset EPID
    fi
    # Always bring down the libeufin bank components as well.
    stop_libeufin
}
# Cleanup to run whenever we exit (installed as EXIT trap below).
function exit_cleanup()
{
echo "Running exit-cleanup"
# Stop the private Postgres instance, if we started one.
if test ! -z "${POSTGRES_PATH:-}"
then
echo "Stopping Postgres at ${POSTGRES_PATH}"
"${POSTGRES_PATH}/pg_ctl" \
-D "$TMPDIR" \
-l /dev/null \
stop \
&> /dev/null \
|| true
fi
# Stop exchange and libeufin components.
cleanup
# Kill any remaining background jobs of this shell.
for n in $(jobs -p)
do
kill "$n" 2> /dev/null || true
done
wait || true
echo "DONE"
}
# Install cleanup handler (except for kill -9)
trap exit_cleanup EXIT
# Operations to run before the actual audit: start the bank and,
# optionally ($1 == "aggregator"), run the exchange's aggregation,
# closing and transfer processes so wire transfers actually happen.
function pre_audit () {
# Launch bank
echo -n "Launching bank"
launch_libeufin
# Poll the sandbox (port 18082) until it answers (max ~8s).
for n in $(seq 1 80)
do
echo -n "."
sleep 0.1
OK=1
wget http://localhost:18082/ \
-o /dev/null \
-O /dev/null \
>/dev/null \
&& break
OK=0
done
if [ 1 != "$OK" ]
then
exit_skip "Failed to launch Sandbox"
fi
sleep "$LIBEUFIN_SETTLE_TIME"
# Poll nexus (port 8082) until it answers (max ~8s).
for n in $(seq 1 80)
do
echo -n "."
sleep 0.1
OK=1
wget http://localhost:8082/ \
-o /dev/null \
-O /dev/null \
>/dev/null \
&& break
OK=0
done
if [ 1 != "$OK" ]
then
exit_skip "Failed to launch Nexus"
fi
echo " DONE"
if test "${1:-no}" = "aggregator"
then
# Run aggregator, closer and transfer in test mode (-t: exit when
# there is no more work), then push the resulting payments through
# Nexus to the sandbox and fetch them back.
echo -n "Running exchange aggregator ..."
taler-exchange-aggregator \
-y \
-L "INFO" \
-t \
-c "$CONF" \
2> "${MY_TMP_DIR}/aggregator.log" \
|| exit_fail "FAIL"
echo " DONE"
echo -n "Running exchange closer ..."
taler-exchange-closer \
-L "INFO" \
-t \
-c "$CONF" \
2> "${MY_TMP_DIR}/closer.log" \
|| exit_fail "FAIL"
echo " DONE"
echo -n "Running exchange transfer ..."
taler-exchange-transfer \
-L "INFO" \
-t \
-c "$CONF" \
2> "${MY_TMP_DIR}/transfer.log" \
|| exit_fail "FAIL"
echo " DONE"
echo -n "Running Nexus payment submitter ..."
nexus_submit_to_sandbox
echo " DONE"
# Make outgoing transactions appear in the TWG:
echo -n "Download bank transactions ..."
nexus_fetch_transactions
echo " DONE"
fi
}
# actual audit run: execute every taler-helper-auditor-* twice --
# a fresh full pass (output test-audit-*.json) followed by an
# incremental pass over the same data (output test-audit-*-inc.json).
function audit_only () {
# Run the auditor!
echo -n "Running audit(s) ..."
# Restart so that first run is always fresh, and second one is incremental
taler-auditor-dbinit \
-r \
-c "$CONF"
# Aggregation audit (fresh, then incremental).
$VALGRIND taler-helper-auditor-aggregation \
-L DEBUG \
-c "$CONF" \
-m "$MASTER_PUB" \
> test-audit-aggregation.json \
2> "${MY_TMP_DIR}/test-audit-aggregation.log" \
|| exit_fail "aggregation audit failed"
echo -n "."
$VALGRIND taler-helper-auditor-aggregation \
-L DEBUG \
-c "$CONF" \
-m "$MASTER_PUB" \
> test-audit-aggregation-inc.json \
2> "${MY_TMP_DIR}/test-audit-aggregation-inc.log" \
|| exit_fail "incremental aggregation audit failed"
echo -n "."
# Coin audit (fresh, then incremental).
$VALGRIND taler-helper-auditor-coins \
-L DEBUG \
-c "$CONF" \
-m "$MASTER_PUB" \
> test-audit-coins.json \
2> "${MY_TMP_DIR}/test-audit-coins.log" \
|| exit_fail "coin audit failed"
echo -n "."
$VALGRIND taler-helper-auditor-coins \
-L DEBUG \
-c "$CONF" \
-m "$MASTER_PUB" \
> test-audit-coins-inc.json \
2> "${MY_TMP_DIR}/test-audit-coins-inc.log" \
|| exit_fail "incremental coin audit failed"
echo -n "."
# Deposit-confirmation audit (fresh, then incremental).
$VALGRIND taler-helper-auditor-deposits \
-L DEBUG \
-c "$CONF" \
-m "$MASTER_PUB" \
> test-audit-deposits.json \
2> "${MY_TMP_DIR}/test-audit-deposits.log" \
|| exit_fail "deposits audit failed"
echo -n "."
$VALGRIND taler-helper-auditor-deposits \
-L DEBUG \
-c "$CONF" \
-m "$MASTER_PUB" \
> test-audit-deposits-inc.json \
2> "${MY_TMP_DIR}/test-audit-deposits-inc.log" \
|| exit_fail "incremental deposits audit failed"
echo -n "."
# Reserves audit (fresh, then incremental).
$VALGRIND taler-helper-auditor-reserves \
-i \
-L DEBUG \
-c "$CONF" \
-m "$MASTER_PUB" \
> test-audit-reserves.json \
2> "${MY_TMP_DIR}/test-audit-reserves.log" \
|| exit_fail "reserves audit failed"
echo -n "."
$VALGRIND taler-helper-auditor-reserves \
-i \
-L DEBUG \
-c "$CONF" \
-m "$MASTER_PUB" \
> test-audit-reserves-inc.json \
2> "${MY_TMP_DIR}/test-audit-reserves-inc.log" \
|| exit_fail "incremental reserves audit failed"
echo -n "."
rm -f "${MY_TMP_DIR}/test-wire-audit.log"
# The wire auditor can fail transiently (e.g. bank not settled yet);
# wrap it in a helper so the first failure can be retried once.
thaw() {
$VALGRIND taler-helper-auditor-wire \
-i \
-L DEBUG \
-c "$CONF" \
-m "$MASTER_PUB" \
> test-audit-wire.json \
2>> "${MY_TMP_DIR}/test-wire-audit.log"
}
thaw || ( echo -e " FIRST CALL TO taler-helper-auditor-wire FAILED,\nRETRY AFTER TWO SECONDS..." | tee -a "${MY_TMP_DIR}/test-wire-audit.log"
sleep 2
thaw || exit_fail "wire audit failed" )
echo -n "."
# Incremental wire audit (no retry logic here).
$VALGRIND taler-helper-auditor-wire \
-i \
-L DEBUG \
-c "$CONF" \
-m "$MASTER_PUB" \
> test-audit-wire-inc.json \
2> "${MY_TMP_DIR}/test-wire-audit-inc.log" \
|| exit_fail "wire audit inc failed"
echo -n "."
echo " DONE"
}
# Cleanup to run after the auditor: garbage-collect the exchange DB,
# stop services and render the audit reports into a PDF.
function post_audit () {
# -g runs the exchange database garbage collection.
taler-exchange-dbinit \
-c "$CONF" \
-g \
|| exit_fail "exchange DB GC failed"
cleanup
echo -n "TeXing ."
taler-helper-auditor-render.py \
test-audit-aggregation.json \
test-audit-coins.json \
test-audit-deposits.json \
test-audit-reserves.json \
test-audit-wire.json \
< ../../contrib/auditor-report.tex.j2 \
> test-report.tex \
|| exit_fail "Renderer failed"
echo -n "."
timeout 10 pdflatex test-report.tex \
>/dev/null \
|| exit_fail "pdflatex failed"
echo -n "."
# Second pdflatex pass (typically needed to resolve references).
# NOTE(review): this run is unguarded; under 'set -e' a non-zero
# exit here aborts the whole script -- confirm that is intended.
timeout 10 pdflatex test-report.tex \
>/dev/null
echo " DONE"
}
# Run audit process on current database, including report
# generation. Pass "aggregator" as $1 to run
# $ taler-exchange-aggregator
# before auditor (to trigger pending wire transfers).
# Pass "drain" as $2 to run a drain operation as well.
function run_audit () {
pre_audit "${1:-no}"
if test "${2:-no}" = "drain"
then
# Drain: start the exchange, issue an offline 'drain' command,
# execute it, wire the money out and import the result via Nexus.
echo -n "Starting exchange..."
taler-exchange-httpd \
-c "${CONF}" \
-L INFO \
2> "${MY_TMP_DIR}/exchange-httpd-drain.err" &
EPID=$!
# Wait for all services to be available
for n in $(seq 1 50)
do
echo -n "."
sleep 0.1
OK=0
# exchange
wget "http://localhost:8081/seed" \
-o /dev/null \
-O /dev/null \
>/dev/null \
|| continue
OK=1
break
done
echo "... DONE."
export CONF
echo -n "Running taler-exchange-offline drain "
# Sign and upload a drain of TESTKUDOS:0.1 to the drain account.
taler-exchange-offline \
-L DEBUG \
-c "${CONF}" \
drain TESTKUDOS:0.1 \
exchange-account-1 payto://iban/SANDBOXX/DE360679?receiver-name=Exchange+Drain \
upload \
2> "${MY_TMP_DIR}/taler-exchange-offline-drain.log" \
|| exit_fail "offline draining failed"
# The exchange is no longer needed once the drain is uploaded.
kill -TERM "$EPID"
wait "$EPID" || true
unset EPID
echo -n "Running taler-exchange-drain ..."
printf "\n" | taler-exchange-drain \
-L DEBUG \
-c "$CONF" \
2> "${MY_TMP_DIR}/taler-exchange-drain.log" \
|| exit_fail "FAIL"
echo " DONE"
echo -n "Running taler-exchange-transfer ..."
taler-exchange-transfer \
-L INFO \
-t \
-c "$CONF" \
2> "${MY_TMP_DIR}/drain-transfer.log" \
|| exit_fail "FAIL"
echo " DONE"
export LIBEUFIN_NEXUS_USERNAME="exchange"
export LIBEUFIN_NEXUS_PASSWORD="x"
export LIBEUFIN_NEXUS_URL="http://localhost:8082/"
# Find the not-yet-submitted payment initiation, if any.
PAIN_UUID=$(libeufin-cli accounts list-payments exchange-nexus | jq .initiatedPayments[] | jq 'select(.submitted==false)' | jq -r .paymentInitiationId)
if test -z "${PAIN_UUID}"
then
echo -n "Payment likely already submitted, running submit-payments without UUID anyway ..."
libeufin-cli accounts \
submit-payments \
exchange-nexus
else
echo -n "Running payment submission for transaction ${PAIN_UUID} ..."
libeufin-cli accounts \
submit-payments \
--payment-uuid "${PAIN_UUID}" \
exchange-nexus
fi
echo " DONE"
echo -n "Import outgoing transactions..."
libeufin-cli accounts \
fetch-transactions \
--range-type since-last \
--level report \
exchange-nexus
echo " DONE"
fi
audit_only
post_audit
}
# Do a full reload of the (original) database: drop $DB, recreate it
# from template0 and replay the SQL dump ${BASEDB}.sql into it.
function full_reload()
{
echo -n "Doing full reload of the database (loading ${BASEDB}.sql into $DB at $PGHOST)... "
dropdb "$DB" 2> /dev/null || true
createdb -T template0 "$DB" \
|| exit_skip "could not create database $DB (at $PGHOST)"
# Import pre-generated database, -q(ietly) using single (-1) transaction
psql -Aqt "$DB" \
-q \
-1 \
-f "${BASEDB}.sql" \
> /dev/null \
|| exit_skip "Failed to load database $DB from ${BASEDB}.sql"
echo "DONE"
# Technically, this call shouldn't be needed as libeufin should already be stopped here...
stop_libeufin
}
# Test 0: run the full audit with the aggregator; a clean KYC database
# must produce reports with no inconsistencies and zero-loss balances.
function test_0() {
echo "===========0: normal run with aggregator==========="
run_audit aggregator
echo "Checking output"
# if an emergency was detected, that is a bug and we should fail
# ('jq -e' exits 0 iff the selected element exists; the left side of
# '&&' is exempt from 'set -e', so an empty report just falls through)
echo -n "Test for emergencies... "
jq -e .emergencies[0] < test-audit-coins.json > /dev/null && exit_fail "Unexpected emergency detected in ordinary run" || echo PASS
echo -n "Test for deposit confirmation emergencies... "
jq -e .deposit_confirmation_inconsistencies[0] < test-audit-deposits.json > /dev/null && exit_fail "Unexpected deposit confirmation inconsistency detected" || echo PASS
echo -n "Test for emergencies by count... "
jq -e .emergencies_by_count[0] < test-audit-coins.json > /dev/null && exit_fail "Unexpected emergency by count detected in ordinary run" || echo PASS
echo -n "Test for wire inconsistencies... "
jq -e .wire_out_amount_inconsistencies[0] < test-audit-wire.json > /dev/null && exit_fail "Unexpected wire out inconsistency detected in ordinary run"
jq -e .reserve_in_amount_inconsistencies[0] < test-audit-wire.json > /dev/null && exit_fail "Unexpected reserve in inconsistency detected in ordinary run"
jq -e .misattribution_inconsistencies[0] < test-audit-wire.json > /dev/null && exit_fail "Unexpected misattribution inconsistency detected in ordinary run"
jq -e .row_inconsistencies[0] < test-audit-wire.json > /dev/null && exit_fail "Unexpected row inconsistency detected in ordinary run"
jq -e .denomination_key_validity_withdraw_inconsistencies[0] < test-audit-reserves.json > /dev/null && exit_fail "Unexpected denomination key withdraw inconsistency detected in ordinary run"
jq -e .row_minor_inconsistencies[0] < test-audit-wire.json > /dev/null && exit_fail "Unexpected minor row inconsistency detected in ordinary run"
jq -e .lag_details[0] < test-audit-wire.json > /dev/null && exit_fail "Unexpected lag detected in ordinary run"
jq -e .wire_format_inconsistencies[0] < test-audit-wire.json > /dev/null && exit_fail "Unexpected wire format inconsistencies detected in ordinary run"
# TODO: check operation balances are correct (once we have all transaction types and wallet is deterministic)
# TODO: check revenue summaries are correct (once we have all transaction types and wallet is deterministic)
echo PASS
# All loss totals must be exactly zero.
LOSS=$(jq -r .total_bad_sig_loss < test-audit-aggregation.json)
if [ "$LOSS" != "TESTKUDOS:0" ]
then
exit_fail "Wrong total bad sig loss from aggregation, got unexpected loss of $LOSS"
fi
LOSS=$(jq -r .irregular_loss < test-audit-coins.json)
if [ "$LOSS" != "TESTKUDOS:0" ]
then
exit_fail "Wrong total bad sig loss from coins, got unexpected loss of $LOSS"
fi
LOSS=$(jq -r .total_bad_sig_loss < test-audit-reserves.json)
if [ "$LOSS" != "TESTKUDOS:0" ]
then
exit_fail "Wrong total bad sig loss from reserves, got unexpected loss of $LOSS"
fi
# All wire deltas must be exactly zero.
echo -n "Test for wire amounts... "
WIRED=$(jq -r .total_wire_in_delta_plus < test-audit-wire.json)
if [ "$WIRED" != "TESTKUDOS:0" ]
then
exit_fail "Expected total wire delta plus wrong, got $WIRED"
fi
WIRED=$(jq -r .total_wire_in_delta_minus < test-audit-wire.json)
if [ "$WIRED" != "TESTKUDOS:0" ]
then
exit_fail "Expected total wire delta minus wrong, got $WIRED"
fi
WIRED=$(jq -r .total_wire_out_delta_plus < test-audit-wire.json)
if [ "$WIRED" != "TESTKUDOS:0" ]
then
exit_fail "Expected total wire delta plus wrong, got $WIRED"
fi
WIRED=$(jq -r .total_wire_out_delta_minus < test-audit-wire.json)
if [ "$WIRED" != "TESTKUDOS:0" ]
then
exit_fail "Expected total wire delta minus wrong, got $WIRED"
fi
WIRED=$(jq -r .total_misattribution_in < test-audit-wire.json)
if [ "$WIRED" != "TESTKUDOS:0" ]
then
exit_fail "Expected total misattribution in wrong, got $WIRED"
fi
echo "PASS"
# All arithmetic deltas must be exactly zero.
echo -n "Checking for unexpected arithmetic differences "
LOSS=$(jq -r .total_arithmetic_delta_plus < test-audit-aggregation.json)
if [ "$LOSS" != "TESTKUDOS:0" ]
then
exit_fail "Wrong arithmetic delta from aggregations, got unexpected plus of $LOSS"
fi
LOSS=$(jq -r .total_arithmetic_delta_minus < test-audit-aggregation.json)
if [ "$LOSS" != "TESTKUDOS:0" ]
then
exit_fail "Wrong arithmetic delta from aggregation, got unexpected minus of $LOSS"
fi
LOSS=$(jq -r .total_arithmetic_delta_plus < test-audit-coins.json)
if [ "$LOSS" != "TESTKUDOS:0" ]
then
exit_fail "Wrong arithmetic delta from coins, got unexpected plus of $LOSS"
fi
LOSS=$(jq -r .total_arithmetic_delta_minus < test-audit-coins.json)
if [ "$LOSS" != "TESTKUDOS:0" ]
then
exit_fail "Wrong arithmetic delta from coins, got unexpected minus of $LOSS"
fi
LOSS=$(jq -r .total_arithmetic_delta_plus < test-audit-reserves.json)
if [ "$LOSS" != "TESTKUDOS:0" ]
then
exit_fail "Wrong arithmetic delta from reserves, got unexpected plus of $LOSS"
fi
LOSS=$(jq -r .total_arithmetic_delta_minus < test-audit-reserves.json)
if [ "$LOSS" != "TESTKUDOS:0" ]
then
exit_fail "Wrong arithmetic delta from reserves, got unexpected minus of $LOSS"
fi
jq -e .amount_arithmetic_inconsistencies[0] < test-audit-aggregation.json > /dev/null && exit_fail "Unexpected arithmetic inconsistencies from aggregations detected in ordinary run"
jq -e .amount_arithmetic_inconsistencies[0] < test-audit-coins.json > /dev/null && exit_fail "Unexpected arithmetic inconsistencies from coins detected in ordinary run"
jq -e .amount_arithmetic_inconsistencies[0] < test-audit-reserves.json > /dev/null && exit_fail "Unexpected arithmetic inconsistencies from reserves detected in ordinary run"
echo "PASS"
echo -n "Checking for unexpected wire out differences "
jq -e .wire_out_inconsistencies[0] < test-audit-aggregation.json > /dev/null && exit_fail "Unexpected wire out inconsistencies detected in ordinary run"
echo "PASS"
# cannot easily undo aggregator, hence full reload
full_reload
}
# Run without aggregator, hence auditor should detect wire
# transfer lag!
function test_1() {
echo "===========1: normal run==========="
run_audit
echo "Checking output"
# if an emergency was detected, that is a bug and we should fail
echo -n "Test for emergencies... "
jq -e .emergencies[0] \
< test-audit-coins.json \
> /dev/null \
&& exit_fail "Unexpected emergency detected in ordinary run";
echo "PASS"
echo -n "Test for emergencies by count... "
jq -e .emergencies_by_count[0] \
< test-audit-coins.json \
> /dev/null \
&& exit_fail "Unexpected emergency by count detected in ordinary run"
echo "PASS"
echo -n "Test for wire inconsistencies... "
jq -e .wire_out_amount_inconsistencies[0] \
< test-audit-wire.json \
> /dev/null \
&& exit_fail "Unexpected wire out inconsistency detected in ordinary run"
jq -e .reserve_in_amount_inconsistencies[0] \
< test-audit-wire.json \
> /dev/null \
&& exit_fail "Unexpected reserve in inconsistency detected in ordinary run"
jq -e .misattribution_inconsistencies[0] \
< test-audit-wire.json \
> /dev/null \
&& exit_fail "Unexpected misattribution inconsistency detected in ordinary run"
jq -e .row_inconsistencies[0] \
< test-audit-wire.json \
> /dev/null \
&& exit_fail "Unexpected row inconsistency detected in ordinary run"
jq -e .row_minor_inconsistencies[0] \
< test-audit-wire.json \
> /dev/null \
&& exit_fail "Unexpected minor row inconsistency detected in ordinary run"
jq -e .wire_format_inconsistencies[0] \
< test-audit-wire.json \
> /dev/null \
&& exit_fail "Unexpected wire format inconsistencies detected in ordinary run"
# TODO: check operation balances are correct (once we have all transaction types and wallet is deterministic)
# TODO: check revenue summaries are correct (once we have all transaction types and wallet is deterministic)
echo "PASS"
echo -n "Check for lag detection... "
# Check wire transfer lag reported (no aggregator!)
# NOTE: This test is EXPECTED to fail for ~1h after
# re-generating the test database as we do not
# report lag of less than 1h (see GRACE_PERIOD in
# taler-helper-auditor-wire.c)
jq -e .lag_details[0] \
< test-audit-wire.json \
> /dev/null \
|| exit_fail "Lag not detected in run without aggregator"
LAG=$(jq -r .total_amount_lag < test-audit-wire.json)
if [ "$LAG" = "TESTKUDOS:0" ]
then
exit_fail "Expected total lag to be non-zero"
fi
echo "PASS"
# Wire deltas must all remain zero even though transfers lag.
echo -n "Test for wire amounts... "
WIRED=$(jq -r .total_wire_in_delta_plus < test-audit-wire.json)
if [ "$WIRED" != "TESTKUDOS:0" ]
then
exit_fail "Expected total wire delta plus wrong, got $WIRED"
fi
WIRED=$(jq -r .total_wire_in_delta_minus < test-audit-wire.json)
if [ "$WIRED" != "TESTKUDOS:0" ]
then
exit_fail "Expected total wire delta minus wrong, got $WIRED"
fi
WIRED=$(jq -r .total_wire_out_delta_plus < test-audit-wire.json)
if [ "$WIRED" != "TESTKUDOS:0" ]
then
exit_fail "Expected total wire delta plus wrong, got $WIRED"
fi
WIRED=$(jq -r .total_wire_out_delta_minus < test-audit-wire.json)
if [ "$WIRED" != "TESTKUDOS:0" ]
then
exit_fail "Expected total wire delta minus wrong, got $WIRED"
fi
WIRED=$(jq -r .total_misattribution_in < test-audit-wire.json)
if [ "$WIRED" != "TESTKUDOS:0" ]
then
exit_fail "Expected total misattribution in wrong, got $WIRED"
fi
# Database was unmodified, no need to undo
echo "OK"
}
# *************** Main test loop starts here **************
# Run all the tests against the database given in $1.
# Sets $fail to 0 on success, non-zero on failure.
function check_with_database()
{
BASEDB="$1"
# Configuration file is expected next to the basedb dump.
CONF="$1.conf"
echo "Running test suite with database $BASEDB using configuration $CONF"
MASTER_PRIV_FILE="${BASEDB}.mpriv"
# Point the exchange-offline section at the dumped master private key.
taler-config \
-f \
-c "${CONF}" \
-s exchange-offline \
-o MASTER_PRIV_FILE \
-V "${MASTER_PRIV_FILE}"
MASTER_PUB=$(gnunet-ecc -p "$MASTER_PRIV_FILE")
echo "MASTER PUB is ${MASTER_PUB} using file ${MASTER_PRIV_FILE}"
# Load database
full_reload
# Run test suite
fail=0
for i in $TESTS
do
"test_$i"
# NOTE(review): the test_* functions report failures via exit_fail
# (which terminates the script) and never assign $fail, so $fail
# stays 0 here; this early-break looks vestigial -- confirm.
if test 0 != $fail
then
break
fi
done
echo "Cleanup (disabled, leaving database $DB behind)"
# dropdb $DB
}
# *************** Main logic starts here **************
# ####### Setup globals ######
# Postgres database to use (must match configuration file)
export DB="auditor-basedb"
# test required commands exist
echo "Testing for jq"
jq -h > /dev/null || exit_skip "jq required"
echo "Testing for faketime"
faketime -h > /dev/null || exit_skip "faketime required"
# NOTE: really check for all three libeufin commands?
echo "Testing for libeufin"
libeufin-cli --help >/dev/null 2> /dev/null </dev/null || exit_skip "libeufin required"
echo "Testing for pdflatex"
which pdflatex > /dev/null </dev/null || exit_skip "pdflatex required"
echo "Testing for taler-wallet-cli"
taler-wallet-cli -h >/dev/null </dev/null 2>/dev/null || exit_skip "taler-wallet-cli required"
echo -n "Testing for Postgres"
# Available directly in path?
INITDB_BIN=$(command -v initdb) || true
if [[ -n "$INITDB_BIN" ]]; then
echo " FOUND (in path) at $INITDB_BIN"
else
# Fall back to searching the usual install locations.
HAVE_INITDB=$(find /usr -name "initdb" | head -1 2> /dev/null | grep postgres) \
|| exit_skip " MISSING"
echo " FOUND at $(dirname "$HAVE_INITDB")"
INITDB_BIN=$(echo "$HAVE_INITDB" | grep bin/initdb | grep postgres | sort -n | tail -n1)
fi
POSTGRES_PATH=$(dirname "$INITDB_BIN")
MY_TMP_DIR=$(mktemp -d /tmp/taler-auditor-basedbXXXXXX)
echo "Using $MY_TMP_DIR for logging and temporary data"
TMPDIR="$MY_TMP_DIR/postgres"
mkdir -p "$TMPDIR"
echo -n "Setting up Postgres DB at $TMPDIR ..."
$INITDB_BIN \
--no-sync \
--auth=trust \
-D "${TMPDIR}" \
> "${MY_TMP_DIR}/postgres-dbinit.log" \
2> "${MY_TMP_DIR}/postgres-dbinit.err"
echo "DONE"
SOCKETDIR="${TMPDIR}/sockets"
mkdir "${SOCKETDIR}"
echo -n "Launching Postgres service"
# Private, non-durable Postgres instance, reachable only via its
# unix domain socket (no TCP listener, host auth entries stripped).
cat - >> "$TMPDIR/postgresql.conf" <<EOF
unix_socket_directories='${TMPDIR}/sockets'
fsync=off
max_wal_senders=0
synchronous_commit=off
wal_level=minimal
listen_addresses=''
EOF
grep -v host \
< "$TMPDIR/pg_hba.conf" \
> "$TMPDIR/pg_hba.conf.new"
mv "$TMPDIR/pg_hba.conf.new" "$TMPDIR/pg_hba.conf"
"${POSTGRES_PATH}/pg_ctl" \
-D "$TMPDIR" \
-l /dev/null \
start \
> "${MY_TMP_DIR}/postgres-start.log" \
2> "${MY_TMP_DIR}/postgres-start.err"
echo " DONE"
PGHOST="$TMPDIR/sockets"
export PGHOST
MYDIR="${MY_TMP_DIR}/basedb"
mkdir -p "${MYDIR}"
echo "Generating fresh database at $MYDIR"
# Generate the basedb one (fake) day in the past with the KYC
# configuration, so wire-transfer lag exceeds the auditor's grace
# period (see the note in test_1).
if faketime -f '-1 d' ./generate-auditor-basedb.sh \
-c generate-kyc-basedb.conf \
-d "$MYDIR/$DB"
then
echo -n "Reset 'auditor-basedb' database at $PGHOST ..."
dropdb "auditor-basedb" >/dev/null 2>/dev/null || true
# Fixed: the failure message previously expanded $BASEDB, which is
# only assigned later (inside check_with_database); under 'set -u'
# that expansion would itself abort. Use $DB, the name we create.
createdb "auditor-basedb" || exit_skip "Could not create database '$DB' at $PGHOST"
echo " DONE"
check_with_database "$MYDIR/$DB"
if [ "$fail" != "0" ]
then
exit "$fail"
fi
else
echo "Generation failed"
exit 1
fi
exit 0

View File

@ -19,13 +19,15 @@
#
# Check that the auditor report is as expected.
#
# shellcheck disable=SC2317
#
# Requires 'jq' tool and Postgres superuser rights!
set -eu
# set -x
# Set of numbers for all the testcases.
# When adding new tests, increase the last number:
ALL_TESTS=`seq 0 4`
ALL_TESTS=$(seq 0 4)
# $TESTS determines which tests we should run.
# This construction is used to make it easy to
@ -42,50 +44,18 @@ TESTS=${1:-$ALL_TESTS}
# Global variable to run the auditor processes under valgrind
# VALGRIND=valgrind
VALGRIND=""
LOGLEVEL="INFO"
# Exit, with status code "skip" (no 'real' failure)
function exit_skip() {
echo "SKIPPING test: $1"
exit 77
}
# Exit, with error message (hard failure)
function exit_fail() {
echo "FAILING test: $1"
exit 1
}
function stop_libeufin()
{
echo "killing libeufin..."
if test -f ${MYDIR:-/}/libeufin-sandbox.pid
then
echo "Killing libeufin sandbox"
PID=`cat ${MYDIR}/libeufin-sandbox.pid 2> /dev/null`
rm ${MYDIR}/libeufin-sandbox.pid
kill $PID 2> /dev/null || true
wait $PID || true
fi
if test -f ${MYDIR:-/}/libeufin-nexus.pid
then
echo "Killing libeufin nexus"
PID=`cat ${MYDIR}/libeufin-nexus.pid 2> /dev/null`
rm ${MYDIR}/libeufin-nexus.pid
kill $PID 2> /dev/null || true
wait $PID || true
fi
echo "killing libeufin DONE"
}
. setup.sh
# Cleanup to run whenever we exit
function cleanup()
{
if test ! -z "${EPID:-}"
if [ ! -z "${EPID:-}" ]
then
echo -n "Stopping exchange $EPID..."
kill -TERM $EPID
wait $EPID
kill -TERM "$EPID"
wait "$EPID"
echo " DONE"
unset EPID
fi
@ -96,15 +66,20 @@ function cleanup()
function exit_cleanup()
{
echo "Running exit-cleanup"
if test ! -z "${POSTGRES_PATH:-}"
if [ ! -z "${POSTGRES_PATH:-}" ]
then
echo "Stopping Postgres at ${POSTGRES_PATH}"
${POSTGRES_PATH}/pg_ctl -D $TMPDIR -l /dev/null stop &> /dev/null || true
"${POSTGRES_PATH}/pg_ctl" \
-D "$TMPDIR" \
-l /dev/null \
stop \
&> /dev/null \
|| true
fi
cleanup
for n in `jobs -p`
for n in $(jobs -p)
do
kill $n 2> /dev/null || true
kill "$n" 2> /dev/null || true
done
wait
echo "DONE"
@ -113,94 +88,80 @@ function exit_cleanup()
# Install cleanup handler (except for kill -9)
trap exit_cleanup EXIT
# Downloads new transactions from the bank.
function nexus_fetch_transactions () {
export LIBEUFIN_NEXUS_USERNAME=exchange
export LIBEUFIN_NEXUS_PASSWORD=x
export LIBEUFIN_NEXUS_URL=http://localhost:8082/
libeufin-cli accounts fetch-transactions \
--range-type since-last --level report exchange-nexus > /dev/null
unset LIBEUFIN_NEXUS_USERNAME
unset LIBEUFIN_NEXUS_PASSWORD
unset LIBEUFIN_NEXUS_URL
}
# Instruct Nexus to all the prepared payments (= those
# POSTed to /transfer by the exchange).
function nexus_submit_to_sandbox () {
export LIBEUFIN_NEXUS_USERNAME=exchange
export LIBEUFIN_NEXUS_PASSWORD=x
export LIBEUFIN_NEXUS_URL=http://localhost:8082/
libeufin-cli accounts submit-payments exchange-nexus
unset LIBEUFIN_NEXUS_USERNAME
unset LIBEUFIN_NEXUS_PASSWORD
unset LIBEUFIN_NEXUS_URL
}
function get_payto_uri() {
export LIBEUFIN_SANDBOX_USERNAME=$1
export LIBEUFIN_SANDBOX_PASSWORD=$2
export LIBEUFIN_SANDBOX_URL=http://localhost:18082
libeufin-cli sandbox demobank info --bank-account $1 | jq --raw-output '.paytoUri'
libeufin-cli sandbox demobank info \
--bank-account "$1" \
| jq --raw-output '.paytoUri'
}
function launch_libeufin () {
export LIBEUFIN_NEXUS_DB_CONNECTION="jdbc:sqlite:${DB}-nexus.sqlite3"
cd $MYDIR
libeufin-nexus serve --port 8082 \
2> ${MYDIR}/libeufin-nexus-stderr.log \
> ${MYDIR}/libeufin-nexus-stdout.log &
echo $! > ${MYDIR}/libeufin-nexus.pid
export LIBEUFIN_SANDBOX_DB_CONNECTION="jdbc:sqlite:${DB}-sandbox.sqlite3"
libeufin-sandbox serve --no-auth --port 18082 \
> ${MYDIR}/libeufin-sandbox-stdout.log \
2> ${MYDIR}/libeufin-sandbox-stderr.log &
echo $! > ${MYDIR}/libeufin-sandbox.pid
cd $ORIGIN
}
# Operations to run before the actual audit
function pre_audit () {
# Launch bank
echo -n "Launching bank "
EXCHANGE_URL=`taler-config -c $CONF -s EXCHANGE -o BASE_URL`
launch_libeufin
for n in `seq 1 80`
for n in $(seq 1 80)
do
echo -n "."
sleep 0.1
OK=1
wget http://localhost:18082/ -o /dev/null -O /dev/null >/dev/null && break
wget http://localhost:18082/ \
-o /dev/null \
-O /dev/null \
>/dev/null && break
OK=0
done
if [ 1 != $OK ]
if [ 1 != "$OK" ]
then
exit_skip "Failed to launch Sandbox"
fi
for n in `seq 1 80`
for n in $(seq 1 80)
do
echo -n "."
sleep 0.1
OK=1
wget http://localhost:8082/ -o /dev/null -O /dev/null >/dev/null && break
wget http://localhost:8082/ \
-o /dev/null \
-O /dev/null \
>/dev/null && break
OK=0
done
if [ 1 != $OK ]
if [ 1 != "$OK" ]
then
exit_skip "Failed to launch Nexus"
fi
echo " DONE"
if test ${1:-no} = "aggregator"
if [ "${1:-no}" = "aggregator" ]
then
export CONF
echo -n "Running exchange aggregator ... (config: $CONF)"
taler-exchange-aggregator -L INFO -t -c $CONF -y 2> ${MYDIR}/aggregator.log || exit_fail "FAIL"
taler-exchange-aggregator \
-L "$LOGLEVEL" \
-t \
-c "$CONF" \
-y \
2> "${MY_TMP_DIR}/aggregator.log" \
|| exit_fail "FAIL"
echo " DONE"
echo -n "Running exchange closer ..."
taler-exchange-closer -L INFO -t -c $CONF 2> ${MYDIR}/closer.log || exit_fail "FAIL"
taler-exchange-closer \
-L "$LOGLEVEL" \
-t \
-c "$CONF" \
2> "${MY_TMP_DIR}/closer.log" \
|| exit_fail "FAIL"
echo " DONE"
echo -n "Running exchange transfer ..."
taler-exchange-transfer -L INFO -t -c $CONF 2> ${MYDIR}/transfer.log || exit_fail "FAIL"
taler-exchange-transfer \
-L "$LOGLEVEL" \
-t \
-c "$CONF" \
2> "${MY_TMP_DIR}/transfer.log" \
|| exit_fail "FAIL"
echo " DONE"
echo -n "Running Nexus payment submitter ..."
nexus_submit_to_sandbox
@ -218,28 +179,93 @@ function audit_only () {
echo -n "Running audit(s) ... (conf is $CONF)"
# Restart so that first run is always fresh, and second one is incremental
taler-auditor-dbinit -r -c $CONF
$VALGRIND taler-helper-auditor-aggregation -L DEBUG -c $CONF -m $MASTER_PUB > test-audit-aggregation.json 2> test-audit-aggregation.log || exit_fail "aggregation audit failed"
taler-auditor-dbinit \
-r \
-c "$CONF"
$VALGRIND taler-helper-auditor-aggregation \
-L "$LOGLEVEL" \
-c "$CONF" \
-m "$MASTER_PUB" \
> test-audit-aggregation.json \
2> test-audit-aggregation.log \
|| exit_fail "aggregation audit failed"
echo -n "."
$VALGRIND taler-helper-auditor-aggregation -L DEBUG -c $CONF -m $MASTER_PUB > test-audit-aggregation-inc.json 2> test-audit-aggregation-inc.log || exit_fail "incremental aggregation audit failed"
$VALGRIND taler-helper-auditor-aggregation \
-L "$LOGLEVEL" \
-c "$CONF" \
-m "$MASTER_PUB" \
> test-audit-aggregation-inc.json \
2> test-audit-aggregation-inc.log \
|| exit_fail "incremental aggregation audit failed"
echo -n "."
$VALGRIND taler-helper-auditor-coins -L DEBUG -c $CONF -m $MASTER_PUB > test-audit-coins.json 2> test-audit-coins.log || exit_fail "coin audit failed"
$VALGRIND taler-helper-auditor-coins \
-L "$LOGLEVEL" \
-c "$CONF" \
-m "$MASTER_PUB" \
> test-audit-coins.json \
2> test-audit-coins.log \
|| exit_fail "coin audit failed"
echo -n "."
$VALGRIND taler-helper-auditor-coins -L DEBUG -c $CONF -m $MASTER_PUB > test-audit-coins-inc.json 2> test-audit-coins-inc.log || exit_fail "incremental coin audit failed"
$VALGRIND taler-helper-auditor-coins \
-L "$LOGLEVEL" \
-c "$CONF" \
-m "$MASTER_PUB" \
> test-audit-coins-inc.json \
2> test-audit-coins-inc.log \
|| exit_fail "incremental coin audit failed"
echo -n "."
$VALGRIND taler-helper-auditor-deposits -L DEBUG -c $CONF -m $MASTER_PUB > test-audit-deposits.json 2> test-audit-deposits.log || exit_fail "deposits audit failed"
$VALGRIND taler-helper-auditor-deposits \
-L "$LOGLEVEL" \
-c "$CONF" \
-m "$MASTER_PUB" \
> test-audit-deposits.json \
2> test-audit-deposits.log \
|| exit_fail "deposits audit failed"
echo -n "."
$VALGRIND taler-helper-auditor-deposits -L DEBUG -c $CONF -m $MASTER_PUB > test-audit-deposits-inc.json 2> test-audit-deposits-inc.log || exit_fail "incremental deposits audit failed"
$VALGRIND taler-helper-auditor-deposits \
-L "$LOGLEVEL" \
-c "$CONF" \
-m "$MASTER_PUB" \
> test-audit-deposits-inc.json \
2> test-audit-deposits-inc.log \
|| exit_fail "incremental deposits audit failed"
echo -n "."
$VALGRIND taler-helper-auditor-reserves -i -L DEBUG -c $CONF -m $MASTER_PUB > test-audit-reserves.json 2> test-audit-reserves.log || exit_fail "reserves audit failed"
$VALGRIND taler-helper-auditor-reserves \
-i \
-L "$LOGLEVEL" \
-c "$CONF" \
-m "$MASTER_PUB" \
> test-audit-reserves.json \
2> test-audit-reserves.log \
|| exit_fail "reserves audit failed"
echo -n "."
$VALGRIND taler-helper-auditor-reserves -i -L DEBUG -c $CONF -m $MASTER_PUB > test-audit-reserves-inc.json 2> test-audit-reserves-inc.log || exit_fail "incremental reserves audit failed"
$VALGRIND taler-helper-auditor-reserves \
-i \
-L "$LOGLEVEL" \
-c "$CONF" \
-m "$MASTER_PUB" \
> test-audit-reserves-inc.json \
2> test-audit-reserves-inc.log \
|| exit_fail "incremental reserves audit failed"
echo -n "."
$VALGRIND taler-helper-auditor-wire -i -L DEBUG -c $CONF -m $MASTER_PUB > test-audit-wire.json 2> test-wire-audit.log || exit_fail "wire audit failed"
$VALGRIND taler-helper-auditor-wire \
-i \
-L "$LOGLEVEL" \
-c "$CONF" \
-m "$MASTER_PUB" \
> test-audit-wire.json \
2> test-wire-audit.log \
|| exit_fail "wire audit failed"
echo -n "."
$VALGRIND taler-helper-auditor-wire -i -L DEBUG -c $CONF -m $MASTER_PUB > test-audit-wire-inc.json 2> test-wire-audit-inc.log || exit_fail "wire audit failed"
$VALGRIND taler-helper-auditor-wire \
-i \
-L "$LOGLEVEL" \
-c "$CONF" \
-m "$MASTER_PUB" \
> test-audit-wire-inc.json \
2> test-wire-audit-inc.log \
|| exit_fail "wire audit failed"
echo -n "."
echo " DONE"
}
@ -248,12 +274,22 @@ function audit_only () {
function post_audit () {
cleanup
echo -n "TeXing ."
taler-helper-auditor-render.py test-audit-aggregation.json test-audit-coins.json test-audit-deposits.json test-audit-reserves.json test-audit-wire.json < ../../contrib/auditor-report.tex.j2 > test-report.tex || exit_fail "Renderer failed"
taler-helper-auditor-render.py \
test-audit-aggregation.json \
test-audit-coins.json \
test-audit-deposits.json \
test-audit-reserves.json \
test-audit-wire.json \
< ../../contrib/auditor-report.tex.j2 \
> test-report.tex \
|| exit_fail "Renderer failed"
echo -n "."
timeout 10 pdflatex test-report.tex >/dev/null || exit_fail "pdflatex failed"
timeout 10 pdflatex test-report.tex \
>/dev/null \
|| exit_fail "pdflatex failed"
echo -n "."
timeout 10 pdflatex test-report.tex >/dev/null
timeout 10 pdflatex test-report.tex \
>/dev/null
echo " DONE"
}
@ -263,10 +299,9 @@ function post_audit () {
# $ taler-exchange-aggregator
# before auditor (to trigger pending wire transfers).
function run_audit () {
pre_audit ${1:-no}
pre_audit "${1:-no}"
audit_only
post_audit
}
@ -274,35 +309,21 @@ function run_audit () {
function full_reload()
{
echo -n "Doing full reload of the database... "
dropdb $DB 2> /dev/null || true
createdb -T template0 $DB || exit_skip "could not create database $DB (at $PGHOST)"
dropdb "$DB" 2> /dev/null || true
createdb -T template0 "$DB" \
|| exit_skip "could not create database $DB (at $PGHOST)"
# Import pre-generated database, -q(ietly) using single (-1) transaction
psql -Aqt $DB -q -1 -f ${BASEDB}.sql > /dev/null || exit_skip "Failed to load database $DB from ${BASEDB}.sql"
psql -Aqt "$DB" \
-q \
-1 \
-f "${BASEDB}.sql" \
> /dev/null \
|| exit_skip "Failed to load database $DB from ${BASEDB}.sql"
echo "DONE"
cd $MYDIR
rm -f ${DB}-nexus.sqlite3 ${DB}-sandbox.sqlite3 || true # libeufin
echo "Loading libeufin Nexus basedb: ${BASEDB}-libeufin-nexus.sql"
sqlite3 ${DB}-nexus.sqlite3 < ${BASEDB}-libeufin-nexus.sql || exit_skip "Failed to load Nexus database"
echo "DONE"
echo "Loading libeufin Sandbox basedb: ${BASEDB}-libeufin-nexus.sql"
sqlite3 ${DB}-sandbox.sqlite3 < ${BASEDB}-libeufin-sandbox.sql || exit_skip "Failed to load Sandbox database"
echo "DONE"
# Exchange payto URI contains the (dynamically generated)
# IBAN, that can only be written in CONF after libeufin is
# setup.
taler-config -c $CONF -s exchange-account-1 -o PAYTO_URI &> /dev/null || (
echo -n "Specifying exchange payto URI in the configuration ($CONF) (grab IBAN from ${DB}-sandbox.sqlite3)...";
EXCHANGE_IBAN=`echo "SELECT iban FROM BankAccounts WHERE label='exchange'" | sqlite3 ${DB}-sandbox.sqlite3`;
taler-config -c $CONF -s exchange-account-1 -o PAYTO_URI \
-V "payto://iban/SANDBOXX/$EXCHANGE_IBAN?receiver-name=Exchange+Company"
echo " DONE"
)
cd $ORIGIN
}
function test_0() {
echo "===========0: normal run with aggregator==========="
run_audit aggregator
@ -331,94 +352,105 @@ function test_0() {
echo PASS
LOSS=`jq -r .total_bad_sig_loss < test-audit-aggregation.json`
if test $LOSS != "TESTKUDOS:0"
LOSS=$(jq -r .total_bad_sig_loss < test-audit-aggregation.json)
if [ "$LOSS" != "TESTKUDOS:0" ]
then
exit_fail "Wrong total bad sig loss from aggregation, got unexpected loss of $LOSS"
fi
LOSS=`jq -r .irregular_loss < test-audit-coins.json`
if test $LOSS != "TESTKUDOS:0"
LOSS=$(jq -r .irregular_loss < test-audit-coins.json)
if [ "$LOSS" != "TESTKUDOS:0" ]
then
exit_fail "Wrong total bad sig loss from coins, got unexpected loss of $LOSS"
fi
LOSS=`jq -r .total_bad_sig_loss < test-audit-reserves.json`
if test $LOSS != "TESTKUDOS:0"
LOSS=$(jq -r .total_bad_sig_loss < test-audit-reserves.json)
if [ "$LOSS" != "TESTKUDOS:0" ]
then
exit_fail "Wrong total bad sig loss from reserves, got unexpected loss of $LOSS"
fi
echo -n "Test for wire amounts... "
WIRED=`jq -r .total_wire_in_delta_plus < test-audit-wire.json`
if test $WIRED != "TESTKUDOS:0"
WIRED=$(jq -r .total_wire_in_delta_plus < test-audit-wire.json)
if [ "$WIRED" != "TESTKUDOS:0" ]
then
exit_fail "Expected total wire delta plus wrong, got $WIRED"
fi
WIRED=`jq -r .total_wire_in_delta_minus < test-audit-wire.json`
if test $WIRED != "TESTKUDOS:0"
WIRED=$(jq -r .total_wire_in_delta_minus < test-audit-wire.json)
if [ "$WIRED" != "TESTKUDOS:0" ]
then
exit_fail "Expected total wire delta minus wrong, got $WIRED"
fi
WIRED=`jq -r .total_wire_out_delta_plus < test-audit-wire.json`
if test $WIRED != "TESTKUDOS:0"
WIRED=$(jq -r .total_wire_out_delta_plus < test-audit-wire.json)
if [ "$WIRED" != "TESTKUDOS:0" ]
then
exit_fail "Expected total wire delta plus wrong, got $WIRED"
fi
WIRED=`jq -r .total_wire_out_delta_minus < test-audit-wire.json`
if test $WIRED != "TESTKUDOS:0"
WIRED=$(jq -r .total_wire_out_delta_minus < test-audit-wire.json)
if [ "$WIRED" != "TESTKUDOS:0" ]
then
exit_fail "Expected total wire delta minus wrong, got $WIRED"
fi
WIRED=`jq -r .total_misattribution_in < test-audit-wire.json`
if test $WIRED != "TESTKUDOS:0"
WIRED=$(jq -r .total_misattribution_in < test-audit-wire.json)
if [ "$WIRED" != "TESTKUDOS:0" ]
then
exit_fail "Expected total misattribution in wrong, got $WIRED"
fi
echo PASS
echo "PASS"
echo -n "Checking for unexpected arithmetic differences "
LOSS=`jq -r .total_arithmetic_delta_plus < test-audit-aggregation.json`
if test $LOSS != "TESTKUDOS:0"
LOSS=$(jq -r .total_arithmetic_delta_plus < test-audit-aggregation.json)
if [ "$LOSS" != "TESTKUDOS:0" ]
then
exit_fail "Wrong arithmetic delta from aggregations, got unexpected plus of $LOSS"
fi
LOSS=`jq -r .total_arithmetic_delta_minus < test-audit-aggregation.json`
if test $LOSS != "TESTKUDOS:0"
LOSS=$(jq -r .total_arithmetic_delta_minus < test-audit-aggregation.json)
if [ "$LOSS" != "TESTKUDOS:0" ]
then
exit_fail "Wrong arithmetic delta from aggregation, got unexpected minus of $LOSS"
fi
LOSS=`jq -r .total_arithmetic_delta_plus < test-audit-coins.json`
if test $LOSS != "TESTKUDOS:0"
LOSS=$(jq -r .total_arithmetic_delta_plus < test-audit-coins.json)
if [ "$LOSS" != "TESTKUDOS:0" ]
then
exit_fail "Wrong arithmetic delta from coins, got unexpected plus of $LOSS"
fi
LOSS=`jq -r .total_arithmetic_delta_minus < test-audit-coins.json`
if test $LOSS != "TESTKUDOS:0"
LOSS=$(jq -r .total_arithmetic_delta_minus < test-audit-coins.json)
if [ "$LOSS" != "TESTKUDOS:0" ]
then
exit_fail "Wrong arithmetic delta from coins, got unexpected minus of $LOSS"
fi
LOSS=`jq -r .total_arithmetic_delta_plus < test-audit-reserves.json`
if test $LOSS != "TESTKUDOS:0"
LOSS=$(jq -r .total_arithmetic_delta_plus < test-audit-reserves.json)
if [ "$LOSS" != "TESTKUDOS:0" ]
then
exit_fail "Wrong arithmetic delta from reserves, got unexpected plus of $LOSS"
fi
LOSS=`jq -r .total_arithmetic_delta_minus < test-audit-reserves.json`
if test $LOSS != "TESTKUDOS:0"
LOSS=$(jq -r .total_arithmetic_delta_minus < test-audit-reserves.json)
if [ "$LOSS" != "TESTKUDOS:0" ]
then
exit_fail "Wrong arithmetic delta from reserves, got unexpected minus of $LOSS"
fi
jq -e .amount_arithmetic_inconsistencies[0] < test-audit-aggregation.json > /dev/null && exit_fail "Unexpected arithmetic inconsistencies from aggregations detected in ordinary run"
jq -e .amount_arithmetic_inconsistencies[0] < test-audit-coins.json > /dev/null && exit_fail "Unexpected arithmetic inconsistencies from coins detected in ordinary run"
jq -e .amount_arithmetic_inconsistencies[0] < test-audit-reserves.json > /dev/null && exit_fail "Unexpected arithmetic inconsistencies from reserves detected in ordinary run"
echo PASS
jq -e .amount_arithmetic_inconsistencies[0] \
< test-audit-aggregation.json \
> /dev/null \
&& exit_fail "Unexpected arithmetic inconsistencies from aggregations detected in ordinary run"
jq -e .amount_arithmetic_inconsistencies[0] \
< test-audit-coins.json \
> /dev/null \
&& exit_fail "Unexpected arithmetic inconsistencies from coins detected in ordinary run"
jq -e .amount_arithmetic_inconsistencies[0] \
< test-audit-reserves.json \
> /dev/null \
&& exit_fail "Unexpected arithmetic inconsistencies from reserves detected in ordinary run"
echo "PASS"
echo -n "Checking for unexpected wire out differences "
jq -e .wire_out_inconsistencies[0] < test-audit-aggregation.json > /dev/null && exit_fail "Unexpected wire out inconsistencies detected in ordinary run"
echo PASS
jq -e .wire_out_inconsistencies[0] \
< test-audit-aggregation.json \
> /dev/null \
&& exit_fail "Unexpected wire out inconsistencies detected in ordinary run"
echo "PASS"
# cannot easily undo aggregator, hence full reload
full_reload
}
@ -432,46 +464,72 @@ function test_1() {
echo "Checking output"
# if an emergency was detected, that is a bug and we should fail
echo -n "Test for emergencies... "
jq -e .emergencies[0] < test-audit-coins.json > /dev/null && exit_fail "Unexpected emergency detected in ordinary run" || echo PASS
jq -e .emergencies[0] \
< test-audit-coins.json \
> /dev/null \
&& exit_fail "Unexpected emergency detected in ordinary run" \
|| echo "PASS"
echo -n "Test for emergencies by count... "
jq -e .emergencies_by_count[0] < test-audit-coins.json > /dev/null && exit_fail "Unexpected emergency by count detected in ordinary run" || echo PASS
jq -e .emergencies_by_count[0] \
< test-audit-coins.json \
> /dev/null \
&& exit_fail "Unexpected emergency by count detected in ordinary run" \
|| echo "PASS"
echo -n "Test for wire inconsistencies... "
jq -e .wire_out_amount_inconsistencies[0] < test-audit-wire.json > /dev/null && exit_fail "Unexpected wire out inconsistency detected in ordinary run"
jq -e .reserve_in_amount_inconsistencies[0] < test-audit-wire.json > /dev/null && exit_fail "Unexpected reserve in inconsistency detected in ordinary run"
jq -e .misattribution_inconsistencies[0] < test-audit-wire.json > /dev/null && exit_fail "Unexpected misattribution inconsistency detected in ordinary run"
jq -e .row_inconsistencies[0] < test-audit-wire.json > /dev/null && exit_fail "Unexpected row inconsistency detected in ordinary run"
jq -e .row_minor_inconsistencies[0] < test-audit-wire.json > /dev/null && exit_fail "Unexpected minor row inconsistency detected in ordinary run"
jq -e .wire_format_inconsistencies[0] < test-audit-wire.json > /dev/null && exit_fail "Unexpected wire format inconsistencies detected in ordinary run"
jq -e .wire_out_amount_inconsistencies[0] \
< test-audit-wire.json \
> /dev/null \
&& exit_fail "Unexpected wire out inconsistency detected in ordinary run"
jq -e .reserve_in_amount_inconsistencies[0] \
< test-audit-wire.json \
> /dev/null \
&& exit_fail "Unexpected reserve in inconsistency detected in ordinary run"
jq -e .misattribution_inconsistencies[0] \
< test-audit-wire.json \
> /dev/null \
&& exit_fail "Unexpected misattribution inconsistency detected in ordinary run"
jq -e .row_inconsistencies[0] \
< test-audit-wire.json \
> /dev/null \
&& exit_fail "Unexpected row inconsistency detected in ordinary run"
jq -e .row_minor_inconsistencies[0] \
< test-audit-wire.json \
> /dev/null \
&& exit_fail "Unexpected minor row inconsistency detected in ordinary run"
jq -e .wire_format_inconsistencies[0] \
< test-audit-wire.json \
> /dev/null \
&& exit_fail "Unexpected wire format inconsistencies detected in ordinary run"
# TODO: check operation balances are correct (once we have all transaction types and wallet is deterministic)
# TODO: check revenue summaries are correct (once we have all transaction types and wallet is deterministic)
echo PASS
echo "PASS"
echo -n "Test for wire amounts... "
WIRED=`jq -r .total_wire_in_delta_plus < test-audit-wire.json`
if test $WIRED != "TESTKUDOS:0"
WIRED=$(jq -r .total_wire_in_delta_plus < test-audit-wire.json)
if [ "$WIRED" != "TESTKUDOS:0" ]
then
exit_fail "Expected total wire delta plus wrong, got $WIRED"
fi
WIRED=`jq -r .total_wire_in_delta_minus < test-audit-wire.json`
if test $WIRED != "TESTKUDOS:0"
WIRED=$(jq -r .total_wire_in_delta_minus < test-audit-wire.json)
if [ "$WIRED" != "TESTKUDOS:0" ]
then
exit_fail "Expected total wire delta minus wrong, got $WIRED"
fi
WIRED=`jq -r .total_wire_out_delta_plus < test-audit-wire.json`
if test $WIRED != "TESTKUDOS:0"
WIRED=$(jq -r .total_wire_out_delta_plus < test-audit-wire.json)
if [ "$WIRED" != "TESTKUDOS:0" ]
then
exit_fail "Expected total wire delta plus wrong, got $WIRED"
fi
WIRED=`jq -r .total_wire_out_delta_minus < test-audit-wire.json`
if test $WIRED != "TESTKUDOS:0"
WIRED=$(jq -r .total_wire_out_delta_minus < test-audit-wire.json)
if [ "$WIRED" != "TESTKUDOS:0" ]
then
exit_fail "Expected total wire delta minus wrong, got $WIRED"
fi
WIRED=`jq -r .total_misattribution_in < test-audit-wire.json`
if test $WIRED != "TESTKUDOS:0"
WIRED=$(jq -r .total_misattribution_in < test-audit-wire.json)
if [ "$WIRED" != "TESTKUDOS:0" ]
then
exit_fail "Expected total misattribution in wrong, got $WIRED"
fi
@ -486,37 +544,37 @@ function test_1() {
function test_2() {
echo "===========2: recoup amount inconsistency==========="
echo "UPDATE exchange.recoup SET amount_val=5 WHERE recoup_uuid=1" | psql -Aqt $DB
echo "UPDATE exchange.recoup SET amount_val=5 WHERE recoup_uuid=1" | psql -Aqt "$DB"
run_audit
# Reserve balance is now wrong
echo -n "Testing inconsistency detection... "
AMOUNT=`jq -r .reserve_balance_summary_wrong_inconsistencies[0].auditor < test-audit-reserves.json`
if test $AMOUNT != "TESTKUDOS:3"
AMOUNT=$(jq -r .reserve_balance_summary_wrong_inconsistencies[0].auditor < test-audit-reserves.json)
if [ "$AMOUNT" != "TESTKUDOS:3" ]
then
exit_fail "Reserve auditor amount $AMOUNT is wrong"
fi
AMOUNT=`jq -r .reserve_balance_summary_wrong_inconsistencies[0].exchange < test-audit-reserves.json`
if test $AMOUNT != "TESTKUDOS:0"
AMOUNT=$(jq -r .reserve_balance_summary_wrong_inconsistencies[0].exchange < test-audit-reserves.json)
if [ "$AMOUNT" != "TESTKUDOS:0" ]
then
exit_fail "Reserve exchange amount $AMOUNT is wrong"
fi
# Coin spent exceeded coin's value
AMOUNT=`jq -r .amount_arithmetic_inconsistencies[0].auditor < test-audit-coins.json`
if test $AMOUNT != "TESTKUDOS:2"
AMOUNT=$(jq -r .amount_arithmetic_inconsistencies[0].auditor < test-audit-coins.json)
if [ "$AMOUNT" != "TESTKUDOS:2" ]
then
exit_fail "Coin auditor amount $AMOUNT is wrong"
fi
AMOUNT=`jq -r .amount_arithmetic_inconsistencies[0].exchange < test-audit-coins.json`
if test $AMOUNT != "TESTKUDOS:5"
AMOUNT=$(jq -r .amount_arithmetic_inconsistencies[0].exchange < test-audit-coins.json)
if [ "$AMOUNT" != "TESTKUDOS:5" ]
then
exit_fail "Coin exchange amount $AMOUNT is wrong"
fi
echo OK
echo "OK"
# Undo database modification
echo "UPDATE exchange.recoup SET amount_val=2 WHERE recoup_uuid=1" | psql -Aqt $DB
echo "UPDATE exchange.recoup SET amount_val=2 WHERE recoup_uuid=1" | psql -Aqt "$DB"
}
@ -525,26 +583,26 @@ function test_2() {
function test_3() {
echo "===========3: recoup-refresh amount inconsistency==========="
echo "UPDATE exchange.recoup_refresh SET amount_val=5 WHERE recoup_refresh_uuid=1" | psql -Aqt $DB
echo "UPDATE exchange.recoup_refresh SET amount_val=5 WHERE recoup_refresh_uuid=1" | psql -Aqt "$DB"
run_audit
echo -n "Testing inconsistency detection... "
# Coin spent exceeded coin's value
AMOUNT=`jq -r .total_arithmetic_delta_minus < test-audit-coins.json`
if test $AMOUNT != "TESTKUDOS:5"
AMOUNT=$(jq -r .total_arithmetic_delta_minus < test-audit-coins.json)
if [ "$AMOUNT" != "TESTKUDOS:5" ]
then
exit_fail "Arithmetic delta minus amount $AMOUNT is wrong"
fi
AMOUNT=`jq -r .total_arithmetic_delta_plus < test-audit-coins.json`
if test $AMOUNT != "TESTKUDOS:0"
AMOUNT=$(jq -r .total_arithmetic_delta_plus < test-audit-coins.json)
if [ "$AMOUNT" != "TESTKUDOS:0" ]
then
exit_fail "Arithmetic delta plus amount $AMOUNT is wrong"
fi
echo OK
echo "OK"
# Undo database modification
echo "UPDATE exchange.recoup_refresh SET amount_val=0 WHERE recoup_refresh_uuid=1" | psql -Aqt $DB
echo "UPDATE exchange.recoup_refresh SET amount_val=0 WHERE recoup_refresh_uuid=1" | psql -Aqt "$DB"
}
@ -553,34 +611,35 @@ function test_3() {
function test_4() {
echo "===========4: invalid recoup==========="
echo "DELETE FROM exchange.denomination_revocations;" | psql -Aqt $DB
echo "DELETE FROM exchange.denomination_revocations;" | psql -Aqt "$DB"
run_audit
echo -n "Testing inconsistency detection... "
# Coin spent exceeded coin's value
jq -e .bad_sig_losses[0] < test-audit-coins.json > /dev/null || exit_fail "Bad recoup not detected"
AMOUNT=`jq -r .irregular_loss < test-audit-coins.json`
if test $AMOUNT == "TESTKUDOS:0"
jq -e .bad_sig_losses[0] \
< test-audit-coins.json \
> /dev/null \
|| exit_fail "Bad recoup not detected"
AMOUNT=$(jq -r .irregular_loss < test-audit-coins.json)
if [ "$AMOUNT" == "TESTKUDOS:0" ]
then
exit_fail "Total bad sig losses are wrong"
fi
TAB=`jq -r .row_inconsistencies[0].table < test-audit-reserves.json`
if test $TAB != "recoup"
TAB=$(jq -r .row_inconsistencies[0].table < test-audit-reserves.json)
if [ "$TAB" != "recoup" ]
then
exit_fail "Wrong table for row inconsistency, got $TAB"
fi
echo OK
echo "OK"
# Undo database modification (can't easily undo DELETE, so full reload)
full_reload
}
# *************** Main test loop starts here **************
@ -588,14 +647,14 @@ function test_4() {
# Sets $fail to 0 on success, non-zero on failure.
function check_with_database()
{
BASEDB=$1
BASEDB="$1"
# Configuration file to use
CONF=$1.conf
CONF="$1.conf"
echo "Running test suite with database $BASEDB using configuration $CONF"
MASTER_PRIV_FILE=${BASEDB}.mpriv
taler-config -f -c ${CONF} -s exchange-offline -o MASTER_PRIV_FILE -V ${MASTER_PRIV_FILE}
MASTER_PUB=`gnunet-ecc -p $MASTER_PRIV_FILE`
MASTER_PRIV_FILE="${BASEDB}.mpriv"
taler-config -f -c "${CONF}" -s exchange-offline -o MASTER_PRIV_FILE -V "${MASTER_PRIV_FILE}"
MASTER_PUB=$(gnunet-ecc -p "$MASTER_PRIV_FILE")
echo "MASTER PUB is ${MASTER_PUB} using file ${MASTER_PRIV_FILE}"
@ -605,14 +664,14 @@ function check_with_database()
fail=0
for i in $TESTS
do
test_$i
if test 0 != $fail
"test_$i"
if [ 0 != "$fail" ]
then
break
fi
done
# echo "Cleanup (disabled, leaving database $DB behind)"
dropdb $DB
dropdb "$DB"
}
@ -628,36 +687,49 @@ DB=revoke-basedb
echo "Testing for jq"
jq -h > /dev/null || exit_skip "jq required"
echo "Testing for faketime"
faketime -h > /dev/null || exit_skip "faketime required"
faketime -h > /dev/null \
|| exit_skip "faketime required"
echo "Testing for libeufin(-cli)"
libeufin-cli --help >/dev/null 2> /dev/null </dev/null || exit_skip "libeufin required"
libeufin-cli --help \
>/dev/null \
2> /dev/null \
</dev/null \
|| exit_skip "libeufin required"
echo "Testing for pdflatex"
which pdflatex > /dev/null </dev/null || exit_skip "pdflatex required"
echo "Testing for taler-wallet-cli"
taler-wallet-cli -h >/dev/null </dev/null 2>/dev/null || exit_skip "taler-wallet-cli required"
taler-wallet-cli -h \
>/dev/null \
</dev/null \
2>/dev/null \
|| exit_skip "taler-wallet-cli required"
echo -n "Testing for Postgres"
echo -n "Testing for Postgres "
# Available directly in path?
INITDB_BIN=$(command -v initdb) || true
if [[ ! -z "$INITDB_BIN" ]]; then
echo " FOUND (in path) at" $INITDB_BIN
if [[ -n "$INITDB_BIN" ]]; then
echo "FOUND (in path) at $INITDB_BIN"
else
HAVE_INITDB=`find /usr -name "initdb" | head -1 2> /dev/null | grep postgres` || exit_skip " MISSING"
echo " FOUND at" `dirname $HAVE_INITDB`
INITDB_BIN=`echo $HAVE_INITDB | grep bin/initdb | grep postgres | sort -n | tail -n1`
HAVE_INITDB=$(find /usr -name "initdb" | head -1 2> /dev/null | grep postgres) || exit_skip " MISSING"
echo "FOUND at " "$(dirname "$HAVE_INITDB")"
INITDB_BIN=$(echo "$HAVE_INITDB" | grep bin/initdb | grep postgres | sort -n | tail -n1)
fi
echo -n "Setting up Postgres DB"
POSTGRES_PATH=`dirname $INITDB_BIN`
ORIGIN=`pwd`
MYDIR=`mktemp -d /tmp/taler-auditor-basedbXXXXXX`
TMPDIR="${MYDIR}/postgres/"
mkdir -p $TMPDIR
POSTGRES_PATH=$(dirname "$INITDB_BIN")
MY_TMP_DIR=$(mktemp -d /tmp/taler-auditor-basedbXXXXXX)
TMPDIR="${MY_TMP_DIR}/postgres/"
mkdir -p "$TMPDIR"
echo -n "Setting up Postgres DB at $TMPDIR ..."
$INITDB_BIN --no-sync --auth=trust -D ${TMPDIR} > ${MYDIR}/postgres-dbinit.log 2> ${MYDIR}/postgres-dbinit.err
"$INITDB_BIN" \
--no-sync \
--auth=trust \
-D "${TMPDIR}" \
> "${MY_TMP_DIR}/postgres-dbinit.log" \
2> "${MY_TMP_DIR}/postgres-dbinit.err"
echo " DONE"
mkdir ${TMPDIR}/sockets
mkdir "${TMPDIR}/sockets"
echo -n "Launching Postgres service at $POSTGRES_PATH"
cat - >> $TMPDIR/postgresql.conf <<EOF
cat - >> "$TMPDIR/postgresql.conf" <<EOF
unix_socket_directories='${TMPDIR}/sockets'
fsync=off
max_wal_senders=0
@ -665,23 +737,30 @@ synchronous_commit=off
wal_level=minimal
listen_addresses=''
EOF
cat $TMPDIR/pg_hba.conf | grep -v host > $TMPDIR/pg_hba.conf.new
mv $TMPDIR/pg_hba.conf.new $TMPDIR/pg_hba.conf
${POSTGRES_PATH}/pg_ctl -D $TMPDIR -l /dev/null start > ${MYDIR}/postgres-start.log 2> ${MYDIR}/postgres-start.err
grep -v host \
< "$TMPDIR/pg_hba.conf" \
> "$TMPDIR/pg_hba.conf.new"
mv "$TMPDIR/pg_hba.conf.new" "$TMPDIR/pg_hba.conf"
"${POSTGRES_PATH}/pg_ctl" \
-D "$TMPDIR" \
-l /dev/null \
start \
> "${MY_TMP_DIR}/postgres-start.log" \
2> "${MY_TMP_DIR}/postgres-start.err"
echo " DONE"
PGHOST="$TMPDIR/sockets"
export PGHOST
echo "Generating fresh database at $MYDIR"
if faketime -f '-1 d' ./generate-revoke-basedb.sh $MYDIR/$DB
echo "Generating fresh database at $MY_TMP_DIR"
if faketime -f '-1 d' ./generate-revoke-basedb.sh "$MY_TMP_DIR/$DB"
then
check_with_database $MYDIR/$DB
if test x$fail != x0
check_with_database "$MY_TMP_DIR/$DB"
if [ "x$fail" != "x0" ]
then
exit $fail
exit "$fail"
else
echo "Cleaning up $MYDIR..."
rm -rf $MYDIR || echo "Removing $MYDIR failed"
echo "Cleaning up $MY_TMP_DIR..."
rm -rf "$MY_TMP_DIR" || echo "Removing $MY_TMP_DIR failed"
fi
else
echo "Generation failed"

View File

@ -1,8 +1,7 @@
#!/bin/bash
#
# This file is part of TALER
# Copyright (C) 2014-2021 Taler Systems SA
# Copyright (C) 2014-2023 Taler Systems SA
#
# TALER is free software; you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
@ -15,6 +14,7 @@
# You should have received a copy of the GNU General Public License along with
# TALER; see the file COPYING. If not, If not, see <http://www.gnu.org/license>
#
# shellcheck disable=SC2317
set -eu
@ -32,13 +32,13 @@ function exit_fail() {
# Cleanup to run whenever we exit
function cleanup() {
if test ! -z "${POSTGRES_PATH:-}"
if [ -n "${POSTGRES_PATH:-}" ]
then
${POSTGRES_PATH}/pg_ctl -D $TMPDIR stop &> /dev/null || true
"${POSTGRES_PATH}/pg_ctl" -D "$TMPDIR" stop &> /dev/null || true
fi
for n in `jobs -p`
for n in $(jobs -p)
do
kill $n 2> /dev/null || true
kill "$n" 2> /dev/null || true
done
wait
}
@ -59,19 +59,25 @@ function check_with_database()
taler-exchange-dbinit -c test-sync-out.conf
echo -n "."
psql -Aqt talercheck-in -q -1 -f $1.sql >/dev/null || exit_skip "Failed to load database"
psql -Aqt talercheck-in \
-q -1 \
-f "$1.sql" \
>/dev/null \
|| exit_skip "Failed to load database"
echo -n "."
taler-auditor-sync -s test-sync-in.conf -d test-sync-out.conf -t
taler-auditor-sync \
-s test-sync-in.conf \
-d test-sync-out.conf -t
# cs_nonce_locks excluded: no point
for table in denominations denomination_revocations wire_targets reserves reserves_in reserves_close reserves_out auditors auditor_denom_sigs exchange_sign_keys signkey_revocations extensions policy_details policy_fulfillments known_coins refresh_commitments refresh_revealed_coins refresh_transfer_keys deposits refunds wire_out aggregation_tracking wire_fee recoup recoup_refresh
do
echo -n "."
CIN=`echo "SELECT COUNT(*) FROM exchange.$table" | psql talercheck-in -Aqt`
COUT=`echo "SELECT COUNT(*) FROM exchange.$table" | psql talercheck-out -Aqt`
CIN=$(echo "SELECT COUNT(*) FROM exchange.$table" | psql talercheck-in -Aqt)
COUT=$(echo "SELECT COUNT(*) FROM exchange.$table" | psql talercheck-out -Aqt)
if test ${CIN} != ${COUT}
if [ "${CIN}" != "${COUT}" ]
then
dropdb talercheck-in
dropdb talercheck-out
@ -88,14 +94,6 @@ function check_with_database()
fail=0
}
# Postgres database to use
DB=auditor-basedb
# Configuration file to use
CONF=${DB}.conf
# test required commands exist
echo "Testing for jq"
jq -h > /dev/null || exit_skip "jq required"
@ -111,23 +109,25 @@ taler-wallet-cli -h >/dev/null </dev/null 2>/dev/null || exit_skip "taler-wallet
echo -n "Testing for Postgres"
# Available directly in path?
INITDB_BIN=$(command -v initdb) || true
if [[ ! -z "$INITDB_BIN" ]]; then
echo " FOUND (in path) at" $INITDB_BIN
if [[ -n "$INITDB_BIN" ]]; then
echo " FOUND (in path) at $INITDB_BIN"
else
HAVE_INITDB=`find /usr -name "initdb" | head -1 2> /dev/null | grep postgres` || exit_skip " MISSING"
echo " FOUND at" `dirname $HAVE_INITDB`
INITDB_BIN=`echo $HAVE_INITDB | grep bin/initdb | grep postgres | sort -n | tail -n1`
HAVE_INITDB=$(find /usr -name "initdb" | head -1 2> /dev/null | grep postgres) || exit_skip " MISSING"
echo " FOUND at " "$(dirname "$HAVE_INITDB")"
INITDB_BIN=$(echo "$HAVE_INITDB" | grep bin/initdb | grep postgres | sort -n | tail -n1)
fi
echo -n "Setting up Postgres DB"
POSTGRES_PATH=`dirname $INITDB_BIN`
MYDIR=`mktemp -d /tmp/taler-auditor-basedbXXXXXX`
POSTGRES_PATH=$(dirname "$INITDB_BIN")
MYDIR=$(mktemp -d /tmp/taler-auditor-basedbXXXXXX)
TMPDIR="$MYDIR/postgres/"
mkdir -p $TMPDIR
$INITDB_BIN --no-sync --auth=trust -D ${TMPDIR} > ${MYDIR}/postgres-dbinit.log 2> ${MYDIR}/postgres-dbinit.err
mkdir -p "$TMPDIR"
"$INITDB_BIN" --no-sync --auth=trust -D "${TMPDIR}" \
> "${MYDIR}/postgres-dbinit.log" \
2> "${MYDIR}/postgres-dbinit.err"
echo " DONE"
mkdir ${TMPDIR}/sockets
mkdir "${TMPDIR}/sockets"
echo -n "Launching Postgres service"
cat - >> $TMPDIR/postgresql.conf <<EOF
cat - >> "$TMPDIR/postgresql.conf" <<EOF
unix_socket_directories='${TMPDIR}/sockets'
fsync=off
max_wal_senders=0
@ -135,23 +135,30 @@ synchronous_commit=off
wal_level=minimal
listen_addresses=''
EOF
cat $TMPDIR/pg_hba.conf | grep -v host > $TMPDIR/pg_hba.conf.new
mv $TMPDIR/pg_hba.conf.new $TMPDIR/pg_hba.conf
${POSTGRES_PATH}/pg_ctl -D $TMPDIR -l /dev/null start > ${MYDIR}/postgres-start.log 2> ${MYDIR}/postgres-start.err
grep -v host \
< "$TMPDIR/pg_hba.conf" \
> "$TMPDIR/pg_hba.conf.new"
mv "$TMPDIR/pg_hba.conf.new" "$TMPDIR/pg_hba.conf"
"${POSTGRES_PATH}/pg_ctl" \
-D "$TMPDIR" \
-l /dev/null \
start \
> "${MYDIR}/postgres-start.log" \
2> "${MYDIR}/postgres-start.err"
echo " DONE"
PGHOST="$TMPDIR/sockets"
export PGHOST
echo "Generating fresh database at $MYDIR"
if faketime -f '-1 d' ./generate-auditor-basedb.sh $MYDIR/auditor-basedb
if faketime -f '-1 d' ./generate-auditor-basedb.sh -d "$MYDIR/auditor-basedb"
then
check_with_database $MYDIR/auditor-basedb
if test x$fail != x0
check_with_database "$MYDIR/auditor-basedb"
if [ x$fail != x0 ]
then
exit $fail
exit "$fail"
else
echo "Cleaning up $MYDIR..."
rm -rf $MYDIR || echo "Removing $MYDIR failed"
rm -rf "$MYDIR" || echo "Removing $MYDIR failed"
fi
else
echo "Generation failed"

View File

@ -499,6 +499,8 @@ kyc_satisfied (struct AggregationUnit *au_active)
char *requirement;
enum GNUNET_DB_QueryStatus qs;
if (kyc_off)
return true;
qs = TALER_KYCLOGIC_kyc_test_required (
TALER_KYCLOGIC_KYC_TRIGGER_DEPOSIT,
&au_active->h_payto,

View File

@ -876,7 +876,7 @@ sign_and_do_age_withdraw (
/* Prepare the hashes of the coins for insertion */
for (uint32_t i = 0; i<awc->num_coins; i++)
{
TALER_coin_ev_hash (&awc->coin_evs[i],
TALER_coin_ev_hash (&awc->coin_evs[TALER_CNC_KAPPA * i + noreveal_index],
&awc->denom_hs[i],
&h_coin_evs[i]);
}

View File

@ -71,7 +71,7 @@ struct AgeRevealContext
* The data from the original age-withdraw. Will be retrieved from
* the DB via @a ach and @a reserve_pub.
*/
struct TALER_EXCHANGEDB_AgeWithdraw *commitment;
struct TALER_EXCHANGEDB_AgeWithdraw commitment;
};
@ -106,11 +106,8 @@ parse_age_withdraw_reveal_json (
error = "disclosed_coin_secrets must be an array";
else if (num_entries == 0)
error = "disclosed_coin_secrets must not be empty";
else if (num_entries > TALER_MAX_FRESH_COINS * (TALER_CNC_KAPPA - 1))
else if (num_entries > TALER_MAX_FRESH_COINS)
error = "maximum number of coins that can be withdrawn has been exceeded";
else if (0 != num_entries % (TALER_CNC_KAPPA - 1))
error = "the size of disclosed_coin_secrets must be a multiple of "
TALER_CNC_KAPPA_MINUS_ONE_STR;
if (NULL != error)
{
@ -120,29 +117,26 @@ parse_age_withdraw_reveal_json (
return GNUNET_SYSERR;
}
actx->num_secrets = num_entries;
actx->num_coins = num_entries / (TALER_CNC_KAPPA - 1);
actx->num_secrets = num_entries * (TALER_CNC_KAPPA - 1);
actx->num_coins = num_entries;
}
/* Continue parsing the parts */
{
unsigned int idx = 0;
unsigned int k = 0;
json_t *array = NULL;
json_t *value = NULL;
/* Parse diclosed keys */
actx->disclosed_coin_secrets =
GNUNET_new_array (num_entries,
GNUNET_new_array (actx->num_secrets,
struct TALER_PlanchetMasterSecretP);
json_array_foreach (j_disclosed_coin_secrets, idx, value) {
struct GNUNET_JSON_Specification spec[] = {
GNUNET_JSON_spec_fixed_auto (NULL, &actx->disclosed_coin_secrets[idx]),
GNUNET_JSON_spec_end ()
};
if (GNUNET_OK !=
GNUNET_JSON_parse (value, spec, NULL, NULL))
json_array_foreach (j_disclosed_coin_secrets, idx, array) {
if (! json_is_array (array) ||
(TALER_CNC_KAPPA - 1 != json_array_size (array)))
{
char msg[256] = {0};
GNUNET_snprintf (msg,
@ -153,6 +147,32 @@ parse_age_withdraw_reveal_json (
TALER_EC_GENERIC_PARAMETER_MALFORMED,
msg);
goto EXIT;
}
json_array_foreach (array, k, value)
{
struct TALER_PlanchetMasterSecretP *secret =
&actx->disclosed_coin_secrets[2 * idx + k];
struct GNUNET_JSON_Specification spec[] = {
GNUNET_JSON_spec_fixed_auto (NULL, secret),
GNUNET_JSON_spec_end ()
};
if (GNUNET_OK !=
GNUNET_JSON_parse (value, spec, NULL, NULL))
{
char msg[256] = {0};
GNUNET_snprintf (msg,
sizeof(msg),
"couldn't parse entry no. %d in array disclosed_coin_secrets[%d]",
k + 1,
idx + 1);
*mhd_ret = TALER_MHD_reply_with_ec (connection,
TALER_EC_GENERIC_PARAMETER_MALFORMED,
msg);
goto EXIT;
}
}
};
}
@ -182,7 +202,7 @@ find_original_commitment (
struct MHD_Connection *connection,
const struct TALER_AgeWithdrawCommitmentHashP *h_commitment,
const struct TALER_ReservePublicKeyP *reserve_pub,
struct TALER_EXCHANGEDB_AgeWithdraw **commitment,
struct TALER_EXCHANGEDB_AgeWithdraw *commitment,
MHD_RESULT *result)
{
enum GNUNET_DB_QueryStatus qs;
@ -192,7 +212,7 @@ find_original_commitment (
qs = TEH_plugin->get_age_withdraw (TEH_plugin->cls,
reserve_pub,
h_commitment,
*commitment);
commitment);
switch (qs)
{
case GNUNET_DB_STATUS_SUCCESS_ONE_RESULT:
@ -262,7 +282,13 @@ calculate_blinded_hash (
connection,
result);
if (NULL == denom_key)
{
GNUNET_break_op (0);
*result = TALER_MHD_reply_with_ec (connection,
TALER_EC_EXCHANGE_GENERIC_KEYS_MISSING,
NULL);
return GNUNET_SYSERR;
}
/* calculate age commitment hash */
{
@ -286,10 +312,10 @@ calculate_blinded_hash (
/* Next: calculate planchet */
{
struct TALER_CoinPubHashP c_hash;
struct TALER_PlanchetDetail detail;
struct TALER_CoinSpendPrivateKeyP coin_priv;
union TALER_DenominationBlindingKeyP bks;
struct TALER_CoinPubHashP c_hash = {0};
struct TALER_PlanchetDetail detail = {0};
struct TALER_CoinSpendPrivateKeyP coin_priv = {0};
union TALER_DenominationBlindingKeyP bks = {0};
struct TALER_ExchangeWithdrawValues alg_values = {
.cipher = denom_key->denom_pub.cipher,
};
@ -298,24 +324,23 @@ calculate_blinded_hash (
{
struct TALER_CsNonce nonce;
TALER_cs_withdraw_nonce_derive (
secret,
TALER_cs_withdraw_nonce_derive (secret,
&nonce);
{
enum TALER_ErrorCode ec;
struct TEH_CsDeriveData cdd = {
.h_denom_pub = &denom_key->h_denom_pub,
.nonce = &nonce,
};
ec = TEH_keys_denomination_cs_r_pub (&cdd,
GNUNET_assert (TALER_EC_NONE ==
TEH_keys_denomination_cs_r_pub (
&cdd,
false,
&alg_values.details.
cs_values);
/* FIXME Handle error? */
GNUNET_assert (TALER_EC_NONE == ec);
&alg_values.details.cs_values));
}
detail.blinded_planchet.details.cs_blinded_planchet.nonce = nonce;
}
TALER_planchet_blinding_secret_create (secret,
@ -348,10 +373,11 @@ calculate_blinded_hash (
ret = TALER_coin_ev_hash (&detail.blinded_planchet,
&denom_key->h_denom_pub,
bch);
GNUNET_assert (GNUNET_OK == ret);
}
return GNUNET_SYSERR;
return ret;
}
@ -417,9 +443,9 @@ verify_commitment_and_max_age (
{
size_t i = 0; /* either 0 or 1, to index into coin_evs */
for (size_t gamma = 0; gamma<TALER_CNC_KAPPA; gamma++)
for (size_t k = 0; k<TALER_CNC_KAPPA; k++)
{
if (gamma == (size_t) commitment->noreveal_index)
if (k == (size_t) commitment->noreveal_index)
{
GNUNET_CRYPTO_hash_context_read (hash_context,
&commitment->h_coin_evs[coin_idx],
@ -432,7 +458,7 @@ verify_commitment_and_max_age (
const struct TALER_PlanchetMasterSecretP *secret;
struct TALER_BlindedCoinHashP bch;
GNUNET_assert (i<2);
GNUNET_assert (2>i);
GNUNET_assert ((TALER_CNC_KAPPA - 1) * num_coins > j);
secret = &disclosed_coin_secrets[j];
@ -478,8 +504,7 @@ verify_commitment_and_max_age (
}
}
return ret;
return GNUNET_OK;
}
@ -501,7 +526,7 @@ reply_age_withdraw_reveal_success (
for (unsigned int i = 0; i < commitment->num_coins; i++)
{
json_t *obj = GNUNET_JSON_PACK (
TALER_JSON_pack_blinded_denom_sig ("ev_sig",
TALER_JSON_pack_blinded_denom_sig (NULL,
&commitment->denom_sigs[i]));
GNUNET_assert (0 ==
json_array_append_new (list,
@ -572,7 +597,7 @@ TEH_handler_age_withdraw_reveal (
if (GNUNET_OK !=
verify_commitment_and_max_age (
rc->connection,
actx.commitment,
&actx.commitment,
actx.disclosed_coin_secrets,
actx.num_coins,
&result))
@ -580,7 +605,7 @@ TEH_handler_age_withdraw_reveal (
/* Finally, return the signatures */
result = reply_age_withdraw_reveal_success (rc->connection,
actx.commitment);
&actx.commitment);
} while(0);

View File

@ -29,6 +29,8 @@ BEGIN
'(age_withdraw_id BIGINT GENERATED BY DEFAULT AS IDENTITY'
',h_commitment BYTEA NOT NULL CONSTRAINT h_commitment_length CHECK(LENGTH(h_commitment)=64)'
',max_age SMALLINT NOT NULL CONSTRAINT max_age_positive CHECK(max_age>=0)'
',amount_with_fee_val INT8 NOT NULL'
',amount_with_fee_frac INT4 NOT NULL'
',reserve_pub BYTEA NOT NULL CONSTRAINT reserve_pub_length CHECK(LENGTH(reserve_pub)=32)'
',reserve_sig BYTEA NOT NULL CONSTRAINT reserve_sig_length CHECK(LENGTH(reserve_sig)=64)'
',noreveal_index SMALLINT NOT NULL CONSTRAINT noreveal_index_positive CHECK(noreveal_index>=0)'

View File

@ -143,6 +143,8 @@ WHERE
INSERT INTO exchange.age_withdraw
(h_commitment
,max_age
,amount_with_fee_val
,amount_with_fee_frac
,reserve_pub
,reserve_sig
,noreveal_index
@ -152,6 +154,8 @@ INSERT INTO exchange.age_withdraw
VALUES
(h_commitment
,maximum_age_committed
,amount_val
,amount_frac
,rpub
,rsig
,noreveal_index

View File

@ -52,13 +52,13 @@ TEH_PG_do_age_withdraw (
GNUNET_PQ_query_param_auto_from_type (&commitment->h_commitment),
GNUNET_PQ_query_param_uint16 (&commitment->max_age),
GNUNET_PQ_query_param_uint16 (&commitment->noreveal_index),
GNUNET_PQ_query_param_array_auto_from_type (commitment->num_coins,
TALER_PQ_query_param_array_blinded_coin_hash (commitment->num_coins,
commitment->h_coin_evs,
pg->conn),
GNUNET_PQ_query_param_array_uint64 (commitment->num_coins,
commitment->denom_serials,
pg->conn),
GNUNET_PQ_query_param_array_auto_from_type (commitment->num_coins,
TALER_PQ_query_param_array_blinded_denom_sig (commitment->num_coins,
commitment->denom_sigs,
pg->conn),
GNUNET_PQ_query_param_end

View File

@ -52,27 +52,26 @@ TEH_PG_get_age_withdraw (
&aw->amount_with_fee),
GNUNET_PQ_result_spec_uint16 ("noreveal_index",
&aw->noreveal_index),
GNUNET_PQ_result_spec_array_fixed_size (
TALER_PQ_result_spec_array_blinded_coin_hash (
pg->conn,
"h_coin_evs",
sizeof(struct TALER_BlindedPlanchet),
"h_blind_evs",
&aw->num_coins,
(void **) &aw->h_coin_evs),
GNUNET_PQ_result_spec_array_fixed_size (
&aw->h_coin_evs),
TALER_PQ_result_spec_array_blinded_denom_sig (
pg->conn,
"denom_sigs",
sizeof(struct TALER_DenominationSignature),
NULL,
(void **) &aw->denom_sigs),
GNUNET_PQ_result_spec_array_fixed_size (
NULL, /* we assume that this is the same size as h_coin_evs */
&aw->denom_sigs),
TALER_PQ_result_spec_array_denom_hash (
pg->conn,
"denom_pub_hashes",
sizeof(struct TALER_DenominationHashP),
NULL,
(void **) &aw->denom_pub_hashes),
NULL, /* we assume that this is the same size as h_coin_evs */
&aw->denom_pub_hashes),
GNUNET_PQ_result_spec_end
};
GNUNET_assert (NULL != aw);
/* Used in #postgres_get_age_withdraw() to
locate the response for a /reserve/$RESERVE_PUB/age-withdraw request
using the hash of the blinded message. Also needed to ensure
@ -87,12 +86,12 @@ TEH_PG_get_age_withdraw (
",amount_with_fee_val"
",amount_with_fee_frac"
",noreveal_index"
",h_coin_evs"
",h_blind_evs"
",denom_sigs"
",ARRAY("
" SELECT denominations.denom_pub_hash FROM ("
" SELECT UNNEST(denomination_serials) AS id,"
" generate_subscripts(denominations_serials, 1) AS nr" /* for order */
" SELECT UNNEST(denom_serials) AS id,"
" generate_subscripts(denom_serials, 1) AS nr" /* for order */
" ) AS denoms"
" LEFT JOIN denominations ON denominations.denominations_serial=denoms.id"
") AS denom_pub_hashes"

View File

@ -3066,6 +3066,7 @@ typedef void
* @param coin_inputs The input for the coins to withdraw, same as in the previous call to /age-withdraw
* @param noreveal_index The index into each of the kappa coin candidates, that should not be revealed to the exchange
* @param h_commitment The commitment from the previous call to /age-withdraw
* @param reserve_pub The public key of the reserve the original call to /age-withdraw was made to
* @param res_cb A callback for the result, maybe NULL
* @param res_cb_cls A closure for @e res_cb, maybe NULL
* @return a handle for this request; NULL if the argument was invalid.
@ -3080,6 +3081,7 @@ TALER_EXCHANGE_age_withdraw_reveal (
num_coins],
uint8_t noreveal_index,
const struct TALER_AgeWithdrawCommitmentHashP *h_commitment,
const struct TALER_ReservePublicKeyP *reserve_pub,
TALER_EXCHANGE_AgeWithdrawRevealCallback res_cb,
void *res_cb_cls);

View File

@ -126,6 +126,33 @@ struct GNUNET_PQ_QueryParam
TALER_PQ_query_param_json (const json_t *x);
/**
* Generate query parameter for an array of blinded denomination signatures
*
* @param num number of elements in @e denom_sigs
* @param denom_sigs array of blinded denomination signatures
* @param db context for the db-connection
*/
struct GNUNET_PQ_QueryParam
TALER_PQ_query_param_array_blinded_denom_sig (
size_t num,
const struct TALER_BlindedDenominationSignature *denom_sigs,
const struct GNUNET_PQ_Context *db
);
/**
* Generate query parameter for an array of blinded hashes of coin envelopes
*
* @param num number of elements in @e coin_evs
* @param coin_evs array of blinded hashes of coin envelopes
* @param db context for the db-connection
*/
struct GNUNET_PQ_QueryParam
TALER_PQ_query_param_array_blinded_coin_hash (
size_t num,
const struct TALER_BlindedCoinHashP *coin_evs,
const struct GNUNET_PQ_Context *db);
/**
* Currency amount expected.
*
@ -229,6 +256,54 @@ TALER_PQ_result_spec_json (const char *name,
json_t **jp);
/**
* Array of blinded denomination signature expected
*
* @param db context of the database connection
* @param name name of the field in the table
* @param[out] num number of elements in @e denom_sigs
* @param[out] denom_sigs where to store the result
* @return array entry for the result specification to use
*/
struct GNUNET_PQ_ResultSpec
TALER_PQ_result_spec_array_blinded_denom_sig (
const struct GNUNET_PQ_Context *db,
const char *name,
size_t *num,
struct TALER_BlindedDenominationSignature **denom_sigs);
/**
* Array of blinded hashes of coin envelopes
*
* @param db context of the database connection
* @param name name of the field in the table
* @param[out] num number of elements in @e h_coin_evs
* @param[out] h_coin_evs where to store the result
* @return array entry for the result specification to use
*/
struct GNUNET_PQ_ResultSpec
TALER_PQ_result_spec_array_blinded_coin_hash (
const struct GNUNET_PQ_Context *db,
const char *name,
size_t *num,
struct TALER_BlindedCoinHashP **h_coin_evs);
/**
* Array of hashes of denominations
*
* @param db context of the database connection
* @param name name of the field in the table
* @param[out] num number of elements in @e denom_hs
* @param[out] denom_hs where to store the result
* @return array entry for the result specification to use
*/
struct GNUNET_PQ_ResultSpec
TALER_PQ_result_spec_array_denom_hash (
const struct GNUNET_PQ_Context *db,
const char *name,
size_t *num,
struct TALER_DenominationHashP **denom_hs);
#endif /* TALER_PQ_LIB_H_ */
/* end of include/taler_pq_lib.h */

View File

@ -346,6 +346,7 @@ reserve_age_withdraw_ok (
return GNUNET_SYSERR;
}
awbh->callback (awbh->callback_cls,
&response);
/* make sure the callback isn't called again */
@ -780,7 +781,7 @@ copy_results (
const struct TALER_EXCHANGE_AgeWithdrawBlindedResponse *awbr)
{
struct TALER_EXCHANGE_AgeWithdrawHandle *awh = cls;
uint8_t idx = awbr->details.ok.noreveal_index;
uint8_t k = awbr->details.ok.noreveal_index;
struct TALER_EXCHANGE_AgeWithdrawCoinPrivateDetails details[awh->num_coins];
struct TALER_BlindedCoinHashP blinded_coin_hs[awh->num_coins];
struct TALER_EXCHANGE_AgeWithdrawResponse resp = {
@ -797,9 +798,9 @@ copy_results (
for (size_t n = 0; n< awh->num_coins; n++)
{
details[n] = awh->coin_data[n].coin_candidates[idx].details;
details[n].planchet = awh->coin_data[n].planchet_details[idx];
blinded_coin_hs[n] = awh->coin_data[n].coin_candidates[idx].blinded_coin_h;
details[n] = awh->coin_data[n].coin_candidates[k].details;
details[n].planchet = awh->coin_data[n].planchet_details[k];
blinded_coin_hs[n] = awh->coin_data[n].coin_candidates[k].blinded_coin_h;
}
awh->callback (awh->callback_cls,
@ -824,9 +825,9 @@ call_age_withdraw_blinded (
for (size_t n = 0; n < awh->num_coins; n++)
{
blinded_input[n].denom_pub = &awh->coin_data[n].denom_pub;
for (uint8_t i = 0; i < TALER_CNC_KAPPA; i++)
blinded_input[n].planchet_details[i] =
awh->coin_data[n].planchet_details[i];
for (uint8_t k = 0; k < TALER_CNC_KAPPA; k++)
blinded_input[n].planchet_details[k] =
awh->coin_data[n].planchet_details[k];
}
awh->procotol_handle =
@ -918,6 +919,8 @@ csr_withdraw_done (
bool success = false;
/* Complete the initialization of the coin with CS denomination */
can->details.alg_values = csrr->details.ok.alg_values;
GNUNET_assert (can->details.alg_values.cipher
== TALER_DENOMINATION_CS);
TALER_planchet_setup_coin_priv (&can->secret,
&can->details.alg_values,
&can->details.coin_priv);
@ -950,7 +953,6 @@ csr_withdraw_done (
TALER_EXCHANGE_age_withdraw_cancel (awh);
break;
}
success = true;
} while(0);
@ -1023,7 +1025,6 @@ prepare_coins (
struct TALER_PlanchetDetail *planchet = &cd->planchet_details[k];
can->secret = input->secrets[k];
/* Derive the age restriction from the given secret and
* the maximum age */
FAIL_IF (GNUNET_OK !=
@ -1063,6 +1064,8 @@ prepare_coins (
}
case TALER_DENOMINATION_CS:
{
can->details.alg_values.cipher = TALER_DENOMINATION_CS;
struct CSRClosure *cls = &cd->csr_cls[k];
/**
* Save the handler and the denomination for the callback

View File

@ -47,6 +47,9 @@ struct TALER_EXCHANGE_AgeWithdrawRevealHandle
/* The age-withdraw commitment */
struct TALER_AgeWithdrawCommitmentHashP h_commitment;
/* The reserve's public key */
const struct TALER_ReservePublicKeyP *reserve_pub;
/* Number of coins */
size_t num_coins;
@ -115,17 +118,18 @@ age_withdraw_reveal_ok (
{
struct TALER_BlindedDenominationSignature denom_sigs[awrh->num_coins];
json_t *j_sig;
size_t n;
/* Reconstruct the coins and unblind the signatures */
for (size_t n = 0; n < awrh->num_coins; n++)
json_array_foreach (j_sigs, n, j_sig)
{
json_t *j_sig = json_array_get (j_sigs, n);
struct GNUNET_JSON_Specification spec[] = {
GNUNET_JSON_spec_fixed_auto ("", &denom_sigs[n]),
TALER_JSON_spec_blinded_denom_sig (NULL,
&denom_sigs[n]),
GNUNET_JSON_spec_end ()
};
GNUNET_assert (NULL != j_sig);
if (GNUNET_OK != GNUNET_JSON_parse (j_sig,
spec,
NULL, NULL))
@ -133,6 +137,7 @@ age_withdraw_reveal_ok (
GNUNET_break_op (0);
return GNUNET_SYSERR;
}
}
response.details.ok.num_sigs = awrh->num_coins;
@ -231,7 +236,7 @@ handle_age_withdraw_reveal_finished (
break;
case MHD_HTTP_NOT_FOUND:
/* Nothing really to verify, the exchange basically just says
that it doesn't know this age-withraw commitment. */
that it doesn't know this age-withdraw commitment. */
awr.hr.ec = TALER_JSON_get_error_code (j_response);
awr.hr.hint = TALER_JSON_get_error_hint (j_response);
break;
@ -299,7 +304,7 @@ prepare_url (
*end = '\0';
GNUNET_snprintf (arg_str,
sizeof (arg_str),
"age-withraw/%s/reveal",
"age-withdraw/%s/reveal",
pub_str);
awrh->request_url = TALER_url_join (exchange_url,
@ -343,6 +348,9 @@ perform_protocol (
} \
} while(0)
j_array_of_secrets = json_array ();
FAIL_IF (NULL == j_array_of_secrets);
for (size_t n = 0; n < awrh->num_coins; n++)
{
const struct TALER_PlanchetMasterSecretP *secrets =
@ -369,6 +377,8 @@ perform_protocol (
j_secrets));
}
j_request_body = GNUNET_JSON_PACK (
GNUNET_JSON_pack_data_auto ("reserve_pub",
awrh->reserve_pub),
GNUNET_JSON_pack_array_steal ("disclosed_coin_secrets",
j_array_of_secrets));
FAIL_IF (NULL == j_request_body);
@ -418,6 +428,7 @@ TALER_EXCHANGE_age_withdraw_reveal (
num_coins],
uint8_t noreveal_index,
const struct TALER_AgeWithdrawCommitmentHashP *h_commitment,
const struct TALER_ReservePublicKeyP *reserve_pub,
TALER_EXCHANGE_AgeWithdrawRevealCallback reveal_cb,
void *reveal_cb_cls)
{
@ -429,6 +440,7 @@ TALER_EXCHANGE_age_withdraw_reveal (
awrh->coins_input = coins_input;
awrh->callback = reveal_cb;
awrh->callback_cls = reveal_cb_cls;
awrh->reserve_pub = reserve_pub;
if (GNUNET_OK !=
prepare_url (exchange_url,

View File

@ -24,6 +24,7 @@
#include <gnunet/gnunet_util_lib.h>
#include <gnunet/gnunet_pq_lib.h>
#include "taler_pq_lib.h"
#include "pq_common.h"
/**
@ -671,4 +672,388 @@ TALER_PQ_query_param_json (const json_t *x)
}
/** ------------------- Array support -----------------------------------**/
/**
* Closure for the array type handlers.
*
* May contain sizes information for the data, given (and handled) by the
* caller.
*/
struct qconv_array_cls
{
/**
* If not null, contains the array of sizes (the size of the array is the
* .size field in the ambient GNUNET_PQ_QueryParam struct). We do not free
* this memory.
*
* If not null, this value has precedence over @a same_size, which MUST be 0
const size_t *sizes;
/**
* If @a sizes is NULL, this field defines the same size
* for each element in the array.
*/
size_t same_size;
/**
* If true, the array parameter to the data pointer to the qconv_array is a
* continuous byte array of data, either with @a same_size each or sizes
* provided bytes by @a sizes;
*/
bool continuous;
/**
* Type of the array elements
*/
enum TALER_PQ_ArrayType typ;
/**
* Oid of the array elements
*/
Oid oid;
};
/**
* Callback to cleanup a qconv_array_cls to be used during
* GNUNET_PQ_cleanup_query_params_closures
*/
static void
qconv_array_cls_cleanup (void *cls)
{
GNUNET_free (cls);
}
/**
* Function called to convert input argument into SQL parameters for arrays
*
* Note: the format for the encoding of arrays for libpq is not very well
* documented. We peeked into various sources (postgresql and libpqtypes) for
* guidance.
*
* @param cls Closure of type struct qconv_array_cls*
* @param data Pointer to first element in the array
* @param data_len Number of _elements_ in array @a data (if applicable)
* @param[out] param_values SQL data to set
* @param[out] param_lengths SQL length data to set
* @param[out] param_formats SQL format data to set
* @param param_length number of entries available in the @a param_values, @a param_lengths and @a param_formats arrays
* @param[out] scratch buffer for dynamic allocations (to be done via #GNUNET_malloc()
* @param scratch_length number of entries left in @a scratch
* @return -1 on error, number of offsets used in @a scratch otherwise
*/
static int
qconv_array (
void *cls,
const void *data,
size_t data_len,
void *param_values[],
int param_lengths[],
int param_formats[],
unsigned int param_length,
void *scratch[],
unsigned int scratch_length)
{
struct qconv_array_cls *meta = cls;
size_t num = data_len;
size_t total_size;
const size_t *sizes;
bool same_sized;
void *elements = NULL;
bool noerror = true;
/* needed to capture the encoded rsa signatures */
void **buffers = NULL;
size_t *buffer_lengths = NULL;
(void) (param_length);
(void) (scratch_length);
GNUNET_assert (NULL != meta);
GNUNET_assert (num < INT_MAX);
sizes = meta->sizes;
same_sized = (0 != meta->same_size);
#define RETURN_UNLESS(cond) \
do { \
if (! (cond)) \
{ \
GNUNET_break ((cond)); \
noerror = false; \
goto DONE; \
} \
} while(0)
/* Calculate sizes and check bounds */
{
/* num * length-field */
size_t x = sizeof(uint32_t);
size_t y = x * num;
RETURN_UNLESS ((0 == num) || (y / num == x));
/* size of header */
total_size = x = sizeof(struct TALER_PQ_ArrayHeader);
total_size += y;
RETURN_UNLESS (total_size >= x);
/* sizes of elements */
if (same_sized)
{
x = num * meta->same_size;
RETURN_UNLESS ((0 == num) || (x / num == meta->same_size));
y = total_size;
total_size += x;
RETURN_UNLESS (total_size >= y);
}
else /* sizes are different per element */
{
switch (meta->typ)
{
case TALER_PQ_array_of_blinded_denom_sig:
{
const struct TALER_BlindedDenominationSignature *denom_sigs = data;
size_t len;
buffers = GNUNET_new_array (num, void *);
buffer_lengths = GNUNET_new_array (num, size_t);
for (size_t i = 0; i<num; i++)
{
switch (denom_sigs[i].cipher)
{
case TALER_DENOMINATION_RSA:
len = GNUNET_CRYPTO_rsa_signature_encode (
denom_sigs[i].details.blinded_rsa_signature,
&buffers[i]);
RETURN_UNLESS (len != 0);
break;
case TALER_DENOMINATION_CS:
len = sizeof (denom_sigs[i].details.blinded_cs_answer);
break;
default:
GNUNET_assert (0);
}
/* for the cipher and marker */
len += 2 * sizeof(uint32_t);
buffer_lengths[i] = len;
y = total_size;
total_size += len;
RETURN_UNLESS (total_size >= y);
}
sizes = buffer_lengths;
break;
}
default:
GNUNET_assert (0);
}
}
RETURN_UNLESS (INT_MAX > total_size);
RETURN_UNLESS (0 != total_size);
elements = GNUNET_malloc (total_size);
}
/* Write data */
{
char *out = elements;
struct TALER_PQ_ArrayHeader h = {
.ndim = htonl (1), /* We only support one-dimensional arrays */
.has_null = htonl (0), /* We do not support NULL entries in arrays */
.lbound = htonl (1), /* Default start index value */
.dim = htonl (num),
.oid = htonl (meta->oid),
};
/* Write header */
GNUNET_memcpy (out, &h, sizeof(h));
out += sizeof(h);
/* Write elements */
for (size_t i = 0; i < num; i++)
{
size_t sz = same_sized ? meta->same_size : sizes[i];
*(uint32_t *) out = htonl (sz);
out += sizeof(uint32_t);
switch (meta->typ)
{
case TALER_PQ_array_of_blinded_denom_sig:
{
const struct TALER_BlindedDenominationSignature *denom_sigs = data;
uint32_t be[2];
be[0] = htonl ((uint32_t) denom_sigs[i].cipher);
be[1] = htonl (0x01); /* magic marker: blinded */
GNUNET_memcpy (out,
&be,
sizeof(be));
out += sizeof(be);
sz -= sizeof(be);
switch (denom_sigs[i].cipher)
{
case TALER_DENOMINATION_RSA:
{
void *buf = buffers[i];
GNUNET_memcpy (out,
buf,
sz);
break;
}
case TALER_DENOMINATION_CS:
GNUNET_memcpy (out,
&denom_sigs[i].details.blinded_cs_answer,
sz);
break;
default:
GNUNET_assert (0);
}
break;
}
case TALER_PQ_array_of_blinded_coin_hash:
{
const struct TALER_BlindedCoinHashP *coin_hs = data;
GNUNET_memcpy (out,
&coin_hs[i],
sizeof(struct TALER_BlindedCoinHashP));
break;
}
case TALER_PQ_array_of_denom_hash:
{
const struct TALER_DenominationHashP *denom_hs = data;
GNUNET_memcpy (out,
&denom_hs[i],
sizeof(struct TALER_DenominationHashP));
break;
}
default:
{
GNUNET_assert (0);
break;
}
}
out += sz;
}
}
param_values[0] = elements;
param_lengths[0] = total_size;
param_formats[0] = 1;
scratch[0] = elements;
DONE:
if (NULL != buffers)
{
for (size_t i = 0; i<num; i++)
GNUNET_free (buffers[i]);
GNUNET_free (buffers);
}
GNUNET_free (buffer_lengths);
if (noerror)
return 1;
return -1;
}
/**
* Function to generate a type-specific query parameter and corresponding closure
*
* @param num Number of elements in @a elements
* @param continuous If true, @a elements is a continuous array of data
* @param elements Array of @a num elements, either continuous or pointers
* @param sizes Array of @a num sizes, one per element, may be NULL
* @param same_size If not 0, all elements in @a elements have this size
* @param typ Supported internal type of each element in @a elements
* @param oid Oid of the type to be used in Postgres
* @return Query parameter
*/
static struct GNUNET_PQ_QueryParam
query_param_array_generic (
unsigned int num,
bool continuous,
const void *elements,
const size_t *sizes,
size_t same_size,
enum TALER_PQ_ArrayType typ,
Oid oid)
{
struct qconv_array_cls *meta = GNUNET_new (struct qconv_array_cls);
meta->typ = typ;
meta->oid = oid;
meta->sizes = sizes;
meta->same_size = same_size;
meta->continuous = continuous;
struct GNUNET_PQ_QueryParam res = {
.conv = qconv_array,
.conv_cls = meta,
.conv_cls_cleanup = qconv_array_cls_cleanup,
.data = elements,
.size = num,
.num_params = 1,
};
return res;
}
struct GNUNET_PQ_QueryParam
TALER_PQ_query_param_array_blinded_denom_sig (
size_t num,
const struct TALER_BlindedDenominationSignature *denom_sigs,
const struct GNUNET_PQ_Context *db)
{
return query_param_array_generic (num,
true,
denom_sigs,
NULL,
0,
TALER_PQ_array_of_blinded_denom_sig,
GNUNET_PQ_get_oid (db,
GNUNET_PQ_DATATYPE_BYTEA));
};
struct GNUNET_PQ_QueryParam
TALER_PQ_query_param_array_blinded_coin_hash (
size_t num,
const struct TALER_BlindedCoinHashP *coin_hs,
const struct GNUNET_PQ_Context *db)
{
return query_param_array_generic (num,
true,
coin_hs,
NULL,
sizeof(struct TALER_BlindedCoinHashP),
TALER_PQ_array_of_blinded_coin_hash,
GNUNET_PQ_get_oid (db,
GNUNET_PQ_DATATYPE_BYTEA));
};
struct GNUNET_PQ_QueryParam
TALER_PQ_query_param_array_denom_hash (
size_t num,
const struct TALER_DenominationHashP *denom_hs,
const struct GNUNET_PQ_Context *db)
{
return query_param_array_generic (num,
true,
denom_hs,
NULL,
sizeof(struct TALER_DenominationHashP),
TALER_PQ_array_of_denom_hash,
GNUNET_PQ_get_oid (db,
GNUNET_PQ_DATATYPE_BYTEA));
};
/* end of pq/pq_query_helper.c */

View File

@ -20,6 +20,7 @@
*/
#include "platform.h"
#include <gnunet/gnunet_util_lib.h>
#include "pq_common.h"
#include "taler_pq_lib.h"
@ -975,4 +976,305 @@ TALER_PQ_result_spec_exchange_withdraw_values (
}
/**
* Closure for the array result specifications. Contains type information
* for the generic parser extract_array_generic and out-pointers for the results.
*/
struct ArrayResultCls
{
/* Oid of the expected type, must match the oid in the header of the PQResult struct */
Oid oid;
/* Target type */
enum TALER_PQ_ArrayType typ;
/* If not 0, defines the expected size of each entry */
size_t same_size;
/* Out-pointer to write the number of elements in the array */
size_t *num;
/* Out-pointer. If @a typ is TALER_PQ_array_of_byte and @a same_size is 0,
* allocate and put the array of @a num sizes here. NULL otherwise */
size_t **sizes;
};
/**
 * Extract data from a Postgres database @a result as array of a specific type
 * from row @a row.  The type information and optionally additional
 * out-parameters are given in @a cls which is of type #ArrayResultCls.
 *
 * On failure, any partially extracted data is released and the
 * out-pointer @a dst is reset to NULL.
 *
 * @param cls closure of type #ArrayResultCls
 * @param result where to extract data from
 * @param row row to extract data from
 * @param fname name (or prefix) of the fields to extract from
 * @param[in,out] dst_size where to store size of result, may be NULL
 * @param[out] dst where to store the result
 * @return
 *   #GNUNET_YES if all results could be extracted
 *   #GNUNET_SYSERR if a result was invalid (non-existing field or NULL)
 */
static enum GNUNET_GenericReturnValue
extract_array_generic (
  void *cls,
  PGresult *result,
  int row,
  const char *fname,
  size_t *dst_size,
  void *dst)
{
  const struct ArrayResultCls *info = cls;
  int data_sz;
  char *data;
  struct TALER_PQ_ArrayHeader header;
  int col_num;

  GNUNET_assert (NULL != dst);
  *((void **) dst) = NULL;

#define FAIL_IF(cond) \
  do { \
    if ((cond)) \
    { \
      GNUNET_break (! (cond)); \
      goto FAIL; \
    } \
  } while(0)

  col_num = PQfnumber (result, fname);
  FAIL_IF (0 > col_num);

  data_sz = PQgetlength (result, row, col_num);
  FAIL_IF (0 > data_sz);
  FAIL_IF (sizeof(header) > (size_t) data_sz);

  data = PQgetvalue (result, row, col_num);
  FAIL_IF (NULL == data);

  {
    struct TALER_PQ_ArrayHeader *h =
      (struct TALER_PQ_ArrayHeader *) data;

    header.ndim = ntohl (h->ndim);
    header.has_null = ntohl (h->has_null);
    header.oid = ntohl (h->oid);
    header.dim = ntohl (h->dim);
    header.lbound = ntohl (h->lbound);

    /* Only one-dimensional, 1-based arrays without NULL entries and
       with the expected element Oid are supported. */
    FAIL_IF (1 != header.ndim);
    FAIL_IF (INT_MAX <= header.dim);
    FAIL_IF (0 != header.has_null);
    FAIL_IF (1 != header.lbound);
    FAIL_IF (info->oid != header.oid);
  }

  if (NULL != info->num)
    *info->num = header.dim;

  {
    char *in = data + sizeof(header);
    /* All per-entry length prefixes are validated against the end of
       the actual buffer; the previous code trusted them blindly. */
    char *const data_end = data + data_sz;

    switch (info->typ)
    {
    case TALER_PQ_array_of_denom_hash:
      {
        struct TALER_DenominationHashP *hashes;

        if (NULL != dst_size)
          *dst_size = sizeof(struct TALER_DenominationHashP) * (header.dim);
        hashes = GNUNET_new_array (header.dim,
                                   struct TALER_DenominationHashP);
        *((void **) dst) = hashes;
        for (uint32_t i = 0; i < header.dim; i++)
        {
          uint32_t val;
          size_t sz;

          /* read via memcpy: entries after variable-size data may be
             misaligned, a direct (uint32_t *) dereference would be UB */
          FAIL_IF ((size_t) (data_end - in) < sizeof(val));
          GNUNET_memcpy (&val,
                         in,
                         sizeof(val));
          sz = ntohl (val);
          FAIL_IF (sz != sizeof(struct TALER_DenominationHashP));
          in += sizeof(uint32_t);
          FAIL_IF ((size_t) (data_end - in) < sz);
          GNUNET_memcpy (&hashes[i],
                         in,
                         sz);
          in += sz;
        }
        return GNUNET_OK;
      }
    case TALER_PQ_array_of_blinded_coin_hash:
      {
        struct TALER_BlindedCoinHashP *hashes;

        if (NULL != dst_size)
          *dst_size = sizeof(struct TALER_BlindedCoinHashP) * (header.dim);
        hashes = GNUNET_new_array (header.dim,
                                   struct TALER_BlindedCoinHashP);
        *((void **) dst) = hashes;
        for (uint32_t i = 0; i < header.dim; i++)
        {
          uint32_t val;
          size_t sz;

          FAIL_IF ((size_t) (data_end - in) < sizeof(val));
          GNUNET_memcpy (&val,
                         in,
                         sizeof(val));
          sz = ntohl (val);
          FAIL_IF (sz != sizeof(struct TALER_BlindedCoinHashP));
          in += sizeof(uint32_t);
          FAIL_IF ((size_t) (data_end - in) < sz);
          GNUNET_memcpy (&hashes[i],
                         in,
                         sz);
          in += sz;
        }
        return GNUNET_OK;
      }
    case TALER_PQ_array_of_blinded_denom_sig:
      {
        struct TALER_BlindedDenominationSignature *denom_sigs;

        if (0 == header.dim)
        {
          /* An empty array is a legitimate result, not an error.
             (Previously this 'break'ed out of the switch and fell
             through to FAIL, returning #GNUNET_SYSERR.) */
          if (NULL != dst_size)
            *dst_size = 0;
          return GNUNET_OK;
        }

        denom_sigs = GNUNET_new_array (header.dim,
                                       struct TALER_BlindedDenominationSignature);
        *((void **) dst) = denom_sigs;

        /* decode every entry; on any malformed entry, unwind */
        for (uint32_t i = 0; i < header.dim; i++)
        {
          struct TALER_BlindedDenominationSignature *denom_sig = &denom_sigs[i];
          uint32_t be[2];
          uint32_t val;
          size_t sz;
          bool ok = false;

          do {
            if ((size_t) (data_end - in) < sizeof(val))
              break;
            GNUNET_memcpy (&val,
                           in,
                           sizeof(val));
            sz = ntohl (val);
            in += sizeof(uint32_t);
            if (sizeof(be) > sz)
              break;
            if ((size_t) (data_end - in) < sz)
              break;
            GNUNET_memcpy (&be,
                           in,
                           sizeof(be));
            if (0x01 != ntohl (be[1])) /* magic marker: blinded */
              break;
            in += sizeof(be);
            sz -= sizeof(be);
            denom_sig->cipher = ntohl (be[0]);
            switch (denom_sig->cipher)
            {
            case TALER_DENOMINATION_RSA:
              denom_sig->details.blinded_rsa_signature =
                GNUNET_CRYPTO_rsa_signature_decode (in,
                                                    sz);
              ok = (NULL != denom_sig->details.blinded_rsa_signature);
              break;
            case TALER_DENOMINATION_CS:
              if (sizeof(denom_sig->details.blinded_cs_answer) != sz)
                break;
              GNUNET_memcpy (&denom_sig->details.blinded_cs_answer,
                             in,
                             sz);
              ok = true;
              break;
            default:
              break;
            }
            in += sz;
          } while (0);

          if (! ok)
          {
            GNUNET_break (0);
            /* Free the signatures decoded so far: FAIL only frees the
               array itself, the RSA signature objects would leak. */
            for (uint32_t k = 0; k < i; k++)
              TALER_blinded_denom_sig_free (&denom_sigs[k]);
            goto FAIL;
          }
        }
        if (NULL != dst_size)
          *dst_size = sizeof(*denom_sigs) * header.dim;
        return GNUNET_OK;
      }
    default:
      FAIL_IF (true);
    }
  }

FAIL:
  GNUNET_free (*(void **) dst);
  /* Reset the out-pointer: leaving it dangling would let the result
     spec's cleaner double-free the buffer. */
  *((void **) dst) = NULL;
  return GNUNET_SYSERR;
#undef FAIL_IF
}
/**
 * Release all memory associated with an array result specification:
 * the optional per-entry size array (only present when entries had
 * variable size), the closure itself, and the extracted data.
 *
 * NOTE(review): for blinded denomination signatures this frees only the
 * array, not the RSA signature objects referenced from within it —
 * presumably callers take ownership of those; confirm against callers.
 *
 * @param cls closure of type `struct ArrayResultCls`
 * @param rd pointer to the result data to be freed
 */
static void
array_cleanup (void *cls,
               void *rd)
{
  struct ArrayResultCls *info = cls;
  void **result_data = rd;

  if ( (NULL != info->sizes) &&
       (0 == info->same_size) )
    GNUNET_free (*(info->sizes));
  GNUNET_free (info);
  GNUNET_free (*result_data);
  *result_data = NULL;
}
/**
 * Expect an array of blinded denomination signatures in the result.
 *
 * @param db database context, used to resolve the Oid of the BYTEA
 *        element type
 * @param name name of the field in the result to extract from
 * @param[out] num where to store the number of elements in @a denom_sigs
 * @param[out] denom_sigs where to store the allocated array of
 *        signatures; ownership passes to the caller
 * @return array entry for the result specification to use
 */
struct GNUNET_PQ_ResultSpec
TALER_PQ_result_spec_array_blinded_denom_sig (
  const struct GNUNET_PQ_Context *db,
  const char *name,
  size_t *num,
  struct TALER_BlindedDenominationSignature **denom_sigs)
{
  struct ArrayResultCls *info = GNUNET_new (struct ArrayResultCls);

  info->num = num;
  info->typ = TALER_PQ_array_of_blinded_denom_sig;
  info->oid = GNUNET_PQ_get_oid (db,
                                 GNUNET_PQ_DATATYPE_BYTEA);

  struct GNUNET_PQ_ResultSpec res = {
    .conv = extract_array_generic,
    .cleaner = array_cleanup,
    .dst = (void *) denom_sigs,
    .fname = name,
    .cls = info
  };
  /* note: no ';' after the closing brace — the stray semicolon after
     the function body was a -Wpedantic violation */
  return res;
}
/**
 * Expect an array of blinded coin hashes in the result.
 *
 * @param db database context, used to resolve the Oid of the BYTEA
 *        element type
 * @param name name of the field in the result to extract from
 * @param[out] num where to store the number of elements in @a h_coin_evs
 * @param[out] h_coin_evs where to store the allocated array of hashes;
 *        ownership passes to the caller
 * @return array entry for the result specification to use
 */
struct GNUNET_PQ_ResultSpec
TALER_PQ_result_spec_array_blinded_coin_hash (
  const struct GNUNET_PQ_Context *db,
  const char *name,
  size_t *num,
  struct TALER_BlindedCoinHashP **h_coin_evs)
{
  struct ArrayResultCls *info = GNUNET_new (struct ArrayResultCls);

  info->num = num;
  info->typ = TALER_PQ_array_of_blinded_coin_hash;
  info->oid = GNUNET_PQ_get_oid (db,
                                 GNUNET_PQ_DATATYPE_BYTEA);

  struct GNUNET_PQ_ResultSpec res = {
    .conv = extract_array_generic,
    .cleaner = array_cleanup,
    .dst = (void *) h_coin_evs,
    .fname = name,
    .cls = info
  };
  /* no stray ';' after the function body (-Wpedantic) */
  return res;
}
/**
 * Expect an array of denomination hashes in the result.
 *
 * @param db database context, used to resolve the Oid of the BYTEA
 *        element type
 * @param name name of the field in the result to extract from
 * @param[out] num where to store the number of elements in @a denom_hs
 * @param[out] denom_hs where to store the allocated array of hashes;
 *        ownership passes to the caller
 * @return array entry for the result specification to use
 */
struct GNUNET_PQ_ResultSpec
TALER_PQ_result_spec_array_denom_hash (
  const struct GNUNET_PQ_Context *db,
  const char *name,
  size_t *num,
  struct TALER_DenominationHashP **denom_hs)
{
  struct ArrayResultCls *info = GNUNET_new (struct ArrayResultCls);

  info->num = num;
  info->typ = TALER_PQ_array_of_denom_hash;
  info->oid = GNUNET_PQ_get_oid (db,
                                 GNUNET_PQ_DATATYPE_BYTEA);

  struct GNUNET_PQ_ResultSpec res = {
    .conv = extract_array_generic,
    .cleaner = array_cleanup,
    .dst = (void *) denom_hs,
    .fname = name,
    .cls = info
  };
  /* no stray ';' after the function body (-Wpedantic) */
  return res;
}
/* end of pq_result_helper.c */

View File

@ -260,7 +260,7 @@ run (void *cls,
CMD_TRANSFER_TO_EXCHANGE ("create-reserve-kyc-1",
"EUR:30.02"),
TALER_TESTING_cmd_check_bank_admin_transfer (
"check-create-reserve-1",
"check-create-reserve-kyc-1",
"EUR:30.02",
cred.user42_payto,
cred.exchange_payto,
@ -290,14 +290,17 @@ run (void *cls,
MHD_HTTP_CONFLICT,
"EUR:10",
NULL),
TALER_TESTING_cmd_age_withdraw ("age-withdraw-coin-1",
TALER_TESTING_cmd_age_withdraw ("age-withdraw-coins-1",
"create-reserve-kyc-1",
8,
MHD_HTTP_OK,
"EUR:10",
"EUR:5",
"EUR:10",
"EUR:5",
NULL),
TALER_TESTING_cmd_age_withdraw_reveal ("age-withdraw-coins-reveal-1",
"age-withdraw-coins-1",
MHD_HTTP_OK),
TALER_TESTING_cmd_end (),
};

View File

@ -578,9 +578,9 @@ age_withdraw_reveal_cb (
case MHD_HTTP_OK:
{
const struct AgeWithdrawState *aws = awrs->aws;
GNUNET_log (GNUNET_ERROR_TYPE_INFO,
"Got age-withdraw reveal success!\n");
GNUNET_assert (awrs->num_coins == response->details.ok.num_sigs);
awrs->denom_sigs = GNUNET_new_array (awrs->num_coins,
struct TALER_DenominationSignature);
for (size_t n = 0; n < awrs->num_coins; n++)
TALER_denom_sig_unblind (&awrs->denom_sigs[n],
&response->details.ok.blinded_denom_sigs[n],
@ -588,6 +588,8 @@ age_withdraw_reveal_cb (
&aws->coin_outputs[n].details.h_coin_pub,
&aws->coin_outputs[n].details.alg_values,
&aws->coin_inputs[n].denom_pub->key);
GNUNET_log (GNUNET_ERROR_TYPE_INFO,
"age-withdraw reveal success!\n");
}
break;
case MHD_HTTP_NOT_FOUND:
@ -629,7 +631,7 @@ age_withdraw_reveal_run (
* Get the command and state for the previous call to "age witdraw"
*/
age_withdraw_cmd =
TALER_TESTING_interpreter_get_command (is,
TALER_TESTING_interpreter_lookup_command (is,
awrs->age_withdraw_reference);
if (NULL == age_withdraw_cmd)
{
@ -649,6 +651,7 @@ age_withdraw_reveal_run (
aws->coin_inputs,
aws->noreveal_index,
&aws->h_commitment,
&aws->reserve_pub,
age_withdraw_reveal_cb,
awrs);
}