commit 0aeca381e76623565419d9d7bc214cabd7d94761
parent 5f8720069edd14407656762dd995ad709152ce72
Author: Christian Grothoff <christian@grothoff.org>
Date: Sat, 3 Jan 2026 23:30:45 +0100
-misc bugfixes towards report generation
Diffstat:
11 files changed, 215 insertions(+), 291 deletions(-)
diff --git a/contrib/typst/transactions.typ b/contrib/typst/transactions.typ
@@ -227,7 +227,7 @@
}
- heading(level: 1)[GNU Taler Merchant Accounting: #data.business]
+ heading(level: 1)[GNU Taler Merchant Accounting: #data.business_name]
[Transaction report from
#underline([#format_round_timestamp(data.start_date,data.bucket_period.d_us,false)]) to
@@ -311,7 +311,7 @@
// Load your JSON data
#form((
- business: "Example.com",
+ business_name: "Example.com",
start_date: (t_s: 1764967786),
end_date: (t_s: 1767222000),
bucket_period: (d_us: 86400000000),
diff --git a/src/backend/Makefile.am b/src/backend/Makefile.am
@@ -15,7 +15,8 @@ pkgcfg_DATA = \
bin_SCRIPTS = \
- taler-merchant-report-generator-email
+ taler-merchant-report-generator-email \
+ taler-merchant-report-generator-file
EXTRA_DIST = \
$(pkgcfg_DATA) \
diff --git a/src/backend/taler-merchant-httpd_dispatcher.c b/src/backend/taler-merchant-httpd_dispatcher.c
@@ -894,7 +894,107 @@ determine_handler_group (const char **urlp,
.have_id_segment = true,
.handler = &TMH_private_patch_token_family_SLUG,
},
- #ifdef HAVE_DONAU_DONAU_SERVICE_H
+
+ /* Reports endpoints */
+ {
+ .url_prefix = "/reports",
+ .method = MHD_HTTP_METHOD_GET,
+ .permission = "reports-read",
+ .handler = &TMH_private_get_reports,
+ },
+ {
+ .url_prefix = "/reports",
+ .method = MHD_HTTP_METHOD_POST,
+ .permission = "reports-write",
+ .handler = &TMH_private_post_reports,
+ },
+ {
+ .url_prefix = "/reports/",
+ .method = MHD_HTTP_METHOD_GET,
+ .handler = &TMH_private_get_report,
+ .permission = "reports-read",
+ .have_id_segment = true,
+ },
+ {
+ .url_prefix = "/reports/",
+ .method = MHD_HTTP_METHOD_PATCH,
+ .handler = &TMH_private_patch_report,
+ .permission = "reports-write",
+ .have_id_segment = true,
+ },
+ {
+ .url_prefix = "/reports/",
+ .method = MHD_HTTP_METHOD_DELETE,
+ .handler = &TMH_private_delete_report,
+ .permission = "reports-write",
+ .have_id_segment = true,
+ },
+
+ /* Groups endpoints */
+ {
+ .url_prefix = "/groups",
+ .method = MHD_HTTP_METHOD_GET,
+ .permission = "groups-read",
+ .handler = &TMH_private_get_groups,
+ },
+ {
+ .url_prefix = "/groups",
+ .method = MHD_HTTP_METHOD_POST,
+ .permission = "groups-write",
+ .handler = &TMH_private_post_groups,
+ },
+ {
+ .url_prefix = "/groups/",
+ .method = MHD_HTTP_METHOD_PATCH,
+ .handler = &TMH_private_patch_group,
+ .permission = "groups-write",
+ .have_id_segment = true,
+ },
+ {
+ .url_prefix = "/groups/",
+ .method = MHD_HTTP_METHOD_DELETE,
+ .handler = &TMH_private_delete_group,
+ .permission = "groups-write",
+ .have_id_segment = true,
+ },
+
+ /* Money pots endpoints */
+ {
+ .url_prefix = "/pots",
+ .method = MHD_HTTP_METHOD_GET,
+ .handler = &TMH_private_get_pots,
+ .permission = "pots-read",
+ },
+ {
+ .url_prefix = "/pots",
+ .method = MHD_HTTP_METHOD_POST,
+ .handler = &TMH_private_post_pots,
+ .permission = "pots-write"
+ },
+ {
+ .url_prefix = "/pots/",
+ .method = MHD_HTTP_METHOD_GET,
+ .handler = &TMH_private_get_pot,
+ .have_id_segment = true,
+ .permission = "pots-read",
+ },
+ {
+ .url_prefix = "/pots/",
+ .method = MHD_HTTP_METHOD_PATCH,
+ .handler = &TMH_private_patch_pot,
+ .have_id_segment = true,
+ .permission = "pots-write"
+ },
+ {
+ .url_prefix = "/pots/",
+ .method = MHD_HTTP_METHOD_DELETE,
+ .handler = &TMH_private_delete_pot,
+ .have_id_segment = true,
+ .permission = "pots-write"
+ },
+
+
+#ifdef HAVE_DONAU_DONAU_SERVICE_H
/* GET /donau */
{
.url_prefix = "/donau",
@@ -1084,7 +1184,7 @@ determine_handler_group (const char **urlp,
},
/* POST /reports/$ID/ */
{
- .url_prefix = "/reports",
+ .url_prefix = "/reports/",
.method = MHD_HTTP_METHOD_POST,
.have_id_segment = true,
.handler = &TMH_post_reports_ID,
@@ -1152,103 +1252,6 @@ determine_handler_group (const char **urlp,
.max_upload = 1024 * 1024
},
- /* Reports endpoints */
- {
- .url_prefix = "reports",
- .method = MHD_HTTP_METHOD_GET,
- .permission = "reports-read",
- .handler = &TMH_private_get_reports,
- },
- {
- .url_prefix = "reports",
- .method = MHD_HTTP_METHOD_POST,
- .permission = "reports-write",
- .handler = &TMH_private_post_reports,
- },
- {
- .url_prefix = "reports",
- .method = MHD_HTTP_METHOD_GET,
- .handler = &TMH_private_get_report,
- .permission = "reports-read",
- .have_id_segment = true,
- },
- {
- .url_prefix = "reports",
- .method = MHD_HTTP_METHOD_PATCH,
- .handler = &TMH_private_patch_report,
- .permission = "reports-write",
- .have_id_segment = true,
- },
- {
- .url_prefix = "reports",
- .method = MHD_HTTP_METHOD_DELETE,
- .handler = &TMH_private_delete_report,
- .permission = "reports-write",
- .have_id_segment = true,
- },
-
- /* Groups endpoints */
- {
- .url_prefix = "groups",
- .method = MHD_HTTP_METHOD_GET,
- .permission = "groups-read",
- .handler = &TMH_private_get_groups,
- },
- {
- .url_prefix = "groups",
- .method = MHD_HTTP_METHOD_POST,
- .permission = "groups-write",
- .handler = &TMH_private_post_groups,
- },
- {
- .url_prefix = "groups",
- .method = MHD_HTTP_METHOD_PATCH,
- .handler = &TMH_private_patch_group,
- .permission = "groups-write",
- .have_id_segment = true,
- },
- {
- .url_prefix = "groups",
- .method = MHD_HTTP_METHOD_DELETE,
- .handler = &TMH_private_delete_group,
- .permission = "groups-write",
- .have_id_segment = true,
- },
-
- /* Money pots endpoints */
- {
- .url_prefix = "pots",
- .method = MHD_HTTP_METHOD_GET,
- .handler = &TMH_private_get_pots,
- .permission = "pots-read",
- },
- {
- .url_prefix = "pots",
- .method = MHD_HTTP_METHOD_POST,
- .handler = &TMH_private_post_pots,
- .permission = "pots-write"
- },
- {
- .url_prefix = "pots",
- .method = MHD_HTTP_METHOD_GET,
- .handler = &TMH_private_get_pot,
- .have_id_segment = true,
- .permission = "pots-read",
- },
- {
- .url_prefix = "pots",
- .method = MHD_HTTP_METHOD_PATCH,
- .handler = &TMH_private_patch_pot,
- .have_id_segment = true,
- .permission = "pots-write"
- },
- {
- .url_prefix = "pots",
- .method = MHD_HTTP_METHOD_DELETE,
- .handler = &TMH_private_delete_pot,
- .have_id_segment = true,
- .permission = "pots-write"
- },
{
.url_prefix = "*",
.method = MHD_HTTP_METHOD_OPTIONS,
diff --git a/src/backend/taler-merchant-httpd_private-post-reports.c b/src/backend/taler-merchant-httpd_private-post-reports.c
@@ -36,7 +36,8 @@ TMH_private_post_reports (const struct TMH_RequestHandler *rh,
const char *data_source;
const char *target_address;
struct GNUNET_TIME_Relative frequency;
- struct GNUNET_TIME_Relative frequency_shift;
+ struct GNUNET_TIME_Relative frequency_shift
+ = GNUNET_TIME_UNIT_ZERO;
enum GNUNET_DB_QueryStatus qs;
struct GNUNET_JSON_Specification spec[] = {
GNUNET_JSON_spec_string ("description",
@@ -51,8 +52,10 @@ TMH_private_post_reports (const struct TMH_RequestHandler *rh,
&target_address),
GNUNET_JSON_spec_relative_time ("report_frequency",
&frequency),
- GNUNET_JSON_spec_relative_time ("report_frequency_shift",
- &frequency_shift),
+ GNUNET_JSON_spec_mark_optional (
+ GNUNET_JSON_spec_relative_time ("report_frequency_shift",
+ &frequency_shift),
+ NULL),
GNUNET_JSON_spec_end ()
};
uint64_t report_id;
diff --git a/src/backend/taler-merchant-report-generator.c b/src/backend/taler-merchant-report-generator.c
@@ -159,6 +159,11 @@ static struct GNUNET_DB_EventHandler *eh;
static struct GNUNET_SCHEDULER_Task *report_task;
/**
+ * When is the current report_task scheduled to run?
+ */
+static struct GNUNET_TIME_Absolute report_task_due;
+
+/**
* Context for CURL operations.
*/
static struct GNUNET_CURL_Context *curl_ctx;
@@ -225,6 +230,15 @@ free_ra (struct ReportActivity *ra)
/**
+ * Check for pending reports and process them.
+ *
+ * @param cls closure (unused)
+ */
+static void
+check_pending_reports (void *cls);
+
+
+/**
* Finish transmission of a report and update database.
*
* @param[in] ra report activity to finish
@@ -246,9 +260,9 @@ finish_transmission (struct ReportActivity *ra,
next_ts,
ec,
error_details);
- free_ra (ra);
if (qs < 0)
{
+ free_ra (ra);
GNUNET_log (GNUNET_ERROR_TYPE_ERROR,
"Failed to update report status: %d\n",
qs);
@@ -256,6 +270,31 @@ finish_transmission (struct ReportActivity *ra,
GNUNET_SCHEDULER_shutdown ();
return;
}
+ if ( (NULL == report_task) ||
+ (GNUNET_TIME_absolute_cmp (report_task_due,
+ >,
+ ra->next_transmission)) )
+ {
+ GNUNET_log (GNUNET_ERROR_TYPE_INFO,
+ "Scheduling next report for %s\n",
+ GNUNET_TIME_absolute2s (ra->next_transmission));
+ if (NULL != report_task)
+ GNUNET_SCHEDULER_cancel (report_task);
+ report_task_due = ra->next_transmission;
+ report_task = GNUNET_SCHEDULER_add_at (ra->next_transmission,
+ &check_pending_reports,
+ NULL);
+ }
+ free_ra (ra);
+ if (test_mode &&
+ GNUNET_TIME_absolute_is_future (report_task_due) &&
+ (NULL == ra_head))
+ {
+ GNUNET_log (GNUNET_ERROR_TYPE_INFO,
+ "Test mode, exiting because of going idle\n");
+ GNUNET_SCHEDULER_shutdown ();
+ return;
+ }
}
@@ -436,7 +475,7 @@ curl_completed_cb (void *cls,
{
struct ReportActivity *ra = cls;
- ra->eh = NULL;
+ ra->job = NULL;
ra->response_code = response_code;
if (MHD_HTTP_OK != response_code)
{
@@ -504,7 +543,7 @@ fetch_and_transmit (
GNUNET_free (accept_header);
}
GNUNET_assert (CURLE_OK ==
- curl_easy_setopt (eh,
+ curl_easy_setopt (ra->eh,
CURLOPT_URL,
ra->url));
{
@@ -605,11 +644,6 @@ process_pending_report (
}
-/**
- * Check for pending reports and process them.
- *
- * @param cls closure (unused)
- */
static void
check_pending_reports (void *cls)
{
@@ -631,14 +665,21 @@ check_pending_reports (void *cls)
GNUNET_SCHEDULER_shutdown ();
return;
}
+ GNUNET_log (GNUNET_ERROR_TYPE_INFO,
+ "Found %d reports pending, next at %s\n",
+ (int) qs,
+ GNUNET_TIME_absolute2s (next));
GNUNET_assert (NULL == report_task);
if (test_mode &&
GNUNET_TIME_absolute_is_future (next) &&
(NULL == ra_head))
{
+ GNUNET_log (GNUNET_ERROR_TYPE_INFO,
+ "Test mode, exiting because of going idle\n");
GNUNET_SCHEDULER_shutdown ();
return;
}
+ report_task_due = next;
report_task = GNUNET_SCHEDULER_add_at (next,
&check_pending_reports,
NULL);
@@ -666,6 +707,7 @@ report_update_cb (void *cls,
/* Cancel any pending check and schedule immediate execution */
if (NULL != report_task)
GNUNET_SCHEDULER_cancel (report_task);
+ report_task_due = GNUNET_TIME_UNIT_ZERO_ABS;
report_task = GNUNET_SCHEDULER_add_now (&check_pending_reports,
NULL);
}
@@ -785,6 +827,15 @@ run (void *cls,
GNUNET_SCHEDULER_shutdown ();
return;
}
+ if (GNUNET_OK !=
+ db_plugin->connect (db_plugin->cls))
+ {
+ GNUNET_log (GNUNET_ERROR_TYPE_ERROR,
+ "Failed to connect to database. Consider running taler-merchant-dbinit!\n");
+ GNUNET_SCHEDULER_shutdown ();
+ global_ret = EXIT_FAILURE;
+ return;
+ }
{
struct GNUNET_DB_EventHeaderP ev = {
diff --git a/src/backenddb/pg_check_report.c b/src/backenddb/pg_check_report.c
@@ -41,7 +41,7 @@ TMH_PG_check_report (void *cls,
GNUNET_PQ_query_param_end
};
struct GNUNET_PQ_ResultSpec rs[] = {
- GNUNET_PQ_result_spec_string ("instance_id",
+ GNUNET_PQ_result_spec_string ("merchant_id",
instance_id),
GNUNET_PQ_result_spec_string ("data_source",
data_source),
diff --git a/src/backenddb/pg_insert_report.c b/src/backenddb/pg_insert_report.c
@@ -87,7 +87,7 @@ TMH_PG_insert_report (
",frequency_shift"
",next_transmission)"
" SELECT merchant_serial, $2, $3, $4, $5,"
- " $6, $7, $8, $9, $10, $11, $12, $13"
+ " $6, $7, $8, $9, $10"
" FROM merchant_instances"
" WHERE merchant_id=$1"
" ON CONFLICT DO NOTHING;");
diff --git a/src/backenddb/pg_lookup_statistics_amount_by_bucket2.c b/src/backenddb/pg_lookup_statistics_amount_by_bucket2.c
@@ -136,21 +136,21 @@ TMH_PG_lookup_statistics_amount_by_bucket2 (
PREPARE (pg,
"lookup_statistics_amount_by_bucket2",
"SELECT"
- " bucket_start"
- " ARRAY_AGG("
- " ROW(cumulative_value, cumulative_frac, curr)::taler_amount_currency"
- ") AS cumulative_amounts"
- " FROM merchant_statistic_bucket_meta"
- " JOIN merchant_instances"
- " USING (merchant_serial)"
- " JOIN merchant_statistic_bucket_amount"
+ " msba.bucket_start"
+ ",ARRAY_AGG ("
+ " ROW(msba.cumulative_value::INT8, msba.cumulative_frac::INT4, msba.curr)::taler_amount_currency"
+ " ) AS cumulative_amounts"
+ " FROM merchant_statistic_bucket_meta msbm"
+ " JOIN merchant_statistic_bucket_amount msba"
" USING (bmeta_serial_id)"
- " WHERE merchant_instances.merchant_id=$1"
- " AND merchant_statistic_bucket_meta.slug=$2"
- " AND merchant_statistic_bucket_meta.bucket_range=$3::TEXT::bucket_range"
- " AND merchant_statistic_bucket_meta.stype='amount'"
- " GROUP BY bucket_start"
- " ORDER BY bucket_start DESC"
+ " JOIN merchant_instances mi"
+ " USING (merchant_serial)"
+ " WHERE mi.merchant_id=$1"
+ " AND msbm.slug=$2"
+ " AND msba.bucket_range=$3::TEXT::statistic_range"
+ " AND msbm.stype='amount'"
+ " GROUP BY msba.bucket_start"
+ " ORDER BY msba.bucket_start DESC"
" LIMIT $4;");
qs = GNUNET_PQ_eval_prepared_multi_select (
pg->conn,
diff --git a/src/backenddb/pg_lookup_statistics_counter_by_bucket2.c b/src/backenddb/pg_lookup_statistics_counter_by_bucket2.c
@@ -150,20 +150,20 @@ TMH_PG_lookup_statistics_counter_by_bucket2 (
PREPARE (pg,
"lookup_statistics_counter_by_bucket2",
"SELECT"
- " bucket_start"
- " ARRAY_AGG(slug) AS slugs"
- " ARRAY_AGG(cumulative_number) AS counters"
- " FROM merchant_statistic_bucket_counter"
- " JOIN merchant_statistic_bucket_meta"
+ " msbc.bucket_start"
+ ",ARRAY_AGG(msbm.slug) AS slugs"
+ ",ARRAY_AGG(msbc.cumulative_number) AS counters"
+ " FROM merchant_statistic_bucket_counter msbc"
+ " JOIN merchant_statistic_bucket_meta msbm"
" USING (bmeta_serial_id)"
- " JOIN merchant_instances"
+ " JOIN merchant_instances mi"
" USING (merchant_serial)"
- " WHERE merchant_instances.merchant_id=$1"
- " AND merchant_statistic_bucket_meta.slug LIKE $1 || '%'"
- " AND merchant_statistic_bucket_meta.bucket_range=$3::TEXT::bucket_range"
- " AND merchant_statistic_bucket_meta.stype = 'number'"
- " GROUP BY bucket_start"
- " ORDER BY bucket_start DESC"
+ " WHERE mi.merchant_id=$1"
+ " AND msbm.slug LIKE $2 || '%'"
+ " AND msbc.bucket_range=$3::TEXT::statistic_range"
+ " AND msbm.stype = 'number'"
+ " GROUP BY msbc.bucket_start"
+ " ORDER BY msbc.bucket_start DESC"
" LIMIT $4");
qs = GNUNET_PQ_eval_prepared_multi_select (
pg->conn,
diff --git a/src/testing/test_merchant_statistics.sh b/src/testing/test_merchant_statistics.sh
@@ -214,165 +214,27 @@ fi
echo "OK"
-
-exit 0
-
-
-#
-# WIRE TRANSFER TO MERCHANT AND NOTIFY BACKEND
-#
-
-WIRE_DEADLINE=$(jq -r .contract_terms.wire_transfer_deadline.t_s < "$LAST_RESPONSE")
-
-NOW=$(date +%s)
-
-TO_SLEEP=$((1200 + WIRE_DEADLINE - NOW ))
-echo "Waiting $TO_SLEEP secs for wire transfer"
-
-echo -n "Call taler-exchange-aggregator ..."
-taler-exchange-aggregator \
- -y \
- -c "$CONF" \
- -T "${TO_SLEEP}"000000 \
- -t \
- -L INFO &> aggregator.log
-echo " DONE"
-echo -n "Call taler-exchange-transfer ..."
-taler-exchange-transfer \
- -c "$CONF" \
- -t \
- -L INFO &> transfer.log
-echo " DONE"
-echo -n "Give time to Nexus to route the payment to Sandbox..."
-# FIXME: trigger immediate update at nexus
-# NOTE: once libeufin can do long-polling, we should
-# be able to reduce the delay here and run aggregator/transfer
-# always in the background via setup
-sleep 3
-echo " DONE"
-
-echo -n "Obtaining wire transfer details from bank ($USE_FAKEBANK)..."
-
-BANKDATA="$(curl 'http://localhost:8082/accounts/exchange/taler-wire-gateway/history/outgoing?delta=1' -s)"
-WTID=$(echo "$BANKDATA" | jq -r .outgoing_transactions[0].wtid)
-WURL=$(echo "$BANKDATA" | jq -r .outgoing_transactions[0].exchange_base_url)
-CREDIT_AMOUNT=$(echo "$BANKDATA" | jq -r .outgoing_transactions[0].amount)
-TARGET_PAYTO=$(echo "$BANKDATA" | jq -r .outgoing_transactions[0].credit_account)
-
-if [ "$EXCHANGE_URL" != "$WURL" ]
-then
- exit_fail "Wrong exchange URL in '$BANKDATA' response, expected '$EXCHANGE_URL'"
-fi
-
-echo " OK"
-
-echo -n "Notifying merchant of correct wire transfer..."
-
-STATUS=$(curl 'http://localhost:9966/private/transfers' \
- -d '{"credit_amount":"'"$CREDIT_AMOUNT"'","wtid":"'"$WTID"'","payto_uri":"'"$TARGET_PAYTO"'","exchange_url":"'"$WURL"'"}' \
- -m 3 \
- -w "%{http_code}" -s -o "$LAST_RESPONSE")
-
-if [ "$STATUS" != "204" ]
-then
- jq . < "$LAST_RESPONSE"
- exit_fail "Expected response 204 No content, after providing transfer data. got: $STATUS"
-fi
-
-echo " OK"
-
-echo -n "Running taler-merchant-depositcheck ..."
-set -e
-taler-merchant-depositcheck \
- -L INFO \
- -c "$CONF" \
- -T "${TO_SLEEP}"000000 \
- -t &> taler-merchant-depositcheck.log
-echo " OK"
-
-echo -n "Running taler-merchant-reconciliation ..."
-set -e
-taler-merchant-reconciliation \
- -L INFO \
- -c "$CONF" \
- -T "${TO_SLEEP}"000000 \
- -t &> taler-merchant-reconciliation.log
-echo " OK"
-
-
-echo -n "Fetching wire transfers ..."
-
-STATUS=$(curl 'http://localhost:9966/private/transfers' \
+echo -n "Schedule periodic report..."
+STATUS=$(curl 'http://localhost:9966/private/reports' \
+ -d '{"description":"My report","program_section":"file","mime_type":"application/pdf","data_source":"/private/statistics-report/transactions","target_address":"'"${TMPDIR:-/tmp}/last-report"'","report_frequency":{"d_us" : 50000000}}' \
-w "%{http_code}" -s -o "$LAST_RESPONSE")
if [ "$STATUS" != "200" ]
then
- jq . < "$LAST_RESPONSE"
- exit_fail "Expected response 200 Ok. got: $STATUS"
-fi
-
-TRANSFERS_LIST_SIZE=$(jq -r '.transfers | length' < "$LAST_RESPONSE")
-
-if [ "$TRANSFERS_LIST_SIZE" != "1" ]
-then
- jq . < "$LAST_RESPONSE"
- exit_fail "Expected 1 entry in transfer list. Got: $TRANSFERS_LIST_SIZE"
+ cat "$LAST_RESPONSE" >&2
+ exit_fail "Expected 200, report created. Got: $STATUS"
fi
-
+REPORT_SERIAL_ID=$(jq -r .report_serial_id < "$LAST_RESPONSE")
echo "OK"
-echo -n "Checking order status ..."
-STATUS=$(curl "http://localhost:9966/private/orders/${ORDER_ID}?transfer=YES" \
- -w "%{http_code}" -s -o "$LAST_RESPONSE")
-if [ "$STATUS" != "200" ]
-then
- jq . < "$LAST_RESPONSE"
- exit_fail "Expected 200, after order inquiry. got: $STATUS"
-fi
-DEPOSIT_TOTAL=$(jq -r .deposit_total < "$LAST_RESPONSE")
-if [ "$DEPOSIT_TOTAL" == "TESTKUDOS:0" ]
-then
- jq . < "$LAST_RESPONSE"
- exit_fail "Expected non-zero deposit total. got: $DEPOSIT_TOTAL"
-fi
+echo -n "Generate periodic report..."
+taler-merchant-report-generator \
+ -c "$CONF" \
+ --test \
+ --timetravel=100000000 \
+ --log=INFO 2> report-generator.log
echo " OK"
-echo -n "Checking bank account status ..."
-if [ 1 = "$USE_FAKEBANK" ]
-then
- STATUS=$(curl "http://localhost:8082/accounts/fortythree" \
- -w "%{http_code}" \
- -s \
- -o "$LAST_RESPONSE")
- if [ "$STATUS" != "200" ]
- then
- jq . < "$LAST_RESPONSE"
- exit_fail "Expected response 200 Ok, getting account status. Got: $STATUS"
- fi
- BALANCE=$(jq -r .balance.amount < "$LAST_RESPONSE")
- if [ "$BALANCE" == "TESTKUDOS:0" ]
- then
- jq . < "$LAST_RESPONSE"
- exit_fail "Wire transfer did not happen. Got: $BALANCE"
- fi
-else
- ACCOUNT_PASSWORD="fortythree:x"
- BANK_HOST="localhost:18082"
- STATUS=$(curl "http://$ACCOUNT_PASSWORD@$BANK_HOST/accounts/fortythree" \
- -w "%{http_code}" -s -o "$LAST_RESPONSE")
- if [ "$STATUS" != "200" ]
- then
- jq . < "$LAST_RESPONSE"
- exit_fail "Expected response 200 Ok, getting account status. Got: $STATUS"
- fi
- BALANCE=$(jq -r .balance.amount < "$LAST_RESPONSE")
- if [ "$BALANCE" == "TESTKUDOS:0" ]
- then
- jq . < "$LAST_RESPONSE"
- exit_fail "Wire transfer did not happen. Got: $BALANCE"
- fi
-fi
-echo " OK"
exit 0
diff --git a/src/testing/test_template.conf b/src/testing/test_template.conf
@@ -30,9 +30,13 @@ PRIVACY_ETAG = 0
[exchangedb-postgres]
CONFIG = postgres:///talercheck
+[report-generator-file]
+BINARY = taler-merchant-report-generator-file
+
[merchant]
SERVE = tcp
PORT = 9966
+BASE_URL = http://localhost:9966/
UNIXPATH = ${TALER_RUNTIME_DIR}/merchant.http
UNIXPATH_MODE = 660
DEFAULT_WIRE_FEE_AMORTIZATION = 1