commit 044170c00077438d30ab0c832a7193eb093aae30
parent bb5a41d072ab7964006cb68a45e043d856cb630e
Author: Schanzenbach, Martin <martin.schanzenbach@aisec.fraunhofer.de>
Date: Mon, 5 Mar 2018 19:40:00 +0100
Merge branch 'master' of git+ssh://gnunet.org/gnunet
Diffstat:
8 files changed, 519 insertions(+), 11 deletions(-)
diff --git a/src/include/gnunet_protocols.h b/src/include/gnunet_protocols.h
@@ -2621,6 +2621,8 @@ extern "C"
#endif /* ENABLE_MALICIOUS */
+/* Debugging API continues at 1130 */
+
/******************************************************************************/
@@ -2944,8 +2946,30 @@ extern "C"
#define GNUNET_MESSAGE_TYPE_AUCTION_CLIENT_OUTCOME 1112
+
+/******************************************************************************/
+/********************************* RPS DEBUG ********************************/
+/******************************************************************************/
+
+/**
+ * @brief Request updates of the view
+ */
+#define GNUNET_MESSAGE_TYPE_RPS_CS_DEBUG_VIEW_REQUEST 1130
+
+/**
+ * @brief Send update of the view
+ */
+#define GNUNET_MESSAGE_TYPE_RPS_CS_DEBUG_VIEW_REPLY 1131
+
+/**
+ * @brief Cancel getting updates of the view
+ */
+#define GNUNET_MESSAGE_TYPE_RPS_CS_DEBUG_VIEW_CANCEL 1132
+
+
+
/**
- * Next available: 1130
+ * Next available: 1200
*/
diff --git a/src/include/gnunet_rps_service.h b/src/include/gnunet_rps_service.h
@@ -66,6 +66,16 @@ typedef void (* GNUNET_RPS_NotifyReadyCB) (void *cls,
const struct GNUNET_PeerIdentity *peers);
/**
+ * Callback called when view was updated
+ *
+ * @param num_peers the number of peers returned
+ * @param peers array with num_peers PeerIDs
+ */
+typedef void (* GNUNET_RPS_ViewUpdateCB) (void *cls,
+ uint64_t num_peers,
+ const struct GNUNET_PeerIdentity *peers);
+
+/**
* Connect to the rps service
*
* @param cfg configuration to use
@@ -136,6 +146,22 @@ GNUNET_RPS_act_malicious (struct GNUNET_RPS_Handle *h,
const struct GNUNET_PeerIdentity *target_peer);
#endif /* ENABLE_MALICIOUS */
+/* Get internals for debugging/profiling purposes */
+
+/**
+ * Request updates of view
+ *
+ * @param rps_handle handle to the rps service
+ * @param num_updates number of view updates we want to receive
+ * (0 for continuous updates)
+ * @param view_update_cb the callback called when a view update is available
+ * @param cls a closure that will be given to the callback
+ */
+void
+GNUNET_RPS_view_request (struct GNUNET_RPS_Handle *rps_handle,
+ uint32_t num_updates,
+ GNUNET_RPS_ViewUpdateCB view_update_cb,
+ void *cls);
/**
* Disconnect from the rps service
diff --git a/src/rps/gnunet-rps.c b/src/rps/gnunet-rps.c
@@ -45,6 +45,16 @@ static struct GNUNET_RPS_Request_Handle *req_handle;
*/
static struct GNUNET_PeerIdentity peer_id;
+/**
+ * @brief Do we want to receive updates of the view? (Option --view)
+ */
+static int view_update;
+
+/**
+ * @brief Number of updates we want to receive
+ */
+static uint64_t num_view_updates;
+
/**
* Task run when user presses CTRL-C to abort.
@@ -87,6 +97,42 @@ reply_handle (void *cls,
GNUNET_SCHEDULER_shutdown ();
}
+/**
+ * Callback called on receipt of a view update.
+ * Prints view.
+ *
+ * @param n number of peers
+ * @param recv_peers the received peers
+ */
+static void
+view_update_handle (void *cls,
+ uint64_t n,
+ const struct GNUNET_PeerIdentity *recv_peers)
+{
+ uint64_t i;
+
+ if (0 == n)
+ {
+ FPRINTF (stdout, "Empty view\n");
+ }
+ req_handle = NULL;
+ for (i = 0; i < n; i++)
+ {
+ FPRINTF (stdout, "%s\n",
+ GNUNET_i2s_full (&recv_peers[i]));
+ }
+
+ if (1 == num_view_updates)
+ {
+ ret = 0;
+ GNUNET_SCHEDULER_shutdown ();
+ }
+ else if (1 < num_view_updates)
+ {
+ num_view_updates--;
+ }
+}
+
/**
* Main function that will be run by the scheduler.
@@ -107,9 +153,8 @@ run (void *cls,
rps_handle = GNUNET_RPS_connect (cfg);
- if (0 == memcmp (&zero_pid,
- &peer_id,
- sizeof (peer_id)))
+ if ((0 == memcmp (&zero_pid, &peer_id, sizeof (peer_id))) &&
+ (!view_update))
{ /* Request n PeerIDs */
/* If number was specified use it, else request single peer. */
num_peers = (NULL == args[0]) ? 1 : atoi (args[0]);
@@ -117,6 +162,18 @@ run (void *cls,
"Requesting %" PRIu64 " PeerIDs\n", num_peers);
req_handle = GNUNET_RPS_request_peers (rps_handle, num_peers, reply_handle, NULL);
GNUNET_SCHEDULER_add_shutdown (&do_shutdown, NULL);
+ } else if (view_update)
+ {
+ /* Get updates of view */
+ num_view_updates = (NULL == args[0]) ? 0 : atoi (args[0]);
+ GNUNET_RPS_view_request (rps_handle, num_view_updates, view_update_handle, NULL);
+ if (0 != num_view_updates)
+ GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
+ "Requesting %" PRIu64 " view updates\n", num_view_updates);
+ else
+ GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
+ "Requesting continuous view updates\n");
+ GNUNET_SCHEDULER_add_shutdown (&do_shutdown, NULL);
}
else
{ /* Seed PeerID */
@@ -145,6 +202,10 @@ main (int argc, char *const *argv)
"PEER_ID",
gettext_noop ("Seed a PeerID"),
&peer_id),
+ GNUNET_GETOPT_option_flag ('V',
+ "view",
+ gettext_noop ("Get updates of view (0 for infinite updates)"),
+ &view_update),
GNUNET_GETOPT_OPTION_END
};
return (GNUNET_OK ==
diff --git a/src/rps/gnunet-service-rps.c b/src/rps/gnunet-service-rps.c
@@ -1072,6 +1072,7 @@ Peers_terminate ()
"Iteration destroying peers was aborted.\n");
}
GNUNET_CONTAINER_multipeermap_destroy (peer_map);
+ peer_map = NULL;
store_valid_peers ();
GNUNET_free (filename_valid_peers);
GNUNET_CONTAINER_multipeermap_destroy (valid_peers);
@@ -1439,7 +1440,13 @@ Peers_get_channel_flag (const struct GNUNET_PeerIdentity *peer,
int
Peers_check_peer_known (const struct GNUNET_PeerIdentity *peer)
{
- return GNUNET_CONTAINER_multipeermap_contains (peer_map, peer);
+ if (NULL != peer_map)
+ {
+ return GNUNET_CONTAINER_multipeermap_contains (peer_map, peer);
+ } else
+ {
+ return GNUNET_NO;
+ }
}
@@ -1514,6 +1521,7 @@ Peers_handle_inbound_channel (void *cls,
const struct GNUNET_PeerIdentity *initiator)
{
struct PeerContext *peer_ctx;
+ struct GNUNET_PeerIdentity *ctx_peer;
LOG (GNUNET_ERROR_TYPE_DEBUG,
"New channel was established to us (Peer %s).\n",
@@ -1522,6 +1530,8 @@ Peers_handle_inbound_channel (void *cls,
/* Make sure we 'know' about this peer */
peer_ctx = create_or_get_peer_ctx (initiator);
set_peer_live (peer_ctx);
+ ctx_peer = GNUNET_new (struct GNUNET_PeerIdentity);
+ *ctx_peer = *initiator;
/* We only accept one incoming channel per peer */
if (GNUNET_YES == Peers_check_peer_send_intention (initiator))
{
@@ -1531,10 +1541,10 @@ Peers_handle_inbound_channel (void *cls,
GNUNET_CADET_channel_destroy (peer_ctx->recv_channel);
peer_ctx->recv_channel = channel;
/* return the channel context */
- return &peer_ctx->peer_id;
+ return ctx_peer;
}
peer_ctx->recv_channel = channel;
- return &peer_ctx->peer_id;
+ return ctx_peer;
}
@@ -1629,6 +1639,7 @@ Peers_destroy_sending_channel (const struct GNUNET_PeerIdentity *peer)
set_channel_flag (peer_ctx->send_channel_flags, Peers_CHANNEL_CLEAN);
GNUNET_CADET_channel_destroy (peer_ctx->send_channel);
peer_ctx->send_channel = NULL;
+ peer_ctx->mq = NULL;
(void) Peers_check_connected (peer);
return GNUNET_YES;
}
@@ -1856,6 +1867,11 @@ struct ClientContext
struct ReplyCls *rep_cls_tail;
/**
+ * @brief How many updates this client expects to receive.
+ */
+ int64_t view_updates_left;
+
+ /**
* The client handle to send the reply to
*/
struct GNUNET_SERVICE_Client *client;
@@ -2610,6 +2626,7 @@ cleanup_destroyed_channel (void *cls,
to_file (file_name_view_log,
"-%s\t(cleanup channel, ourself)",
GNUNET_i2s_full (peer));
+ //GNUNET_free (peer);
return;
}
@@ -2625,6 +2642,7 @@ cleanup_destroyed_channel (void *cls,
{ /* We are about to clean the sending channel. Clean the respective
* context */
Peers_cleanup_destroyed_channel (cls, channel);
+ //GNUNET_free (peer);
return;
}
else
@@ -2632,6 +2650,7 @@ cleanup_destroyed_channel (void *cls,
* open. It probably went down. Remove it from our knowledge. */
Peers_cleanup_destroyed_channel (cls, channel);
remove_peer (peer);
+ //GNUNET_free (peer);
return;
}
}
@@ -2648,6 +2667,7 @@ cleanup_destroyed_channel (void *cls,
{ /* Other peer tried to establish a channel to us twice. We do not accept
* that. Clean the context. */
Peers_cleanup_destroyed_channel (cls, channel);
+ //GNUNET_free (peer);
return;
}
else
@@ -2655,6 +2675,7 @@ cleanup_destroyed_channel (void *cls,
* it. */
Peers_cleanup_destroyed_channel (cls, channel);
clean_peer (peer);
+ //GNUNET_free (peer);
return;
}
}
@@ -2663,6 +2684,7 @@ cleanup_destroyed_channel (void *cls,
LOG (GNUNET_ERROR_TYPE_WARNING,
"Destroyed channel is neither sending nor receiving channel\n");
}
+ //GNUNET_free (peer);
}
/***********************************************************************
@@ -2940,6 +2962,107 @@ handle_client_seed (void *cls,
}
/**
+ * @brief Send view to client
+ *
+ * @param cli_ctx the context of the client
+ * @param view_array the peerids of the view as array (can be empty)
+ * @param view_size the size of the view array (can be 0)
+ */
+void
+send_view (const struct ClientContext *cli_ctx,
+ const struct GNUNET_PeerIdentity *view_array,
+ uint64_t view_size)
+{
+ struct GNUNET_MQ_Envelope *ev;
+ struct GNUNET_RPS_CS_DEBUG_ViewReply *out_msg;
+
+ if (NULL == view_array)
+ {
+ view_size = View_size ();
+ view_array = View_get_as_array();
+ }
+
+ ev = GNUNET_MQ_msg_extra (out_msg,
+ view_size * sizeof (struct GNUNET_PeerIdentity),
+ GNUNET_MESSAGE_TYPE_RPS_CS_DEBUG_VIEW_REPLY);
+ out_msg->num_peers = htonl (view_size);
+
+ GNUNET_memcpy (&out_msg[1],
+ view_array,
+ view_size * sizeof (struct GNUNET_PeerIdentity));
+ GNUNET_MQ_send (cli_ctx->mq, ev);
+}
+
+/**
+ * @brief sends updates to clients that are interested
+ */
+static void
+clients_notify_view_update (void)
+{
+ struct ClientContext *cli_ctx_iter;
+ uint64_t num_peers;
+ const struct GNUNET_PeerIdentity *view_array;
+
+ num_peers = View_size ();
+ view_array = View_get_as_array();
+ /* check size of view is small enough */
+ if (GNUNET_MAX_MESSAGE_SIZE < num_peers)
+ {
+ GNUNET_log (GNUNET_ERROR_TYPE_ERROR,
+ "View is too big to send\n");
+ return;
+ }
+
+ for (cli_ctx_iter = cli_ctx_head;
+ NULL != cli_ctx_iter;
+ cli_ctx_iter = cli_ctx_iter->next)
+ {
+ if (1 < cli_ctx_iter->view_updates_left)
+ {
+ /* Client wants to receive limited amount of updates */
+ cli_ctx_iter->view_updates_left -= 1;
+ } else if (1 == cli_ctx_iter->view_updates_left)
+ {
+ /* Last update of view for client */
+ cli_ctx_iter->view_updates_left = -1;
+ } else if (0 > cli_ctx_iter->view_updates_left) {
+ /* Client is not interested in updates */
+ continue;
+ }
+ /* else _updates_left == 0 - infinite amount of updates */
+
+ /* send view */
+ send_view (cli_ctx_iter, view_array, num_peers);
+ }
+}
+
+
+/**
+ * Handle RPS request from the client.
+ *
+ * @param cls closure
+ * @param message the actual message
+ */
+static void
+handle_client_view_request (void *cls,
+ const struct GNUNET_RPS_CS_DEBUG_ViewRequest *msg)
+{
+ struct ClientContext *cli_ctx = cls;
+ uint64_t num_updates;
+
+ num_updates = ntohl (msg->num_updates);
+
+ LOG (GNUNET_ERROR_TYPE_DEBUG,
+ "Client requested %" PRIu64 " updates of view.\n",
+ num_updates);
+
+ GNUNET_assert (NULL != cli_ctx);
+ cli_ctx->view_updates_left = num_updates;
+ send_view (cli_ctx, NULL, 0);
+ GNUNET_SERVICE_client_continue (cli_ctx->client);
+}
+
+/**
* Handle a CHECK_LIVE message from another peer.
*
* This does nothing. But without calling #GNUNET_CADET_receive_done()
@@ -3554,7 +3677,6 @@ do_mal_round (void *cls)
}
#endif /* ENABLE_MALICIOUS */
-
/**
* Send out PUSHes and PULLs, possibly update #view, samplers.
*
@@ -3724,6 +3846,7 @@ do_round (void *cls)
}
GNUNET_array_grow (peers_to_clean, peers_to_clean_size, 0);
+ clients_notify_view_update();
} else {
LOG (GNUNET_ERROR_TYPE_DEBUG, "No update of the view.\n");
GNUNET_STATISTICS_update(stats, "# rounds blocked", 1, GNUNET_NO);
@@ -3973,6 +4096,7 @@ client_connect_cb (void *cls,
return client; /* Server was destroyed before a client connected. Shutting down */
cli_ctx = GNUNET_new (struct ClientContext);
cli_ctx->mq = GNUNET_SERVICE_client_get_mq (client);
+ cli_ctx->view_updates_left = -1;
cli_ctx->client = client;
GNUNET_CONTAINER_DLL_insert (cli_ctx_head,
cli_ctx_tail,
@@ -4216,6 +4340,10 @@ GNUNET_SERVICE_MAIN
struct GNUNET_RPS_CS_ActMaliciousMessage,
NULL),
#endif /* ENABLE_MALICIOUS */
+ GNUNET_MQ_hd_fixed_size (client_view_request,
+ GNUNET_MESSAGE_TYPE_RPS_CS_DEBUG_VIEW_REQUEST,
+ struct GNUNET_RPS_CS_DEBUG_ViewRequest,
+ NULL),
GNUNET_MQ_handler_end());
/* end of gnunet-service-rps.c */
diff --git a/src/rps/gnunet-service-rps_custommap.c b/src/rps/gnunet-service-rps_custommap.c
@@ -128,7 +128,9 @@ CustomPeerMap_put (const struct CustomPeerMap *c_peer_map,
*index = CustomPeerMap_size (c_peer_map);
p = GNUNET_new (struct GNUNET_PeerIdentity);
*p = *peer;
- GNUNET_CONTAINER_multipeermap_put (c_peer_map->peer_map, peer, index,
+ GNUNET_assert (p != peer);
+ GNUNET_assert (0 == memcmp (p, peer, sizeof(struct GNUNET_PeerIdentity)));
+ GNUNET_CONTAINER_multipeermap_put (c_peer_map->peer_map, p, index,
GNUNET_CONTAINER_MULTIHASHMAPOPTION_UNIQUE_FAST);
GNUNET_CONTAINER_multihashmap32_put (c_peer_map->hash_map, *index, p,
GNUNET_CONTAINER_MULTIHASHMAPOPTION_UNIQUE_FAST);
@@ -218,7 +220,6 @@ CustomPeerMap_remove_peer (const struct CustomPeerMap *c_peer_map,
*last_index = *index;
}
GNUNET_free (index);
- GNUNET_free (p);
GNUNET_assert (GNUNET_CONTAINER_multihashmap32_size (c_peer_map->hash_map) ==
GNUNET_CONTAINER_multipeermap_size (c_peer_map->peer_map));
return GNUNET_OK;
diff --git a/src/rps/rps.h b/src/rps/rps.h
@@ -176,6 +176,49 @@ struct GNUNET_RPS_CS_ActMaliciousMessage
#endif /* ENABLE_MALICIOUS */
+/* Debug messages */
+
+/**
+ * Message from client to service indicating that
+ * clients wants to get updates of the view
+ */
+struct GNUNET_RPS_CS_DEBUG_ViewRequest
+{
+ /**
+ * Header including size and type in NBO
+ */
+ struct GNUNET_MessageHeader header;
+
+ /**
+ * Number of updates
+ * 0 for sending updates until cancellation
+ */
+ uint32_t num_updates GNUNET_PACKED;
+};
+
+/**
+ * Message from service to client containing current update of view
+ */
+struct GNUNET_RPS_CS_DEBUG_ViewReply
+{
+ /**
+ * Header including size and type in NBO
+ */
+ struct GNUNET_MessageHeader header;
+
+ /**
+ * Identifier of the message.
+ */
+ uint32_t id GNUNET_PACKED;
+
+ /**
+ * Number of peers in the view
+ */
+ uint32_t num_peers GNUNET_PACKED;
+};
+ /* Followed by num_peers * GNUNET_PeerIdentity */
+
+
/***********************************************************************
* Defines from old gnunet-service-rps_peers.h
***********************************************************************/
diff --git a/src/rps/rps_api.c b/src/rps/rps_api.c
@@ -56,6 +56,16 @@ struct GNUNET_RPS_Handle
* The id of the last request.
*/
uint32_t current_request_id;
+
+ /**
+ * @brief Callback called on each update of the view
+ */
+ GNUNET_RPS_ViewUpdateCB view_update_cb;
+
+ /**
+ * @brief Callback called on each update of the view
+ */
+ void *view_update_cls;
};
@@ -236,6 +246,86 @@ handle_reply (void *cls,
}
+/* Get internals for debugging/profiling purposes */
+
+/**
+ * Request updates of view
+ *
+ * @param rps_handle handle to the rps service
+ * @param num_updates number of view updates we want to receive
+ * (0 for continuous updates)
+ * @param view_update_cb the callback called when a view update is available
+ * @param cls a closure that will be given to the callback
+ */
+void
+GNUNET_RPS_view_request (struct GNUNET_RPS_Handle *rps_handle,
+ uint32_t num_updates,
+ GNUNET_RPS_ViewUpdateCB view_update_cb,
+ void *cls)
+{
+ struct GNUNET_MQ_Envelope *ev;
+ struct GNUNET_RPS_CS_DEBUG_ViewRequest *msg;
+
+ rps_handle->view_update_cb = view_update_cb;
+ rps_handle->view_update_cls = cls;
+
+ ev = GNUNET_MQ_msg (msg, GNUNET_MESSAGE_TYPE_RPS_CS_DEBUG_VIEW_REQUEST);
+ msg->num_updates = htonl (num_updates);
+ GNUNET_MQ_send (rps_handle->mq, ev);
+}
+
+/**
+ * This function is called, when the service updates the view.
+ * It verifies that @a msg is well-formed.
+ *
+ * @param cls the closure
+ * @param msg the message
+ * @return #GNUNET_OK if @a msg is well-formed
+ */
+static int
+check_view_update (void *cls,
+ const struct GNUNET_RPS_CS_DEBUG_ViewReply *msg)
+{
+ uint16_t msize = ntohs (msg->header.size);
+ uint32_t num_peers = ntohl (msg->num_peers);
+
+ msize -= sizeof (struct GNUNET_RPS_CS_DEBUG_ViewReply);
+ if ( (msize / sizeof (struct GNUNET_PeerIdentity) != num_peers) ||
+ (msize % sizeof (struct GNUNET_PeerIdentity) != 0) )
+ {
+ GNUNET_break (0);
+ return GNUNET_SYSERR;
+ }
+ return GNUNET_OK;
+}
+
+/**
+ * This function is called, when the service updated its view.
+ * It calls the callback the caller provided
+ * and disconnects afterwards.
+ *
+ * @param msg the message
+ */
+static void
+handle_view_update (void *cls,
+ const struct GNUNET_RPS_CS_DEBUG_ViewReply *msg)
+{
+ struct GNUNET_RPS_Handle *h = cls;
+ struct GNUNET_PeerIdentity *peers;
+
+ /* Give the peers back */
+ LOG (GNUNET_ERROR_TYPE_DEBUG,
+ "New view of %" PRIu32 " peers:\n",
+ ntohl (msg->num_peers));
+
+ peers = (struct GNUNET_PeerIdentity *) &msg[1];
+ GNUNET_assert (NULL != h);
+ GNUNET_assert (NULL != h->view_update_cb);
+ h->view_update_cb (h->view_update_cls, ntohl (msg->num_peers), peers);
+}
+
+
+
/**
* Reconnect to the service
*/
@@ -281,6 +371,10 @@ reconnect (struct GNUNET_RPS_Handle *h)
GNUNET_MESSAGE_TYPE_RPS_CS_REPLY,
struct GNUNET_RPS_CS_ReplyMessage,
h),
+ GNUNET_MQ_hd_var_size (view_update,
+ GNUNET_MESSAGE_TYPE_RPS_CS_DEBUG_VIEW_REPLY,
+ struct GNUNET_RPS_CS_DEBUG_ViewReply,
+ h),
GNUNET_MQ_handler_end ()
};
@@ -306,6 +400,7 @@ GNUNET_RPS_connect (const struct GNUNET_CONFIGURATION_Handle *cfg)
struct GNUNET_RPS_Handle *h;
h = GNUNET_new (struct GNUNET_RPS_Handle);
+ h->current_request_id = 0;
h->cfg = cfg;
reconnect (h);
if (NULL == h->mq)
diff --git a/src/rps/test_rps.c b/src/rps/test_rps.c
@@ -258,6 +258,16 @@ struct RPSPeer
* @brief File name of the file the stats are finally written to
*/
char *file_name_stats;
+
+ /**
+ * @brief The current view
+ */
+ struct GNUNET_PeerIdentity *cur_view;
+
+ /**
+ * @brief Number of peers in the #cur_view.
+ */
+ uint32_t cur_view_count;
};
enum STAT_TYPE
@@ -419,6 +429,21 @@ enum OPTION_COLLECT_STATISTICS {
};
/**
+ * @brief Do we collect views during run?
+ */
+enum OPTION_COLLECT_VIEW {
+ /**
+ * @brief We collect view during run
+ */
+ COLLECT_VIEW,
+
+ /**
+ * @brief We do not collect the view during run
+ */
+ NO_COLLECT_VIEW,
+};
+
+/**
* Structure to define a single test
*/
struct SingleTestRun
@@ -484,6 +509,11 @@ struct SingleTestRun
enum OPTION_COLLECT_STATISTICS have_collect_statistics;
/**
+ * Collect view during run?
+ */
+ enum OPTION_COLLECT_VIEW have_collect_view;
+
+ /**
* @brief Mark which values from the statistics service to collect at the end
* of the run
*/
@@ -1788,6 +1818,91 @@ store_stats_file_name (struct RPSPeer *rps_peer)
rps_peer->file_name_stats = file_name;
}
+void compute_diversity ()
+{
+ uint32_t i, j, k;
+ /* ith entry represents the numer of occurrences in other peer's views */
+ uint32_t *count_peers = GNUNET_new_array (num_peers, uint32_t);
+ uint32_t views_total_size;
+ double expected;
+ /* deviation from expected number of peers */
+ double *deviation = GNUNET_new_array (num_peers, double);
+
+ views_total_size = 0;
+ expected = 0;
+
+ /* For each peer count its representation in other peer's views*/
+ for (i = 0; i < num_peers; i++) /* Peer to count */
+ {
+ views_total_size += rps_peers[i].cur_view_count;
+ for (j = 0; j < num_peers; j++) /* Peer in which view is counted */
+ {
+ for (k = 0; k < rps_peers[j].cur_view_count; k++) /* entry in view */
+ {
+ if (0 == memcmp (rps_peers[i].peer_id,
+ &rps_peers[j].cur_view[k],
+ sizeof (struct GNUNET_PeerIdentity)))
+ {
+ count_peers[i]++;
+ }
+ }
+ }
+ GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
+ "Counted representation of %" PRIu32 "th peer: %" PRIu32"\n",
+ i,
+ count_peers[i]);
+ }
+
+ GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
+ "size of all views combined: %" PRIu32 "\n",
+ views_total_size);
+ expected = ((double) 1/num_peers) * views_total_size;
+ GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
+ "Expected number of occurrences of each peer in all views: %f\n",
+ expected);
+ for (i = 0; i < num_peers; i++) /* Peer to count */
+ {
+ deviation[i] = expected - count_peers[i];
+ GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
+ "Deviation from expectation: %f\n", deviation[i]);
+ }
+ GNUNET_free (count_peers);
+ GNUNET_free (deviation);
+}
+
+void all_views_updated_cb ()
+{
+ compute_diversity ();
+}
+
+void view_update_cb (void *cls,
+ uint64_t num_peers,
+ const struct GNUNET_PeerIdentity *peers)
+{
+ GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
+ "View was updated (%" PRIu64 ")\n", num_peers);
+ struct RPSPeer *rps_peer = (struct RPSPeer *) cls;
+ for (int i = 0; i < num_peers; i++)
+ {
+ GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
+ "\t%s\n", GNUNET_i2s (&peers[i]));
+ }
+ GNUNET_array_grow (rps_peer->cur_view,
+ rps_peer->cur_view_count,
+ num_peers);
+ //*rps_peer->cur_view = *peers;
+ memcpy (rps_peer->cur_view,
+ peers,
+ num_peers * sizeof (struct GNUNET_PeerIdentity));
+ all_views_updated_cb();
+}
+
+static void
+pre_profiler (struct RPSPeer *rps_peer, struct GNUNET_RPS_Handle *h)
+{
+ GNUNET_RPS_view_request (h, 0, view_update_cb, rps_peer);
+}
+
/**
* Continuation called by #GNUNET_STATISTICS_get() functions.
*
@@ -2009,6 +2124,11 @@ run (void *cls,
rps_peers[i].index = i;
if (NULL != cur_test_run.init_peer)
cur_test_run.init_peer (&rps_peers[i]);
+ if (NO_COLLECT_VIEW == cur_test_run.have_collect_view)
+ {
+ rps_peers[i].cur_view_count = 0;
+ rps_peers[i].cur_view = NULL;
+ }
entry->op = GNUNET_TESTBED_peer_get_information (peers[i],
GNUNET_TESTBED_PIT_IDENTITY,
&info_cb,
@@ -2067,6 +2187,7 @@ main (int argc, char *argv[])
{
int ret_value;
+ /* Defaults for tests */
num_peers = 5;
cur_test_run.name = "test-rps-default";
cur_test_run.init_peer = default_init_peer;
@@ -2077,6 +2198,7 @@ main (int argc, char *argv[])
cur_test_run.have_churn = HAVE_CHURN;
cur_test_run.have_collect_statistics = NO_COLLECT_STATISTICS;
cur_test_run.stat_collect_flags = 0;
+ cur_test_run.have_collect_view = NO_COLLECT_VIEW;
churn_task = NULL;
timeout = GNUNET_TIME_relative_multiply (GNUNET_TIME_UNIT_SECONDS, 30);
@@ -2190,7 +2312,8 @@ main (int argc, char *argv[])
num_peers = 10;
mal_type = 3;
cur_test_run.init_peer = profiler_init_peer;
- cur_test_run.pre_test = mal_pre;
+ //cur_test_run.pre_test = mal_pre;
+ cur_test_run.pre_test = pre_profiler;
cur_test_run.main_test = profiler_cb;
cur_test_run.reply_handle = profiler_reply_handle;
cur_test_run.eval_cb = profiler_eval;
@@ -2216,6 +2339,7 @@ main (int argc, char *argv[])
STAT_TYPE_RECV_PUSH_SEND |
STAT_TYPE_RECV_PULL_REQ |
STAT_TYPE_RECV_PULL_REP;
+ cur_test_run.have_collect_view = COLLECT_VIEW;
timeout = GNUNET_TIME_relative_multiply (GNUNET_TIME_UNIT_SECONDS, 300);
/* 'Clean' directory */
@@ -2249,6 +2373,12 @@ main (int argc, char *argv[])
}
ret_value = cur_test_run.eval_cb();
+ if (COLLECT_VIEW == cur_test_run.have_collect_view)
+ {
+ GNUNET_array_grow (rps_peers->cur_view,
+ rps_peers->cur_view_count,
+ 0);
+ }
GNUNET_free (rps_peers);
GNUNET_free (rps_peer_ids);
GNUNET_CONTAINER_multipeermap_destroy (peer_map);