[Barrelfish-users] 3-way Handshake IDC

Dominic Hung domchdh at hku.hk
Tue Nov 25 08:42:30 CET 2014


Dear Team Barrelfish,

I have worked around the problem, but I would like to present the
investigation and the workaround to the community. The workaround is
clearly not perfect, but it does the trick for me.

After reproducing the deadlock with the pingpong micro-test, I tried an
idc_2to1 test. It does this:

Core 0 --  WAKE   --> Core 1
Core 0 --  WAKE   --> Core 2
Core 1 <-- CRADLE --- Core 2

That test was successful, which means a domain can dispatch and handle
events from inside a message handler. But from this result and the result
of the pingpong test, I conjecture that it cannot handle a new message
arriving on the same binding it is currently handling. So I tried a
dual-channel approach and built two bindings for each core pair, say
binding_channel_0 and binding_channel_1.

Consider the handshake:

Core 0 --   SYN   -> Core 1
Core 0 <- SYN/ACK -- Core 1
Core 0 --   ACK   -> Core 1

Originally, the ACK message would be sent on the same binding. Now I send
it on the second binding, binding_channel_1. I verified this in the
idc_pingpong_double_channel experiment. All's well.
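
In essence, the fix on core 1's side is just this (a minimal sketch, using
the binding[] array and message names from the attached code):

    binding[0]->tx_vtbl.syn(binding[0], NOP_CONT); // SYN out on binding_channel_0
    event_dispatch(get_default_waitset());         // SYN/ACK comes back on binding_channel_0
    binding[1]->tx_vtbl.ack(binding[1], NOP_CONT); // ACK out on binding_channel_1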

I think Team Barrelfish should not build such a constraint into the system.
My conjecture is that the constraint comes from using a primitive data
structure in the communication module, chosen for faster coding when
realising the project, rather than a more complex structure such as a
linked list. But I cannot prove this until I trace the relevant code and
run some experiments, which will have to wait until I have written up my
thesis.

Thanks!

Code for the two tests is attached below.

Cheers,
Dominic Hung

*idc_2to1.c:*

#include "stdio.h"
#include "stdbool.h"                 // FOR BOOL
#include "errors/errno.h"            // FOR ERRVAL_T
#include "arch/x86_32/barrelfish_kpi/eflags_arch.h"
#include "arch/x86_32/barrelfish_kpi/paging_arch.h"
#include "barrelfish_kpi/syscalls.h" // FOR STRUCT SYSRET
#include "barrelfish/capabilities.h"
#include "if/idc_2to1_defs.h"
#include "barrelfish/domain.h"
#include "barrelfish/spawn_client.h"
#include "barrelfish/waitset.h"
#include "barrelfish/nameservice_client.h"
#include "rck_dev.h"

struct idc_2to1_binding* binding[3];
char* idc_2to1_service_name[] = {
    "idc_2to1_service_0",
    "idc_2to1_service_1",
    "idc_2to1_service_2"
};

int FLAG_connect = 0;

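// Handles WAKE. On core 1 we block in a nested event_dispatch() until the
// CRADLE from core 2 arrives; on core 2 we forward CRADLE to core 1 over
// the binding established to it earlier.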
void wake_handler(struct idc_2to1_binding *b){
    debug_printf("wake\n");

    if(disp_get_core_id() == 1){
        debug_printf("wait for cradle\n");

        event_dispatch(get_default_waitset());
    }
    else{
        binding[1]->tx_vtbl.cradle(binding[1], NOP_CONT);

        debug_printf("cradle propagated\n");
    }
}

void cradle_handler(struct idc_2to1_binding *b){
    debug_printf("cradle hit\n");
}

/**
 * START OF MESSAGING EXPORT/BINDING SERVICE
 */

struct idc_2to1_rx_vtbl idc_2to1_rx_vtbl = {
    .wake    = wake_handler,
    .cradle  = cradle_handler
};

static void bind_cb(void *st, errval_t err, struct idc_2to1_binding *b){
    if(err_is_fail(err)){
        USER_PANIC_ERR(err, "bind failed");
    }

    debug_printf("client bound!\n");

    // copy my message receive handler vtable to the binding
    b->rx_vtbl = idc_2to1_rx_vtbl;

    binding[(int) st] = b;
}

static void start_client(int host){
    iref_t iref;
    errval_t err;

    debug_printf("client looking up '%s' in name service...\n",
                 idc_2to1_service_name[host]);
    err = nameservice_blocking_lookup(idc_2to1_service_name[host], &iref);
    if(err_is_fail(err)){
        USER_PANIC_ERR(err, "nameservice_blocking_lookup failed");
    }

    debug_printf("client binding to %"PRIuIREF"...\n", iref);
    err = idc_2to1_bind(iref, bind_cb, (void *) host, get_default_waitset(),
                        IDC_BIND_FLAGS_DEFAULT);
    if(err_is_fail(err)){
        USER_PANIC_ERR(err, "bind failed");
    }

    while(binding[host] == NULL)
        event_dispatch(get_default_waitset());
}

/* ------------------------------ SERVER ------------------------------ */

static void export_cb(void *st, errval_t err, iref_t iref){
    if(err_is_fail(err)){
        USER_PANIC_ERR(err, "export failed");
    }

    debug_printf("service exported at iref %"PRIuIREF"\n", iref);

    // register this iref with the name service
    err = nameservice_register(idc_2to1_service_name[disp_get_core_id()], iref);
    if(err_is_fail(err)){
        USER_PANIC_ERR(err, "nameservice_register failed");
    }
}

static errval_t connect_cb(void *st, struct idc_2to1_binding *b){
    debug_printf("service got a connection!\n");

    // copy my message receive handler vtable to the binding
    b->rx_vtbl = idc_2to1_rx_vtbl;

    binding[disp_get_core_id()] = b;

    FLAG_connect++;

    // accept the connection (we could return an error to refuse it)
    return SYS_ERR_OK;
}

static void start_server(void){
    errval_t err;

    err = idc_2to1_export(NULL, export_cb, connect_cb, get_default_waitset(),
                          IDC_EXPORT_FLAGS_DEFAULT);
    if(err_is_fail(err)){
        USER_PANIC_ERR(err, "export failed");
    }

    do{
        event_dispatch(get_default_waitset());
    }while(FLAG_connect != 2);
}

/**
 * END OF MESSAGING EXPORT/BINDING SERVICE
 */

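// Core 0 spawns copies of this program on cores 1 and 2. Each core then
// exports a service for itself and binds as a client to the other two,
// giving a full mesh of bindings. Finally, core 0 sends WAKE to cores 1
// and 2 while the other cores dispatch events.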
int main(int argc, char* argv[]){
    debug_printf("Core ID: %d\n", disp_get_core_id());

    if(disp_get_core_id() == 0){
        spawn_program(1, "/scc/sbin/idc_2to1", argv, NULL, SPAWN_NEW_DOMAIN,
                      NULL);
        spawn_program(2, "/scc/sbin/idc_2to1", argv, NULL, SPAWN_NEW_DOMAIN,
                      NULL);
    }

    for(int i = 0; i < 3; ++i){
        if(i == disp_get_core_id())
            start_server();
        else start_client(i);
    }

    if(disp_get_core_id() == 0){
        for(int i = 1; i < 3; ++i)
            binding[i]->tx_vtbl.wake(binding[i], NOP_CONT);
    }
    else event_dispatch(get_default_waitset());

    return 0;
}

*idc_pingpong_double_channel.c:*

#include "stdio.h"
#include "stdbool.h"                 // FOR BOOL
#include "errors/errno.h"            // FOR ERRVAL_T
#include "arch/x86_32/barrelfish_kpi/eflags_arch.h"
#include "arch/x86_32/barrelfish_kpi/paging_arch.h"
#include "barrelfish_kpi/syscalls.h" // FOR STRUCT SYSRET
#include "barrelfish/capabilities.h"
#include "if/idc_pingpong_defs.h"
#include "barrelfish/domain.h"
#include "barrelfish/spawn_client.h"
#include "barrelfish/waitset.h"
#include "barrelfish/nameservice_client.h"
#include "rck_dev.h"

struct idc_pingpong_binding* binding[2];
char* idc_pingpong_service_name[] = {
    "idc_pingpong_service_channel_0",
    "idc_pingpong_service_channel_1"
};

int FLAG_connect = 0;
int FLAG_ack     = 0;

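// Handles SYN on channel 0: reply with SYN/ACK on the same binding, then
// spin in a nested event_dispatch() until the ACK (sent on the second
// binding) arrives and ack_handler sets FLAG_ack.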
void syn_handler(struct idc_pingpong_binding *b){
    debug_printf("arrived at syn\n");

    b->tx_vtbl.syn_ack(b, NOP_CONT);

    do{
        debug_printf("wait ack\n");

        event_dispatch(get_default_waitset());
    }while(!FLAG_ack);
}

void syn_ack_handler(struct idc_pingpong_binding *b){
    debug_printf("arrived at syn_ack\n");
}

void ack_handler(struct idc_pingpong_binding *b){
    debug_printf("arrived at ack\n");

    FLAG_ack = 1;
}

/**
 * START OF MESSAGING EXPORT/BINDING SERVICE
 */

struct idc_pingpong_rx_vtbl idc_pingpong_rx_vtbl = {
    .syn     = syn_handler,
    .syn_ack = syn_ack_handler,
    .ack     = ack_handler
};

static void bind_cb(void *st, errval_t err, struct idc_pingpong_binding *b){
    if(err_is_fail(err)){
        USER_PANIC_ERR(err, "bind failed");
    }

    debug_printf("client bound!\n");

    // copy my message receive handler vtable to the binding
    b->rx_vtbl = idc_pingpong_rx_vtbl;

    binding[(int) st] = b;
}

static void start_client(int channel){
    iref_t iref;
    errval_t err;

    debug_printf("client looking up '%s' in name service...\n",
                 idc_pingpong_service_name[channel]);
    err = nameservice_blocking_lookup(idc_pingpong_service_name[channel],
                                      &iref);
    if(err_is_fail(err)){
        USER_PANIC_ERR(err, "nameservice_blocking_lookup failed");
    }

    debug_printf("client binding to %"PRIuIREF"...\n", iref);
    err = idc_pingpong_bind(iref, bind_cb, (void *) channel,
                            get_default_waitset(), IDC_BIND_FLAGS_DEFAULT);
    if(err_is_fail(err)){
        USER_PANIC_ERR(err, "bind failed");
    }

    while(binding[channel] == NULL)
        event_dispatch(get_default_waitset());
}

/* ------------------------------ SERVER ------------------------------ */

static void export_cb(void *st, errval_t err, iref_t iref){
    if(err_is_fail(err)){
        USER_PANIC_ERR(err, "export failed");
    }

    debug_printf("service exported at iref %"PRIuIREF"\n", iref);

    // register this iref with the name service
    err = nameservice_register(idc_pingpong_service_name[(int) st], iref);
    if(err_is_fail(err)){
        USER_PANIC_ERR(err, "nameservice_register failed");
    }
}

static errval_t connect_cb(void *st, struct idc_pingpong_binding *b){
    debug_printf("service got a connection!\n");

    // copy my message receive handler vtable to the binding
    b->rx_vtbl = idc_pingpong_rx_vtbl;

    binding[(int) st] = b;

    FLAG_connect = 1;

    // accept the connection (we could return an error to refuse it)
    return SYS_ERR_OK;
}

static void start_server(int channel){
    errval_t err;

    err = idc_pingpong_export((void *) channel, export_cb, connect_cb,
                              get_default_waitset(), IDC_EXPORT_FLAGS_DEFAULT);
    if(err_is_fail(err)){
        USER_PANIC_ERR(err, "export failed");
    }

    do{
        event_dispatch(get_default_waitset());
    }while(!FLAG_connect);

    FLAG_connect = 0;
}

/**
 * END OF MESSAGING EXPORT/BINDING SERVICE
 */

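// Core 0 spawns this program on core 1 and exports one service per channel;
// core 1 binds to both. Core 1 then sends SYN on channel 0, dispatches one
// event (the SYN/ACK, also on channel 0), and finally sends the ACK on
// channel 1: using the second binding is what avoids the deadlock.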
int main(int argc, char* argv[]){
    debug_printf("Core ID: %d\n", disp_get_core_id());

    if(disp_get_core_id() == 0){
        spawn_program(1, "/scc/sbin/idc_pingpong_double_channel", argv, NULL,
                      SPAWN_NEW_DOMAIN, NULL);
    }

    for(int i = 0; i < 2; ++i){
        if(disp_get_core_id() == 0){
            start_server(i);
        }
        else start_client(i);
    }

    if(disp_get_core_id() == 1){
        binding[0]->tx_vtbl.syn(binding[0], NOP_CONT);
        event_dispatch(get_default_waitset());
        binding[1]->tx_vtbl.ack(binding[1], NOP_CONT);

        debug_printf("sent ack\n");
    }
    else event_dispatch(get_default_waitset());

    return 0;
}


On 19 November 2014 at 20:04, Dominic Hung <domchdh at hku.hk> wrote:

> Dear Stefan,
>
> Thank you for your reply. I have had Flounder working all along; it is
> only at this advanced stage of my project, where I need a 3-way handshake
> to accomplish some tasks, that I have encountered this for the first time.
>
> I have found an earlier email on what appears to be the same issue on a
> different architecture; the reporter seems to be using x86_64:
> https://lists.inf.ethz.ch/pipermail/barrelfish-users/2011-December/000370.html.
> Although that user talks about binding, I believe it is a 3-way handshake
> problem as well. So I wonder: could you try my short code here on your
> platform to see if it is some unsolved problem?
>
>
>    1. *menu.lst.scc*
>
>    timeout 0
>    title   Barrelfish
>    root    (nd)
>    kernel  /scc/sbin/cpu loglevel=4
>    module  /scc/sbin/cpu
>    module  /scc/sbin/init
>
>    # Domains spawned by init
>    module  /scc/sbin/mem_serv
>    module  /scc/sbin/monitor
>
>    module  /scc/sbin/ramfsd boot
>    module  /scc/sbin/skb boot
>    module  /scc/sbin/spawnd boot BOOT_CORE
>    module  /scc/sbin/startd boot
>
>    module /scc/sbin/pingpong
>
>    # RCK memory map
>    # 624MB private RAM
>    mmap    map     0x0         0x27000000  1
>    mmap    map     0x80000000  0x03000000  1
>    # 1GB shared RAM (over all MCs) in the middle of address space
>    mmap    map     0x84000000  0x3c000000  1
>    # 384MB MPB space
>    mmap    mmp     0xc0000000  0x18000000  1
>    # 16MB more private RAM (bootup jump is in here, too)
>    mmap    map     0xff000000  0x1000000   1
>
>    2. *Config.hs*
>
>    same as shipped
>
>    3. *Output*
>
>    1. Core 0
>
>       1416397472.193425394 spawnd.0: spawning /scc/sbin/pingpong on core 0
>       No bootscript
>       1416397472.207301153 pingpong.0: lmp TX
>       monitor.get_monitor_rpc_iref_request
>       1416397472.210663985 pingpong.0: lmp RX
>       monitor.get_monitor_rpc_iref_reply
>       1416397472.211743348 pingpong.0: lmp TX
>       monitor.bind_lmp_client_request
>       1416397472.219533665 pingpong.0: lmp RX
>       monitor.bind_lmp_reply_client
>       1416397472.220788388 pingpong.0: lmp TX monitor.get_mem_iref_request
>       1416397472.224258136 pingpong.0: lmp RX monitor.get_mem_iref_reply
>       1416397472.225360878 pingpong.0: lmp TX
>       monitor.bind_lmp_client_request
>       1416397472.233636881 pingpong.0: lmp RX
>       monitor.bind_lmp_reply_client
>       1416397472.234784686 pingpong.0: lmp TX
>       monitor.get_name_iref_request
>       1416397472.238262570 pingpong.0: lmp RX monitor.get_name_iref_reply
>       1416397472.239451816 pingpong.0: lmp TX
>       monitor.bind_lmp_client_request
>       1416397472.247847588 pingpong.0: lmp RX
>       monitor.bind_lmp_reply_client
>       1416397472.248999078 pingpong.0: lmp TX octopus.get_call
>       1416397472.252848048 pingpong.0: lmp RX octopus.get_response
>       1416397472.254001968 pingpong.0: lmp TX mem.allocate_call
>       1416397472.257634876 pingpong.0: lmp RX mem.allocate_response
>       1416397472.258915623 pingpong.0: lmp TX mem.allocate_call
>       1416397472.262757649 pingpong.0: lmp RX mem.allocate_response
>       1416397472.263921523 pingpong.0: lmp TX mem.allocate_call
>       1416397472.267189100 pingpong.0: lmp RX mem.allocate_response
>       1416397472.268269091 pingpong.0: lmp TX monitor.alloc_iref_request
>       1416397472.271496468 pingpong.0: lmp RX monitor.alloc_iref_reply
>       1416397472.272688617 pingpong.0: lmp TX mem.allocate_call
>       1416397472.276029060 pingpong.0: lmp RX mem.allocate_response
>       1416397472.277117203 pingpong.0: lmp TX mem.allocate_call
>       1416397472.280418326 pingpong.0: lmp RX mem.allocate_response
>       1416397472.281633837 pingpong.0: lmp TX mem.allocate_call
>       1416397472.285130236 pingpong.0: lmp RX mem.allocate_response
>       1416397472.286213212 pingpong.0: 0
>       1416397472.287393796 pingpong.0: lmp TX octopus.wait_for_call
>       1416397472.290793620 pingpong.0: lmp RX octopus.wait_for_response
>       1416397472.291893357 pingpong.0: lmp TX
>       monitor.bind_lmp_client_request
>       1416397472.295395728 pingpong.0: lmp RX
>       monitor.bind_lmp_reply_client
>       1416397472.296516289 pingpong.0: lmp TX
>       monitor.ipi_alloc_notify_request
>       1416397472.299998660 pingpong.0: lmp RX
>       monitor.ipi_alloc_notify_reply
>       1416397472.301140384 pingpong.0: lmp TX mem.allocate_call
>       1416397472.304751910 pingpong.0: lmp RX mem.allocate_response
>       1416397472.307013834 pingpong.0: lmp TX
>       monitor.bind_ump_client_request
>       1416397472.312573193 pingpong.0: lmp RX
>       monitor.bind_ump_reply_client
>       1416397472.313715186 pingpong.0: ump_ipi TX spawn.spawn_domain_call
>       1416397472.820466055 pingpong.0: ump_ipi RX
>       spawn.spawn_domain_response
>       1416397472.821805616 pingpong.0: lmp TX monitor.alloc_iref_request
>       1416397472.825324473 pingpong.0: lmp RX monitor.alloc_iref_reply
>       1416397472.827624590 pingpong.0: lmp TX octopus.set_call
>       1416397472.831331441 pingpong.0: lmp RX octopus.set_response
>       1416397472.926158789 pingpong.0: lmp RX
>       monitor.bind_ump_service_request
>       1416397472.928521575 pingpong.0: lmp TX
>       monitor.ipi_alloc_notify_request
>       1416397472.932083642 pingpong.0: lmp RX
>       monitor.ipi_alloc_notify_reply
>       1416397472.933496293 pingpong.0: lmp TX
>       monitor.bind_ump_reply_monitor
>       1416397472.936907411 pingpong.0: ump_ipi RX pingpong.syn
>       1416397472.938218913 pingpong.0: arrived at syn
>       1416397472.939364776 pingpong.0: ump_ipi TX pingpong.syn_ack
>
>       2. Core 1
>
>       1416397473.055278067 spawnd.1: spawning /scc/sbin/pingpong on core 1
>       1416397473.057542275 pingpong.1: lmp TX
>       monitor.get_monitor_rpc_iref_request
>       1416397473.060972745 pingpong.1: lmp RX
>       monitor.get_monitor_rpc_iref_reply
>       1416397473.062117234 pingpong.1: lmp TX
>       monitor.bind_lmp_client_request
>       1416397473.065639868 pingpong.1: lmp RX
>       monitor.bind_lmp_reply_client
>       1416397473.066753919 pingpong.1: lmp TX monitor.get_mem_iref_request
>       1416397473.070126286 pingpong.1: lmp RX monitor.get_mem_iref_reply
>       1416397473.071468210 pingpong.1: lmp TX
>       monitor.bind_lmp_client_request
>       1416397473.079999095 pingpong.1: lmp RX
>       monitor.bind_lmp_reply_client
>       1416397473.081153001 pingpong.1: lmp TX
>       monitor.get_name_iref_request
>       1416397473.084712518 pingpong.1: lmp RX monitor.get_name_iref_reply
>       1416397473.085904866 pingpong.1: lmp TX
>       monitor.bind_lmp_client_request
>       1416397473.089447048 pingpong.1: lmp RX
>       monitor.bind_lmp_reply_client
>       1416397473.090610244 pingpong.1: lmp TX
>       monitor.new_monitor_binding_request
>       1416397473.098959528 pingpong.1: lmp RX
>       monitor.new_monitor_binding_reply
>       1416397473.100127046 pingpong.1: lmp TX
>       monitor.ipi_alloc_notify_request
>       1416397473.103623312 pingpong.1: lmp RX
>       monitor.ipi_alloc_notify_reply
>       1416397473.105002543 pingpong.1: lmp TX mem.allocate_call
>       1416397473.108475480 pingpong.1: lmp RX mem.allocate_response
>       1416397473.110991463 pingpong.1: lmp TX mem.allocate_call
>       1416397473.114494759 pingpong.1: lmp RX mem.allocate_response
>       1416397473.115673400 pingpong.1: lmp TX mem.allocate_call
>       1416397473.119207638 pingpong.1: lmp RX mem.allocate_response
>       1416397473.120409815 pingpong.1: lmp TX mem.allocate_call
>       1416397473.124131980 pingpong.1: lmp RX mem.allocate_response
>       1416397473.125277184 pingpong.1: lmp TX
>       monitor.bind_ump_client_request
>       1416397473.130996583 pingpong.1: lmp RX
>       monitor.bind_ump_reply_client
>       1416397473.132187371 pingpong.1: lmp TX mem.allocate_call
>       1416397473.135670285 pingpong.1: lmp RX mem.allocate_response
>       1416397473.136812122 pingpong.1: ump_ipi TX octopus.get_call
>       1416397473.137962422 pingpong.1: ump_ipi RX octopus.get_response
>       1416397473.139079514 pingpong.1: lmp TX monitor.alloc_iref_request
>       1416397473.142567382 pingpong.1: lmp RX monitor.alloc_iref_reply
>       1416397473.143769336 pingpong.1: lmp TX mem.allocate_call
>       1416397473.147360865 pingpong.1: lmp RX mem.allocate_response
>       1416397473.148610171 pingpong.1: lmp TX mem.allocate_call
>       1416397473.152200655 pingpong.1: lmp RX mem.allocate_response
>       1416397473.153391079 pingpong.1: 1
>       1416397473.154578550 client looking up 'pingpong_service' in name
>       service...
>       1416397473.155664528 pingpong.1: ump_ipi TX octopus.wait_for_call
>       1416397473.156914538 pingpong.1: ump_ipi RX
>       octopus.wait_for_response
>       1416397473.159217255 pingpong.1: lmp TX
>       monitor.bind_lmp_client_request
>       1416397473.162567186 pingpong.1: lmp RX
>       monitor.bind_lmp_reply_client
>       1416397473.163670711 pingpong.1: lmp TX
>       monitor.ipi_alloc_notify_request
>       1416397473.167537080 pingpong.1: lmp RX
>       monitor.ipi_alloc_notify_reply
>       1416397473.168660700 pingpong.1: lmp TX mem.allocate_call
>       1416397473.172058267 pingpong.1: lmp RX mem.allocate_response
>       1416397473.174202081 pingpong.1: lmp TX
>       monitor.bind_ump_client_request
>       1416397473.180213583 pingpong.1: lmp RX
>       monitor.bind_ump_reply_client
>       1416397473.182740446 pingpong.1: ump_ipi TX pingpong.syn
>       1416397473.183944994 pingpong.1: ump_ipi RX pingpong.syn_ack
>       1416397473.185028655 pingpong.1: arrived at syn_ack
>       1416397473.186160651 pingpong.1: ump_ipi TX pingpong.ack
>       1416397473.188393049 pingpong.1: ump_ipi TX octopus.get_call
>       1416397473.189509675 pingpong.1: ump_ipi RX octopus.get_response
>       1416397473.190729439 pingpong.1: lmp TX
>       monitor.bind_lmp_client_request
>       1416397473.199032249 pingpong.1: lmp RX
>       monitor.bind_lmp_reply_client
>       1416397473.375037500 pingpong.1: lmp TX spawn.exit_call
>
>    4. The *versions* of Barrelfish, gcc, and ghc that I used:
>
>    barrelfish: release2012-06-06
>    gcc version: 4.4.3
>    ghc version: 6.10.4
>
>
> Sorry for the trouble, and thanks!
>
> Cheers,
> Dominic Hung
>
> On 19 November 2014 18:33, Stefan Kaestle <stefan.kaestle at inf.ethz.ch>
> wrote:
>
>>  Hi Dominic,
>>
>> Unfortunately, we do not have access to SCC hardware any more so it is
>> hard for us to reproduce SCC related problems.
>>
>> One thing to keep in mind is that the x86-default flounder backend for
>> cross-core message passing (UMP) does not work on the SCC. This is due to a
>> lack of cache coherence. Instead, we use UMP-IPI, an extension to UMP that
>> sends a notification (i.e. an inter-processor interrupt) to indicate
>> message availability to the receiver. The receiver can then invalidate its
>> cache to make sure that the next read is done from memory (and not the
>> outdated cache).
>>
>> From looking at your code, I am a bit suspicious about the way you send
>> messages (although I might be wrong and that is actually not the problem).
>> I am assuming that export and connect work?
>> In general, a good starting point for developing IPC applications is to
>> look at some of our examples in usr/examples.
>>
>> I suggest the following:
>>
>>  1. Can you please try to run usr/examples/xmpl-call-response on your SCC
>> and tell me if that works?
>>  2. Convert xmpl-call-response to do the 3-way handshake
>>  3. Test on x86_32 in qemu
>>  4. Test on SCC
>>
>> Let me know if this helps!
>>
>> Also, can you please attach the following additional information to your
>> next email:
>>
>>  1. Your menu.lst
>>  2. A list of changes to hake/Config.hs
>>  3. Output when you run the program (e.g. does connect/export work)
>>  4. Your Barrelfish release, gcc and ghc version
>>
>>
>> Hope this helps,
>>     Stefan
>>
>> On 11/19/2014 06:54 AM, Dominic Hung wrote:
>>
>> Dear Team Barrelfish,
>>
>> I am currently having trouble doing a 3-way-handshake-style IDC. What I
>> would like to achieve is:
>>
>>  Core 0 -- Send Syn -> Core 1
>> Core 0 <- Send Syn/Ack -- Core 1
>> Core 0 -- Send Ack --> Core 1
>>
>> I had no luck doing this on my original system: the message is sent from
>> core 0 with no error code, but core 1 keeps looping, waiting for the
>> event. After troubleshooting for quite some time, I wrote a smaller-scale
>> test program to verify that the 3-way handshake can be done, and I still
>> got the same result, i.e., core 0 sends the ACK with no error code while
>> core 1 spins waiting for an event, with no arrival or delivery.
>>
>> My platform is, again (I am sorry), the Intel SCC. The latest tag I can
>> compile for the Intel SCC is release2012-11-03, and the tag I am actually
>> using is release2012-06-06. I hope you can help.
>>
>> The micro-program is attached below.
>>
>>  #include "stdio.h"
>> #include "stdbool.h" // FOR BOOL
>> #include <errors/errno.h> // FOR ERRVAL_T
>> #include "arch/x86_32/barrelfish_kpi/eflags_arch.h"
>>  #include "arch/x86_32/barrelfish_kpi/paging_arch.h"
>> #include "barrelfish_kpi/syscalls.h" // FOR STRUCT SYSRET
>> #include "barrelfish/capabilities.h"
>> #include "if/pingpong_defs.h"
>> #include "barrelfish/domain.h"
>>  #include "barrelfish/spawn_client.h"
>> #include "barrelfish/waitset.h"
>> #include "barrelfish/nameservice_client.h"
>> #include "rck_dev.h"
>>
>>  struct pingpong_binding* binding;
>> int flag_connect = 0;
>> int flag_ack = 0;
>>
>>  void syn_handler(struct pingpong_binding *b){
>>     debug_printf("arrived at syn\n");
>>
>>      b->tx_vtbl.syn_ack(b, NOP_CONT);
>>
>>      do{
>>         printf("wait ack\n");
>>         event_dispatch(get_default_waitset());
>>     } while(!flag_ack);
>> }
>>
>>  void syn_ack_handler(struct pingpong_binding *b){
>>     debug_printf("arrived at syn_ack\n");
>> }
>>
>>  void ack_handler(struct pingpong_binding *b){
>>     debug_printf("arrived at ack\n");
>>
>>      flag_ack = 1;
>> }
>>
>>  struct pingpong_rx_vtbl pingpong_rx_vtbl = {
>>      .syn     = syn_handler,
>>     .syn_ack = syn_ack_handler,
>>     .ack     = ack_handler,
>> };
>>
>>  static void bind_cb(void *st, errval_t err, struct pingpong_binding *b)
>> {
>>     if (err_is_fail(err)) {
>>         USER_PANIC_ERR(err, "bind failed");
>>     }
>>
>>      printf("client bound!\n");
>>
>>      // copy my message receive handler vtable to the binding
>>     b->rx_vtbl = pingpong_rx_vtbl;
>>
>>      binding = b;
>> }
>>
>>  static void start_client(void)
>> {
>>     iref_t iref;
>>     errval_t err;
>>
>>      printf("client looking up '%s' in name service...\n",
>> "pingpong_service");
>>     err = nameservice_blocking_lookup("pingpong_service", &iref);
>>     if (err_is_fail(err)) {
>>         USER_PANIC_ERR(err, "nameservice_blocking_lookup failed");
>>     }
>>
>>      printf("client binding to %"PRIuIREF"...\n", iref);
>>     err = pingpong_bind(iref, bind_cb, NULL, get_default_waitset(),
>> IDC_BIND_FLAGS_DEFAULT);
>>     if (err_is_fail(err)) {
>>         USER_PANIC_ERR(err, "bind failed");
>>     }
>>
>>      while(binding == NULL)
>>         event_dispatch(get_default_waitset());
>> }
>>
>>  /* ------------------------------ SERVER ------------------------------
>> */
>>
>>  static void export_cb(void *st, errval_t err, iref_t iref)
>> {
>>     if (err_is_fail(err)) {
>>         USER_PANIC_ERR(err, "export failed");
>>     }
>>
>>      printf("service exported at iref %"PRIuIREF"\n", iref);
>>
>>      // register this iref with the name service
>>     err = nameservice_register("pingpong_service", iref);
>>     printf("service exported at iref %"PRIuIREF"\n", iref);
>>     if (err_is_fail(err)) {
>>         USER_PANIC_ERR(err, "nameservice_register failed");
>>     }
>>     printf("service exported at iref %"PRIuIREF"\n", iref);
>> }
>>
>>  static errval_t connect_cb(void *st, struct pingpong_binding *b)
>> {
>>     printf("service got a connection!\n");
>>
>>      // copy my message receive handler vtable to the binding
>>     b->rx_vtbl = pingpong_rx_vtbl;
>>
>>      flag_connect = 1;
>>
>>      // accept the connection (we could return an error to refuse it)
>>     return SYS_ERR_OK;
>> }
>>
>>  static void start_server(void)
>> {
>>     errval_t err;
>>
>>      err = pingpong_export(NULL, export_cb, connect_cb,
>> get_default_waitset(), IDC_EXPORT_FLAGS_DEFAULT);
>>     if (err_is_fail(err)) {
>>         USER_PANIC_ERR(err, "export failed");
>>     }
>>
>>      do{
>>         event_dispatch(get_default_waitset());
>>     } while(!flag_connect);
>> }
>>
>>  int main(int argc, char* argv[]){
>>     debug_printf("%d\n", disp_get_core_id());
>>
>>      if(disp_get_core_id() == 0){
>>         spawn_program(1, "/scc/sbin/pingpong", argv, NULL,
>> SPAWN_NEW_DOMAIN, NULL);
>>
>>          start_server();
>>     }
>>     else start_client();
>>
>>      if(disp_get_core_id() == 1){
>>           binding->tx_vtbl.syn(binding, NOP_CONT);
>>           event_dispatch(get_default_waitset());
>>           binding->tx_vtbl.ack(binding, NOP_CONT);
>>           printf("sent ack\n");
>>     }
>>     else{
>>         event_dispatch(get_default_waitset());
>>     }
>>
>>      return 0;
>> }
>>
>>  Thanks again!
>>
>>  Cheers,
>>  Dominic Hung
>>
>> --------------------------------------------------
>> C H Dominic Hung, B.Eng. (CE) HK
>>  M. Phil. Student, Dept. of CS., Faculty of Engg.
>>
>> Mobile: +852-9819-9360
>> Email: domchdh at hku.hk
>>
>>
>> _______________________________________________
>> Barrelfish-users mailing list
>> Barrelfish-users at lists.inf.ethz.ch
>> https://lists.inf.ethz.ch/mailman/listinfo/barrelfish-users
>>
>>
>> --
>> Stefan
>> http://people.inf.ethz.ch/skaestle/
>>
>>
>
>
> --
> Dominic Hung
>
> --------------------------------------------------
> C H Dominic Hung, B.Eng. (CE) HK
> M. Phil. Student, Dept. of CS., Faculty of Engg.
>
> Mobile: +852-9819-9360
> Email: domchdh at hku.hk
>



-- 
Dominic Hung

--------------------------------------------------
C H Dominic Hung, B.Eng. (CE) HK
M. Phil. Student, Dept. of CS., Faculty of Engg.

Mobile: +852-9819-9360
Email: domchdh at hku.hk