<div dir="ltr">Hi Wang Nan,<div><br></div><div>You are right, there are currently no user-space drivers for ARM in the tree and an easy way for user-space programs to access device register is currently missing (as well as interrupt forwarding to user-space). We are currently investing some effort in better support for the pandaboard platform. Therefore, we need this as well. I am currently working on a solution that works similar to what we have for PCI on x86 right now. I hope this will be ready soon and we can release it with the next release.</div>
<div><br></div><div>If you want to test my changes prematurely, let me know. I can probably send you patches in a couple of days.</div><div><br></div><div>Gerd</div></div><div class="gmail_extra"><br><br><div class="gmail_quote">
On Wed, Jul 3, 2013 at 3:03 PM, Wang Nan <span dir="ltr"><<a href="mailto:wangnan0@huawei.com" target="_blank">wangnan0@huawei.com</a>></span> wrote:<br><blockquote class="gmail_quote" style="margin:0 0 0 .8ex;border-left:1px #ccc solid;padding-left:1ex">
Hi,<br>
I can't find a reference implementation of an ARM driver, so I implemented a UART driver myself. Here I would like you to provide some suggestions on my design.<br>
<br>
The key problem is to allow user-space programs to access ARM's memory-mapped registers.<br>
<br>
Following is my design and code fragment:<br>
<br>
1. Export RegionType_PlatformData to init during booting:<br>
<br>
--------------<br>
/* called by spawn_bsp_init() */<br>
static void<br>
create_arch_caps(void)<br>
{<br>
errval_t err;<br>
/* sysctl */<br>
err = create_caps_to_cnode(0x20000000, 0x1000, RegionType_PlatformData, &spawn_state, bootinfo);<br>
assert(err_is_ok(err));<br>
<br>
/* uart 0~2 */<br>
err = create_caps_to_cnode(0x20001000, 0x1000, RegionType_PlatformData, &spawn_state, bootinfo);<br>
assert(err_is_ok(err));<br>
err = create_caps_to_cnode(0x20002000, 0x1000, RegionType_PlatformData, &spawn_state, bootinfo);<br>
assert(err_is_ok(err));<br>
err = create_caps_to_cnode(0x20003000, 0x1000, RegionType_PlatformData, &spawn_state, bootinfo);<br>
assert(err_is_ok(err));<br>
}<br>
--------------<br>
<br>
2. pass pacn to mem_serv in init:<br>
--------------<br>
diff --git a/usr/init/spawn.c b/usr/init/spawn.c<br>
index e343bbe..4bf35b4 100644<br>
--- a/usr/init/spawn.c<br>
+++ b/usr/init/spawn.c<br>
@@ -35,6 +35,21 @@ errval_t initialize_mem_serv(struct spawninfo *si)<br>
return err_push(err, INIT_ERR_COPY_SUPERCN_CAP);<br>
}<br>
<br>
+ /* copy physcn to memory server */<br>
+ struct capref init_pacn_cap = {<br>
+ .cnode = cnode_root,<br>
+ .slot = ROOTCN_SLOT_PACN,<br>
+ };<br>
+<br>
+ struct capref child_pacn_cap = {<br>
+ .cnode = si->rootcn,<br>
+ .slot = ROOTCN_SLOT_PACN,<br>
+ };<br>
+<br>
+ err = cap_copy(child_pacn_cap, init_pacn_cap);<br>
+ if (err_is_fail(err)) {<br>
+ return err_push(err, INIT_ERR_COPY_SUPERCN_CAP);<br>
+ }<br>
return SYS_ERR_OK;<br>
}<br>
------------<br>
<br>
3. add a new function to mem_serv to let it alloc PhysAddr<br>
<br>
<br>
--------------<br>
diff --git a/if/mem.if b/if/mem.if<br>
index e487bf5..71469cf 100644<br>
--- a/if/mem.if<br>
+++ b/if/mem.if<br>
@@ -23,4 +23,10 @@ interface mem "Memory allocation RPC interface" {<br>
// XXX: Trusted call, may only be called by monitor.<br>
// Should move this to its own binding.<br>
rpc free_monitor(in give_away_cap mem_cap, in genpaddr base, in uint8 bits, out errval err);<br>
+<br>
+ rpc allocate_devram( in genpaddr addr,<br>
+ in uint8 sizebit,<br>
+ out errval ret,<br>
+ out give_away_cap devmem_cap );<br>
}<br>
--------------<br>
<br>
<br>
<br>
4. ugly code: let mem_serv alloc devram: I put the code fragment at the end of this mail.<br>
<br>
5. driver: use devram:<br>
<br>
+ struct mem_rpc_client *mem = get_mem_client();<br>
+ assert(mem != NULL);<br>
+<br>
+ errval_t errrpc, err;<br>
+ struct capref serial_cap;<br>
+ errrpc = mem->vtbl.allocate_devram(mem, 0x20001000ULL, 12, &err, &serial_cap);<br>
+ printf("allocate_devram rpc end\n");<br>
+ assert(err_is_ok(errrpc));<br>
+ assert(err_is_ok(err));<br>
+ void *uart_base;<br>
+ err = vspace_map_one_frame_attr(&uart_base,<br>
+ 0x1000,<br>
+ serial_cap,<br>
+ KPI_PAGING_FLAGS_READ | KPI_PAGING_FLAGS_WRITE | KPI_PAGING_FLAGS_NOCACHE,<br>
+ NULL,<br>
+ NULL);<br>
+ assert(err_is_ok(err));<br>
+<br>
+ printf("after vspace_map_one_frame_attr\n");<br>
+ printf("uart_base=%p\n", uart_base);<br>
+ assert(uart_base != NULL);<br>
+<br>
+ struct hi1380_uart_t *dev;<br>
+ hi1380_uart_initialize(dev, uart_base);<br>
<br>
<br>
My driver works, but the code is ugly, especially the mem_serv part. I think the additional RPC interface<br>
should be avoided.<br>
<br>
Could anyone provide a 'standard way' to write such a driver?<br>
<br>
<br>
<br>
<br>
<br>
The following code allows mem_serv to manage devram:<br>
--------------<br>
diff --git a/usr/mem_serv/mem_serv.c b/usr/mem_serv/mem_serv.c<br>
index fba93e2..556a506 100644<br>
--- a/usr/mem_serv/mem_serv.c<br>
+++ b/usr/mem_serv/mem_serv.c<br>
@@ -467,6 +467,96 @@ initialize_ram_alloc(void)<br>
return SYS_ERR_OK;<br>
}<br>
<br>
+struct alloc_devram_response_reply {<br>
+ struct mem_binding *b;<br>
+ errval_t err;<br>
+ struct capref cap;<br>
+};<br>
+static void<br>
+mem_allocate_devram_handler_response_done(void *args)<br>
+{<br>
+ errval_t err;<br>
+ struct alloc_devram_response_reply *r = args;<br>
+ if(!capref_is_null(r->cap)) {<br>
+ err = cap_delete(r->cap);<br>
+ if(err_is_fail(err)) {<br>
+ DEBUG_ERR(err, "cap_delete failed after send. This memory will leak.");<br>
+ }<br>
+ err = msa.a.free(&msa.a, r->cap);<br>
+ if(err_is_fail(err)) {<br>
+ DEBUG_ERR(err, "msa.a.free failed after send. This memory will leak.");<br>
+ }<br>
+ }<br>
+ free(r);<br>
+}<br>
+static void<br>
+mem_allocate_devram_handler_reply(void *args)<br>
+{<br>
+ struct alloc_devram_response_reply *r = args;<br>
+ struct mem_binding *b = r->b;<br>
+ errval_t err;<br>
+<br>
+ err = b->tx_vtbl.allocate_devram_response(b, MKCONT(mem_allocate_devram_handler_response_done, r),<br>
+ r->err, r->cap);<br>
+ if (err_is_fail(err)) {<br>
+ if (err_no(err) == FLOUNDER_ERR_TX_BUSY) {<br>
+ err = b->register_send(b, get_default_waitset(), MKCONT(mem_allocate_devram_handler_reply ,r));<br>
+ assert(err_is_ok(err));<br>
+ } else {<br>
+ DEBUG_ERR(err, "failed to reply to memory request");<br>
+ mem_allocate_devram_handler_response_done(r);<br>
+ }<br>
+ }<br>
+}<br>
+<br>
+static void<br>
+mem_allocate_devram_handler(struct mem_binding *_binding, mem_genpaddr_t addr, uint8_t sizebit)<br>
+{<br>
+ /* find the device */<br>
+ struct capref phyaddr_cap = {<br>
+ .cnode = cnode_phyaddr,<br>
+ .slot = 0,<br>
+ };<br>
+<br>
+ errval_t err = SYS_ERR_KERNEL_MEM_INVALID;<br>
+ for (int i = 0; i < bi->regions_length; i++) {<br>
+ if (bi->regions[i].mr_type != RegionType_PlatformData)<br>
+ continue;<br>
+<br>
+ if (bi->regions[i].mr_base == addr) {<br>
+ if (bi->regions[i].mr_bits != sizebit)<br>
+ break;<br>
+ err = SYS_ERR_OK;<br>
+ break;<br>
+ }<br>
+ phyaddr_cap.slot++;<br>
+ }<br>
+<br>
+ struct capref devframe;<br>
+ err = msa.a.alloc(&msa.a, &devframe);<br>
+ if (err_is_fail(err))<br>
+ goto out;<br>
+<br>
+ err = cap_retype(devframe, phyaddr_cap, ObjType_DevFrame, sizebit);<br>
+<br>
+<br>
+ struct alloc_devram_response_reply *r;<br>
+out:<br>
+ r = malloc(sizeof(*r));<br>
+ assert(r != NULL);<br>
+ r->b = _binding;<br>
+ if (err_is_ok(err)) {<br>
+<br>
+<br>
+ r->err = err;<br>
+ r->cap = devframe;<br>
+ /* need delete?? */<br>
+ } else {<br>
+ r->err = SYS_ERR_KERNEL_MEM_LOOKUP;<br>
+ r->cap = NULL_CAP;<br>
+ }<br>
+ mem_allocate_devram_handler_reply(r);<br>
+}<br>
+<br>
static void export_callback(void *st, errval_t err, iref_t iref)<br>
{<br>
assert(err_is_ok(err));<br>
@@ -479,6 +569,7 @@ static struct mem_rx_vtbl rx_vtbl = {<br>
.allocate_call = mem_allocate_handler,<br>
.available_call = mem_available_handler,<br>
.free_monitor_call = mem_free_handler,<br>
+ .allocate_devram_call = mem_allocate_devram_handler,<br>
};<br>
static errval_t connect_callback(void *st, struct mem_binding *b)<br>
--------------<br>
<br>
<br>
<br>
_______________________________________________<br>
Barrelfish-users mailing list<br>
<a href="mailto:Barrelfish-users@lists.inf.ethz.ch">Barrelfish-users@lists.inf.ethz.ch</a><br>
<a href="https://lists.inf.ethz.ch/mailman/listinfo/barrelfish-users" target="_blank">https://lists.inf.ethz.ch/mailman/listinfo/barrelfish-users</a><br>
</blockquote></div><br></div>