00001
00002
00011
00012
00013
00014
00015
00039
00040 #include <l4/env/errno.h>
00041 #include <l4/lock/lock.h>
00042 #include <l4/names/libnames.h>
00043 #include <l4/l4rm/l4rm.h>
00044 #include <l4/dm_mem/dm_mem.h>
00045 #include <l4/dm_phys/dm_phys.h>
00046
00047 #include <l4/dde_linux/dde.h>
00048
00049
00050 #include <linux/slab.h>
00051 #include <linux/vmalloc.h>
00052
00053
00054 #include <oskit/lmm.h>
00055
00056
00057 #include "__config.h"
00058 #include "internal.h"
00059
00064 static int _initialized = 0;
00065
00066
00068 static lmm_t kpool = LMM_INITIALIZER;
00070 static lmm_region_t kregion[MM_KREGIONS];
00072 static unsigned int kregion_size;
00074 static unsigned int kcount = MM_KREGIONS - 1;
00076 static l4lock_t klock = L4LOCK_UNLOCKED_INITIALIZER;
00077
00078
00080 static lmm_t vpool = LMM_INITIALIZER;
00082 static lmm_region_t vregion;
00084 static l4lock_t vlock = L4LOCK_UNLOCKED_INITIALIZER;
00085
00107 static __inline__ int __more_kcore(l4_size_t size, l4_uint32_t flags)
00108 {
00109 int error;
00110 l4_addr_t kaddr;
00111
00112 l4_size_t tmp;
00113 l4dm_mem_addr_t dm_paddr;
00114
00115 if (!(kcount) || (size > kregion_size))
00116 {
00117 LOGdL(DEBUG_ERRORS, "Error: out of memory (kmem)");
00118 return -L4_ENOMEM;
00119 }
00120
00121 LOGd(DEBUG_MALLOC, "requesting %d bytes (kmem)", size);
00122
00123
00124 kaddr = (l4_addr_t) \
00125 l4dm_mem_allocate_named(kregion_size,
00126 L4DM_CONTIGUOUS | L4DM_PINNED | L4RM_MAP,
00127 "dde kmem");
00128 if (!kaddr)
00129 {
00130 LOGdL(DEBUG_ERRORS, "Error: allocating kmem");
00131 return -L4_ENOMEM;
00132 }
00133
00134 error = l4dm_mem_phys_addr((void *) kaddr, 1, &dm_paddr, 1, &tmp);
00135 if (error != 1)
00136 {
00137 if (error>1 || !error)
00138 Panic("Ouch, what's that?");
00139 LOGdL(DEBUG_ERRORS, "Error: getting physical address (%d)", error);
00140 return error;
00141 }
00142
00143 LOGd(DEBUG_MALLOC, "adding %d Bytes (kmem) @ 0x%08x (phys. 0x%08x) region %d",
00144 size, kaddr, dm_paddr.addr, kcount);
00145
00146
00147 lmm_add_region(&kpool, &kregion[kcount--], (void *) kaddr, size, 0, 0);
00148
00149
00150 lmm_add_free(&kpool, (void *) kaddr, size);
00151
00152
00153 address_add_region(kaddr, dm_paddr.addr, size);
00154
00155 return 0;
00156 }
00157
00167 static __inline__ int __more_vcore(l4_size_t size)
00168 {
00169 LOGdL(DEBUG_ERRORS, "Error: out of memory (vmem)");
00170 return -L4_ENOMEM;
00171 }
00172
00183 void *kmalloc(size_t size, int gfp)
00184 {
00185 lmm_flags_t lmm_flags = 0;
00186 l4_uint32_t *chunk;
00187
00188 if (gfp & GFP_DMA)
00189 LOGd(DEBUG_MSG, "Warning: No ISA DMA implemented.");
00190
00191
00192 l4lock_lock(&klock);
00193
00194 size += sizeof(size_t);
00195 while (!(chunk = lmm_alloc(&kpool, size, lmm_flags)))
00196 {
00197 if (__more_kcore(size, lmm_flags))
00198 {
00199 #if DEBUG_MALLOC
00200 LOG("failed to allocate %d bytes (kmem)", size);
00201 lmm_dump(&kpool);
00202 lmm_stats(&kpool);
00203 #endif
00204 return NULL;
00205 }
00206 }
00207 *chunk = size;
00208
00209 LOGd(DEBUG_MALLOC_EACH, "allocated %d bytes @ %p (kmem)", *chunk, chunk);
00210
00211 l4lock_unlock(&klock);
00212
00213 return ++chunk;
00214 }
00215
00223 void kfree(const void *addr)
00224 {
00225 l4_uint32_t *chunk = (l4_uint32_t *) addr - 1;
00226
00227 if (!addr)
00228 return;
00229
00230
00231 l4lock_lock(&klock);
00232
00233 LOGd(DEBUG_MALLOC_EACH, "freeing %d bytes @ %p (kmem)", *chunk, chunk);
00234
00235 lmm_free(&kpool, chunk, *chunk);
00236
00237 l4lock_unlock(&klock);
00238 }
00239
00249 void *vmalloc(unsigned long size)
00250 {
00251 lmm_flags_t lmm_flags = 0;
00252 l4_uint32_t *chunk;
00253
00254
00255 l4lock_lock(&vlock);
00256
00257 size += sizeof(size_t);
00258 while (!(chunk = lmm_alloc(&vpool, size, lmm_flags)))
00259 {
00260 if (__more_vcore(size))
00261 {
00262 #if DEBUG_MALLOC
00263 LOG("failed to allocate %ld bytes (vmem)", size);
00264 lmm_dump(&vpool);
00265 lmm_stats(&vpool);
00266 #endif
00267 return NULL;
00268 }
00269 }
00270 *chunk = size;
00271
00272 LOGd(DEBUG_MALLOC_EACH, "allocated %d bytes @ %p (vmem)", *chunk, chunk);
00273
00274 l4lock_unlock(&vlock);
00275
00276 return ++chunk;
00277 }
00278
00286 void vfree(void *addr)
00287 {
00288 l4_uint32_t *chunk = (l4_uint32_t *) addr - 1;
00289
00290 if (!addr)
00291 return;
00292
00293
00294 l4lock_lock(&vlock);
00295
00296 lmm_free(&vpool, chunk, *chunk);
00297
00298 LOGd(DEBUG_MALLOC_EACH, "freed %d bytes @ %p (vmem)", *chunk, chunk);
00299
00300 l4lock_unlock(&vlock);
00301 }
00302
00303
00305
/** One-time setup of the kmem pool backing store (first region).
 *
 * \param max   in: requested total kmem size in bytes;
 *              out: actual total size, kregion_size * MM_KREGIONS
 * \param addr  out: start address of the first allocated region
 *
 * \return 0 on success, negative L4 error code otherwise
 *
 * Computes the per-region size (total split over MM_KREGIONS, rounded up to
 * whole pages), allocates region 0 as pinned contiguous memory, and adds it
 * to the kmem pool and the virtual->physical address map. The remaining
 * regions are allocated lazily by __more_kcore().
 *
 * NOTE(review): on the l4dm_mem_phys_addr error path the freshly allocated
 * dataspace is not released — acceptable at init time, but worth confirming.
 */
00306 static int __setup_kmem(unsigned int *max, l4_addr_t *addr)
00307 {
00308 unsigned int size = *max;
00309 int error;
00310
00311 l4_size_t tmp;
00312 l4dm_mem_addr_t dm_paddr;
00313
/* split the requested total over the fixed number of regions */
00314 kregion_size = size / MM_KREGIONS;
00315 LOGd(DEBUG_MALLOC, "size/regions = %d", kregion_size);
00316
/* round each region up to a whole number of pages */
00317 kregion_size = (kregion_size + L4_PAGESIZE - 1) & L4_PAGEMASK;
00318 LOGd(DEBUG_MALLOC, "rsize (mod PAGESIZE) = %d", kregion_size);
00319
/* integer division above may have dropped a remainder; if the rounded
 * regions still sum to less than the request, grow each by one page */
00320 if (kregion_size * MM_KREGIONS < size)
00321 {
00322 kregion_size += L4_PAGESIZE;
00323 LOGd(DEBUG_MALLOC, "new rsize = %d\n", kregion_size);
00324 }
00325
00326 LOGd(DEBUG_MALLOC, "kregion_size = 0x%x regions = %d",
00327 kregion_size, MM_KREGIONS);
00328
/* allocate region 0 as pinned, physically contiguous, mapped memory */
00329
00330 *addr = (l4_addr_t) \
00331 l4dm_mem_allocate_named(kregion_size,
00332 L4DM_CONTIGUOUS | L4DM_PINNED | L4RM_MAP,
00333 "dde kmem");
00334 if (!*addr) return -L4_ENOMEM;
00335
/* must resolve to exactly 1 physical range (region is contiguous) */
00336 error = l4dm_mem_phys_addr((void *)*addr, 1, &dm_paddr, 1, &tmp);
00337 if (error != 1)
00338 {
00339 if (error>1 || !error)
00340 Panic("Ouch, what's that?");
00341 LOGdL(DEBUG_ERRORS, "Error: getting physical address (%d)", error);
00342 return error;
00343 }
00344
00345 LOGd(DEBUG_MALLOC, "adding %d Bytes (kmem) @ 0x%08x (phys. 0x%08x) region 0",
00346 kregion_size, *addr, dm_paddr.addr);
00347
/* publish region 0 to the kmem LMM pool */
00348
00349 lmm_add_region(&kpool, &kregion[0], (void *) *addr, kregion_size, 0, 0);
00350
/* make it available as free memory */
00351
00352 lmm_add_free(&kpool, (void *) *addr, kregion_size);
00353
/* register virtual -> physical mapping for address lookups */
00354
00355 address_add_region(*addr, dm_paddr.addr, kregion_size);
00356
/* report the final total pool capacity back to the caller */
00357 *max = kregion_size * MM_KREGIONS;
00358 return 0;
00359 }
00360
00361
00363
00364 static int __setup_vmem(unsigned int *max, l4_addr_t *addr)
00365 {
00366 unsigned int size = *max;
00367
00368
00369 size = (size + L4_PAGESIZE - 1) & L4_PAGEMASK;
00370
00371
00372 *addr = (l4_addr_t) l4dm_mem_allocate_named(size, L4RM_MAP, "dde vmem");
00373 if (!*addr) return -L4_ENOMEM;
00374
00375
00376 lmm_add_region(&vpool, &vregion, (void *) *addr, size, 0, 0);
00377
00378 lmm_add_free(&vpool, (void *) *addr, size);
00379
00380 *max = size;
00381 return 0;
00382 }
00383
00398 int l4dde_mm_init(unsigned int max_vsize, unsigned int max_ksize)
00399 {
00400 int error;
00401 l4_addr_t vaddr, kaddr;
00402
00403 char *vsize_str;
00404 char *ksize_str;
00405
00406 if (_initialized)
00407 return 0;
00408
00409
00410 if ((error=__setup_vmem(&max_vsize, &vaddr)))
00411 {
00412 LOGdL(DEBUG_ERRORS, "Error: setting up vmem: %d (%s)",
00413 error, l4env_strerror(-error));
00414 return error;
00415 }
00416
00417
00418 if ((error=__setup_kmem(&max_ksize, &kaddr)))
00419 {
00420 LOGdL(DEBUG_ERRORS, "Error: setting up kmem %d (%s)",
00421 error, l4env_strerror(-error));
00422 return error;
00423 }
00424
00425
00426 if (max_vsize > 8 * 1024 * 1024)
00427 {
00428 max_vsize /= (1024 * 1024);
00429 vsize_str = "MB";
00430 }
00431 else if (max_vsize > 8 * 1024)
00432 {
00433 max_vsize /= 1024;
00434 vsize_str = "kB";
00435 }
00436 else
00437 vsize_str = "Byte";
00438 if (max_ksize > 8 * 1024 * 1024)
00439 {
00440 max_ksize /= (1024 * 1024);
00441 ksize_str = "MB";
00442 }
00443 else if (max_ksize > 8 * 1024)
00444 {
00445 max_ksize /= 1024;
00446 ksize_str = "kB";
00447 }
00448 else
00449 ksize_str = "Byte";
00450 LOG("Using ...\n"
00451 " %d %s at 0x%08x (vmem)\n"
00452 " %d %s in %d regions (kmem)",
00453 max_vsize, vsize_str, vaddr, max_ksize, ksize_str, MM_KREGIONS);
00454
00455 #if DEBUG_MALLOC
00456 {
00457 int debug;
00458 l4dm_dataspace_t ds;
00459 l4_offs_t offset;
00460 l4_addr_t map_addr;
00461 l4_size_t map_size;
00462
00463 debug = l4rm_lookup((void*)vaddr, &ds, &offset, &map_addr, &map_size);
00464 if (debug)
00465 Panic("l4rm_lookup failed (%d)", debug);
00466 LOG("vmem: ds={%3u, "l4util_idfmt"} offset=%d map_addr=0x%08x map_size=%d",
00467 ds.id, l4util_idstr(ds.manager), offset, map_addr, map_size);
00468
00469 debug = l4rm_lookup((void*)kaddr, &ds, &offset, &map_addr, &map_size);
00470 if (debug)
00471 Panic("l4rm_lookup failed (%d)", debug);
00472 LOG("kmem: ds={%3u, "l4util_idfmt"} offset=%d map_addr=0x%08x map_size=%d",
00473 ds.id, l4util_idstr(ds.manager), offset, map_addr, map_size);
00474 }
00475 #endif
00476
00477 ++_initialized;
00478 return 0;
00479 }
00480
00483 int l4dde_mm_kmem_avail(void)
00484 {
00485 if (_initialized)
00486 {
00487 return lmm_avail(&kpool, 0);
00488 }
00489 return 0;
00490 }