/*
 * Copyright 2004-2006, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
 * Copyright 2002/03, Thomas Kurschel. All rights reserved.
 *
 * Distributed under the terms of the MIT License.
 */

/*
	VM helper functions.

	Important assumption: get_memory_map must combine adjacent
	physical pages, so contiguous memory always leads to an S/G
	list of length one.
*/

#include "KernelExport_ext.h"
#include "wrapper.h"

#include <string.h>

#include <algorithm>


/** get sg list of iovec
 *	TBD: this should be moved to somewhere in kernel
 */

status_t
get_iovec_memory_map(iovec *vec, size_t vec_count, size_t vec_offset, size_t len,
	physical_entry *map, size_t max_entries, size_t *num_entries, size_t *mapped_len)
{
	size_t cur_idx;
	size_t left_len;

	SHOW_FLOW(3, "vec_count=%" B_PRIuSIZE ", vec_offset=%" B_PRIuSIZE ", len=%"
		B_PRIuSIZE ", max_entries=%" B_PRIuSIZE, vec_count, vec_offset, len,
		max_entries);

	// skip iovec blocks if needed
	while (vec_count > 0 && vec_offset > vec->iov_len) {
		vec_offset -= vec->iov_len;
		--vec_count;
		++vec;
	}

	for (left_len = len, cur_idx = 0;
			left_len > 0 && vec_count > 0 && cur_idx < max_entries;) {
		char *range_start;
		size_t range_len;
		status_t res;
		size_t cur_num_entries, cur_mapped_len;
		uint32 tmp_idx;

		SHOW_FLOW(3, "left_len=%d, vec_count=%d, cur_idx=%d",
			(int)left_len, (int)vec_count, (int)cur_idx);

		// map one iovec
		range_start = (char *)vec->iov_base + vec_offset;
		range_len = std::min(vec->iov_len - vec_offset, left_len);

		SHOW_FLOW(3, "range_start=%" B_PRIxADDR ", range_len=%" B_PRIxSIZE,
			(addr_t)range_start, range_len);

		vec_offset = 0;

		if ((res = get_memory_map(range_start, range_len, &map[cur_idx],
				max_entries - cur_idx)) != B_OK) {
			// according to the documentation, no error is ever reported - argh!
			SHOW_ERROR(1, "invalid io_vec passed (%s)", strerror(res));
			return res;
		}

		// unfortunately, get_memory_map() neither tells how many sg blocks
		// it used nor whether there were enough sg blocks at all;
		// -> determine that manually
		// TODO: Use get_memory_map_etc()!
		cur_mapped_len = 0;
		cur_num_entries = 0;

		for (tmp_idx = cur_idx; tmp_idx < max_entries; ++tmp_idx) {
			if (map[tmp_idx].size == 0)
				break;

			cur_mapped_len += map[tmp_idx].size;
			++cur_num_entries;
		}

		if (cur_mapped_len == 0) {
			panic("get_memory_map() returned empty list; left_len=%d, idx=%d/%d",
				(int)left_len, (int)cur_idx, (int)max_entries);
			SHOW_ERROR(2, "get_memory_map() returned empty list; left_len=%d, idx=%d/%d",
				(int)left_len, (int)cur_idx, (int)max_entries);
			return B_ERROR;
		}

		SHOW_FLOW(3, "cur_num_entries=%d, cur_mapped_len=%x",
			(int)cur_num_entries, (int)cur_mapped_len);

		// try to combine with previous sg block
		if (cur_num_entries > 0 && cur_idx > 0
			&& map[cur_idx].address
				== map[cur_idx - 1].address + map[cur_idx - 1].size) {
			SHOW_FLOW0(3, "combine with previous chunk");
			map[cur_idx - 1].size += map[cur_idx].size;
			// the source and destination ranges overlap, so use memmove()
			memmove(&map[cur_idx], &map[cur_idx + 1],
				(cur_num_entries - 1) * sizeof(map[0]));
			--cur_num_entries;
		}

		cur_idx += cur_num_entries;
		left_len -= cur_mapped_len;

		// advance iovec if current one is described completely
		if (cur_mapped_len == range_len) {
			++vec;
			--vec_count;
		}
	}

	*num_entries = cur_idx;
	*mapped_len = len - left_len;

	SHOW_FLOW(3, "num_entries=%" B_PRIuSIZE ", mapped_len=%" B_PRIxSIZE,
		*num_entries, *mapped_len);

	return B_OK;
}
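
/*
	A minimal usage sketch (not part of the original driver): it shows how a
	caller could turn an iovec array into a physical S/G list via
	get_iovec_memory_map(). The helper name example_build_sg_list and the
	fixed table size of 32 entries are assumptions made for illustration
	only, so the sketch is kept out of the build with #if 0.
*/
#if 0
static status_t
example_build_sg_list(iovec *vecs, size_t vec_count, size_t transfer_len)
{
	physical_entry sg_list[32];
	size_t num_entries, mapped_len;

	// map up to transfer_len bytes, starting at offset 0 into the iovecs
	status_t res = get_iovec_memory_map(vecs, vec_count, 0, transfer_len,
		sg_list, 32, &num_entries, &mapped_len);
	if (res != B_OK)
		return res;

	// mapped_len may be smaller than transfer_len if sg_list was too small;
	// a real driver would split the transfer and map the remainder later,
	// passing the bytes already consumed as vec_offset
	for (size_t i = 0; i < num_entries; ++i) {
		SHOW_FLOW(3, "sg[%" B_PRIuSIZE "]: address=%#" B_PRIxPHYSADDR
			", size=%#" B_PRIxPHYSADDR, i, sg_list[i].address,
			sg_list[i].size);
	}

	return B_OK;
}
#endif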