/*
	Copyright (c) 2002, Thomas Kurschel

	Part of Radeon kernel driver

	PCI GART.

	Currently, we use PCI DMA. Changing to AGP would
	only affect this file, but AGP GART is specific to
	the motherboard's chipset, and as DMA is really
	overkill for 2D, I can't be bothered to write a dozen
	AGP drivers just to gain a little extra speed.
*/


#include "radeon_driver.h"
#include "mmio.h"
#include "buscntrl_regs.h"
#include "memcntrl_regs.h"
#include "cp_regs.h"

#include <image.h>

#include <stdlib.h>
#include <string.h>


#if 1
//! create actual GART buffer
static status_t
createGARTBuffer(GART_info *gart, size_t size)
{
	SHOW_FLOW0(3, "");

	// round the buffer size up to a whole number of pages
	gart->buffer.size = size = (size + B_PAGE_SIZE - 1) & ~(B_PAGE_SIZE - 1);

	// if this buffer is used for PCI BM, cache snooping
	// takes care of syncing memory accesses; if used for AGP,
	// we'd have to access it via the AGP aperture (and mark the
	// aperture as write-combined), as cache consistency is not
	// guaranteed there

	// the specs say that some chipsets do a kind of lazy flushing,
	// so the graphics card may read obsolete data; so far we use
	// PCI only, where this shouldn't happen by design;
	// if we change to AGP, we may have to tweak the pre-charge time
	// of the write buffer pointer

	// as some variables in the accelerant point directly into
	// the DMA buffer, we have to grant access to all apps
	gart->buffer.area = create_area("Radeon PCI GART buffer",
		&gart->buffer.ptr, B_ANY_KERNEL_ADDRESS,
		size, B_FULL_LOCK,
#ifdef HAIKU_TARGET_PLATFORM_HAIKU
		// TODO: really user read/write?
		B_READ_AREA | B_WRITE_AREA | B_USER_CLONEABLE_AREA
#else
		0
#endif
		);
	if (gart->buffer.area < 0) {
		SHOW_ERROR(1, "cannot create PCI GART buffer (%s)",
			strerror(gart->buffer.area));
		return gart->buffer.area;
	}

	gart->buffer.unaligned_area = -1;

	memset(gart->buffer.ptr, 0, size);

	return B_OK;
}

#else

static status_t createGARTBuffer(GART_info *gart, size_t size)
{
	physical_entry map[1];
	void *unaligned_addr, *aligned_phys;

	SHOW_FLOW0(3, "");

	gart->buffer.size = size = (size + B_PAGE_SIZE - 1) & ~(B_PAGE_SIZE - 1);

	// we allocate a contiguous area of twice the requested size
	// so that an aligned, contiguous range can be found within it;
	// the graphics card doesn't care, but the CPU cannot make an
	// arbitrary range write-combined, at least older ones can't
	// question: is this necessary for a PCI GART at all, given bus snooping?
	gart->buffer.unaligned_area = create_area("Radeon PCI GART buffer",
		&unaligned_addr, B_ANY_KERNEL_ADDRESS,
		2 * size, B_CONTIGUOUS/*B_FULL_LOCK*/,
		B_READ_AREA | B_WRITE_AREA | B_USER_CLONEABLE_AREA);
	if (gart->buffer.unaligned_area < 0) {
		SHOW_ERROR(1, "cannot create PCI GART buffer (%s)",
			strerror(gart->buffer.unaligned_area));
		return gart->buffer.unaligned_area;
	}

	get_memory_map(unaligned_addr, B_PAGE_SIZE, map, 1);

	// round the physical start address up to the next multiple of 'size'
	aligned_phys =
		(void *)(((uint32)map[0].address + size - 1) & ~(size - 1));

	SHOW_FLOW(3, "aligned_phys=%p", aligned_phys);

	gart->buffer.area = map_physical_memory("Radeon aligned PCI GART buffer",
		aligned_phys,
		size, B_ANY_KERNEL_BLOCK_ADDRESS | B_MTR_WC,
		B_READ_AREA | B_WRITE_AREA, &gart->buffer.ptr);

	if (gart->buffer.area < 0) {
		// fall back to a mapping without write-combining
		SHOW_ERROR0(3, "cannot map buffer with WC");
		gart->buffer.area = map_physical_memory("Radeon aligned PCI GART buffer",
			aligned_phys,
			size, B_ANY_KERNEL_BLOCK_ADDRESS,
			B_READ_AREA | B_WRITE_AREA, &gart->buffer.ptr);
	}

	if (gart->buffer.area < 0) {
		SHOW_ERROR0(1, "cannot map GART buffer");
		delete_area(gart->buffer.unaligned_area);
		gart->buffer.unaligned_area = -1;
		return gart->buffer.area;
	}

	memset(gart->buffer.ptr, 0, size);

	return B_OK;
}

#endif
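
/* Illustration, not driver code: both createGARTBuffer() variants above use
   the usual power-of-two rounding trick to round sizes (and, in the disabled
   variant, a physical address) up to an alignment boundary. ALIGN_UP is our
   own name, shown here only to make the arithmetic explicit:

	// round x up to the next multiple of the power-of-two 'align'
	#define ALIGN_UP(x, align)	(((x) + (align) - 1) & ~((align) - 1))

	// e.g. with B_PAGE_SIZE == 4096: ALIGN_UP(10000, B_PAGE_SIZE) == 12288

   Allocating twice the requested size in the disabled variant guarantees that
   an aligned, contiguous range of 'size' bytes exists somewhere inside the
   unaligned area. */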

// init GATT (could be used for both PCI and AGP)
static status_t initGATT(GART_info *gart)
{
	area_id map_area;
	uint32 map_area_size;
	physical_entry *map;
	physical_entry PTB_map[1];
	size_t map_count;
	uint32 i;
	uint32 *gatt_entry;
	size_t num_pages;

	SHOW_FLOW0(3, "");

	// number of pages the GART buffer spans
	num_pages = (gart->buffer.size + B_PAGE_SIZE - 1) / B_PAGE_SIZE;

	// the GATT itself must be physically contiguous
	gart->GATT.area = create_area("Radeon GATT", (void **)&gart->GATT.ptr,
		B_ANY_KERNEL_ADDRESS,
		(num_pages * sizeof(uint32) + B_PAGE_SIZE - 1) & ~(B_PAGE_SIZE - 1),
		B_CONTIGUOUS,
#ifdef HAIKU_TARGET_PLATFORM_HAIKU
		// TODO: really user read/write?
		B_READ_AREA | B_WRITE_AREA | B_USER_CLONEABLE_AREA
#else
		0
#endif
		);

	if (gart->GATT.area < 0) {
		SHOW_ERROR(1, "cannot create GATT table (%s)",
			strerror(gart->GATT.area));
		return gart->GATT.area;
	}

	get_memory_map(gart->GATT.ptr, B_PAGE_SIZE, PTB_map, 1);
	gart->GATT.phys = (uint32)PTB_map[0].address;

	SHOW_INFO(3, "GATT_ptr=%p, GATT_phys=%p", gart->GATT.ptr,
		(void *)gart->GATT.phys);

	// get the physical address mapping of the GART buffer
	memset(gart->GATT.ptr, 0, num_pages * sizeof(uint32));

	map_count = num_pages + 1;

	// align size to B_PAGE_SIZE
	map_area_size = map_count * sizeof(physical_entry);
	if ((map_area_size / B_PAGE_SIZE) * B_PAGE_SIZE != map_area_size)
		map_area_size = ((map_area_size / B_PAGE_SIZE) + 1) * B_PAGE_SIZE;

	// temporary area where we fill in the memory map (deleted below)
	map_area = create_area("pci_gart_map_area", (void **)&map, B_ANY_ADDRESS,
		map_area_size, B_FULL_LOCK, B_READ_AREA | B_WRITE_AREA);
	dprintf("pci_gart_map_area: %ld\n", map_area);
	if (map_area < 0) {
		SHOW_ERROR(1, "cannot create temporary memory map area (%s)",
			strerror(map_area));
		delete_area(gart->GATT.area);
		gart->GATT.area = -1;
		return map_area;
	}

	get_memory_map(gart->buffer.ptr, gart->buffer.size, map, map_count);

	// the following looks a bit strange because the kernel combines
	// physically successive pages into a single entry
	gatt_entry = gart->GATT.ptr;

	for (i = 0; i < map_count; ++i) {
		uint32 addr = (uint32)map[i].address;
		size_t size = map[i].size;

		if (size == 0)
			break;

		// write one GATT entry per ATI_PCIGART_PAGE_SIZE page of this chunk
		while (size > 0) {
			*gatt_entry++ = addr;
			//SHOW_FLOW(3, "%lx", *(gatt_entry - 1));
			addr += ATI_PCIGART_PAGE_SIZE;
			size -= ATI_PCIGART_PAGE_SIZE;
		}
	}

	delete_area(map_area);

	if (i == map_count) {
		// this case should never happen
		SHOW_ERROR0(0, "memory map of GART buffer too large!");
		delete_area(gart->GATT.area);
		gart->GATT.area = -1;
		return B_ERROR;
	}

	// this might be a bit more than needed, as
	// 1. Intel CPUs have "processor order", i.e. writes appear to external
	//    devices in program order, so a simple final write should be sufficient
	// 2. if it is a PCI GART, bus snooping should provide cache coherence
	// 3. this function is a no-op :(
	clear_caches(gart->GATT.ptr, num_pages * sizeof(uint32),
		B_FLUSH_DCACHE);

	// back to real life - some chipsets have write buffers that
	// prove all previous assumptions wrong
	// (don't know whether this really helps, though)
	asm volatile ("wbinvd" ::: "memory");

	return B_OK;
}
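
/* Illustration, not driver code: the GATT filled in by initGATT() is a flat
   array of 32-bit physical page addresses, one entry per ATI_PCIGART_PAGE_SIZE
   (4 KB) page of the GART buffer. Conceptually, the card resolves a GART
   offset into a physical address like this (gart_to_phys is our own name,
   for illustration only - the real translation is done by the chip):

	static uint32 gart_to_phys(const GART_info *gart, uint32 gart_offset)
	{
		uint32 page_base = gart->GATT.ptr[gart_offset / ATI_PCIGART_PAGE_SIZE];

		return page_base + gart_offset % ATI_PCIGART_PAGE_SIZE;
	}
*/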


// destroy GART buffer
static void destroyGARTBuffer(GART_info *gart)
{
	if (gart->buffer.area > 0)
		delete_area(gart->buffer.area);

	if (gart->buffer.unaligned_area > 0)
		delete_area(gart->buffer.unaligned_area);

	gart->buffer.area = gart->buffer.unaligned_area = -1;
}


// destroy GATT
static void destroyGATT(GART_info *gart)
{
	if (gart->GATT.area > 0)
		delete_area(gart->GATT.area);

	gart->GATT.area = -1;
}


// init PCI GART
status_t Radeon_InitPCIGART(device_info *di)
{
	status_t result;

	result = createGARTBuffer(&di->pci_gart, PCI_GART_SIZE);
	if (result < 0)
		goto err1;

	result = initGATT(&di->pci_gart);
	if (result < 0)
		goto err2;

	return B_OK;

err2:
	destroyGARTBuffer(&di->pci_gart);

err1:
	return result;
}


// cleanup PCI GART
void Radeon_CleanupPCIGART(device_info *di)
{
	vuint8 *regs = di->regs;

	SHOW_FLOW0(3, "");

	// perhaps we should wait for FIFO space before messing around with
	// the registers, but
	// 1. I don't want to add all the sync stuff to the kernel driver
	// 2. I doubt that these registers are buffered by the FIFO
	// still: in the worst case the CP has written some commands to the
	// register FIFO, which could do all kinds of nasty things

	// disable CP bus mastering
	OUTREG(regs, RADEON_CP_CSQ_CNTL, RADEON_CSQ_PRIDIS_INDDIS);
	// read back for flushing
	INREG(regs, RADEON_CP_CSQ_CNTL);

	// disable bus mastering
	OUTREGP(regs, RADEON_BUS_CNTL, RADEON_BUS_MASTER_DIS, ~RADEON_BUS_MASTER_DIS);
	// disable PCI GART translation
	OUTREGP(regs, RADEON_AIC_CNTL, 0, ~RADEON_PCIGART_TRANSLATE_EN);

	destroyGATT(&di->pci_gart);
	destroyGARTBuffer(&di->pci_gart);
}
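
/* Sketch only, not part of this file: the GATT built here takes effect once
   the card is pointed at it, which happens elsewhere in the driver.
   RADEON_AIC_CNTL and RADEON_PCIGART_TRANSLATE_EN are the same names the
   cleanup code above uses; the other register names and 'gart_start' (the bus
   address chosen for the GART window) follow common Radeon conventions and
   are assumptions, shown only to illustrate the idea:

	// tell the card where the GATT lives and which bus-address window
	// is translated through it, then enable translation
	OUTREG(regs, RADEON_AIC_PT_BASE, di->pci_gart.GATT.phys);
	OUTREG(regs, RADEON_AIC_LO_ADDR, gart_start);
	OUTREG(regs, RADEON_AIC_HI_ADDR, gart_start + PCI_GART_SIZE - 1);
	OUTREGP(regs, RADEON_AIC_CNTL, RADEON_PCIGART_TRANSLATE_EN,
		~RADEON_PCIGART_TRANSLATE_EN);
*/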