ioremap in Linux

Purpose: map an I/O (physical) address region into the kernel's virtual address space.

linux-2.6.x/arch/arm/include/asm/io.h

/*
 * ioremap and friends.
 *
 * ioremap takes a PCI memory address, as specified in
 * Documentation/IO-mapping.txt.
 *
 */
#ifndef __arch_ioremap
#define ioremap(cookie,size)            __arm_ioremap(cookie, size, MT_DEVICE)
#define ioremap_nocache(cookie,size)    __arm_ioremap(cookie, size, MT_DEVICE)
#define ioremap_cached(cookie,size)     __arm_ioremap(cookie, size, MT_DEVICE_CACHED)
#define ioremap_wc(cookie,size)         __arm_ioremap(cookie, size, MT_DEVICE_WC)
#define iounmap(cookie)                 __iounmap(cookie)
#else
#define ioremap(cookie,size)            __arch_ioremap((cookie), (size), MT_DEVICE)
#define ioremap_nocache(cookie,size)    __arch_ioremap((cookie), (size), MT_DEVICE)
#define ioremap_cached(cookie,size)     __arch_ioremap((cookie), (size), MT_DEVICE_CACHED)
#define ioremap_wc(cookie,size)         __arch_ioremap((cookie), (size), MT_DEVICE_WC)
#define iounmap(cookie)                 __arch_iounmap(cookie)
#endif
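For context before stepping through the implementation, this is roughly how a driver consumes these macros. The following is only a minimal sketch; the base address, register offset and function names are made up for illustration:

#include <linux/errno.h>
#include <linux/io.h>   /* ioremap(), iounmap(), readl(), writel() */

#define DEMO_PHYS_BASE   0x56000000   /* hypothetical peripheral base address */
#define DEMO_CTRL_REG    0x50         /* hypothetical register offset         */

static void __iomem *demo_regs;

static int demo_probe(void)
{
        /* Map one page of the device's register window, uncached (MT_DEVICE). */
        demo_regs = ioremap(DEMO_PHYS_BASE, 0x1000);
        if (!demo_regs)
                return -ENOMEM;

        /* Always go through the accessors, never dereference the cookie directly. */
        writel(0x1, demo_regs + DEMO_CTRL_REG);
        return 0;
}

static void demo_remove(void)
{
        iounmap(demo_regs);   /* tear the mapping down again */
}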

1. __phys_to_pfn

1.1 Definition: linux-2.6.x/arch/arm/include/asm/memory.h

1.2 Purpose: convert a physical address into a page frame number (PFN). Put simply, it divides the address by 4096, i.e. it tells how many 4096-byte pages precede the given address.

/*
 * Convert a physical address to a Page Frame Number and back
 */
#define __phys_to_pfn(paddr)    ((paddr) >> PAGE_SHIFT)
#define __pfn_to_phys(pfn)      ((pfn) << PAGE_SHIFT)
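As a quick worked example (the address is made up, not from the original post), with PAGE_SHIFT = 12:

unsigned long paddr = 0x56000050;                /* illustrative physical address   */
unsigned long pfn   = __phys_to_pfn(paddr);      /* 0x56000050 >> 12  == 0x56000    */
unsigned long base  = __pfn_to_phys(pfn);        /* 0x56000    << 12  == 0x56000000 */

The PFN identifies a 4096-byte page frame; shifting back only recovers the page-aligned base, and the low 12 bits (0x050 here) are lost, which is exactly why __arm_ioremap carries them separately as "offset".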

2. PAGE_SHIFT: determines the page size

2.1 Macro definitions: linux-2.6.x/arch/arm/include/asm/page.h

/* PAGE_SHIFT determines the page size */
#define PAGE_SHIFT              12
#define PAGE_SIZE               (1UL << PAGE_SHIFT)  // 4096
#define PAGE_MASK               (~(PAGE_SIZE-1))     // 0xFFFFF000, i.e. -4096 as a signed value

From this we get: the page size (PAGE_SIZE) is 4096 bytes, and the page mask (PAGE_MASK) is 0xFFFFF000 (-4096 when viewed as a signed value).
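PAGE_MASK is typically used to round addresses or lengths to page boundaries, which is exactly what PAGE_ALIGN() does inside __arm_ioremap_pfn() below. A small illustration with made-up values:

unsigned long len     = 0x50 + 0x100;                        /* offset + size, illustrative    */
unsigned long rounded = (len + PAGE_SIZE - 1) & PAGE_MASK;   /* 0x150 rounds up to 0x1000      */
unsigned long offset  = 0x56000050 & ~PAGE_MASK;             /* low 12 bits kept: 0x050        */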

linux-2.6.x/arch/arm/mm/ioremap.c

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 *
 * 'flags' are the extra L_PTE_ flags that you want to specify for this
 * mapping.  See <asm/pgtable.h> for more information.
 */

void __iomem *
__arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
                  unsigned int mtype)
{
        const struct mem_type *type;
        int err;
        unsigned long addr;
        struct vm_struct * area;

        /*
         * High mappings must be supersection aligned
         */
        if (pfn >= 0x100000 && (__pfn_to_phys(pfn) & ~SUPERSECTION_MASK))
                return NULL;

        type = get_mem_type(mtype);
        if (!type)
                return NULL;

        /*
         * Page align the mapping size, taking account of any offset.
         */
        size = PAGE_ALIGN(offset + size);

            /*
             * linux-2.6.x/include/linux/mm.h:
             *     PAGE_ALIGN(addr): align the pointer to the (next) page boundary
             *     #define PAGE_ALIGN(addr)   ALIGN(addr, PAGE_SIZE)
             *
             * The ALIGN() used here is the rounding macro from
             * linux-2.6.x/include/linux/kernel.h (not the assembler ALIGN in
             * include/linux/linkage.h); it rounds its argument up to the next
             * multiple of PAGE_SIZE, e.g. PAGE_ALIGN(0x1234) == 0x2000, while
             * PAGE_ALIGN(0x1000) stays 0x1000.
             */

        area = get_vm_area(size, VM_IOREMAP); /* reserve a contiguous kernel virtual area */

            /*
             * get_vm_area (mm/vmalloc.c) - reserve a contiguous kernel virtual area
             * @size:  size of the area
             * @flags: %VM_IOREMAP for I/O mappings or VM_ALLOC
             *
             * Search an area of @size in the kernel virtual mapping area,
             * and reserve it for our purposes.  Returns the area descriptor
             * on success or %NULL on failure.
             *
             * struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
             * {
             *         return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END,
             *                                   -1, GFP_KERNEL, __builtin_return_address(0));
             * }
             */

        if (!area)
                return NULL;
        addr = (unsigned long)area->addr;

#ifndef CONFIG_SMP
        if (DOMAIN_IO == 0 &&
            (((cpu_architecture() >= CPU_ARCH_ARMv6) && (get_cr() & CR_XP)) ||
               cpu_is_xsc3()) && pfn >= 0x100000 &&
               !((__pfn_to_phys(pfn) | size | addr) & ~SUPERSECTION_MASK)) {
                area->flags |= VM_ARM_SECTION_MAPPING;
                err = remap_area_supersections(addr, pfn, size, type);
        } else if (!((__pfn_to_phys(pfn) | size | addr) & ~PMD_MASK)) {
                area->flags |= VM_ARM_SECTION_MAPPING;
                err = remap_area_sections(addr, pfn, size, type);
        } else
#endif
                err = remap_area_pages(addr, pfn, size, type);

        if (err) {
                vunmap((void *)addr);
                return NULL;
        }

        flush_cache_vmap(addr, addr + size);
        return (void __iomem *) (offset + addr);
}
EXPORT_SYMBOL(__arm_ioremap_pfn);
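On non-SMP kernels this function tries to build the mapping with the largest entries the alignment allows: 16 MB supersections on ARMv6+/XScale3 when the physical base, size and virtual address are all supersection aligned, 1 MB sections when all three are aligned to the granularity expressed by PMD_MASK, and ordinary 4 KB page tables otherwise. A stand-alone restatement of that alignment test, as a sketch (the helper name and the granule_mask parameter are made up here):

#include <stdbool.h>

/* Sketch only: granule_mask stands in for SUPERSECTION_MASK or PMD_MASK. */
static bool can_use_large_mapping(unsigned long phys, unsigned long size,
                                  unsigned long virt, unsigned long granule_mask)
{
        /* Physical base, mapping size and virtual address must all be
         * aligned to the granule; any stray low bit forces the fallback
         * to remap_area_pages() and 4 KB entries.                        */
        return ((phys | size | virt) & ~granule_mask) == 0;
}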

void __iomem *
__arm_ioremap(unsigned long phys_addr, size_t size, unsigned int mtype)
{
        unsigned long last_addr;
        unsigned long offset = phys_addr & ~PAGE_MASK; /* phys_addr & 0xFFF: offset within the page */
        unsigned long pfn = __phys_to_pfn(phys_addr);  /* phys_addr >> 12: page frame number */

        /*
         * Don't allow wraparound or zero size
         */
        last_addr = phys_addr + size - 1;
        if (!size || last_addr < phys_addr)
                return NULL;

        return __arm_ioremap_pfn(pfn, offset, size, mtype);
}
EXPORT_SYMBOL(__arm_ioremap);
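The "don't allow wraparound or zero size" guard is easy to see with concrete numbers (made up, and assuming a 32-bit unsigned long as on ARM):

unsigned long phys_addr = 0xFFFFF000UL;           /* illustrative, near the top of 4 GB */
size_t        size      = 0x2000;                 /* 8 KB                               */
unsigned long last_addr = phys_addr + size - 1;   /* wraps around to 0x00000FFF         */

/* last_addr < phys_addr, so __arm_ioremap() returns NULL rather than
 * building a mapping whose end wraps past the top of the address space. */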

Original article: https://www.cnblogs.com/cute/p/2089368.html