Properly detect memory using atags and add it to the memory-management subsystem
@@ -1,5 +1,6 @@
 KERNEL_PREFIX = kernel
 
+KOBJS += $(KERNEL_PREFIX)/atags.o
 KOBJS += $(KERNEL_PREFIX)/console.o
 KOBJS += $(KERNEL_PREFIX)/font.o
 KOBJS += $(KERNEL_PREFIX)/framebuffer.o

kernel/atags.c | 42 | Normal file
@@ -0,0 +1,42 @@
+#include <atags.h>
+
+extern uint32 atags_ptr;
+
+int atag_valid(struct atag *a) {
+	switch (a->tag) {
+		case ATAG_NONE:
+			return a->size == 2;
+		case ATAG_CORE:
+			return a->size == 2 || a->size == 5;
+		case ATAG_MEM:
+		case ATAG_INITRD2:
+		case ATAG_SERIAL:
+			return a->size == 4;
+		case ATAG_VIDEOTEXT:
+		case ATAG_RAMDISK:
+			return a->size == 5;
+		case ATAG_REVISION:
+			return a->size == 3;
+		case ATAG_VIDEOLFB:
+			return a->size == 8;
+		case ATAG_CMDLINE:
+			return a->size >= 3;
+		default:
+			return 0;
+	}
+}
+
+int _atags_mem_region(struct atag **mem_header, int initialize) {
+	if (initialize)
+		*mem_header = (struct atag *)atags_ptr;
+	else
+		*mem_header = (struct atag *)((uint32 *)*mem_header + (*mem_header)->size);
+
+	while (atag_valid(*mem_header) && (*mem_header)->tag != ATAG_NONE && (*mem_header)->tag != ATAG_MEM) {
+		*mem_header = (struct atag *)((uint32 *)*mem_header + (*mem_header)->size);
+	}
+
+	if (!atag_valid(*mem_header) || (*mem_header)->tag == ATAG_NONE)
+		return -1;
+	return 0;
+}
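Note: atag_valid() and _atags_mem_region() rely on a struct atag definition and ATAG_* constants from atags.h, and main() below calls atags_first_mem_region()/atags_next_mem_region(), none of which appear in this diff. The following is a minimal sketch of what such a header could look like; the tag values follow the standard ARM ATAG boot protocol, but the exact layout and the wrapper macros are assumptions, not code from this commit.

/* hypothetical atags.h sketch; layout follows the standard ARM ATAG list,
 * wrapper macros are inferred from how main() calls _atags_mem_region() */
#ifndef ATAGS_H
#define ATAGS_H

#include <types.h>

#define ATAG_NONE      0x00000000
#define ATAG_CORE      0x54410001
#define ATAG_MEM       0x54410002
#define ATAG_VIDEOTEXT 0x54410003
#define ATAG_RAMDISK   0x54410004
#define ATAG_INITRD2   0x54420005
#define ATAG_SERIAL    0x54410006
#define ATAG_REVISION  0x54410007
#define ATAG_VIDEOLFB  0x54410008
#define ATAG_CMDLINE   0x54410009

struct atag_mem {
	uint32 size;   /* region size in bytes */
	uint32 start;  /* physical start address */
};

struct atag {
	uint32 size;   /* tag length in 32-bit words, header included */
	uint32 tag;    /* one of the ATAG_* values above */
	union {
		struct atag_mem mem;
		/* other payloads omitted */
	} data;
};

int atag_valid(struct atag *a);
int _atags_mem_region(struct atag **mem_header, int initialize);

/* first call initializes from atags_ptr, later calls advance to the next ATAG_MEM */
#define atags_first_mem_region(a) _atags_mem_region((a), 1)
#define atags_next_mem_region(a)  _atags_mem_region((a), 0)

#endif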

kernel/mm.c | 15
@@ -32,12 +32,13 @@ void mm_add_free_region(void *start, void *end) {
 	void *page;
 
 	//make sure both start and end address are aligned to the size of a page
-	if ((unsigned int)start & MM_PAGE_SIZE || (unsigned int)(end+1) & MM_PAGE_SIZE) {
-		print("Error: Supplied memory area(%x,%x) is not aligned to the page size (%d)\n", (unsigned int)start, (unsigned int)end, MM_PAGE_SIZE);
-		return;
-	}
-	if ((char *)end - (char *)start < MM_PAGE_SIZE<<1) {
-		print("Error: Supplied memory area(%x,%x) is not aligned to the page size (%d)\n", (unsigned int)start, (unsigned int)end, MM_PAGE_SIZE);
+	if ((unsigned int)start % MM_PAGE_SIZE != 0)
+		start = (char*)start + (MM_PAGE_SIZE - ((unsigned int)start % MM_PAGE_SIZE));
+	if (((unsigned int)end + 1) % MM_PAGE_SIZE != 0)
+		end = (char*)end - ((unsigned int)end + 1) % MM_PAGE_SIZE;
+
+	if ((char *)end + 1 - (char *)start < MM_PAGE_SIZE<<1) {
+		print("Error: Supplied memory area(%x,%x) is smaller than the page size (%d)\n", (unsigned int)start, (unsigned int)end, MM_PAGE_SIZE);
 		return;
 	}
 
@@ -46,7 +47,7 @@ void mm_add_free_region(void *start, void *end) {
 
 	//TODO we're potentially losing memory here because we're calculating
 	//the number of page structs we need even for those pages that will contain only page structs
-	num_pages = ((char *)end - (char *)start) / MM_PAGE_SIZE;
+	num_pages = ((char *)end + 1 - (char *)start) / MM_PAGE_SIZE;
 	usable_pages = num_pages - num_pages * sizeof(struct page) / MM_PAGE_SIZE;
 	if (num_pages * sizeof(struct page) % MM_PAGE_SIZE)
 		usable_pages--;
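The rewritten checks also fix a subtle bug: assuming MM_PAGE_SIZE is a power-of-two page size, the old test (unsigned int)start & MM_PAGE_SIZE examined a single bit rather than the low-order offset bits, so it did not actually detect misalignment. The new code rounds the region inward to whole pages instead of rejecting it. Below is a small standalone sketch of that rounding; the value 4096 for MM_PAGE_SIZE is assumed purely for illustration, since it is not shown in this diff.

/* standalone sketch of the rounding now done in mm_add_free_region();
 * MM_PAGE_SIZE is assumed to be 4096 here purely for illustration */
#define MM_PAGE_SIZE 4096

static void clip_to_pages(char **start, char **end) {
	unsigned int s = (unsigned int)*start;
	unsigned int e = (unsigned int)*end;

	if (s % MM_PAGE_SIZE != 0)
		s += MM_PAGE_SIZE - (s % MM_PAGE_SIZE);   /* round start up to the next page boundary */
	if ((e + 1) % MM_PAGE_SIZE != 0)
		e -= (e + 1) % MM_PAGE_SIZE;              /* round end down so end+1 is page aligned */

	*start = (char *)s;
	*end = (char *)e;
}

/* e.g. clip_to_pages() turns [0x60000123, 0x700001FF] into [0x60001000, 0x6FFFFFFF] */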

kernel/mmu.c | 64
@@ -1,4 +1,6 @@
 #include <print.h>
+#include <types.h>
+#include <mm.h>
 
 #define SCTLR 15,0,1,0,0
 #define TTBR0 15,0,2,0,0
@@ -11,23 +13,29 @@
 #define cp_write(var, ...)  _cp_write(var, __VA_ARGS__)
 
 #define TT_BASE_SIZE (1<<14) /* 16k */
+#define TT_SECTION_SIZE (1<<20) /* 1mb */
 
-unsigned int *kernel_start_phys, *kernel_start_virt, *kernel_end_phys, *kernel_end_virt;
+uint32 *kernel_start_phys, *kernel_start_virt, *kernel_end_phys, *kernel_end_virt;
 
+void print_mapping(void *addr) {
+	extern uint32 tt_base_virtual;
+	print("%x: %x\n", addr, *(uint32 *)(tt_base_virtual + (((uint32)addr)>>18)));
+}
+
 void mmu_reinit() {
-	extern unsigned int tt_base_virtual, tt_base_physical, start;
-	unsigned int curr_addr;
-	unsigned int *curr_tt_entry;
+	extern uint32 tt_base_virtual, tt_base_physical, start;
+	uint32 curr_addr;
+	uint32 *curr_tt_entry;
 	int virt_phys_offset;
 
 	virt_phys_offset = tt_base_virtual - tt_base_physical;
 	kernel_start_virt = &start;
 	kernel_start_phys = kernel_start_virt - virt_phys_offset/4;
-	kernel_end_virt = (unsigned int *)(tt_base_virtual + TT_BASE_SIZE);
-	kernel_end_phys = (unsigned int *)(tt_base_physical + TT_BASE_SIZE);
+	kernel_end_virt = (uint32 *)(tt_base_virtual + TT_BASE_SIZE);
+	kernel_end_phys = (uint32 *)(tt_base_physical + TT_BASE_SIZE);
 
 	//get the current translation table base address
-	curr_tt_entry = (unsigned int *)tt_base_virtual;
+	curr_tt_entry = (uint32 *)tt_base_virtual;
 
 	//do first loop iteration outside the loop, because we have to check against wrapping back around to know we're done
 	*curr_tt_entry = 0xc02; /* 0xc02 means read/write at any privilege level, and that it's a section w/o PXN bit set */
@@ -38,9 +46,9 @@ void mmu_reinit() {
 	//memory, make sure we keep those mappings correct, and we'll actually
 	//swap the two mappings so all of memory is addressable.
 	for (curr_addr = 0x00100000; curr_addr != 0; curr_addr += 0x00100000) {
-		if ((unsigned int *)curr_addr >= kernel_start_phys && (unsigned int *)curr_addr < kernel_end_phys) {
+		if ((uint32 *)curr_addr >= kernel_start_phys && (uint32 *)curr_addr < kernel_end_phys) {
 			*curr_tt_entry = (curr_addr + virt_phys_offset) | 0xc02;
-		} else if ((unsigned int *)curr_addr >= kernel_start_virt && (unsigned int *)curr_addr < kernel_end_virt) {
+		} else if ((uint32 *)curr_addr >= kernel_start_virt && (uint32 *)curr_addr < kernel_end_virt) {
 			*curr_tt_entry = (curr_addr - virt_phys_offset) | 0xc02;
 		} else {
 			*curr_tt_entry = curr_addr | 0xc02;
@@ -48,3 +56,41 @@ void mmu_reinit() {
 		curr_tt_entry++;
 	}
 }
+
+int mmu_region_contains(void *lower_a, void *upper_a, void *lower_b, void *upper_b) {
+	return lower_b >= lower_a && upper_b <= upper_a;
+}
+
+#define section_round_down(ptr) (((uint32)ptr) & ~(TT_SECTION_SIZE-1))
+#define section_round_up(ptr) (((((uint32)ptr) & ~1) + (TT_SECTION_SIZE-1) ) & ~(TT_SECTION_SIZE-1))
+
+/* Called once per physical memory region by bootup code. This function is
+ * responsible for only adding (via mm_add_free_region()) those parts of the
+ * memory region which are still available (i.e. aren't in the kernel and
+ * haven't been remapped anywhere else). */
+void declare_memory_region(void *lower, void *upper) {
+	void *k_section_start_phys = (void *)section_round_down(kernel_start_phys);
+	void *k_section_end_phys = (void *)(section_round_up(kernel_end_phys) - 1);
+	void *k_section_start_virt = (void *)section_round_down(kernel_start_virt);
+	void *k_section_end_virt = (void *)(section_round_up(kernel_end_virt) - 1);
+
+	if (upper - lower < 1) {
+		print("Warning: declare_memory_region() called with lower=%x, upper=%x. Ignoring.\n", lower, upper);
+		return;
+	}
+
+	//TODO It's possible (though highly unlikely) that the kernel (virtual)
+	//is split across two different memory regions. We should probably
+	//handle this.
+	if (mmu_region_contains(lower, upper, k_section_start_phys, k_section_end_phys)) {
+		//Don't map any of the physical kernel's memory
+		declare_memory_region(lower, (void *) ((char *)k_section_start_phys - 1));
+		declare_memory_region((void *) ((char *)k_section_end_phys + 1), upper);
+		mm_add_free_region(kernel_end_virt, k_section_end_virt);
+	} else if (mmu_region_contains(lower, upper, k_section_start_virt, k_section_end_virt)) {
+		declare_memory_region(lower, (void *) ((char *)k_section_start_virt - 1));
+		declare_memory_region((void *) ((char *)k_section_end_virt + 1), upper);
+	} else {
+		mm_add_free_region(lower, upper);
+	}
+}
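To make the new declare_memory_region() logic easier to follow: the kernel's start/end pointers are first widened to whole 1 MB translation-table sections with section_round_down()/section_round_up(), and any declared region that contains those sections is split around them recursively, so only the pieces outside the kernel (plus, in the physical-kernel branch, the unused tail of the kernel's last virtual section) reach mm_add_free_region(). A small sketch of how the rounding macros behave; the address is made up for illustration, and uint32 is assumed to be the 32-bit type from types.h.

/* sketch of the 1 MB section rounding used by declare_memory_region();
 * the macros mirror the ones defined in kernel/mmu.c above */
typedef unsigned int uint32; /* assumption: types.h defines uint32 as 32-bit unsigned */

#define TT_SECTION_SIZE (1<<20)
#define section_round_down(ptr) (((uint32)ptr) & ~(TT_SECTION_SIZE-1))
#define section_round_up(ptr) (((((uint32)ptr) & ~1) + (TT_SECTION_SIZE-1)) & ~(TT_SECTION_SIZE-1))

void rounding_example(void) {
	void *p = (void *)0x80012345;        /* arbitrary address inside a 1 MB section */
	uint32 lo = section_round_down(p);   /* 0x80000000 */
	uint32 hi = section_round_up(p);     /* 0x80100000, the next section boundary */
	(void)lo; (void)hi;
}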
@@ -1,3 +1,4 @@
+#include <atags.h>
 #include <mmu.h>
 #include <mm.h>
 #include <print.h>
@@ -52,7 +53,8 @@ void test_memory() {
 }
 
 int main(void) {
-	char *lower;
+	char *lower, *upper;
+	struct atag *atags;
 
 	//setup MMU
 	mmu_reinit();
@@ -61,12 +63,17 @@ int main(void) {
 
 	//setup memory
 	mm_init();
-	mm_add_free_region((void*)0x60000000, (void*)0x7FFFFFFF);
-	mm_add_free_region((void*)0x80000000, (void*)0x800FFFFF);
-	lower = (char*) &kernel_end_virt;
-	if ((unsigned int)lower % MM_PAGE_SIZE != 0)
-		lower += (MM_PAGE_SIZE - ((unsigned int)lower % MM_PAGE_SIZE));
-	mm_add_free_region((void*)lower, (void*)0x9FFFFFFF); //subtract the memory used by the kernel
+
+	if (atags_first_mem_region(&atags)) {
+		print("Error: atags must contain at least one memory region\n");
+		return -1;
+	}
+
+	do {
+		lower = (char *)atags->data.mem.start;
+		upper = lower + atags->data.mem.size - 1;
+		declare_memory_region(lower, upper);
+	} while (!atags_next_mem_region(&atags));
 
 	test_memory();
 
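For reference, the do/while loop above expects atags_ptr (set by the boot code) to point at a bootloader-provided tag list. The following is a hypothetical layout with two memory banks that roughly mirror the regions that used to be hard-coded; it uses the struct layout and ATAG_* constants assumed in the atags.h sketch earlier, and the real list comes from the bootloader, not the kernel.

/* hypothetical ATAG list, expressed as raw words: each tag is
 * { size in words, tag id, payload... }, terminated by ATAG_NONE */
static const uint32 example_atag_list[] = {
	5, ATAG_CORE, 0, 0, 0,                  /* core tag: flags, pagesize, rootdev */
	4, ATAG_MEM,  0x20000000, 0x60000000,   /* bank 0: 512 MB at 0x60000000 */
	4, ATAG_MEM,  0x00100000, 0x80000000,   /* bank 1: 1 MB at 0x80000000 */
	2, ATAG_NONE,                           /* terminator (atag_valid() above expects size 2 here) */
};
/* with atags_ptr pointing at example_atag_list, main() would call
 * declare_memory_region() once for each of the two ATAG_MEM entries */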