blob: 010f3f69fe43139f5b1cfc694f3c717c07f5a082 [file] [log] [blame]
Martin Roth9b1b3352016-02-24 12:27:06 -08001/*
2 * MemTest86+ V5 Specific code (GPL V2.0)
3 * By Samuel DEMEULEMEESTER, sdemeule@memtest.org
4 * http://www.canardpc.com - http://www.memtest.org
5 * ------------------------------------------------
6 * main.c - MemTest-86 Version 3.5
7 *
8 * Released under version 2 of the Gnu Public License.
9 * By Chris Brady
10 */
Martin Roth4dcd13d2016-02-24 13:53:07 -080011
Martin Roth9b1b3352016-02-24 12:27:06 -080012#include "stdint.h"
13#include "stddef.h"
14#include "test.h"
15#include "defs.h"
16#include "cpuid.h"
17#include "smp.h"
18#include "config.h"
Martin Roth8cc1aeb2016-02-24 13:03:52 -080019#include "multiboot.h"
Martin Roth9b1b3352016-02-24 12:27:06 -080020#undef TEST_TIMES
21#define DEFTESTS 9
22#define FIRST_DIVISER 3
23
24/* The main stack is allocated during boot time. The stack size should
25 * preferably be a multiple of page size(4Kbytes)
Ben Gardner90f7d112016-03-15 15:25:22 -050026 */
Martin Roth9b1b3352016-02-24 12:27:06 -080027
28extern struct cpu_ident cpu_id;
29extern char toupper(char c);
30extern int isxdigit(char c);
31extern void reboot();
32extern void bzero();
33extern void smp_set_ordinal(int me, int ord);
34extern int smp_my_ord_num(int me);
35extern int smp_ord_to_cpu(int me);
36extern void get_cpuid();
37extern void initialise_cpus();
38extern ulong rand(int cpu);
39extern void get_mem_speed(int cpu, int ncpus);
40extern void rand_seed(unsigned int seed1, unsigned int seed2, int cpu);
41extern struct barrier_s *barr;
42extern int num_cpus;
43extern int act_cpus;
44
Martin Roth8cc1aeb2016-02-24 13:03:52 -080045extern struct multiboot_info *mbiptr;
46
Martin Roth9b1b3352016-02-24 12:27:06 -080047static int find_ticks_for_test(int test);
48void find_ticks_for_pass(void);
49int find_chunks(int test);
50static void test_setup(void);
51static int compute_segments(struct pmap map, int cpu);
52int do_test(int ord);
53struct tseq tseq[] = {
Ben Gardner90f7d112016-03-15 15:25:22 -050054 { 1, -1, 0, 6, 0, "[Address test, walking ones, no cache] " },
55 { 1, -1, 1, 6, 0, "[Address test, own address Sequential] " },
56 { 1, 32, 2, 6, 0, "[Address test, own address Parallel] " },
57 { 1, 32, 3, 6, 0, "[Moving inversions, 1s & 0s Parallel] " },
58 { 1, 32, 5, 3, 0, "[Moving inversions, 8 bit pattern] " },
59 { 1, 32, 6, 30, 0, "[Moving inversions, random pattern] " },
60 { 1, 32, 7, 81, 0, "[Block move] " },
61 { 1, 1, 8, 3, 0, "[Moving inversions, 32 bit pattern] " },
62 { 1, 32, 9, 48, 0, "[Random number sequence] " },
63 { 1, 32, 10, 6, 0, "[Modulo 20, Random pattern] " },
64 { 1, 1, 11, 240, 0, "[Bit fade test, 2 patterns] " },
65 { 1, 0, 0, 0, 0, NULL }
Martin Roth9b1b3352016-02-24 12:27:06 -080066};
67
68volatile int mstr_cpu;
69volatile int run_cpus;
70volatile int cpu_ord=0;
71int maxcpus=MAX_CPUS;
72volatile short cpu_sel;
73volatile short cpu_mode;
74char cpu_mask[MAX_CPUS];
75long bin_mask=0xffffffff;
76short onepass;
77volatile short btflag = 0;
78volatile int test;
79short restart_flag;
80bool reloc_pending = FALSE;
81uint8_t volatile stacks[MAX_CPUS][STACKSIZE];
82int bitf_seq = 0;
83char cmdline_parsed = 0;
84struct vars variables = {};
85struct vars * const v = &variables;
86volatile int bail;
87int nticks;
88int test_ticks;
89volatile int segs;
90static int ltest;
91static int pass_flag = 0;
92volatile short start_seq = 0;
93static int c_iter;
94ulong high_test_adr;
95volatile static int window;
96volatile static unsigned long win_next;
97volatile static ulong win0_start; /* Start test address for window 0 */
98volatile static ulong win1_end; /* End address for relocation */
99volatile static struct pmap winx; /* Window struct for mapping windows */
100
101/* Find the next selected test to run */
Martin Roth69593432016-03-27 19:46:03 -0600102void next_test(void)
Martin Roth9b1b3352016-02-24 12:27:06 -0800103{
104 test++;
105 while (tseq[test].sel == 0 && tseq[test].cpu_sel != 0) {
Ben Gardner90f7d112016-03-15 15:25:22 -0500106 test++;
Martin Roth9b1b3352016-02-24 12:27:06 -0800107 }
108
109 if (tseq[test].cpu_sel == 0) {
Ben Gardner90f7d112016-03-15 15:25:22 -0500110 /* We hit the end of the list so we completed a pass */
111 pass_flag++;
112 /* Find the next test to run, start searching from 0 */
113 test = 0;
114 while (tseq[test].sel == 0 && tseq[test].cpu_sel != 0) {
115 test++;
116 }
Martin Roth9b1b3352016-02-24 12:27:06 -0800117 }
118}
119
120/* Set default values for all parameters */
Martin Roth69593432016-03-27 19:46:03 -0600121void set_defaults(void)
Martin Roth9b1b3352016-02-24 12:27:06 -0800122{
123 int i;
124
125 if (start_seq == 2) {
126 /* This is a restart so we reset everything */
127 onepass = 0;
128 i = 0;
129 while (tseq[i].cpu_sel) {
130 tseq[i].sel = 1;
131 i++;
132 }
133 test = 0;
134 if (tseq[0].sel == 0) {
135 next_test();
136 }
137 }
138 ltest = -1;
139 win_next = 0;
140 window = 0;
141 bail = 0;
142 cpu_mode = CPM_ALL;
143 cpu_sel = 0;
144 v->printmode=PRINTMODE_ADDRESSES;
145 v->numpatn=0;
146 v->plim_lower = 0;
147 v->plim_upper = v->pmap[v->msegs-1].end;
148 v->pass = 0;
149 v->msg_line = 0;
150 v->ecount = 0;
151 v->ecc_ecount = 0;
152 v->msg_line = LINE_SCROLL-1;
153 v->scroll_start = v->msg_line * 160;
154 v->erri.low_addr.page = 0x7fffffff;
155 v->erri.low_addr.offset = 0xfff;
156 v->erri.high_addr.page = 0;
157 v->erri.high_addr.offset = 0;
158 v->erri.min_bits = 32;
159 v->erri.max_bits = 0;
160 v->erri.min_bits = 32;
161 v->erri.max_bits = 0;
162 v->erri.maxl = 0;
163 v->erri.cor_err = 0;
164 v->erri.ebits = 0;
165 v->erri.hdr_flag = 0;
166 v->erri.tbits = 0;
167 for (i=0; tseq[i].msg != NULL; i++) {
168 tseq[i].errors = 0;
169 }
170 restart_flag = 0;
171 tseq[10].sel = 0;
172}
173
/* Boot trace function */
/* Index of the most recently used trace slot. 26 slots (0..25) are
 * displayed as two columns of 13 rows, 40 characters apart. */
short tidx = 25;
/* Print one boot-trace entry on screen: the calling CPU number, the
 * source line of the trace point, a short message and two values in
 * hex. Output is serialized with the barrier mutex so that entries
 * from concurrent CPUs do not interleave.
 * me   - CPU number of the caller
 * line - source line number (__LINE__) of the trace point
 * msg  - short message text
 * wait - if nonzero, wait for a key release before returning
 * v1, v2 - parameter values displayed in hex */
void btrace(int me, int line, char *msg, int wait, long v1, long v2)
{
	int y, x;

	/* Is tracing turned on? */
	if (btflag == 0) return;

	spin_lock(&barr->mutex);
	/* Erase the '>' marker from the previous slot */
	y = tidx%13;
	x = tidx/13*40;
	cplace(y+11, x+1, ' ');
	/* Advance to the next slot, wrapping after slot 25 */
	if (++tidx > 25) {
		tidx = 0;
	}
	y = tidx%13;
	x = tidx/13*40;

	/* Mark the current slot and print its fields */
	cplace(y+11, x+1, '>');
	dprint(y+11, x+2, me, 2, 0);
	dprint(y+11, x+5, line, 4, 0);
	cprint(y+11, x+10, msg);
	hprint(y+11, x+22, v1);
	hprint(y+11, x+31, v2);
	if (wait) {
		wait_keyup();
	}
	spin_unlock(&barr->mutex);
}
204
/* Relocate the test to a new address. Be careful to not overlap!
 * Copies the whole image (_start.._end) to addr (CPU 0 only), then
 * every CPU jumps to the copied entry point. This call does not
 * return; the mutex taken below is released by the relocated code
 * in test_start() (see the "Unlock after a relocation" branch). */
static void run_at(unsigned long addr, int cpu)
{
	/* Entry point inside the copy: same offset from addr as
	 * startup_32 has from _start in the running image */
	ulong *ja = (ulong *)(addr + startup_32 - _start);

	/* CPU 0, Copy memtest86+ code */
	if (cpu == 0) {
		memmove((void *)addr, &_start, _end - _start);
	}

	/* Wait for the copy */
	barrier();

	/* We use a lock to insure that only one CPU at a time jumps to
	 * the new code. Some of the startup stuff is not thread safe! */
	spin_lock(&barr->mutex);

	/* Jump to the start address */
	/* Rewrite `goto *ja;` for Clang. */
	typedef void fn(void);
	fn *volatile fp = (fn*)ja;
	fp();
}
228
/* Switch from the boot stack to the main stack. First the main stack
 * is allocated, then the contents of the boot stack are copied, then
 * ESP is adjusted to point to the new stack.
 */
static void
switch_to_main_stack(unsigned cpu_num)
{
	extern uintptr_t boot_stack;
	extern uintptr_t boot_stack_top;
	uintptr_t *src, *dst;
	int offs;
	uint8_t * stackAddr, *stackTop;

	/* Each CPU has its own statically allocated stack */
	stackAddr = (uint8_t *) &stacks[cpu_num][0];

	stackTop = stackAddr + STACKSIZE;

	/* Copy the in-use part of the boot stack, top down */
	src = (uintptr_t*)&boot_stack_top;
	dst = (uintptr_t*)stackTop;
	do {
		src--; dst--;
		*dst = *src;
	} while ((uintptr_t *)src > (uintptr_t *)&boot_stack);

	/* Move ESP down by the distance between the two stack tops so
	 * it points at the same relative position in the new stack */
	offs = (uint8_t *)&boot_stack_top - stackTop;
	__asm__ __volatile__ (
		"subl %%eax, %%esp"
		: /*no output*/
		: "a" (offs) : "memory"
	);
}
260
261void reloc_internal(int cpu)
262{
263 /* clear variables */
Ben Gardner90f7d112016-03-15 15:25:22 -0500264 reloc_pending = FALSE;
Martin Roth9b1b3352016-02-24 12:27:06 -0800265
266 run_at(LOW_TEST_ADR, cpu);
267}
268
269void reloc(void)
270{
271 bail++;
Ben Gardner90f7d112016-03-15 15:25:22 -0500272 reloc_pending = TRUE;
Martin Roth9b1b3352016-02-24 12:27:06 -0800273}
274
275/* command line passing using the 'old' boot protocol */
Ben Gardner90f7d112016-03-15 15:25:22 -0500276#define MK_PTR(seg, off) ((void*)(((unsigned long)(seg) << 4) + (off)))
277#define OLD_CL_MAGIC_ADDR ((unsigned short*) MK_PTR(INITSEG, 0x20))
Martin Roth4dcd13d2016-02-24 13:53:07 -0800278#define OLD_CL_MAGIC 0xA33F
Ben Gardner90f7d112016-03-15 15:25:22 -0500279#define OLD_CL_OFFSET_ADDR ((unsigned short*) MK_PTR(INITSEG, 0x22))
Martin Roth9b1b3352016-02-24 12:27:06 -0800280
281static void parse_command_line(void)
282{
283 long simple_strtoul(char *cmd, char *ptr, int base);
284 char *cp, dummy;
285 int i, j, k;
286
287 if (cmdline_parsed)
288 return;
289
290 /* Fill in the cpu mask array with the default */
291 for (i=0; i<MAX_CPUS; i++) {
292 cpu_mask[i] = 1;
293 }
294
Martin Roth8cc1aeb2016-02-24 13:03:52 -0800295 if (mbiptr && (mbiptr->flags & MULTIBOOT_INFO_CMDLINE)) {
296 cp = (void *) mbiptr->cmdline;
297 } else {
298 if (*OLD_CL_MAGIC_ADDR != OLD_CL_MAGIC)
299 return;
Martin Roth9b1b3352016-02-24 12:27:06 -0800300
Martin Roth8cc1aeb2016-02-24 13:03:52 -0800301 unsigned short offset = *OLD_CL_OFFSET_ADDR;
302 cp = MK_PTR(INITSEG, offset);
303 }
Martin Roth9b1b3352016-02-24 12:27:06 -0800304
305 /* skip leading spaces */
306 while (*cp == ' ')
307 cp++;
308
309 while (*cp) {
310 if (!strncmp(cp, "console=", 8)) {
311 cp += 8;
312 serial_console_setup(cp);
313 }
314 /* Enable boot trace? */
315 if (!strncmp(cp, "btrace", 6)) {
316 cp += 6;
317 btflag++;
318 }
319 /* Limit number of CPUs */
320 if (!strncmp(cp, "maxcpus=", 8)) {
321 cp += 8;
322 maxcpus=(int)simple_strtoul(cp, &dummy, 10);
323 }
324 /* Run one pass and exit if there are no errors */
325 if (!strncmp(cp, "onepass", 7)) {
326 cp += 7;
327 onepass++;
328 }
329 /* Setup a list of tests to run */
330 if (!strncmp(cp, "tstlist=", 8)) {
331 cp += 8;
332 /* Clear all of the tests first */
333 k = 0;
334 while (tseq[k].cpu_sel) {
335 tseq[k].sel = 0;
336 k++;
337 }
338
339 /* Now enable all of the tests in the list */
340 j = 0;
Ben Gardner90f7d112016-03-15 15:25:22 -0500341 while (*cp && isdigit(*cp)) {
342 i = *cp-'0';
343 j = j*10 + i;
344 cp++;
345 if (*cp == ',' || !isdigit(*cp)) {
346 if (j < k) {
347 tseq[j].sel = 1;
348 }
349 if (*cp != ',') break;
350 j = 0;
351 cp++;
Martin Roth9b1b3352016-02-24 12:27:06 -0800352 }
Martin Roth9b1b3352016-02-24 12:27:06 -0800353 }
354 }
355 /* Set a CPU mask to select CPU's to use for testing */
356 if (!strncmp(cp, "cpumask=", 8)) {
Ben Gardner90f7d112016-03-15 15:25:22 -0500357 cp += 8;
358 if (cp[0] == '0' && toupper(cp[1]) == 'X') cp += 2;
359 while (*cp && *cp != ' ' && isxdigit(*cp)) {
360 i = isdigit(*cp) ? *cp-'0' : toupper(*cp)-'A'+10;
361 bin_mask = bin_mask * 16 + i;
362 cp++;
Martin Roth9b1b3352016-02-24 12:27:06 -0800363 }
Ben Gardner90f7d112016-03-15 15:25:22 -0500364 /* Force CPU zero to always be selected */
365 bin_mask |= 1;
366 for (i=0; i<32; i++) {
367 if (((bin_mask>>i) & 1) == 0) {
368 cpu_mask[i] = 0;
369 }
370 }
Martin Roth9b1b3352016-02-24 12:27:06 -0800371 }
372 /* go to the next parameter */
373 while (*cp && *cp != ' ') cp++;
374 while (*cp == ' ') cp++;
375 }
376
377 cmdline_parsed = 1;
378}
379
Martin Roth69593432016-03-27 19:46:03 -0600380void clear_screen(void)
Martin Roth9b1b3352016-02-24 12:27:06 -0800381{
382 int i;
383 char *pp;
384
385 /* Clear screen & set background to blue */
Ben Gardner90f7d112016-03-15 15:25:22 -0500386 for (i=0, pp=(char *)(SCREEN_ADR); i<80*25; i++) {
Martin Roth9b1b3352016-02-24 12:27:06 -0800387 *pp++ = ' ';
388 *pp++ = 0x17;
389 }
390 if (btflag) {
Ben Gardner90f7d112016-03-15 15:25:22 -0500391 cprint(1, 0, "Boot Trace Enabled");
392 cprint(1, 0, "Press any key to advance to next trace point");
393 cprint(9, 1, "CPU Line Message Param #1 Param #2 CPU Line Message Param #1 Param #2");
394 cprint(10, 1, "--- ---- ----------- -------- -------- --- ---- ----------- -------- --------");
Martin Roth9b1b3352016-02-24 12:27:06 -0800395 }
Martin Roth9b1b3352016-02-24 12:27:06 -0800396}
Ben Gardner90f7d112016-03-15 15:25:22 -0500397
/* This is the test entry point. We get here on statup and also whenever
 * we relocate.
 *
 * Executed by every CPU. On first entry the boot CPU (start_seq == 0)
 * does the one-time initialization (command line, screen, memory
 * sizing, CPUID, defaults); APs register themselves and receive a
 * sequential ordinal. All CPUs then enable FPU/SSE/PAE/long mode as
 * available, and enter the endless scheduling loop that runs every
 * selected test over every memory window, relocating the test image
 * between low and high addresses so all memory can be covered. */
void test_start(void)
{
	int my_cpu_num, my_cpu_ord, run;

	/* If this is the first time here we are CPU 0 */
	if (start_seq == 0) {
		my_cpu_num = 0;
		/* Make sure we start out executing at the low address */
		if ((ulong)&_start != LOW_TEST_ADR) {
			run_at(LOW_TEST_ADR, 0);
		}
	} else {
		my_cpu_num = smp_my_cpu_num();
	}
	/* First thing, switch to main stack */
	switch_to_main_stack(my_cpu_num);

	/* First time (for this CPU) initialization */
	if (start_seq < 2) {
		/* These steps are only done by the boot cpu */
		if (my_cpu_num == 0) {
			my_cpu_ord = cpu_ord++;
			smp_set_ordinal(my_cpu_num, my_cpu_ord);
			parse_command_line();
			clear_screen();
			/* Initialize the barrier so the lock in btrace will work.
			 * Will get redone later when we know how many CPUs we have */
			barrier_init(1);
			btrace(my_cpu_num, __LINE__, "Begin ", 1, 0, 0);
			/* Find memory size */
			mem_size();	/* must be called before initialise_cpus(); */
			/* Fill in the CPUID table */
			get_cpuid();
			/* Startup the other CPUs */
			start_seq = 1;
			//initialise_cpus();
			btrace(my_cpu_num, __LINE__, "BeforeInit", 1, 0, 0);
			/* Draw the screen and get system information */
			init();

			/* Set defaults and initialize variables */
			set_defaults();

			/* Setup base address for testing, 1 MB */
			win0_start = 0x100;

			/* Set relocation address to 32Mb if there is enough
			 * memory. Otherwise set it to 3Mb */
			/* Large reloc addr allows for more testing overlap */
			if ((ulong)v->pmap[v->msegs-1].end > 0x2f00) {
				high_test_adr = 0x2000000;
			} else {
				high_test_adr = 0x300000;
			}
			win1_end = (high_test_adr >> 12);

			/* Adjust the map to not test the page at 939k,
			 * reserved for locks */
			v->pmap[0].end--;

			find_ticks_for_pass();
		} else {
			/* APs only, Register the APs */
			btrace(my_cpu_num, __LINE__, "AP_Start ", 0, my_cpu_num,
			       cpu_ord);
			smp_ap_booted(my_cpu_num);
			/* Asign a sequential CPU ordinal to each active cpu */
			spin_lock(&barr->mutex);
			my_cpu_ord = cpu_ord++;
			smp_set_ordinal(my_cpu_num, my_cpu_ord);
			spin_unlock(&barr->mutex);
			btrace(my_cpu_num, __LINE__, "AP_Done ", 0, my_cpu_num,
			       my_cpu_ord);
		}
	} else {
		/* Unlock after a relocation (the mutex was taken by
		 * run_at() before jumping here) */
		spin_unlock(&barr->mutex);
		/* Get the CPU ordinal since it is lost during relocation */
		my_cpu_ord = smp_my_ord_num(my_cpu_num);
		btrace(my_cpu_num, __LINE__, "Reloc_Done", 0, my_cpu_num, my_cpu_ord);
	}

	/* A barrier to insure that all of the CPUs are done with startup */
	barrier();
	btrace(my_cpu_num, __LINE__, "1st Barr ", 1, my_cpu_num, my_cpu_ord);


	/* Setup Memory Management and measure memory speed, we do it here
	 * because we need all of the available CPUs */
	if (start_seq < 2) {
		/* Enable floating point processing */
		/* NOTE(review): keeps only the low three CR0 bits -
		 * clears CD/NW/PG etc.; confirm this matches the intended
		 * FPU enable sequence */
		if (cpu_id.fid.bits.fpu)
			__asm__ __volatile__ (
				"movl %%cr0, %%eax\n\t"
				"andl $0x7, %%eax\n\t"
				"movl %%eax, %%cr0\n\t"
				: :
				: "ax"
			);
		/* CR4 bit 9 (OSFXSR) enables SSE instructions */
		if (cpu_id.fid.bits.sse)
			__asm__ __volatile__ (
				"movl %%cr4, %%eax\n\t"
				"orl $0x00000200, %%eax\n\t"
				"movl %%eax, %%cr4\n\t"
				: :
				: "ax"
			);

		btrace(my_cpu_num, __LINE__, "Mem Mgmnt ", 1, cpu_id.fid.bits.pae, cpu_id.fid.bits.lm);
		/* Setup memory management modes */
		/* If we have PAE, turn it on (CR4 bit 5) */
		if (cpu_id.fid.bits.pae == 1) {
			__asm__ __volatile__ (
				"movl %%cr4, %%eax\n\t"
				"orl $0x00000020, %%eax\n\t"
				"movl %%eax, %%cr4\n\t"
				: :
				: "ax"
			);
			cprint(LINE_TITLE+1, COL_MODE, "(PAE Mode)");
		}
		/* If this is a 64 CPU enable long mode
		 * (set LME in the 0xc0000080 EFER MSR) */
		if (cpu_id.fid.bits.lm == 1) {
			__asm__ __volatile__ (
				"movl $0xc0000080, %%ecx\n\t"
				"rdmsr\n\t"
				"orl $0x00000100, %%eax\n\t"
				"wrmsr\n\t"
				: :
				: "ax", "cx"
			);
			cprint(LINE_TITLE+1, COL_MODE, "(X64 Mode)");
		}
		/* Get the memory Speed with all CPUs */
		get_mem_speed(my_cpu_num, num_cpus);
	}

	/* Set the initialized flag only after all of the CPU's have
	 * Reached the barrier. This insures that relocation has
	 * been completed for each CPU. */
	btrace(my_cpu_num, __LINE__, "Start Done", 1, 0, 0);
	start_seq = 2;

	/* Loop through all tests */
	while (1) {
		/* If the restart flag is set all initial params */
		if (restart_flag) {
			set_defaults();
			continue;
		}
		/* Skip single CPU tests if we are using only one CPU */
		if (tseq[test].cpu_sel == -1 &&
		    (num_cpus == 1 || cpu_mode != CPM_ALL)) {
			test++;
			continue;
		}

		test_setup();

		/* Loop through all possible windows */
		while (win_next <= ((ulong)v->pmap[v->msegs-1].end + WIN_SZ)) {
			/* Main scheduling barrier */
			cprint(8, my_cpu_num+7, "W");
			btrace(my_cpu_num, __LINE__, "Sched_Barr", 1, window, win_next);
			barrier();

			/* Don't go over the 8TB PAE limit */
			if (win_next > MAX_MEM) {
				break;
			}

			/* For the bit fade test, #11, we cannot relocate so bump the
			 * window to 1 */
			if (tseq[test].pat == 11 && window == 0) {
				window = 1;
			}

			/* Relocate if required: window 0 is tested from the
			 * high copy, all other windows from the low copy */
			if (window != 0 && (ulong)&_start != LOW_TEST_ADR) {
				btrace(my_cpu_num, __LINE__, "Sched_RelL", 1, 0, 0);
				run_at(LOW_TEST_ADR, my_cpu_num);
			}
			if (window == 0 && v->plim_lower >= win0_start) {
				window++;
			}
			if (window == 0 && (ulong)&_start == LOW_TEST_ADR) {
				btrace(my_cpu_num, __LINE__, "Sched_RelH", 1, 0, 0);
				run_at(high_test_adr, my_cpu_num);
			}

			/* Decide which CPU(s) to use */
			btrace(my_cpu_num, __LINE__, "Sched_CPU0", 1, cpu_sel,
			       tseq[test].cpu_sel);
			run = 1;
			switch (cpu_mode) {
			case CPM_RROBIN:
			case CPM_SEQ:
				/* Select a single CPU */
				if (my_cpu_ord == cpu_sel) {
					mstr_cpu = cpu_sel;
					run_cpus = 1;
				} else {
					run = 0;
				}
				break;
			case CPM_ALL:
				/* Use all CPUs */
				if (tseq[test].cpu_sel == -1) {
					/* Round robin through all of the CPUs */
					if (my_cpu_ord == cpu_sel) {
						mstr_cpu = cpu_sel;
						run_cpus = 1;
					} else {
						run = 0;
					}
				} else {
					/* Use the number of CPUs specified by the test,
					 * Starting with zero */
					if (my_cpu_ord >= tseq[test].cpu_sel) {
						run = 0;
					}
					/* Set the master CPU to the highest CPU number
					 * that has been selected */
					if (act_cpus < tseq[test].cpu_sel) {
						mstr_cpu = act_cpus-1;
						run_cpus = act_cpus;
					} else {
						mstr_cpu = tseq[test].cpu_sel-1;
						run_cpus = tseq[test].cpu_sel;
					}
				}
			}
			btrace(my_cpu_num, __LINE__, "Sched_CPU1", 1, run_cpus, run);
			barrier();
			dprint(9, 7, run_cpus, 2, 0);

			/* Setup a sub barrier for only the selected CPUs */
			if (my_cpu_ord == mstr_cpu) {
				s_barrier_init(run_cpus);
			}

			/* Make sure the the sub barrier is ready before proceeding */
			barrier();

			/* Not selected CPUs go back to the scheduling barrier */
			if (run == 0) {
				continue;
			}
			cprint(8, my_cpu_num+7, "-");
			btrace(my_cpu_num, __LINE__, "Sched_Win0", 1, window, win_next);

			/* Do we need to exit */
			if (reloc_pending) {
				reloc_internal(my_cpu_num);
			}

			/* Only the master CPU computes the next window */
			if (my_cpu_ord == mstr_cpu) {
				switch (window) {
				/* Special case for relocation */
				case 0:
					winx.start = 0;
					winx.end = win1_end;
					window++;
					break;
				/* Special case for first segment */
				case 1:
					winx.start = win0_start;
					winx.end = WIN_SZ;
					win_next += WIN_SZ;
					window++;
					break;
				/* For all other windows */
				default:
					winx.start = win_next;
					win_next += WIN_SZ;
					winx.end = win_next;
				}
				btrace(my_cpu_num, __LINE__, "Sched_Win1", 1, winx.start,
				       winx.end);

				/* Find the memory areas to test */
				segs = compute_segments(winx, my_cpu_num);
			}
			s_barrier();
			btrace(my_cpu_num, __LINE__, "Sched_Win2", 1, segs,
			       v->map[0].pbase_addr);

			if (segs == 0) {
				/* No memory in this window so skip it */
				continue;
			}

			/* map in the window... */
			if (map_page(v->map[0].pbase_addr) < 0) {
				/* Either there is no PAE or we are at the PAE limit */
				break;
			}

			btrace(my_cpu_num, __LINE__, "Strt_Test ", 1, my_cpu_num,
			       my_cpu_ord);
			do_test(my_cpu_ord);
			btrace(my_cpu_num, __LINE__, "End_Test ", 1, my_cpu_num,
			       my_cpu_ord);

			paging_off();
		} /* End of window loop */

		s_barrier();
		btrace(my_cpu_num, __LINE__, "End_Win ", 1, test, window);

		/* Setup for the next set of windows */
		win_next = 0;
		window = 0;
		bail = 0;

		/* Only the master CPU does the end of test housekeeping */
		if (my_cpu_ord != mstr_cpu) {
			continue;
		}

		/* Special handling for the bit fade test #11: it is a
		 * six stage sequence (fill/sleep/check twice), see the
		 * bitf_seq cases in do_test() */
		if (tseq[test].pat == 11 && bitf_seq != 6) {
			/* Keep going until the sequence is complete. */
			bitf_seq++;
			continue;
		} else {
			bitf_seq = 0;
		}

		/* Select advancement of CPUs and next test */
		switch (cpu_mode) {
		case CPM_RROBIN:
			if (++cpu_sel >= act_cpus) {
				cpu_sel = 0;
			}
			next_test();
			break;
		case CPM_SEQ:
			if (++cpu_sel >= act_cpus) {
				cpu_sel = 0;
				next_test();
			}
			break;
		case CPM_ALL:
			if (tseq[test].cpu_sel == -1) {
				/* Do the same test for each CPU */
				if (++cpu_sel >= act_cpus) {
					cpu_sel = 0;
					next_test();
				} else {
					continue;
				}
			} else {
				next_test();
			}
		} //????
		btrace(my_cpu_num, __LINE__, "Next_CPU ", 1, cpu_sel, test);

		/* If this was the last test then we finished a pass */
		if (pass_flag) {
			pass_flag = 0;

			v->pass++;

			dprint(LINE_INFO, 49, v->pass, 5, 0);
			find_ticks_for_pass();
			/* NOTE(review): this resets the file-scope ltest; the
			 * local static in test_setup() shadows it, so the
			 * reset has no effect there - verify intent */
			ltest = -1;

			if (v->ecount == 0) {
				/* If onepass is enabled and we did not get any errors
				 * reboot to exit the test */
				if (onepass) { reboot(); }
				if (!btflag) cprint(LINE_MSG, COL_MSG-8, "** Pass complete, no errors, press Esc to exit **");
				if (BEEP_END_NO_ERROR) {
					beep(1000);
					beep(2000);
					beep(1000);
					beep(2000);
				}
			}
		}

		bail=0;
	} /* End test loop */
}
784
785
Martin Roth69593432016-03-27 19:46:03 -0600786void test_setup(void)
Martin Roth9b1b3352016-02-24 12:27:06 -0800787{
788 static int ltest = -1;
789
790 /* See if a specific test has been selected */
791 if (v->testsel >= 0) {
Ben Gardner90f7d112016-03-15 15:25:22 -0500792 test = v->testsel;
793 }
Martin Roth9b1b3352016-02-24 12:27:06 -0800794
795 /* Only do the setup if this is a new test */
796 if (test == ltest) {
797 return;
798 }
799 ltest = test;
800
801 /* Now setup the test parameters based on the current test number */
802 if (v->pass == 0) {
803 /* Reduce iterations for first pass */
804 c_iter = tseq[test].iter/FIRST_DIVISER;
805 } else {
806 c_iter = tseq[test].iter;
807 }
808
809 /* Set the number of iterations. We only do half of the iterations */
Ben Gardner90f7d112016-03-15 15:25:22 -0500810 /* on the first pass */
Martin Roth9b1b3352016-02-24 12:27:06 -0800811 //dprint(LINE_INFO, 28, c_iter, 3, 0);
812 test_ticks = find_ticks_for_test(test);
813 nticks = 0;
814 v->tptr = 0;
815
816 cprint(LINE_PAT, COL_PAT, " ");
817 cprint(LINE_PAT, COL_PAT-3, " ");
818 dprint(LINE_TST, COL_MID+6, tseq[test].pat, 2, 1);
819 cprint(LINE_TST, COL_MID+9, tseq[test].msg);
820 cprint(2, COL_MID+8, " ");
821}
822
823/* A couple static variables for when all cpus share the same pattern */
824static ulong sp1, sp2;
825
826int do_test(int my_ord)
827{
828 int i=0, j=0;
829 static int bitf_sleep;
830 unsigned long p0=0, p1=0, p2=0;
831
832 if (my_ord == mstr_cpu) {
Ben Gardner90f7d112016-03-15 15:25:22 -0500833 if ((ulong)&_start > LOW_TEST_ADR) {
834 /* Relocated so we need to test all selected lower memory */
835 v->map[0].start = mapping(v->plim_lower);
Martin Roth4dcd13d2016-02-24 13:53:07 -0800836
Ben Gardner90f7d112016-03-15 15:25:22 -0500837 /* Good 'ol Legacy USB_WAR */
838 if (v->map[0].start < (ulong*)0x500) {
839 v->map[0].start = (ulong*)0x500;
840 }
841
842 cprint(LINE_PAT, COL_MID+25, " R");
843 } else {
844 cprint(LINE_PAT, COL_MID+25, " ");
Martin Roth9b1b3352016-02-24 12:27:06 -0800845 }
Martin Roth4dcd13d2016-02-24 13:53:07 -0800846
Ben Gardner90f7d112016-03-15 15:25:22 -0500847 /* Update display of memory segments being tested */
848 p0 = page_of(v->map[0].start);
849 p1 = page_of(v->map[segs-1].end);
850 aprint(LINE_RANGE, COL_MID+9, p0);
851 cprint(LINE_RANGE, COL_MID+14, " - ");
852 aprint(LINE_RANGE, COL_MID+17, p1);
853 aprint(LINE_RANGE, COL_MID+25, p1-p0);
854 cprint(LINE_RANGE, COL_MID+30, " of ");
855 aprint(LINE_RANGE, COL_MID+34, v->selected_pages);
Martin Roth9b1b3352016-02-24 12:27:06 -0800856 }
Martin Roth4dcd13d2016-02-24 13:53:07 -0800857
Ben Gardner90f7d112016-03-15 15:25:22 -0500858 switch (tseq[test].pat) {
Martin Roth9b1b3352016-02-24 12:27:06 -0800859 /* Do the testing according to the selected pattern */
860
861 case 0: /* Address test, walking ones (test #0) */
862 /* Run with cache turned off */
863 set_cache(0);
864 addr_tst1(my_ord);
865 set_cache(1);
866 BAILOUT;
867 break;
868
869 case 1:
870 case 2: /* Address test, own address (test #1, 2) */
871 addr_tst2(my_ord);
872 BAILOUT;
873 break;
874
875 case 3:
Ben Gardner90f7d112016-03-15 15:25:22 -0500876 case 4: /* Moving inversions, all ones and zeros (tests #3, 4) */
Martin Roth9b1b3352016-02-24 12:27:06 -0800877 p1 = 0;
878 p2 = ~p1;
879 s_barrier();
Ben Gardner90f7d112016-03-15 15:25:22 -0500880 movinv1(c_iter, p1, p2, my_ord);
Martin Roth9b1b3352016-02-24 12:27:06 -0800881 BAILOUT;
Martin Roth4dcd13d2016-02-24 13:53:07 -0800882
Martin Roth9b1b3352016-02-24 12:27:06 -0800883 /* Switch patterns */
884 s_barrier();
Ben Gardner90f7d112016-03-15 15:25:22 -0500885 movinv1(c_iter, p2, p1, my_ord);
Martin Roth9b1b3352016-02-24 12:27:06 -0800886 BAILOUT;
887 break;
Martin Roth4dcd13d2016-02-24 13:53:07 -0800888
Martin Roth9b1b3352016-02-24 12:27:06 -0800889 case 5: /* Moving inversions, 8 bit walking ones and zeros (test #5) */
890 p0 = 0x80;
891 for (i=0; i<8; i++, p0=p0>>1) {
892 p1 = p0 | (p0<<8) | (p0<<16) | (p0<<24);
893 p2 = ~p1;
894 s_barrier();
Ben Gardner90f7d112016-03-15 15:25:22 -0500895 movinv1(c_iter, p1, p2, my_ord);
Martin Roth9b1b3352016-02-24 12:27:06 -0800896 BAILOUT;
Martin Roth4dcd13d2016-02-24 13:53:07 -0800897
Martin Roth9b1b3352016-02-24 12:27:06 -0800898 /* Switch patterns */
899 s_barrier();
Ben Gardner90f7d112016-03-15 15:25:22 -0500900 movinv1(c_iter, p2, p1, my_ord);
Ben Gardnerb6621c32016-03-08 08:47:58 -0600901 BAILOUT;
Martin Roth9b1b3352016-02-24 12:27:06 -0800902 }
903 break;
Martin Roth4dcd13d2016-02-24 13:53:07 -0800904
Martin Roth9b1b3352016-02-24 12:27:06 -0800905 case 6: /* Random Data (test #6) */
906 /* Seed the random number generator */
907 if (my_ord == mstr_cpu) {
Ben Gardner90f7d112016-03-15 15:25:22 -0500908 if (cpu_id.fid.bits.rdtsc) {
909 asm __volatile__ ("rdtsc" : "=a" (sp1), "=d" (sp2));
910 } else {
911 sp1 = 521288629 + v->pass;
912 sp2 = 362436069 - v->pass;
913 }
914 rand_seed(sp1, sp2, 0);
Martin Roth9b1b3352016-02-24 12:27:06 -0800915 }
916
917 s_barrier();
918 for (i=0; i < c_iter; i++) {
919 if (my_ord == mstr_cpu) {
920 sp1 = rand(0);
921 sp2 = ~p1;
922 }
923 s_barrier();
Ben Gardner90f7d112016-03-15 15:25:22 -0500924 movinv1(2, sp1, sp2, my_ord);
Martin Roth9b1b3352016-02-24 12:27:06 -0800925 BAILOUT;
926 }
927 break;
928
929
930 case 7: /* Block move (test #7) */
931 block_move(c_iter, my_ord);
932 BAILOUT;
933 break;
934
935 case 8: /* Moving inversions, 32 bit shifting pattern (test #8) */
936 for (i=0, p1=1; p1; p1=p1<<1, i++) {
937 s_barrier();
Ben Gardner90f7d112016-03-15 15:25:22 -0500938 movinv32(c_iter, p1, 1, 0x80000000, 0, i, my_ord);
Ben Gardnerb6621c32016-03-08 08:47:58 -0600939 BAILOUT;
Martin Roth9b1b3352016-02-24 12:27:06 -0800940 s_barrier();
Ben Gardner90f7d112016-03-15 15:25:22 -0500941 movinv32(c_iter, ~p1, 0xfffffffe,
942 0x7fffffff, 1, i, my_ord);
Ben Gardnerb6621c32016-03-08 08:47:58 -0600943 BAILOUT;
Martin Roth9b1b3352016-02-24 12:27:06 -0800944 }
945 break;
946
947 case 9: /* Random Data Sequence (test #9) */
948 for (i=0; i < c_iter; i++) {
949 s_barrier();
950 movinvr(my_ord);
951 BAILOUT;
952 }
953 break;
954
955 case 10: /* Modulo 20 check, Random pattern (test #10) */
956 for (j=0; j<c_iter; j++) {
957 p1 = rand(0);
958 for (i=0; i<MOD_SZ; i++) {
959 p2 = ~p1;
960 s_barrier();
961 modtst(i, 2, p1, p2, my_ord);
Ben Gardnerb6621c32016-03-08 08:47:58 -0600962 BAILOUT;
Martin Roth9b1b3352016-02-24 12:27:06 -0800963
964 /* Switch patterns */
965 s_barrier();
966 modtst(i, 2, p2, p1, my_ord);
Ben Gardnerb6621c32016-03-08 08:47:58 -0600967 BAILOUT;
Martin Roth9b1b3352016-02-24 12:27:06 -0800968 }
969 }
970 break;
971
972 case 11: /* Bit fade test, fill (test #11) */
Ben Gardner90f7d112016-03-15 15:25:22 -0500973 /* Use a sequence to process all windows for each stage */
974 switch (bitf_seq) {
975 case 0: /* Fill all of memory 0's */
Martin Roth9b1b3352016-02-24 12:27:06 -0800976 bit_fade_fill(0, my_ord);
977 bitf_sleep = 1;
978 break;
979 case 1: /* Sleep for the specified time */
980 /* Only sleep once */
981 if (bitf_sleep) {
982 sleep(c_iter, 1, my_ord, 0);
983 bitf_sleep = 0;
984 }
985 break;
986 case 2: /* Now check all of memory for changes */
987 bit_fade_chk(0, my_ord);
988 break;
Ben Gardner90f7d112016-03-15 15:25:22 -0500989 case 3: /* Fill all of memory 1's */
Martin Roth9b1b3352016-02-24 12:27:06 -0800990 bit_fade_fill(-1, my_ord);
991 bitf_sleep = 1;
992 break;
993 case 4: /* Sleep for the specified time */
994 /* Only sleep once */
995 if (bitf_sleep) {
996 sleep(c_iter, 1, my_ord, 0);
997 bitf_sleep = 0;
998 }
999 break;
1000 case 5: /* Now check all of memory for changes */
1001 bit_fade_chk(-1, my_ord);
1002 break;
1003 }
1004 BAILOUT;
1005 break;
1006
1007 case 90: /* Modulo 20 check, all ones and zeros (unused) */
1008 p1=0;
1009 for (i=0; i<MOD_SZ; i++) {
1010 p2 = ~p1;
1011 modtst(i, c_iter, p1, p2, my_ord);
Ben Gardnerb6621c32016-03-08 08:47:58 -06001012 BAILOUT;
Martin Roth9b1b3352016-02-24 12:27:06 -08001013
1014 /* Switch patterns */
1015 p2 = p1;
1016 p1 = ~p2;
Ben Gardner90f7d112016-03-15 15:25:22 -05001017 modtst(i, c_iter, p1, p2, my_ord);
Ben Gardnerb6621c32016-03-08 08:47:58 -06001018 BAILOUT;
Martin Roth9b1b3352016-02-24 12:27:06 -08001019 }
1020 break;
1021
1022 case 91: /* Modulo 20 check, 8 bit pattern (unused) */
1023 p0 = 0x80;
1024 for (j=0; j<8; j++, p0=p0>>1) {
1025 p1 = p0 | (p0<<8) | (p0<<16) | (p0<<24);
1026 for (i=0; i<MOD_SZ; i++) {
1027 p2 = ~p1;
1028 modtst(i, c_iter, p1, p2, my_ord);
Ben Gardnerb6621c32016-03-08 08:47:58 -06001029 BAILOUT;
Martin Roth9b1b3352016-02-24 12:27:06 -08001030
1031 /* Switch patterns */
1032 p2 = p1;
1033 p1 = ~p2;
1034 modtst(i, c_iter, p1, p2, my_ord);
Ben Gardnerb6621c32016-03-08 08:47:58 -06001035 BAILOUT;
Martin Roth9b1b3352016-02-24 12:27:06 -08001036 }
1037 }
1038 break;
1039 }
1040 return(0);
1041}
1042
1043/* Compute number of SPINSZ chunks being tested */
Martin Roth4dcd13d2016-02-24 13:53:07 -08001044int find_chunks(int tst)
Martin Roth9b1b3352016-02-24 12:27:06 -08001045{
1046 int i, j, sg, wmax, ch;
Ben Gardner90f7d112016-03-15 15:25:22 -05001047 struct pmap twin={0, 0};
Martin Roth9b1b3352016-02-24 12:27:06 -08001048 unsigned long wnxt = WIN_SZ;
1049 unsigned long len;
1050
1051 wmax = MAX_MEM/WIN_SZ+2; /* The number of 2 GB segments +2 */
1052 /* Compute the number of SPINSZ memory segments */
1053 ch = 0;
Ben Gardner90f7d112016-03-15 15:25:22 -05001054 for (j = 0; j < wmax; j++) {
Martin Roth9b1b3352016-02-24 12:27:06 -08001055 /* special case for relocation */
1056 if (j == 0) {
1057 twin.start = 0;
1058 twin.end = win1_end;
1059 }
1060
1061 /* special case for first 2 GB */
1062 if (j == 1) {
1063 twin.start = win0_start;
1064 twin.end = WIN_SZ;
1065 }
1066
1067 /* For all other windows */
1068 if (j > 1) {
1069 twin.start = wnxt;
1070 wnxt += WIN_SZ;
1071 twin.end = wnxt;
1072 }
1073
Ben Gardner90f7d112016-03-15 15:25:22 -05001074 /* Find the memory areas I am going to test */
Martin Roth9b1b3352016-02-24 12:27:06 -08001075 sg = compute_segments(twin, -1);
Ben Gardner90f7d112016-03-15 15:25:22 -05001076 for (i = 0; i < sg; i++) {
Martin Roth9b1b3352016-02-24 12:27:06 -08001077 len = v->map[i].end - v->map[i].start;
1078
1079 if (cpu_mode == CPM_ALL && num_cpus > 1) {
Ben Gardner90f7d112016-03-15 15:25:22 -05001080 switch (tseq[tst].pat) {
Martin Roth9b1b3352016-02-24 12:27:06 -08001081 case 2:
1082 case 4:
1083 case 5:
1084 case 6:
1085 case 9:
1086 case 10:
Ben Gardner90f7d112016-03-15 15:25:22 -05001087 len /= act_cpus;
1088 break;
Martin Roth9b1b3352016-02-24 12:27:06 -08001089 case 7:
1090 case 8:
Ben Gardner90f7d112016-03-15 15:25:22 -05001091 len /= act_cpus;
1092 break;
Martin Roth9b1b3352016-02-24 12:27:06 -08001093 }
1094 }
1095 ch += (len + SPINSZ -1)/SPINSZ;
1096 }
1097 }
1098 return(ch);
1099}
1100
1101/* Compute the total number of ticks per pass */
1102void find_ticks_for_pass(void)
1103{
1104 int i;
1105
1106 v->pptr = 0;
1107 v->pass_ticks = 0;
1108 v->total_ticks = 0;
1109 cprint(1, COL_MID+8, " ");
1110 i = 0;
1111 while (tseq[i].cpu_sel != 0) {
1112 /* Skip tests 2 and 4 if we are using 1 cpu */
Martin Roth4dcd13d2016-02-24 13:53:07 -08001113 if (act_cpus == 1 && (i == 2 || i == 4)) {
Ben Gardner90f7d112016-03-15 15:25:22 -05001114 i++;
1115 continue;
Martin Roth9b1b3352016-02-24 12:27:06 -08001116 }
1117 v->pass_ticks += find_ticks_for_test(i);
1118 i++;
1119 }
1120}
1121
1122static int find_ticks_for_test(int tst)
1123{
1124 int ticks=0, c, ch;
1125
1126 if (tseq[tst].sel == 0) {
1127 return(0);
1128 }
1129
1130 /* Determine the number of chunks for this test */
1131 ch = find_chunks(tst);
1132
1133 /* Set the number of iterations. We only do 1/2 of the iterations */
Ben Gardner90f7d112016-03-15 15:25:22 -05001134 /* on the first pass */
Martin Roth9b1b3352016-02-24 12:27:06 -08001135 if (v->pass == 0) {
1136 c = tseq[tst].iter/FIRST_DIVISER;
1137 } else {
1138 c = tseq[tst].iter;
1139 }
1140
Ben Gardner90f7d112016-03-15 15:25:22 -05001141 switch (tseq[tst].pat) {
Martin Roth9b1b3352016-02-24 12:27:06 -08001142 case 0: /* Address test, walking ones */
1143 ticks = 2;
1144 break;
1145 case 1: /* Address test, own address */
1146 case 2:
1147 ticks = 2;
1148 break;
1149 case 3: /* Moving inversions, all ones and zeros */
1150 case 4:
1151 ticks = 2 + 4 * c;
1152 break;
1153 case 5: /* Moving inversions, 8 bit walking ones and zeros */
1154 ticks = 24 + 24 * c;
1155 break;
1156 case 6: /* Random Data */
1157 ticks = c + 4 * c;
1158 break;
1159 case 7: /* Block move */
1160 ticks = (ch + ch/act_cpus + c*ch);
1161 break;
1162 case 8: /* Moving inversions, 32 bit shifting pattern */
1163 ticks = (1 + c * 2) * 64;
1164 break;
1165 case 9: /* Random Data Sequence */
1166 ticks = 3 * c;
1167 break;
1168 case 10: /* Modulo 20 check, Random pattern */
1169 ticks = 4 * 40 * c;
1170 break;
1171 case 11: /* Bit fade test */
1172 ticks = c * 2 + 4 * ch;
1173 break;
1174 case 90: /* Modulo 20 check, all ones and zeros (unused) */
1175 ticks = (2 + c) * 40;
1176 break;
1177 case 91: /* Modulo 20 check, 8 bit pattern (unused) */
1178 ticks = (2 + c) * 40 * 8;
1179 break;
1180 }
1181 if (cpu_mode == CPM_SEQ || tseq[tst].cpu_sel == -1) {
1182 ticks *= act_cpus;
1183 }
1184 if (tseq[tst].pat == 7 || tseq[tst].pat == 11) {
1185 return ticks;
1186 }
1187 return ticks*ch;
1188}
1189
1190static int compute_segments(struct pmap win, int me)
1191{
1192 unsigned long wstart, wend;
1193 int i, sg;
1194
1195 /* Compute the window I am testing memory in */
1196 wstart = win.start;
1197 wend = win.end;
1198 sg = 0;
1199
1200 /* Now reduce my window to the area of memory I want to test */
1201 if (wstart < v->plim_lower) {
1202 wstart = v->plim_lower;
1203 }
1204 if (wend > v->plim_upper) {
1205 wend = v->plim_upper;
1206 }
1207 if (wstart >= wend) {
1208 return(0);
1209 }
1210 /* List the segments being tested */
1211 for (i=0; i< v->msegs; i++) {
1212 unsigned long start, end;
1213 start = v->pmap[i].start;
1214 end = v->pmap[i].end;
1215 if (start <= wstart) {
1216 start = wstart;
1217 }
1218 if (end >= wend) {
1219 end = wend;
1220 }
1221#if 0
1222 cprint(LINE_SCROLL+(2*i), 0, " (");
1223 hprint(LINE_SCROLL+(2*i), 2, start);
1224 cprint(LINE_SCROLL+(2*i), 10, ", ");
1225 hprint(LINE_SCROLL+(2*i), 12, end);
1226 cprint(LINE_SCROLL+(2*i), 20, ") ");
1227
1228 cprint(LINE_SCROLL+(2*i), 22, "r(");
1229 hprint(LINE_SCROLL+(2*i), 24, wstart);
1230 cprint(LINE_SCROLL+(2*i), 32, ", ");
1231 hprint(LINE_SCROLL+(2*i), 34, wend);
1232 cprint(LINE_SCROLL+(2*i), 42, ") ");
1233
1234 cprint(LINE_SCROLL+(2*i), 44, "p(");
1235 hprint(LINE_SCROLL+(2*i), 46, v->plim_lower);
1236 cprint(LINE_SCROLL+(2*i), 54, ", ");
1237 hprint(LINE_SCROLL+(2*i), 56, v->plim_upper);
1238 cprint(LINE_SCROLL+(2*i), 64, ") ");
1239
1240 cprint(LINE_SCROLL+(2*i+1), 0, "w(");
1241 hprint(LINE_SCROLL+(2*i+1), 2, win.start);
1242 cprint(LINE_SCROLL+(2*i+1), 10, ", ");
1243 hprint(LINE_SCROLL+(2*i+1), 12, win.end);
1244 cprint(LINE_SCROLL+(2*i+1), 20, ") ");
1245
1246 cprint(LINE_SCROLL+(2*i+1), 22, "m(");
1247 hprint(LINE_SCROLL+(2*i+1), 24, v->pmap[i].start);
1248 cprint(LINE_SCROLL+(2*i+1), 32, ", ");
1249 hprint(LINE_SCROLL+(2*i+1), 34, v->pmap[i].end);
1250 cprint(LINE_SCROLL+(2*i+1), 42, ") ");
1251
1252 cprint(LINE_SCROLL+(2*i+1), 44, "i=");
1253 hprint(LINE_SCROLL+(2*i+1), 46, i);
Martin Roth4dcd13d2016-02-24 13:53:07 -08001254
1255 cprint(LINE_SCROLL+(2*i+2), 0,
Ben Gardner90f7d112016-03-15 15:25:22 -05001256 " "
1257 " ");
Martin Roth4dcd13d2016-02-24 13:53:07 -08001258 cprint(LINE_SCROLL+(2*i+3), 0,
Ben Gardner90f7d112016-03-15 15:25:22 -05001259 " "
1260 " ");
Martin Roth9b1b3352016-02-24 12:27:06 -08001261#endif
1262 if ((start < end) && (start < wend) && (end > wstart)) {
1263 v->map[sg].pbase_addr = start;
1264 v->map[sg].start = mapping(start);
1265 v->map[sg].end = emapping(end);
1266#if 0
Ben Gardner90f7d112016-03-15 15:25:22 -05001267 hprint(LINE_SCROLL+(sg+1), 0, sg);
1268 hprint(LINE_SCROLL+(sg+1), 12, v->map[sg].pbase_addr);
1269 hprint(LINE_SCROLL+(sg+1), 22, start);
1270 hprint(LINE_SCROLL+(sg+1), 32, end);
1271 hprint(LINE_SCROLL+(sg+1), 42, mapping(start));
1272 hprint(LINE_SCROLL+(sg+1), 52, emapping(end));
1273 cprint(LINE_SCROLL+(sg+2), 0,
1274 " "
1275 " ");
Martin Roth9b1b3352016-02-24 12:27:06 -08001276#endif
1277#if 0
Ben Gardner90f7d112016-03-15 15:25:22 -05001278 cprint(LINE_SCROLL+(2*i+1), 54, ", sg=");
1279 hprint(LINE_SCROLL+(2*i+1), 59, sg);
Martin Roth9b1b3352016-02-24 12:27:06 -08001280#endif
1281 sg++;
1282 }
1283 }
1284 return (sg);
1285}