/* vax_mmu.c - VAX memory management

   Copyright (c) 1998-2008, Robert M Supnik

   Permission is hereby granted, free of charge, to any person obtaining a
   copy of this software and associated documentation files (the "Software"),
   to deal in the Software without restriction, including without limitation
   the rights to use, copy, modify, merge, publish, distribute, sublicense,
   and/or sell copies of the Software, and to permit persons to whom the
   Software is furnished to do so, subject to the following conditions:

   The above copyright notice and this permission notice shall be included in
   all copies or substantial portions of the Software.

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
   IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
   ROBERT M SUPNIK BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
   IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
   CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

   Except as contained in this notice, the name of Robert M Supnik shall not be
   used in advertising or otherwise to promote the sale, use or other dealings
   in this Software without prior written authorization from Robert M Supnik.

   28-May-08    RMS     Inlined physical memory routines
   29-Apr-07    RMS     Added address masking for system page table reads
   22-Sep-05    RMS     Fixed declarations (from Sterling Garwood)
   30-Sep-04    RMS     Comment and formatting changes
   19-Sep-03    RMS     Fixed upper/lower case linkage problems on VMS
   01-Jun-03    RMS     Fixed compilation problem with USE_ADDR64

   This module contains the memory management routines:

        Read            -       read virtual
        Write           -       write virtual
        ReadL(P)        -       read aligned physical longword (physical context)
        WriteL(P)       -       write aligned physical longword (physical context)
        ReadB(W)        -       read aligned physical byte (word)
        WriteB(W)       -       write aligned physical byte (word)
        Test            -       test access

        zap_tb          -       clear TB
        zap_tb_ent      -       clear TB entry
        chk_tb_ent      -       check TB entry
        set_map_reg     -       set up working map registers
*/

#include "vax_defs.h"
#include <setjmp.h>

typedef struct {
    int32       tag;                                    /* tag */
    int32       pte;                                    /* pte */
    } TLBENT;

extern uint32 *M;
extern const uint32 align[4];
extern int32 PSL;
extern int32 mapen;
extern int32 p1, p2;
extern int32 P0BR, P0LR;
extern int32 P1BR, P1LR;
extern int32 SBR, SLR;
extern int32 SISR;
extern jmp_buf save_env;
extern UNIT cpu_unit;

int32 d_p0br, d_p0lr;                                   /* dynamic copies */
int32 d_p1br, d_p1lr;                                   /* altered per ucode */
int32 d_sbr, d_slr;
extern int32 mchk_va, mchk_ref;                         /* for mcheck */
TLBENT stlb[VA_TBSIZE], ptlb[VA_TBSIZE];
static const int32 insert[4] = {
    0x00000000, 0x000000FF, 0x0000FFFF, 0x00FFFFFF
    };
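
/* insert[bo] keeps the bytes of a longword below byte offset bo; Write
   uses it (and its complement) to merge unaligned data into the two
   longwords that straddle the store.

   cvtacc[], indexed by the 4b protection code of a VAX PTE, gives the
   TLB's internal access mask: one read and one write bit per mode
   (kernel, executive, supervisor, user).  Codes 0 (no access) and 1
   (reserved) grant nothing, so pages with those codes always miss in
   the TLB and refault on every reference. */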
static const int32 cvtacc[16] = { 0, 0,
    TLB_ACCW (KERN)+TLB_ACCR (KERN),
    TLB_ACCR (KERN),
    TLB_ACCW (KERN)+TLB_ACCW (EXEC)+TLB_ACCW (SUPV)+TLB_ACCW (USER)+
    TLB_ACCR (KERN)+TLB_ACCR (EXEC)+TLB_ACCR (SUPV)+TLB_ACCR (USER),
    TLB_ACCW (KERN)+TLB_ACCW (EXEC)+TLB_ACCR (KERN)+TLB_ACCR (EXEC),
    TLB_ACCW (KERN)+TLB_ACCR (KERN)+TLB_ACCR (EXEC),
    TLB_ACCR (KERN)+TLB_ACCR (EXEC),
    TLB_ACCW (KERN)+TLB_ACCW (EXEC)+TLB_ACCW (SUPV)+
    TLB_ACCR (KERN)+TLB_ACCR (EXEC)+TLB_ACCR (SUPV),
    TLB_ACCW (KERN)+TLB_ACCW (EXEC)+
    TLB_ACCR (KERN)+TLB_ACCR (EXEC)+TLB_ACCR (SUPV),
    TLB_ACCW (KERN)+TLB_ACCR (KERN)+TLB_ACCR (EXEC)+TLB_ACCR (SUPV),
    TLB_ACCR (KERN)+TLB_ACCR (EXEC)+TLB_ACCR (SUPV),
    TLB_ACCW (KERN)+TLB_ACCW (EXEC)+TLB_ACCW (SUPV)+
    TLB_ACCR (KERN)+TLB_ACCR (EXEC)+TLB_ACCR (SUPV)+TLB_ACCR (USER),
    TLB_ACCW (KERN)+TLB_ACCW (EXEC)+
    TLB_ACCR (KERN)+TLB_ACCR (EXEC)+TLB_ACCR (SUPV)+TLB_ACCR (USER),
    TLB_ACCW (KERN)+
    TLB_ACCR (KERN)+TLB_ACCR (EXEC)+TLB_ACCR (SUPV)+TLB_ACCR (USER),
    TLB_ACCR (KERN)+TLB_ACCR (EXEC)+TLB_ACCR (SUPV)+TLB_ACCR (USER)
    };

t_stat tlb_ex (t_value *vptr, t_addr addr, UNIT *uptr, int32 sw);
t_stat tlb_dep (t_value val, t_addr addr, UNIT *uptr, int32 sw);
t_stat tlb_reset (DEVICE *dptr);

TLBENT fill (uint32 va, int32 lnt, int32 acc, int32 *stat);
extern int32 ReadIO (uint32 pa, int32 lnt);
extern void WriteIO (uint32 pa, int32 val, int32 lnt);
extern int32 ReadReg (uint32 pa, int32 lnt);
extern void WriteReg (uint32 pa, int32 val, int32 lnt);

/* TLB data structures

   tlb_dev      TLB device descriptor
   tlb_unit     TLB units
   tlb_reg      TLB register list
*/

UNIT tlb_unit[] = {
    { UDATA (NULL, UNIT_FIX, VA_TBSIZE * 2) },
    { UDATA (NULL, UNIT_FIX, VA_TBSIZE * 2) }
    };

REG tlb_reg[] = {
    { NULL }
    };

DEVICE tlb_dev = {
    "TLB", tlb_unit, tlb_reg, NULL,
    2, 16, VA_N_TBI * 2, 1, 16, 32,
    &tlb_ex, &tlb_dep, &tlb_reset,
    NULL, NULL, NULL
    };
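
/* The TLB is visible to SCP as a two-unit debug device: unit 0 is the
   process TLB, unit 1 the system TLB.  Each entry occupies two device
   addresses (even address = tag, odd address = pte), as decoded by
   tlb_ex and tlb_dep below. */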

/* Read and write virtual

   These routines logically fall into three phases:

   1.   Look up the virtual address in the translation buffer, calling
        the fill routine on a tag mismatch or access mismatch (invalid
        TLB entries have access = 0 and thus always mismatch).  The
        fill routine handles all errors.  If the resulting physical
        address is aligned, do an aligned physical read or write.
   2.   Test for an unaligned reference that crosses a page boundary.
        If it crosses, look up the physical address of the second page;
        if not, the second physical address is the longword following
        the first.
   3.   Using the two physical addresses, do the unaligned read or
        write, with three cases: unaligned longword, unaligned word
        within a longword, unaligned word crossing a longword boundary.

   Note that these routines do not handle quadword or octaword references.
*/
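
/* Worked example of phase 3 (illustrative values): a longword read at
   byte offset bo = 1 takes the top three bytes of the first physical
   longword and the lowest byte of the second.  With sc = bo << 3 = 8,
   and align[] (defined elsewhere in the simulator) masking off the bits
   that a signed right shift may have sign-extended (align[1] must be
   0x00FFFFFF for this to work):

        wl = 0xDDCCBBAA  wh = 0x11223344
        ((wl >> 8) & align[1]) = 0x00DDCCBB
        (wh << 24) & LMASK     = 0x44000000
        result                 = 0x44DDCCBB
*/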

/* Read virtual

   Inputs:
        va      =       virtual address
        lnt     =       length code (BWL)
        acc     =       access code (KESU)
   Output:
        returned data, right justified in 32b longword
*/
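
/* An illustrative call (the access mask would normally be built by the
   CPU from the current mode; TLB_ACCR/TLB_ACCW are the primitives used
   in this file):

        int32 lw = Read (va, L_LONG, TLB_ACCR (KERN));

   A translation or protection failure never returns here: fill ()
   aborts to the CPU's fault handler (a longjmp through save_env). */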

int32 Read (uint32 va, int32 lnt, int32 acc)
{
int32 vpn, off, tbi, pa;
int32 pa1, bo, sc, wl, wh;
TLBENT xpte;

mchk_va = va;
if (mapen) {                                            /* mapping on? */
    vpn = VA_GETVPN (va);                               /* get vpn, offset */
    off = VA_GETOFF (va);
    tbi = VA_GETTBI (vpn);
    xpte = (va & VA_S0)? stlb[tbi]: ptlb[tbi];          /* access tlb */
    if (((xpte.pte & acc) == 0) || (xpte.tag != vpn) ||
        ((acc & TLB_WACC) && ((xpte.pte & TLB_M) == 0)))
        xpte = fill (va, lnt, acc, NULL);               /* fill if needed */
    pa = (xpte.pte & TLB_PFN) | off;                    /* get phys addr */
    }
else pa = va & PAMASK;
if ((pa & (lnt - 1)) == 0) {                            /* aligned? */
    if (lnt >= L_LONG) return ReadL (pa);               /* long, quad? */
    if (lnt == L_WORD) return ReadW (pa);               /* word? */
    return ReadB (pa);                                  /* byte */
    }
if (mapen && ((off + lnt) > VA_PAGSIZE)) {              /* cross page? */
    vpn = VA_GETVPN (va + lnt);                         /* vpn 2nd page */
    tbi = VA_GETTBI (vpn);
    xpte = (va & VA_S0)? stlb[tbi]: ptlb[tbi];          /* access tlb */
    if (((xpte.pte & acc) == 0) || (xpte.tag != vpn) ||
        ((acc & TLB_WACC) && ((xpte.pte & TLB_M) == 0)))
        xpte = fill (va + lnt, lnt, acc, NULL);         /* fill if needed */
    pa1 = (xpte.pte & TLB_PFN) | VA_GETOFF (va + 4);
    }
else pa1 = (pa + 4) & PAMASK;                           /* not cross page */
bo = pa & 3;
if (lnt >= L_LONG) {                                    /* lw unaligned? */
    sc = bo << 3;
    wl = ReadL (pa);                                    /* read both lw */
    wh = ReadL (pa1);                                   /* extract */
    return ((((wl >> sc) & align[bo]) | (wh << (32 - sc))) & LMASK);
    }
else if (bo == 1) return ((ReadL (pa) >> 8) & WMASK);
else {
    wl = ReadL (pa);                                    /* word cross lw */
    wh = ReadL (pa1);                                   /* read, extract */
    return (((wl >> 24) & 0xFF) | ((wh & 0xFF) << 8));
    }
}

/* Write virtual

   Inputs:
        va      =       virtual address
        val     =       data to be written, right justified in 32b longword
        lnt     =       length code (BWL)
        acc     =       access code (KESU)
   Output:
        none
*/
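
/* Note that unaligned writes are read-modify-write: the containing
   longword(s) are fetched with ReadL, merged with the new data under
   the insert[] masks, and stored back with WriteL, so memory and I/O
   space are only ever accessed a longword at a time on this path. */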

void Write (uint32 va, int32 val, int32 lnt, int32 acc)
{
int32 vpn, off, tbi, pa;
int32 pa1, bo, sc, wl, wh;
TLBENT xpte;

mchk_va = va;
if (mapen) {
    vpn = VA_GETVPN (va);
    off = VA_GETOFF (va);
    tbi = VA_GETTBI (vpn);
    xpte = (va & VA_S0)? stlb[tbi]: ptlb[tbi];          /* access tlb */
    if (((xpte.pte & acc) == 0) || (xpte.tag != vpn) ||
        ((xpte.pte & TLB_M) == 0))
        xpte = fill (va, lnt, acc, NULL);
    pa = (xpte.pte & TLB_PFN) | off;
    }
else pa = va & PAMASK;
if ((pa & (lnt - 1)) == 0) {                            /* aligned? */
    if (lnt >= L_LONG) WriteL (pa, val);                /* long, quad? */
    else if (lnt == L_WORD) WriteW (pa, val);           /* word? */
    else WriteB (pa, val);                              /* byte */
    return;
    }
if (mapen && ((off + lnt) > VA_PAGSIZE)) {
    vpn = VA_GETVPN (va + 4);
    tbi = VA_GETTBI (vpn);
    xpte = (va & VA_S0)? stlb[tbi]: ptlb[tbi];          /* access tlb */
    if (((xpte.pte & acc) == 0) || (xpte.tag != vpn) ||
        ((xpte.pte & TLB_M) == 0))
        xpte = fill (va + lnt, lnt, acc, NULL);
    pa1 = (xpte.pte & TLB_PFN) | VA_GETOFF (va + 4);
    }
else pa1 = (pa + 4) & PAMASK;
bo = pa & 3;
wl = ReadL (pa);
if (lnt >= L_LONG) {
    sc = bo << 3;
    wh = ReadL (pa1);
    wl = (wl & insert[bo]) | ((val << sc) & LMASK);
    wh = (wh & ~insert[bo]) | ((val >> (32 - sc)) & insert[bo]);
    WriteL (pa, wl);
    WriteL (pa1, wh);
    }
else if (bo == 1) {
    wl = (wl & 0xFF0000FF) | (val << 8);
    WriteL (pa, wl);
    }
else {
    wh = ReadL (pa1);
    wl = (wl & 0x00FFFFFF) | ((val & 0xFF) << 24);
    wh = (wh & 0xFFFFFF00) | ((val >> 8) & 0xFF);
    WriteL (pa, wl);
    WriteL (pa1, wh);
    }
return;
}

/* Test access to a byte (VAX PROBEx) */
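
/* Unlike Read and Write, Test reports failures through *status instead
   of aborting: it returns the physical address on success, or -1 with
   *status set to a PR_xxx code.  This supports PROBER/PROBEW, which
   must test accessibility without faulting on an inaccessible page. */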

int32 Test (uint32 va, int32 acc, int32 *status)
{
int32 vpn, off, tbi;
TLBENT xpte;

*status = PR_OK;                                        /* assume ok */
if (mapen) {                                            /* mapping on? */
    vpn = VA_GETVPN (va);                               /* get vpn, off */
    off = VA_GETOFF (va);
    tbi = VA_GETTBI (vpn);
    xpte = (va & VA_S0)? stlb[tbi]: ptlb[tbi];          /* access tlb */
    if ((xpte.pte & acc) && (xpte.tag == vpn))          /* TB hit, acc ok? */
        return (xpte.pte & TLB_PFN) | off;
    xpte = fill (va, L_BYTE, acc, status);              /* fill TB */
    if (*status == PR_OK) return (xpte.pte & TLB_PFN) | off;
    else return -1;
    }
return va & PAMASK;                                     /* ret phys addr */
}

/* Read aligned physical (in virtual context, unless indicated)

   Inputs:
        pa      =       physical address, naturally aligned
   Output:
        returned data, right justified in 32b longword
*/
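
/* Main memory M is an array of longwords: pa >> 2 selects the
   containing longword and pa & 3 the byte within it.  For example,
   ReadB (0x203) fetches M[0x80] and shifts right by (3 << 3) = 24
   bits, leaving the addressed byte in bits <7:0>. */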

SIM_INLINE int32 ReadB (uint32 pa)
{
int32 dat;

if (ADDR_IS_MEM (pa)) dat = M[pa >> 2];
else {
    mchk_ref = REF_V;
    if (ADDR_IS_IO (pa)) dat = ReadIO (pa, L_BYTE);
    else dat = ReadReg (pa, L_BYTE);
    }
return ((dat >> ((pa & 3) << 3)) & BMASK);
}

SIM_INLINE int32 ReadW (uint32 pa)
{
int32 dat;

if (ADDR_IS_MEM (pa)) dat = M[pa >> 2];
else {
    mchk_ref = REF_V;
    if (ADDR_IS_IO (pa)) dat = ReadIO (pa, L_WORD);
    else dat = ReadReg (pa, L_WORD);
    }
return ((dat >> ((pa & 2)? 16: 0)) & WMASK);
}

SIM_INLINE int32 ReadL (uint32 pa)
{
if (ADDR_IS_MEM (pa)) return M[pa >> 2];
mchk_ref = REF_V;
if (ADDR_IS_IO (pa)) return ReadIO (pa, L_LONG);
return ReadReg (pa, L_LONG);
}

SIM_INLINE int32 ReadLP (uint32 pa)
{
if (ADDR_IS_MEM (pa)) return M[pa >> 2];
mchk_va = pa;
mchk_ref = REF_P;
if (ADDR_IS_IO (pa)) return ReadIO (pa, L_LONG);
return ReadReg (pa, L_LONG);
}

/* Write aligned physical (in virtual context, unless indicated)

   Inputs:
        pa      =       physical address, naturally aligned
        val     =       data to be written, right justified in 32b longword
   Output:
        none
*/
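
/* Sub-longword stores to memory are merged under a mask so that the
   neighboring bytes are preserved.  For example, WriteB (0x202, 0x5A)
   computes id = 0x80, sc = 16, mask = 0x00FF0000 and performs

        M[0x80] = (M[0x80] & ~0x00FF0000) | (0x5A << 16);

   replacing only bits <23:16> of the containing longword. */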

SIM_INLINE void WriteB (uint32 pa, int32 val)
{
if (ADDR_IS_MEM (pa)) {
    int32 id = pa >> 2;
    int32 sc = (pa & 3) << 3;
    int32 mask = 0xFF << sc;
    M[id] = (M[id] & ~mask) | (val << sc);
    }
else {
    mchk_ref = REF_V;
    if (ADDR_IS_IO (pa)) WriteIO (pa, val, L_BYTE);
    else WriteReg (pa, val, L_BYTE);
    }
return;
}

SIM_INLINE void WriteW (uint32 pa, int32 val)
{
if (ADDR_IS_MEM (pa)) {
    int32 id = pa >> 2;
    M[id] = (pa & 2)? (M[id] & 0xFFFF) | (val << 16):
        (M[id] & ~0xFFFF) | val;
    }
else {
    mchk_ref = REF_V;
    if (ADDR_IS_IO (pa)) WriteIO (pa, val, L_WORD);
    else WriteReg (pa, val, L_WORD);
    }
return;
}

SIM_INLINE void WriteL (uint32 pa, int32 val)
{
if (ADDR_IS_MEM (pa)) M[pa >> 2] = val;
else {
    mchk_ref = REF_V;
    if (ADDR_IS_IO (pa)) WriteIO (pa, val, L_LONG);
    else WriteReg (pa, val, L_LONG);
    }
return;
}

void WriteLP (uint32 pa, int32 val)
{
if (ADDR_IS_MEM (pa)) M[pa >> 2] = val;
else {
    mchk_va = pa;
    mchk_ref = REF_P;
    if (ADDR_IS_IO (pa)) WriteIO (pa, val, L_LONG);
    else WriteReg (pa, val, L_LONG);
    }
return;
}

/* TLB fill

   This routine fills the TLB after a tag or access mismatch, or
   on a write if pte<m> = 0.  It fills the TLB and returns the
   pte to the caller.  On an error, it aborts directly to the
   fault handler in the CPU.

   If called from Test (VAX PROBEx), the error status is returned
   to the caller, and no fault occurs.
*/
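
/* Sketch of the walk.  fill computes ptidx = (va >> 7) & ~03, the byte
   index of the PTE (VPN * 4) with the region bit of the va folded in;
   the biased d_xbr/d_xlr copies (see set_map_reg) make the length check
   and base addition work on that raw index.  For system space the PTE
   is read directly from physical memory at d_sbr + ptidx.  For process
   space the PTE address is a system virtual address, so it is first
   translated through the system TLB (filling that entry from the
   system page table if needed), and the process PTE is then read from
   the resulting physical address. */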

#define MM_ERR(param) { \
    if (stat) { *stat = param; return zero_pte; } \
    p1 = MM_PARAM (acc & TLB_WACC, param); \
    p2 = va; \
    ABORT ((param & PR_TNV)? ABORT_TNV: ABORT_ACV); }

TLBENT fill (uint32 va, int32 lnt, int32 acc, int32 *stat)
{
int32 ptidx = (((uint32) va) >> 7) & ~03;
int32 tlbpte, ptead, pte, tbi, vpn;
static TLBENT zero_pte = { 0, 0 };

if (va & VA_S0) {                                       /* system space? */
    if (ptidx >= d_slr)                                 /* system */
        MM_ERR (PR_LNV);
    ptead = (d_sbr + ptidx) & PAMASK;
    }
else {
    if (va & VA_P1) {                                   /* P1? */
        if (ptidx < d_p1lr) MM_ERR (PR_LNV);
        ptead = d_p1br + ptidx;
        }
    else {                                              /* P0 */
        if (ptidx >= d_p0lr) MM_ERR (PR_LNV);
        ptead = d_p0br + ptidx;
        }
    if ((ptead & VA_S0) == 0)
        ABORT (STOP_PPTE);                              /* ppte must be sys */
    vpn = VA_GETVPN (ptead);                            /* get vpn, tbi */
    tbi = VA_GETTBI (vpn);
    if (stlb[tbi].tag != vpn) {                         /* in sys tlb? */
        ptidx = ((uint32) ptead) >> 7;                  /* xlate like sys */
        if (ptidx >= d_slr)
            MM_ERR (PR_PLNV);
        pte = ReadLP ((d_sbr + ptidx) & PAMASK);        /* get system pte */
#if defined (VAX_780)
        if ((pte & PTE_ACC) == 0) MM_ERR (PR_PACV);     /* spte ACV? */
#endif
        if ((pte & PTE_V) == 0) MM_ERR (PR_PTNV);       /* spte TNV? */
        stlb[tbi].tag = vpn;                            /* set stlb tag */
        stlb[tbi].pte = cvtacc[PTE_GETACC (pte)] |
            ((pte << VA_N_OFF) & TLB_PFN);              /* set stlb data */
        }
    ptead = (stlb[tbi].pte & TLB_PFN) | VA_GETOFF (ptead);
    }
pte = ReadL (ptead);                                    /* read pte */
tlbpte = cvtacc[PTE_GETACC (pte)] |                     /* cvt access */
    ((pte << VA_N_OFF) & TLB_PFN);                      /* set addr */
if ((tlbpte & acc) == 0) MM_ERR (PR_ACV);               /* chk access */
if ((pte & PTE_V) == 0) MM_ERR (PR_TNV);                /* check valid */
if (acc & TLB_WACC) {                                   /* write? */
    if ((pte & PTE_M) == 0) WriteL (ptead, pte | PTE_M);
    tlbpte = tlbpte | TLB_M;                            /* set M */
    }
vpn = VA_GETVPN (va);
tbi = VA_GETTBI (vpn);
if ((va & VA_S0) == 0) {                                /* process space? */
    ptlb[tbi].tag = vpn;                                /* store tlb ent */
    ptlb[tbi].pte = tlbpte;
    return ptlb[tbi];
    }
stlb[tbi].tag = vpn;                                    /* system space */
stlb[tbi].pte = tlbpte;                                 /* store tlb ent */
return stlb[tbi];
}

/* Utility routines */

extern void set_map_reg (void)
{
d_p0br = P0BR & ~03;
d_p1br = (P1BR - 0x800000) & ~03;                       /* VA<30> >> 7 */
d_sbr = (SBR - 0x1000000) & ~03;                        /* VA<31> >> 7 */
d_p0lr = (P0LR << 2);
d_p1lr = (P1LR << 2) + 0x800000;                        /* VA<30> >> 7 */
d_slr = (SLR << 2) + 0x1000000;                         /* VA<31> >> 7 */
return;
}
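
/* The bias constants follow from ptidx = (va >> 7) & ~03 in fill ():
   a P1 address carries VA<30>, which adds 0x40000000 >> 7 = 0x800000
   to ptidx, and a system address carries VA<31>, adding
   0x80000000 >> 7 = 0x1000000.  Pre-subtracting these from the base
   registers, and pre-adding them to the lengths that ptidx is compared
   against, removes the per-translation region adjustment. */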

/* Zap process (0) or whole (1) tb */

void zap_tb (int stb)
{
int32 i;

for (i = 0; i < VA_TBSIZE; i++) {
    ptlb[i].tag = ptlb[i].pte = -1;
    if (stb) stlb[i].tag = stlb[i].pte = -1;
    }
return;
}

/* Zap single tb entry corresponding to va */

void zap_tb_ent (uint32 va)
{
int32 tbi = VA_GETTBI (VA_GETVPN (va));

if (va & VA_S0) stlb[tbi].tag = stlb[tbi].pte = -1;
else ptlb[tbi].tag = ptlb[tbi].pte = -1;
return;
}

/* Check for tlb entry corresponding to va */

t_bool chk_tb_ent (uint32 va)
{
int32 vpn = VA_GETVPN (va);
int32 tbi = VA_GETTBI (vpn);
TLBENT xpte;

xpte = (va & VA_S0)? stlb[tbi]: ptlb[tbi];
if (xpte.tag == vpn) return TRUE;
return FALSE;
}

/* TLB examine */

t_stat tlb_ex (t_value *vptr, t_addr addr, UNIT *uptr, int32 sw)
{
int32 tlbn = uptr - tlb_unit;
int32 idx = (uint32) addr >> 1;

if (idx >= VA_TBSIZE) return SCPE_NXM;
if (addr & 1) *vptr = ((uint32) (tlbn? stlb[idx].pte: ptlb[idx].pte));
else *vptr = ((uint32) (tlbn? stlb[idx].tag: ptlb[idx].tag));
return SCPE_OK;
}

/* TLB deposit */

t_stat tlb_dep (t_value val, t_addr addr, UNIT *uptr, int32 sw)
{
int32 tlbn = uptr - tlb_unit;
int32 idx = (uint32) addr >> 1;

if (idx >= VA_TBSIZE) return SCPE_NXM;
if (addr & 1) {
    if (tlbn) stlb[idx].pte = (int32) val;
    else ptlb[idx].pte = (int32) val;
    }
else {
    if (tlbn) stlb[idx].tag = (int32) val;
    else ptlb[idx].tag = (int32) val;
    }
return SCPE_OK;
}

/* TLB reset */

t_stat tlb_reset (DEVICE *dptr)
{
int32 i;

for (i = 0; i < VA_TBSIZE; i++)
    stlb[i].tag = ptlb[i].tag = stlb[i].pte = ptlb[i].pte = -1;
return SCPE_OK;
}