/* vax_mmu.c - VAX memory management

   Copyright (c) 1998-2008, Robert M Supnik

   Permission is hereby granted, free of charge, to any person obtaining a
   copy of this software and associated documentation files (the "Software"),
   to deal in the Software without restriction, including without limitation
   the rights to use, copy, modify, merge, publish, distribute, sublicense,
   and/or sell copies of the Software, and to permit persons to whom the
   Software is furnished to do so, subject to the following conditions:

   The above copyright notice and this permission notice shall be included in
   all copies or substantial portions of the Software.

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
   IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
   ROBERT M SUPNIK BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
   IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
   CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

   Except as contained in this notice, the name of Robert M Supnik shall not be
   used in advertising or otherwise to promote the sale, use or other dealings
   in this Software without prior written authorization from Robert M Supnik.

   28-May-08    RMS     Inlined physical memory routines
   29-Apr-07    RMS     Added address masking for system page table reads
   22-Sep-05    RMS     Fixed declarations (from Sterling Garwood)
   30-Sep-04    RMS     Comment and formatting changes
   19-Sep-03    RMS     Fixed upper/lower case linkage problems on VMS
   01-Jun-03    RMS     Fixed compilation problem with USE_ADDR64

   This module contains the instruction simulators for

   Read         -       read virtual
   Write        -       write virtual
   ReadL(P)     -       read aligned physical longword (physical context)
   WriteL(P)    -       write aligned physical longword (physical context)
   ReadB(W)     -       read aligned physical byte (word)
   WriteB(W)    -       write aligned physical byte (word)
   Test         -       test access

   zap_tb       -       clear TB
   zap_tb_ent   -       clear TB entry
   chk_tb_ent   -       check TB entry
   set_map_reg  -       set up working map registers
*/
48\r
49#include "vax_defs.h"\r
50#include <setjmp.h>\r
51\r
52typedef struct {\r
53 int32 tag; /* tag */\r
54 int32 pte; /* pte */\r
55 } TLBENT;\r
56\r
57extern uint32 *M;\r
58extern const uint32 align[4];\r
59extern int32 PSL;\r
60extern int32 mapen;\r
61extern int32 p1, p2;\r
62extern int32 P0BR, P0LR;\r
63extern int32 P1BR, P1LR;\r
64extern int32 SBR, SLR;\r
65extern int32 SISR;\r
66extern jmp_buf save_env;\r
67extern UNIT cpu_unit;\r
68\r
69int32 d_p0br, d_p0lr; /* dynamic copies */\r
70int32 d_p1br, d_p1lr; /* altered per ucode */\r
71int32 d_sbr, d_slr;\r
72extern int32 mchk_va, mchk_ref; /* for mcheck */\r
73TLBENT stlb[VA_TBSIZE], ptlb[VA_TBSIZE];\r
74static const int32 insert[4] = {\r
75 0x00000000, 0x000000FF, 0x0000FFFF, 0x00FFFFFF\r
76 };\r
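
/* insert[bo] masks the low-order bo bytes of a longword; the unaligned
   write path below uses it to preserve the bytes of each physical longword
   that the store must not touch (e.g. insert[1] = 0x000000FF keeps byte 0
   of the first longword when a store starts at byte offset 1). */
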
static const int32 cvtacc[16] = { 0, 0,
    TLB_ACCW (KERN)+TLB_ACCR (KERN),
    TLB_ACCR (KERN),
    TLB_ACCW (KERN)+TLB_ACCW (EXEC)+TLB_ACCW (SUPV)+TLB_ACCW (USER)+
        TLB_ACCR (KERN)+TLB_ACCR (EXEC)+TLB_ACCR (SUPV)+TLB_ACCR (USER),
    TLB_ACCW (KERN)+TLB_ACCW (EXEC)+TLB_ACCR (KERN)+TLB_ACCR (EXEC),
    TLB_ACCW (KERN)+TLB_ACCR (KERN)+TLB_ACCR (EXEC),
    TLB_ACCR (KERN)+TLB_ACCR (EXEC),
    TLB_ACCW (KERN)+TLB_ACCW (EXEC)+TLB_ACCW (SUPV)+
        TLB_ACCR (KERN)+TLB_ACCR (EXEC)+TLB_ACCR (SUPV),
    TLB_ACCW (KERN)+TLB_ACCW (EXEC)+
        TLB_ACCR (KERN)+TLB_ACCR (EXEC)+TLB_ACCR (SUPV),
    TLB_ACCW (KERN)+TLB_ACCR (KERN)+TLB_ACCR (EXEC)+TLB_ACCR (SUPV),
    TLB_ACCR (KERN)+TLB_ACCR (EXEC)+TLB_ACCR (SUPV),
    TLB_ACCW (KERN)+TLB_ACCW (EXEC)+TLB_ACCW (SUPV)+
        TLB_ACCR (KERN)+TLB_ACCR (EXEC)+TLB_ACCR (SUPV)+TLB_ACCR (USER),
    TLB_ACCW (KERN)+TLB_ACCW (EXEC)+
        TLB_ACCR (KERN)+TLB_ACCR (EXEC)+TLB_ACCR (SUPV)+TLB_ACCR (USER),
    TLB_ACCW (KERN)+
        TLB_ACCR (KERN)+TLB_ACCR (EXEC)+TLB_ACCR (SUPV)+TLB_ACCR (USER),
    TLB_ACCR (KERN)+TLB_ACCR (EXEC)+TLB_ACCR (SUPV)+TLB_ACCR (USER)
    };
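
/* cvtacc[] converts the 4b PTE protection code (extracted by PTE_GETACC)
   into the TLB's per-mode read/write access bits.  For example, code 2
   (kernel write) grants kernel read and write only, while code 4 (all
   access) grants read and write in all four modes (KERN, EXEC, SUPV,
   USER).  Codes 0 and 1 grant no access, so a TLB entry built from them
   can never satisfy an access check. */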

t_stat tlb_ex (t_value *vptr, t_addr addr, UNIT *uptr, int32 sw);
t_stat tlb_dep (t_value val, t_addr addr, UNIT *uptr, int32 sw);
t_stat tlb_reset (DEVICE *dptr);

TLBENT fill (uint32 va, int32 lnt, int32 acc, int32 *stat);
extern int32 ReadIO (uint32 pa, int32 lnt);
extern void WriteIO (uint32 pa, int32 val, int32 lnt);
extern int32 ReadReg (uint32 pa, int32 lnt);
extern void WriteReg (uint32 pa, int32 val, int32 lnt);

/* TLB data structures

   tlb_dev      pager device descriptor
   tlb_unit     pager units
   pager_reg    pager register list
*/

UNIT tlb_unit[] = {
    { UDATA (NULL, UNIT_FIX, VA_TBSIZE * 2) },
    { UDATA (NULL, UNIT_FIX, VA_TBSIZE * 2) }
    };

REG tlb_reg[] = {
    { NULL }
    };

DEVICE tlb_dev = {
    "TLB", tlb_unit, tlb_reg, NULL,
    2, 16, VA_N_TBI * 2, 1, 16, 32,
    &tlb_ex, &tlb_dep, &tlb_reset,
    NULL, NULL, NULL
    };
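
/* The TLB is exposed to the simulator console as a two-unit device
   (unit 0 = process TLB, unit 1 = system TLB).  Each entry occupies two
   addresses in the examine/deposit space: even addresses hold the tag,
   odd addresses the pte (see tlb_ex and tlb_dep below). */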

/* Read and write virtual

   These routines logically fall into three phases:

   1.   Look up the virtual address in the translation buffer, calling
        the fill routine on a tag mismatch or access mismatch (invalid
        tlb entries have access = 0 and thus always mismatch).  The
        fill routine handles all errors.  If the resulting physical
        address is aligned, do an aligned physical read or write.
   2.   Test for unaligned across page boundaries.  If cross page, look
        up the physical address of the second page.  If not cross page,
        the second physical address is the same as the first.
   3.   Using the two physical addresses, do an unaligned read or
        write, with three cases: unaligned long, unaligned word within
        a longword, unaligned word crossing a longword boundary.

   Note that these routines do not handle quad or octa references.
*/
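
/* Worked example of phase 3 (unaligned longword read): if pa = 0x201,
   then bo = 1 and sc = 8.  The result takes bytes <3:1> of the longword
   at 0x200 (wl >> 8, masked by align[1]) and byte <0> of the longword at
   0x204 (wh << 24), reassembled as ((wl >> 8) & align[1]) | (wh << 24). */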

/* Read virtual

   Inputs:
        va      =       virtual address
        lnt     =       length code (BWL)
        acc     =       access code (KESU)
   Output:
        returned data, right justified in 32b longword
*/

int32 Read (uint32 va, int32 lnt, int32 acc)
{
int32 vpn, off, tbi, pa;
int32 pa1, bo, sc, wl, wh;
TLBENT xpte;

mchk_va = va;
if (mapen) {                                            /* mapping on? */
    vpn = VA_GETVPN (va);                               /* get vpn, offset */
    off = VA_GETOFF (va);
    tbi = VA_GETTBI (vpn);
    xpte = (va & VA_S0)? stlb[tbi]: ptlb[tbi];          /* access tlb */
    if (((xpte.pte & acc) == 0) || (xpte.tag != vpn) ||
        ((acc & TLB_WACC) && ((xpte.pte & TLB_M) == 0)))
        xpte = fill (va, lnt, acc, NULL);               /* fill if needed */
    pa = (xpte.pte & TLB_PFN) | off;                    /* get phys addr */
    }
else pa = va & PAMASK;
if ((pa & (lnt - 1)) == 0) {                            /* aligned? */
    if (lnt >= L_LONG) return ReadL (pa);               /* long, quad? */
    if (lnt == L_WORD) return ReadW (pa);               /* word? */
    return ReadB (pa);                                  /* byte */
    }
if (mapen && ((off + lnt) > VA_PAGSIZE)) {              /* cross page? */
    vpn = VA_GETVPN (va + lnt);                         /* vpn 2nd page */
    tbi = VA_GETTBI (vpn);
    xpte = (va & VA_S0)? stlb[tbi]: ptlb[tbi];          /* access tlb */
    if (((xpte.pte & acc) == 0) || (xpte.tag != vpn) ||
        ((acc & TLB_WACC) && ((xpte.pte & TLB_M) == 0)))
        xpte = fill (va + lnt, lnt, acc, NULL);         /* fill if needed */
    pa1 = (xpte.pte & TLB_PFN) | VA_GETOFF (va + 4);
    }
else pa1 = (pa + 4) & PAMASK;                           /* not cross page */
bo = pa & 3;
if (lnt >= L_LONG) {                                    /* lw unaligned? */
    sc = bo << 3;
    wl = ReadL (pa);                                    /* read both lw */
    wh = ReadL (pa1);                                   /* extract */
    return ((((wl >> sc) & align[bo]) | (wh << (32 - sc))) & LMASK);
    }
else if (bo == 1) return ((ReadL (pa) >> 8) & WMASK);
else {
    wl = ReadL (pa);                                    /* word cross lw */
    wh = ReadL (pa1);                                   /* read, extract */
    return (((wl >> 24) & 0xFF) | ((wh & 0xFF) << 8));
    }
}

/* Write virtual

   Inputs:
        va      =       virtual address
        val     =       data to be written, right justified in 32b lw
        lnt     =       length code (BWL)
        acc     =       access code (KESU)
   Output:
        none
*/

void Write (uint32 va, int32 val, int32 lnt, int32 acc)
{
int32 vpn, off, tbi, pa;
int32 pa1, bo, sc, wl, wh;
TLBENT xpte;

mchk_va = va;
if (mapen) {
    vpn = VA_GETVPN (va);
    off = VA_GETOFF (va);
    tbi = VA_GETTBI (vpn);
    xpte = (va & VA_S0)? stlb[tbi]: ptlb[tbi];          /* access tlb */
    if (((xpte.pte & acc) == 0) || (xpte.tag != vpn) ||
        ((xpte.pte & TLB_M) == 0))
        xpte = fill (va, lnt, acc, NULL);
    pa = (xpte.pte & TLB_PFN) | off;
    }
else pa = va & PAMASK;
if ((pa & (lnt - 1)) == 0) {                            /* aligned? */
    if (lnt >= L_LONG) WriteL (pa, val);                /* long, quad? */
    else if (lnt == L_WORD) WriteW (pa, val);           /* word? */
    else WriteB (pa, val);                              /* byte */
    return;
    }
if (mapen && ((off + lnt) > VA_PAGSIZE)) {
    vpn = VA_GETVPN (va + 4);
    tbi = VA_GETTBI (vpn);
    xpte = (va & VA_S0)? stlb[tbi]: ptlb[tbi];          /* access tlb */
    if (((xpte.pte & acc) == 0) || (xpte.tag != vpn) ||
        ((xpte.pte & TLB_M) == 0))
        xpte = fill (va + lnt, lnt, acc, NULL);
    pa1 = (xpte.pte & TLB_PFN) | VA_GETOFF (va + 4);
    }
else pa1 = (pa + 4) & PAMASK;
bo = pa & 3;
wl = ReadL (pa);
if (lnt >= L_LONG) {
    sc = bo << 3;
    wh = ReadL (pa1);
    wl = (wl & insert[bo]) | ((val << sc) & LMASK);
    wh = (wh & ~insert[bo]) | ((val >> (32 - sc)) & insert[bo]);
    WriteL (pa, wl);
    WriteL (pa1, wh);
    }
else if (bo == 1) {
    wl = (wl & 0xFF0000FF) | (val << 8);
    WriteL (pa, wl);
    }
else {
    wh = ReadL (pa1);
    wl = (wl & 0x00FFFFFF) | ((val & 0xFF) << 24);
    wh = (wh & 0xFFFFFF00) | ((val >> 8) & 0xFF);
    WriteL (pa, wl);
    WriteL (pa1, wh);
    }
return;
}
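
/* Note that the unaligned write path is a read-modify-write on whole
   longwords: each affected longword is read, merged with the new bytes
   under the insert[] masks, and written back with WriteL. */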

/* Test access to a byte (VAX PROBEx) */

int32 Test (uint32 va, int32 acc, int32 *status)
{
int32 vpn, off, tbi;
TLBENT xpte;

*status = PR_OK;                                        /* assume ok */
if (mapen) {                                            /* mapping on? */
    vpn = VA_GETVPN (va);                               /* get vpn, off */
    off = VA_GETOFF (va);
    tbi = VA_GETTBI (vpn);
    xpte = (va & VA_S0)? stlb[tbi]: ptlb[tbi];          /* access tlb */
    if ((xpte.pte & acc) && (xpte.tag == vpn))          /* TB hit, acc ok? */
        return (xpte.pte & TLB_PFN) | off;
    xpte = fill (va, L_BYTE, acc, status);              /* fill TB */
    if (*status == PR_OK) return (xpte.pte & TLB_PFN) | off;
    else return -1;
    }
return va & PAMASK;                                     /* ret phys addr */
}
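
/* Because Test passes a non-NULL status pointer to fill, translation
   failures are reported through *status (a PR_xxx code) rather than by
   aborting to the CPU's fault handler, which is what PROBEx requires. */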

/* Read aligned physical (in virtual context, unless indicated)

   Inputs:
        pa      =       physical address, naturally aligned
   Output:
        returned data, right justified in 32b longword
*/
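
/* Byte and word reads from memory fetch the containing longword from M[]
   and extract the addressed field; only I/O space and register space see
   the actual length code. */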

SIM_INLINE int32 ReadB (uint32 pa)
{
int32 dat;

if (ADDR_IS_MEM (pa)) dat = M[pa >> 2];
else {
    mchk_ref = REF_V;
    if (ADDR_IS_IO (pa)) dat = ReadIO (pa, L_BYTE);
    else dat = ReadReg (pa, L_BYTE);
    }
return ((dat >> ((pa & 3) << 3)) & BMASK);
}

SIM_INLINE int32 ReadW (uint32 pa)
{
int32 dat;

if (ADDR_IS_MEM (pa)) dat = M[pa >> 2];
else {
    mchk_ref = REF_V;
    if (ADDR_IS_IO (pa)) dat = ReadIO (pa, L_WORD);
    else dat = ReadReg (pa, L_WORD);
    }
return ((dat >> ((pa & 2)? 16: 0)) & WMASK);
}

SIM_INLINE int32 ReadL (uint32 pa)
{
if (ADDR_IS_MEM (pa)) return M[pa >> 2];
mchk_ref = REF_V;
if (ADDR_IS_IO (pa)) return ReadIO (pa, L_LONG);
return ReadReg (pa, L_LONG);
}

SIM_INLINE int32 ReadLP (uint32 pa)
{
if (ADDR_IS_MEM (pa)) return M[pa >> 2];
mchk_va = pa;
mchk_ref = REF_P;
if (ADDR_IS_IO (pa)) return ReadIO (pa, L_LONG);
return ReadReg (pa, L_LONG);
}

/* Write aligned physical (in virtual context, unless indicated)

   Inputs:
        pa      =       physical address, naturally aligned
        val     =       data to be written, right justified in 32b longword
   Output:
        none
*/

SIM_INLINE void WriteB (uint32 pa, int32 val)
{
if (ADDR_IS_MEM (pa)) {
    int32 id = pa >> 2;
    int32 sc = (pa & 3) << 3;
    int32 mask = 0xFF << sc;
    M[id] = (M[id] & ~mask) | (val << sc);
    }
else {
    mchk_ref = REF_V;
    if (ADDR_IS_IO (pa)) WriteIO (pa, val, L_BYTE);
    else WriteReg (pa, val, L_BYTE);
    }
return;
}

SIM_INLINE void WriteW (uint32 pa, int32 val)
{
if (ADDR_IS_MEM (pa)) {
    int32 id = pa >> 2;
    M[id] = (pa & 2)? (M[id] & 0xFFFF) | (val << 16):
        (M[id] & ~0xFFFF) | val;
    }
else {
    mchk_ref = REF_V;
    if (ADDR_IS_IO (pa)) WriteIO (pa, val, L_WORD);
    else WriteReg (pa, val, L_WORD);
    }
return;
}

SIM_INLINE void WriteL (uint32 pa, int32 val)
{
if (ADDR_IS_MEM (pa)) M[pa >> 2] = val;
else {
    mchk_ref = REF_V;
    if (ADDR_IS_IO (pa)) WriteIO (pa, val, L_LONG);
    else WriteReg (pa, val, L_LONG);
    }
return;
}

void WriteLP (uint32 pa, int32 val)
{
if (ADDR_IS_MEM (pa)) M[pa >> 2] = val;
else {
    mchk_va = pa;
    mchk_ref = REF_P;
    if (ADDR_IS_IO (pa)) WriteIO (pa, val, L_LONG);
    else WriteReg (pa, val, L_LONG);
    }
return;
}

/* TLB fill

   This routine fills the TLB after a tag or access mismatch, or
   on a write if pte<m> = 0.  It fills the TLB and returns the
   pte to the caller.  On an error, it aborts directly to the
   fault handler in the CPU.

   If called from map (VAX PROBEx), the error status is returned
   to the caller, and no fault occurs.
*/
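
/* A process-space translation is itself two-level: the process page table
   lives in system virtual space, so the system virtual address of the
   process PTE is first translated through the system TLB (filling it from
   the system page table if necessary) before the process PTE is read. */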

#define MM_ERR(param) { \
    if (stat) { *stat = param; return zero_pte; } \
    p1 = MM_PARAM (acc & TLB_WACC, param); \
    p2 = va; \
    ABORT ((param & PR_TNV)? ABORT_TNV: ABORT_ACV); }

TLBENT fill (uint32 va, int32 lnt, int32 acc, int32 *stat)
{
int32 ptidx = (((uint32) va) >> 7) & ~03;
int32 tlbpte, ptead, pte, tbi, vpn;
static TLBENT zero_pte = { 0, 0 };

if (va & VA_S0) {                                       /* system space? */
    if (ptidx >= d_slr)                                 /* system */
        MM_ERR (PR_LNV);
    ptead = (d_sbr + ptidx) & PAMASK;
    }
else {
    if (va & VA_P1) {                                   /* P1? */
        if (ptidx < d_p1lr) MM_ERR (PR_LNV);
        ptead = d_p1br + ptidx;
        }
    else {                                              /* P0 */
        if (ptidx >= d_p0lr) MM_ERR (PR_LNV);
        ptead = d_p0br + ptidx;
        }
    if ((ptead & VA_S0) == 0)
        ABORT (STOP_PPTE);                              /* ppte must be sys */
    vpn = VA_GETVPN (ptead);                            /* get vpn, tbi */
    tbi = VA_GETTBI (vpn);
    if (stlb[tbi].tag != vpn) {                         /* in sys tlb? */
        ptidx = ((uint32) ptead) >> 7;                  /* xlate like sys */
        if (ptidx >= d_slr)
            MM_ERR (PR_PLNV);
        pte = ReadLP ((d_sbr + ptidx) & PAMASK);        /* get system pte */
#if defined (VAX_780)
        if ((pte & PTE_ACC) == 0) MM_ERR (PR_PACV);     /* spte ACV? */
#endif
        if ((pte & PTE_V) == 0) MM_ERR (PR_PTNV);       /* spte TNV? */
        stlb[tbi].tag = vpn;                            /* set stlb tag */
        stlb[tbi].pte = cvtacc[PTE_GETACC (pte)] |
            ((pte << VA_N_OFF) & TLB_PFN);              /* set stlb data */
        }
    ptead = (stlb[tbi].pte & TLB_PFN) | VA_GETOFF (ptead);
    }
pte = ReadL (ptead);                                    /* read pte */
tlbpte = cvtacc[PTE_GETACC (pte)] |                     /* cvt access */
    ((pte << VA_N_OFF) & TLB_PFN);                      /* set addr */
if ((tlbpte & acc) == 0) MM_ERR (PR_ACV);               /* chk access */
if ((pte & PTE_V) == 0) MM_ERR (PR_TNV);                /* check valid */
if (acc & TLB_WACC) {                                   /* write? */
    if ((pte & PTE_M) == 0) WriteL (ptead, pte | PTE_M);
    tlbpte = tlbpte | TLB_M;                            /* set M */
    }
vpn = VA_GETVPN (va);
tbi = VA_GETTBI (vpn);
if ((va & VA_S0) == 0) {                                /* process space? */
    ptlb[tbi].tag = vpn;                                /* store tlb ent */
    ptlb[tbi].pte = tlbpte;
    return ptlb[tbi];
    }
stlb[tbi].tag = vpn;                                    /* system space */
stlb[tbi].pte = tlbpte;                                 /* store tlb ent */
return stlb[tbi];
}

/* Utility routines */

extern void set_map_reg (void)
{
d_p0br = P0BR & ~03;
d_p1br = (P1BR - 0x800000) & ~03;                       /* VA<30> >> 7 */
d_sbr = (SBR - 0x1000000) & ~03;                        /* VA<31> >> 7 */
d_p0lr = (P0LR << 2);
d_p1lr = (P1LR << 2) + 0x800000;                        /* VA<30> >> 7 */
d_slr = (SLR << 2) + 0x1000000;                         /* VA<31> >> 7 */
return;
}
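
/* The bias constants fold the region select bits into fill's page table
   index: ptidx = (va >> 7) & ~03 carries VA<30> as 0x800000 and VA<31> as
   0x1000000, so pre-subtracting those values from P1BR and SBR (and adding
   them to the shifted P1LR and SLR) lets fill use the same ptidx for all
   three regions without further masking. */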

/* Zap process (0) or whole (1) tb */

void zap_tb (int stb)
{
int32 i;

for (i = 0; i < VA_TBSIZE; i++) {
    ptlb[i].tag = ptlb[i].pte = -1;
    if (stb) stlb[i].tag = stlb[i].pte = -1;
    }
return;
}

/* Zap single tb entry corresponding to va */

void zap_tb_ent (uint32 va)
{
int32 tbi = VA_GETTBI (VA_GETVPN (va));

if (va & VA_S0) stlb[tbi].tag = stlb[tbi].pte = -1;
else ptlb[tbi].tag = ptlb[tbi].pte = -1;
return;
}

/* Check for tlb entry corresponding to va */

t_bool chk_tb_ent (uint32 va)
{
int32 vpn = VA_GETVPN (va);
int32 tbi = VA_GETTBI (vpn);
TLBENT xpte;

xpte = (va & VA_S0)? stlb[tbi]: ptlb[tbi];
if (xpte.tag == vpn) return TRUE;
return FALSE;
}

/* TLB examine */

t_stat tlb_ex (t_value *vptr, t_addr addr, UNIT *uptr, int32 sw)
{
int32 tlbn = uptr - tlb_unit;
int32 idx = (uint32) addr >> 1;

if (idx >= VA_TBSIZE) return SCPE_NXM;
if (addr & 1) *vptr = ((uint32) (tlbn? stlb[idx].pte: ptlb[idx].pte));
else *vptr = ((uint32) (tlbn? stlb[idx].tag: ptlb[idx].tag));
return SCPE_OK;
}

/* TLB deposit */

t_stat tlb_dep (t_value val, t_addr addr, UNIT *uptr, int32 sw)
{
int32 tlbn = uptr - tlb_unit;
int32 idx = (uint32) addr >> 1;

if (idx >= VA_TBSIZE) return SCPE_NXM;
if (addr & 1) {
    if (tlbn) stlb[idx].pte = (int32) val;
    else ptlb[idx].pte = (int32) val;
    }
else {
    if (tlbn) stlb[idx].tag = (int32) val;
    else ptlb[idx].tag = (int32) val;
    }
return SCPE_OK;
}

/* TLB reset */

t_stat tlb_reset (DEVICE *dptr)
{
int32 i;

for (i = 0; i < VA_TBSIZE; i++)
    stlb[i].tag = ptlb[i].tag = stlb[i].pte = ptlb[i].pte = -1;
return SCPE_OK;
}