lab1&2&osdev


Unix System Firewall

Overview

The firewall is implemented as a Linux kernel module using Netfilter hooks and communicates with user space via Netlink sockets. Below are examples of how to set up a Netfilter hook, use Netlink for kernel/user-space communication, and implement basic packet filtering and NAT functionality.

1. Netfilter Hook Example

// Filename: firewall_module.c
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv4.h>
#include <linux/ip.h>

static struct nf_hook_ops nfho;
​
// Hook function called for each packet at the registered hook point
unsigned int main_hook(void *priv,
                       struct sk_buff *skb,
                       const struct nf_hook_state *state)
{
    // Packet filtering logic goes here
    // For example, drop all outgoing packets to a specific IP
    struct iphdr *ip_header = ip_hdr(skb);
    if (ip_header && ip_header->daddr == htonl(0xC0A80001)) { // 192.168.0.1
        printk(KERN_INFO "Dropping packet to 192.168.0.1\n");
        return NF_DROP;
    }
    return NF_ACCEPT;
}
​
// Module initialization
static int __init firewall_init(void)
{
    nfho.hook = main_hook;
    nfho.hooknum = NF_INET_POST_ROUTING; // Hook point
    nfho.pf = PF_INET;
    nfho.priority = NF_IP_PRI_FIRST;
    nf_register_net_hook(&init_net, &nfho);
    printk(KERN_INFO "Firewall module loaded.\n");
    return 0;
}
​
// Module cleanup
static void __exit firewall_exit(void)
{
    nf_unregister_net_hook(&init_net, &nfho);
    printk(KERN_INFO "Firewall module unloaded.\n");
}
​
module_init(firewall_init);
module_exit(firewall_exit);
​
MODULE_LICENSE("GPL");

Explanation:

  • nf_hook_ops: A structure that defines the hook function and where it hooks into the network stack.
  • main_hook: The function called for each packet at the specified hook point. Here it checks whether the destination IP is 192.168.0.1 and drops the packet if so; a port-based variant is sketched after this list.
  • nf_register_net_hook: Registers the hook with the kernel.
  • nf_unregister_net_hook: Unregisters the hook when the module is unloaded.
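
The same hook can also match transport-layer fields. For example, a sketch of an extra check inside main_hook that drops TCP traffic to a particular port (the port number is illustrative, and <linux/tcp.h> must be included):

// Additional check inside main_hook(): drop TCP packets destined to port 8080
if (ip_header && ip_header->protocol == IPPROTO_TCP) {
    struct tcphdr *tcp_header = tcp_hdr(skb);
    if (tcp_header && tcp_header->dest == htons(8080)) {
        printk(KERN_INFO "Dropping TCP packet to port 8080\n");
        return NF_DROP;
    }
}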

2. Netlink Socket Communication

// Filename: netlink_kernel_module.c
#include <linux/module.h>
#include <net/sock.h>
#include <linux/netlink.h>

#define NETLINK_USER 31
​
struct sock *nl_sk = NULL;
​
// Function to receive messages from user space
static void nl_recv_msg(struct sk_buff *skb)
{
    struct nlmsghdr *nlh;
    int pid;
    struct sk_buff *skb_out;
    char *msg = "Kernel received your message";
    int msg_size = strlen(msg);
    int res;
​
    nlh = (struct nlmsghdr *)skb->data;
    printk(KERN_INFO "Netlink received msg payload:%s\n", (char *)nlmsg_data(nlh));
    pid = nlh->nlmsg_pid; // PID of sending process
​
    skb_out = nlmsg_new(msg_size, 0);
    if (!skb_out) {
        printk(KERN_ERR "Failed to allocate new skb\n");
        return;
    }
    nlh = nlmsg_put(skb_out, 0, 0, NLMSG_DONE, msg_size, 0);
    NETLINK_CB(skb_out).dst_group = 0; // Not in multicast group
    strncpy(nlmsg_data(nlh), msg, msg_size);
​
    res = nlmsg_unicast(nl_sk, skb_out, pid);
    if (res < 0)
        printk(KERN_INFO "Error while sending back to user\n");
}
​
static int __init netlink_init(void)
{
    struct netlink_kernel_cfg cfg = {
        .input = nl_recv_msg, // Set callback function
    };
​
    nl_sk = netlink_kernel_create(&init_net, NETLINK_USER, &cfg);
    if (!nl_sk) {
        printk(KERN_ALERT "Error creating Netlink socket.\n");
        return -10;
    }
    printk(KERN_INFO "Netlink socket created.\n");
    return 0;
}
​
static void __exit netlink_exit(void)
{
    netlink_kernel_release(nl_sk);
    printk(KERN_INFO "Netlink socket released.\n");
}
​
module_init(netlink_init);
module_exit(netlink_exit);
​
MODULE_LICENSE("GPL");

Explanation:

  • NETLINK_USER: A protocol number for custom Netlink protocols.
  • nl_recv_msg: The callback function that processes incoming messages from user space.
  • netlink_kernel_create: Creates a Netlink socket bound to the specified protocol.
  • nlmsg_unicast: Sends a message back to the user-space process.

3. User-Space Application

// Filename: netlink_user.c
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <linux/netlink.h>
#include <sys/socket.h>
#include <stdlib.h>

#define NETLINK_USER 31
​
int main()
{
    struct sockaddr_nl src_addr, dest_addr;
    struct nlmsghdr *nlh = NULL;
    struct iovec iov;
    int sock_fd;
    struct msghdr msg;
    char *message = "Hello from user space";
​
    sock_fd = socket(PF_NETLINK, SOCK_RAW, NETLINK_USER);
    if (sock_fd < 0)
        return -1;
​
    memset(&src_addr, 0, sizeof(src_addr));
    src_addr.nl_family = AF_NETLINK;
    src_addr.nl_pid = getpid(); // Our own PID, so the kernel can reply to us
    bind(sock_fd, (struct sockaddr *)&src_addr, sizeof(src_addr));

    memset(&dest_addr, 0, sizeof(dest_addr));
    dest_addr.nl_family = AF_NETLINK;
    dest_addr.nl_pid = 0;    // Destination is the kernel
    dest_addr.nl_groups = 0; // Unicast, no multicast group
​
    nlh = (struct nlmsghdr *)malloc(NLMSG_SPACE(1024));
    memset(nlh, 0, NLMSG_SPACE(1024));
    nlh->nlmsg_len = NLMSG_SPACE(1024);
    nlh->nlmsg_pid = getpid();
    nlh->nlmsg_flags = 0;
    strcpy(NLMSG_DATA(nlh), message);
​
    iov.iov_base = (void *)nlh;
    iov.iov_len = nlh->nlmsg_len;
    memset(&msg, 0, sizeof(msg));
    msg.msg_name = (void *)&dest_addr;
    msg.msg_namelen = sizeof(dest_addr);
    msg.msg_iov = &iov;
    msg.msg_iovlen = 1;
​
    printf("Sending message to kernel: %s\n", message);
    sendmsg(sock_fd, &msg, 0);
​
    // Read message from kernel
    recvmsg(sock_fd, &msg, 0);
    printf("Received message from kernel: %s\n", (char *)NLMSG_DATA(nlh));
    close(sock_fd);
    return 0;
}

Explanation:

  • Creates a Netlink socket to communicate with the kernel module.
  • Sends a message to the kernel and waits for a response.

4. Implementing NAT Functionality

In the kernel module, you can manipulate packets to perform NAT:

#include <linux/netfilter.h>
#include <linux/netfilter_ipv4.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <net/checksum.h>

// Hook function performing simple source NAT
unsigned int nat_hook(void *priv,
                      struct sk_buff *skb,
                      const struct nf_hook_state *state)
{
    struct iphdr *iph;
    struct tcphdr *tcph;
​
    if (!skb)
        return NF_ACCEPT;
​
    iph = ip_hdr(skb);
    if (iph->protocol == IPPROTO_TCP) {
        tcph = tcp_hdr(skb);
​
        // Example: Change source IP address
        if (iph->saddr == htonl(0xC0A80002)) { // If source IP is 192.168.0.2
            iph->saddr = htonl(0x0A000001);    // Change to 10.0.0.1
            // Recalculate checksums
            iph->check = 0;
            iph->check = ip_fast_csum(iph, iph->ihl);
            skb->ip_summed = CHECKSUM_UNNECESSARY;
            printk(KERN_INFO "NAT applied to packet\n");
        }
    }
    return NF_ACCEPT;
}

Explanation:

  • nat_hook: Modifies the source IP address of matching packets to perform source NAT.
  • Recalculates the IP header checksum after the modification.
  • For TCP (and UDP), the transport-layer checksum also covers the IP addresses via the pseudo-header and must be updated as well, as sketched below.
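
A minimal sketch of that TCP checksum update, using the in-kernel checksum helpers and assuming a linear (non-paged) skb, placed inside the branch that rewrites iph->saddr:

// Sketch: recompute the TCP checksum after changing iph->saddr
// (assumes the TCP header and payload are linear in the skb)
int tcplen = ntohs(iph->tot_len) - iph->ihl * 4;

tcph->check = 0;
tcph->check = csum_tcpudp_magic(iph->saddr, iph->daddr, tcplen,
                                IPPROTO_TCP,
                                csum_partial((char *)tcph, tcplen, 0));
skb->ip_summed = CHECKSUM_NONE; // the packet now carries valid checksums

An alternative is the incremental helper csum_replace4(), which patches the existing IP and TCP checksums from the old and new address values instead of recomputing them.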

NachOS Operating System Enhancement

Overview

NachOS is an instructional operating system used in operating systems courses. Below are code examples implementing system calls, scheduling algorithms, synchronization primitives, and memory management enhancements.

1. Implementing a System Call (e.g., Read and Write)

In userprog/exception.cc:

void ExceptionHandler(ExceptionType which) {
    int type = machine->ReadRegister(2);
​
    if (which == SyscallException) {
        switch(type) {
            case SC_Read:
                {
                    int bufferAddr = machine->ReadRegister(4);
                    int size = machine->ReadRegister(5);
                    OpenFileId id = machine->ReadRegister(6);
                    int numRead = SynchConsoleRead(bufferAddr, size, id);
                    machine->WriteRegister(2, numRead);
                    IncrementPC();
                    break;
                }
            case SC_Write:
                {
                    int bufferAddr = machine->ReadRegister(4);
                    int size = machine->ReadRegister(5);
                    OpenFileId id = machine->ReadRegister(6);
                    SynchConsoleWrite(bufferAddr, size, id);
                    IncrementPC();
                    break;
                }
            // Handle other system calls...
            default:
                printf("Unexpected syscall %d\n", type);
                ASSERT(false);
        }
    }
    // Handle other exceptions...
}

Explanation:

  • ExceptionHandler handles exceptions and system calls.
  • Reads arguments from registers and calls appropriate functions.
  • After handling, increments the PC (via an IncrementPC() helper such as the one sketched below) so the same syscall is not executed again.
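
IncrementPC() is not part of stock NachOS; a typical helper (a sketch using the standard PCReg, NextPCReg, and PrevPCReg register names from machine.h) advances all three program-counter registers:

// Sketch: advance the simulated MIPS program counter past the syscall instruction
static void IncrementPC()
{
    int pc = machine->ReadRegister(PCReg);
    machine->WriteRegister(PrevPCReg, pc);
    pc = machine->ReadRegister(NextPCReg);
    machine->WriteRegister(PCReg, pc);
    machine->WriteRegister(NextPCReg, pc + 4);
}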

2. Implementing FCFS and Priority Scheduling

In threads/scheduler.h:

class Scheduler {
  public:
    Scheduler();          // Initialize list of ready threads
    ~Scheduler();         // De-allocate ready list
​
    void ReadyToRun(Thread* thread);  // Thread can be dispatched.
    Thread* FindNextToRun();          // Dequeue first thread on the ready list
    void Run(Thread* nextThread);     // Cause nextThread to start running
    void Print();                     // Print contents of ready list
​
  private:
    // For FCFS scheduling, use a simple list
    List<Thread *> *readyList;
};

In threads/scheduler.cc:

Thread* Scheduler::FindNextToRun()
{
    return readyList->RemoveFront();
}

For Priority Scheduling:

  • Modify Thread class to include a priority attribute.
  • Use a priority queue instead of a simple list.

In threads/thread.h:

class Thread {
  public:
    int getPriority() const { return priority; }
    void setPriority(int p) { priority = p; }
    // ...
​
  private:
    int priority;
};

In threads/scheduler.cc:

void Scheduler::ReadyToRun(Thread* thread)
{
    readyList->InsertSorted(thread, ThreadPriorityComp);
}
​
Thread* Scheduler::FindNextToRun()
{
    return readyList->RemoveFront();
}
​
// Comparison function for sorting
int ThreadPriorityComp(Thread* a, Thread* b)
{
    return b->getPriority() - a->getPriority(); // Higher priority first
}

3. Synchronization Primitives (Semaphore, Lock, Condition Variable)

Semaphore Implementation:

In threads/synch.h:

class Semaphore {
  public:
    Semaphore(const char* debugName, int initialValue);
    ~Semaphore();
​
    void P(); // Wait (decrement)
    void V(); // Signal (increment)
​
  private:
    const char* name;
    int value;
    List<Thread *> *queue;
};

In threads/synch.cc:

Semaphore::Semaphore(const char* debugName, int initialValue)
{
    name = debugName;
    value = initialValue;
    queue = new List<Thread *>;
}
​
void Semaphore::P()
{
    IntStatus oldLevel = interrupt->SetLevel(IntOff); // Disable interrupts
    value--;
    if (value < 0) {
        queue->Append(currentThread);
        currentThread->Sleep();
    }
    (void) interrupt->SetLevel(oldLevel); // Re-enable interrupts
}
​
void Semaphore::V()
{
    IntStatus oldLevel = interrupt->SetLevel(IntOff);
    value++;
    if (value <= 0) {
        Thread* thread = queue->RemoveFront();
        scheduler->ReadyToRun(thread);
    }
    (void) interrupt->SetLevel(oldLevel);
}

Explanation:

  • P() operation decrements the semaphore; if the value is less than zero, the thread blocks.
  • V() operation increments the semaphore; if there are waiting threads, it wakes one up.
  • Interrupts are disabled during these operations to prevent race conditions. (A Lock built on this Semaphore is sketched below.)
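
The Lock declared alongside Semaphore in synch.h can be layered on top of it. A minimal sketch (the owner bookkeeping and isHeldByCurrentThread follow the usual NachOS interface, but treat this as an illustration rather than the stock implementation):

class Lock {
  public:
    Lock(const char* debugName)
        : name(debugName), sem(new Semaphore(debugName, 1)), owner(NULL) {}
    ~Lock() { delete sem; }

    void Acquire() { sem->P(); owner = currentThread; }
    void Release() {
        ASSERT(owner == currentThread); // only the holder may release
        owner = NULL;
        sem->V();
    }
    bool isHeldByCurrentThread() { return owner == currentThread; }

  private:
    const char* name;
    Semaphore* sem; // binary semaphore providing the mutual exclusion
    Thread* owner;  // thread currently holding the lock
};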

4. Memory Management with TLB and Page Replacement Algorithms

Implementing TLB Management in machine/translate.cc:

void Machine::Translate(unsigned int virtAddr, unsigned int* physAddr, int size, bool writing)
{
    int vpn = (unsigned) virtAddr / PageSize;
    int offset = (unsigned) virtAddr % PageSize;
​
    TranslationEntry* entry = NULL;
​
    // First, check TLB
    for (int i = 0; i < TLBSize; i++) {
        if (tlb[i].valid && tlb[i].virtualPage == vpn) {
            entry = &tlb[i]; // TLB hit
            break;
        }
    }
​
    if (entry == NULL) {
        // TLB miss, handle page fault
        RaiseException(PageFaultException, virtAddr);
        return;
    }
​
    if (entry->readOnly && writing)
        RaiseException(ReadOnlyException, virtAddr);
​
    *physAddr = entry->physicalPage * PageSize + offset;
    ASSERT(*physAddr >= 0 && ((*physAddr + size) <= MemorySize));
}
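
When Translate raises PageFaultException on a TLB miss, the exception handler has to refill the TLB from the page table. A minimal sketch, assuming the faulting virtual address is passed to the handler and the current address space exposes its linear page table (the round-robin victim choice is illustrative):

// Sketch: refill one TLB slot from the page table after a TLB miss
void HandleTLBMiss(unsigned int badVAddr)
{
    static int nextSlot = 0;                 // round-robin replacement in the TLB
    unsigned int vpn = badVAddr / PageSize;

    machine->tlb[nextSlot] = currentThread->space->pageTable[vpn];
    nextSlot = (nextSlot + 1) % TLBSize;
}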

Implementing Page Replacement Algorithms (e.g., LRU, CLOCK):

  • Maintain data structures to keep track of page usage (e.g., a counter or a clock hand).
  • On a page fault, select the page to replace based on the algorithm.

Example for LRU:

// In page table entry
class TranslationEntry {
  public:
    // ...
    int lastUsedTime;
    // ...
};
​
// Update lastUsedTime on each access
void UpdateLastUsedTime(TranslationEntry* entry)
{
    entry->lastUsedTime = stats->totalTicks;
}
​
// On page fault, select the page with the oldest lastUsedTime
TranslationEntry* SelectLRUPage()
{
    TranslationEntry* victim = NULL;
    int oldestTime = stats->totalTicks;
​
    for (int i = 0; i < NumPhysPages; i++) {
        if (machine->pageTable[i].valid && machine->pageTable[i].lastUsedTime < oldestTime) {
            oldestTime = machine->pageTable[i].lastUsedTime;
            victim = &machine->pageTable[i];
        }
    }
    return victim;
}

Explanation:

  • Each page table entry keeps track of lastUsedTime.
  • On each memory access, lastUsedTime is updated.
  • When selecting a page to replace, choose the one with the oldest lastUsedTime.
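
For the CLOCK (second-chance) algorithm mentioned above, a sketch that reuses the use bit already present in TranslationEntry (the static clock hand is an illustrative addition; it assumes at least one valid entry exists, otherwise a free frame is available and no victim is needed):

// Sketch: CLOCK replacement - sweep a hand, give recently used pages a second chance
TranslationEntry* SelectClockVictim()
{
    static int hand = 0;
    while (true) {
        TranslationEntry* entry = &machine->pageTable[hand];
        hand = (hand + 1) % NumPhysPages;
        if (!entry->valid)
            continue;            // skip invalid entries
        if (entry->use)
            entry->use = false;  // recently used: clear the bit and move on
        else
            return entry;        // not recently used: evict this page
    }
}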

Note: Implementing these algorithms requires careful synchronization and consideration of the simulated environment in NachOS.


These code snippets provide a foundation for the functionalities described in your projects. They can be expanded and adapted according to the specific requirements and features you aim to implement.

Disclaimer: The provided code is for educational purposes and may need adjustments to fit into your specific project structure.

Ensure you handle all necessary error checking and edge cases in your actual implementation.


1. Computer Architecture Overview

Computer architecture studies the structure, organization, and implementation of computer systems. Its main concerns are:

  • Instruction Set Architecture (ISA): the instruction set, data types, registers, addressing modes, and so on; the interface between software and hardware.
  • Microarchitecture: the hardware design that implements the ISA, including the processor pipeline, caches, branch prediction, and similar mechanisms.
  • System architecture: the design of the whole computing system, such as multi-core and multiprocessor systems and the memory hierarchy.

2. Instruction Set Simulator Example

To understand computer architecture in depth, it helps to write a simple instruction set simulator. The following code simulates the execution of a small instruction set.

Example: simulating a simple RISC-style instruction set

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define MEM_SIZE 256   // Memory size
#define REG_COUNT 8    // Number of registers
​
// Opcodes
typedef enum {
    NOP = 0x00,   // No operation
    ADD = 0x01,   // Addition
    SUB = 0x02,   // Subtraction
    LOAD = 0x03,  // Load from memory
    STORE = 0x04, // Store to memory
    JMP = 0x05,   // Unconditional jump
    JMPZ = 0x06,  // Jump if zero
    HALT = 0xFF   // Halt
} OpCode;
​
// Instruction format
typedef struct {
    OpCode op;
    uint8_t rd; // Destination register
    uint8_t rs; // Source register 1
    uint8_t rt; // Source register 2 or immediate
} Instruction;
​
// Simulator state
typedef struct {
    uint8_t memory[MEM_SIZE];     // Memory
    uint8_t registers[REG_COUNT]; // Registers
    uint16_t pc;                  // Program counter
    bool running;                 // Whether the CPU is running
} CPUState;
​
// Fetch the next instruction
Instruction fetch(CPUState *state) {
    Instruction inst;
    uint8_t *mem = state->memory;
    inst.op = mem[state->pc++];
    inst.rd = mem[state->pc++];
    inst.rs = mem[state->pc++];
    inst.rt = mem[state->pc++];
    return inst;
}
​
// Execute one instruction
void execute(CPUState *state, Instruction inst) {
    switch (inst.op) {
        case NOP:
            // Do nothing
            break;
        case ADD:
            state->registers[inst.rd] = state->registers[inst.rs] + state->registers[inst.rt];
            break;
        case SUB:
            state->registers[inst.rd] = state->registers[inst.rs] - state->registers[inst.rt];
            break;
        case LOAD:
            state->registers[inst.rd] = state->memory[state->registers[inst.rs] + inst.rt];
            break;
        case STORE:
            state->memory[state->registers[inst.rs] + inst.rt] = state->registers[inst.rd];
            break;
        case JMP:
            state->pc = inst.rd | (inst.rs << 8);
            break;
        case JMPZ:
            if (state->registers[inst.rd] == 0) {
                state->pc = inst.rs | (inst.rt << 8);
            }
            break;
        case HALT:
            state->running = false;
            break;
        default:
            printf("未知操作码: %02X\n", inst.op);
            state->running = false;
            break;
    }
}
​
// Main simulation loop
void run(CPUState *state) {
    state->running = true;
    while (state->running) {
        Instruction inst = fetch(state);
        execute(state, inst);
    }
}
​
int main() {
    CPUState cpu = {0};
    cpu.pc = 0;
​
    // Program: load two values, add them, store the result (a simple addition)
    uint8_t program[] = {
        LOAD, 0, 3, 100,   // Load memory[100] into register 0 (register 3 stays 0 and serves as the base)
        LOAD, 1, 3, 101,   // Load memory[101] into register 1
        ADD, 2, 0, 1,      // register 0 + register 1, result into register 2
        STORE, 2, 3, 102,  // Store register 2 to memory address 102
        HALT, 0, 0, 0      // Halt
    };

    // Load the program into memory starting at address 0
    for (int i = 0; i < sizeof(program); i++) {
        cpu.memory[i] = program[i];
    }

    // Initialize the input data (placed past the program so it is not overwritten)
    cpu.memory[100] = 5;    // First operand
    cpu.memory[101] = 10;   // Second operand
​
    // Run the simulator
    run(&cpu);
​
    // Print the result
    printf("Result at memory[102]: %d\n", cpu.memory[102]);
    return 0;
}

Expected output:

Result at memory[102]: 15

Explanation:

  • The program simulates a simple CPU executing an addition.
  • The instruction set includes basic arithmetic and memory operations as well as control-flow instructions (a loop example using JMP/JMPZ is sketched below).
  • The program loads two numbers (5 and 10) from memory into registers, adds them, and stores the result back to memory.
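
To exercise the control-flow instructions as well, the program array in main() could be swapped for a small loop. A sketch (same encoding; it sums 3 + 2 + 1 into memory[102], assuming cpu.memory[100] = 3 and cpu.memory[101] = 1 are initialized after the program is loaded):

// Sketch: loop program using SUB/JMPZ/JMP to compute 3 + 2 + 1
uint8_t loop_program[] = {
    LOAD,  0, 2, 100,   // pc 0:  r0 = memory[100]  (loop counter = 3; r2 stays 0)
    LOAD,  1, 2, 101,   // pc 4:  r1 = memory[101]  (constant 1)
    ADD,   3, 3, 0,     // pc 8:  r3 += r0          (accumulate)
    SUB,   0, 0, 1,     // pc 12: r0 -= r1
    JMPZ,  0, 24, 0,    // pc 16: if r0 == 0, jump to pc 24
    JMP,   8, 0, 0,     // pc 20: jump back to pc 8
    STORE, 3, 2, 102,   // pc 24: memory[102] = r3  (= 6)
    HALT,  0, 0, 0      // pc 28: stop
};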

3. Pipelined Processor Simulation

The Pipeline Concept

In computer architecture, pipelining is used to raise a processor's instruction throughput. The pipeline divides instruction execution into stages, and each stage can work in parallel with the others.

Example: a simple pipeline simulation

Below is a simple five-stage pipeline simulation with the stages instruction fetch (IF), decode (ID), execute (EX), memory access (MEM), and write-back (WB).

# Filename: pipeline_simulator.py

class Instruction:
    def __init__(self, name, rd=None, rs=None, rt=None):
        self.name = name
        self.rd = rd
        self.rs = rs
        self.rt = rt
​
class PipelineStage:
    def __init__(self, name):
        self.name = name
        self.instruction = None

class PipelineSimulator:
    def __init__(self, instructions):
        self.instructions = instructions
        self.registers = [0] * 8
        self.cycles = 0
        # Initialize the five pipeline stages
        self.IF = PipelineStage("IF")
        self.ID = PipelineStage("ID")
        self.EX = PipelineStage("EX")
        self.MEM = PipelineStage("MEM")
        self.WB = PipelineStage("WB")
​
    def step(self):
        # Write-back stage
        if self.WB.instruction:
            inst = self.WB.instruction
            if inst.name == 'ADD':
                self.registers[inst.rd] = self.registers[inst.rs] + self.registers[inst.rt]
            print(f"WB: 写回 {inst.name} 到寄存器 {inst.rd}")
​
        # Advance instructions to the next stage
        self.WB.instruction = self.MEM.instruction
        self.MEM.instruction = self.EX.instruction
        self.EX.instruction = self.ID.instruction
        self.ID.instruction = self.IF.instruction
​
        # Fetch stage
        if self.instructions:
            self.IF.instruction = self.instructions.pop(0)
            print(f"IF: 取指 {self.IF.instruction.name}")
        else:
            self.IF.instruction = None
​
        self.cycles += 1
​
    def run(self):
        while any([self.IF.instruction, self.ID.instruction, self.EX.instruction, self.MEM.instruction, self.WB.instruction]) or self.instructions:
            self.step()
            print(f"Cycle {self.cycles} completed.\n")
​
# Example instruction sequence
instructions = [
    Instruction('ADD', rd=0, rs=1, rt=2),
    Instruction('ADD', rd=3, rs=0, rt=4),
    Instruction('ADD', rd=5, rs=3, rt=6),
]
​
simulator = PipelineSimulator(instructions)
simulator.run()

Explanation:

  • This simulates a basic pipeline in which instructions advance through the stages cycle by cycle.
  • Only the basic flow is shown; data hazards and control hazards are not handled (a hazard check is sketched below).
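
In a pipeline where operands are read in the ID/EX stages (rather than deferred to WB as in the simplified model above), an instruction can need a register that an older instruction has not written back yet: a read-after-write (RAW) hazard. Without forwarding, the usual fix is to stall. A minimal sketch of the check, assuming the Instruction class above (needs_stall is an illustrative name):

def needs_stall(id_inst, ex_inst, mem_inst):
    """Return True if the instruction in ID must stall because one of its
    source registers is written by an older instruction still in EX or MEM."""
    if id_inst is None:
        return False
    pending_writes = {i.rd for i in (ex_inst, mem_inst) if i is not None}
    return id_inst.rs in pending_writes or id_inst.rt in pending_writes

# Example: the second ADD reads register 0, which the first ADD has not written back yet
i1 = Instruction('ADD', rd=0, rs=1, rt=2)
i2 = Instruction('ADD', rd=3, rs=0, rt=4)
print(needs_stall(id_inst=i2, ex_inst=i1, mem_inst=None))  # True -> insert a bubble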

4. Cache Simulation

Caches are an important part of computer architecture, bridging the speed gap between the CPU and main memory. Below is a simple simulation of a direct-mapped cache.

Example: a direct-mapped cache

# Filename: cache_simulator.py

class DirectMappedCache:
    def __init__(self, cache_size, block_size, memory_size):
        self.cache_size = cache_size
        self.block_size = block_size
        self.num_blocks = cache_size // block_size
        self.cache = [None] * self.num_blocks
        self.memory = [i for i in range(memory_size)]
​
    def access(self, address):
        block_number = address // self.block_size
        index = block_number % self.num_blocks
        tag = block_number // self.num_blocks
​
        if self.cache[index] == tag:
            print(f"Cache Hit at address {address}")
            return True
        else:
            print(f"Cache Miss at address {address}")
            self.cache[index] = tag
            return False

# Example usage
cache = DirectMappedCache(cache_size=32, block_size=8, memory_size=256)
​
addresses = [0, 8, 16, 24, 32, 0, 8, 64, 72, 80]
​
for addr in addresses:
    cache.access(addr)

Explanation:

  • A direct-mapped cache maps each memory address to exactly one fixed cache slot.
  • When the referenced block is not in the cache, a cache miss occurs and the block is loaded into that slot, evicting whatever was there.
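
As a concrete example with the parameters above (block_size=8, so num_blocks=4): address 72 belongs to block 72 // 8 = 9, which maps to index 9 % 4 = 1 with tag 9 // 4 = 2. Address 8 maps to the same index 1 but with tag 0, so in the access trace the reference to 72 evicts the block that was holding address 8.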

5. Multithreading and Synchronization

On multi-core processors, multithreading and synchronization mechanisms are essential. The example below synchronizes threads with semaphores and a lock; a condition-variable variant is sketched after the explanation.

Example: the producer-consumer problem

import threading
import time
import random
​
buffer = []
buffer_size = 5
​
empty = threading.Semaphore(buffer_size)
full = threading.Semaphore(0)
mutex = threading.Lock()
​
def producer():
    while True:
        item = random.randint(1, 100)
        empty.acquire()
        mutex.acquire()
        buffer.append(item)
        print(f"Produced: {item}")
        mutex.release()
        full.release()
        time.sleep(random.random())
​
def consumer():
    while True:
        full.acquire()
        mutex.acquire()
        item = buffer.pop(0)
        print(f"Consumed: {item}")
        mutex.release()
        empty.release()
        time.sleep(random.random())
​
prod_thread = threading.Thread(target=producer)
cons_thread = threading.Thread(target=consumer)
​
prod_thread.start()
cons_thread.start()

Explanation:

  • The semaphores empty and full track the free and occupied slots in the buffer.
  • The mutex protects access to the buffer and prevents data races.
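
The same producer-consumer structure can also be written with a condition variable instead of the two counting semaphores. A sketch using threading.Condition (the daemon threads and the three-second demo window are illustrative choices so the script terminates on its own):

import threading
import time
import random

buffer = []
buffer_size = 5
cond = threading.Condition()  # one lock plus wait/notify

def producer():
    while True:
        item = random.randint(1, 100)
        with cond:
            while len(buffer) >= buffer_size:  # buffer full: wait
                cond.wait()
            buffer.append(item)
            print(f"Produced: {item}")
            cond.notify_all()                  # wake any waiting consumer
        time.sleep(random.random())

def consumer():
    while True:
        with cond:
            while not buffer:                  # buffer empty: wait
                cond.wait()
            item = buffer.pop(0)
            print(f"Consumed: {item}")
            cond.notify_all()                  # wake any waiting producer
        time.sleep(random.random())

threading.Thread(target=producer, daemon=True).start()
threading.Thread(target=consumer, daemon=True).start()
time.sleep(3)  # let the demo run briefly, then exit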

6. Modern Architecture Features

Branch Prediction

To keep the pipeline full, modern processors use branch prediction to guess which path a program will take.

Example: a two-bit saturating counter branch predictor

class BranchPredictor:
    def __init__(self):
        # Counter states: 0 strongly not taken, 1 weakly not taken, 2 weakly taken, 3 strongly taken
        self.counter = 2  # Start at weakly taken

    def predict(self):
        return self.counter >= 2  # Predict taken when the counter is 2 or 3
​
    def update(self, taken):
        if taken:
            if self.counter < 3:
                self.counter += 1
        else:
            if self.counter > 0:
                self.counter -= 1

# Example usage
bp = BranchPredictor()
branches = [True, True, False, True, False, False, True]
​
for idx, taken in enumerate(branches):
    prediction = bp.predict()
    print(f"Branch {idx}: Prediction: {'Taken' if prediction else 'Not Taken'}, Actual: {'Taken' if taken else 'Not Taken'}")
    bp.update(taken)

Explanation:

  • A two-bit saturating counter predicts whether a branch will be taken.
  • The predictor updates its internal state from the actual branch outcomes to improve prediction accuracy.
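
On the example sequence above, the predictor starts at weakly taken and is correct for branches 0, 1, and 3 (3 of 7). The value of the two-bit hysteresis shows up on longer runs of identical outcomes: a single mispredicted branch moves the counter only one step, so it does not immediately flip the prediction direction.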