Node Basics -- http server


I. The http service

1. http and https

const http = require('http');  // create a local server to receive requests
const server = http.createServer((req, res) => {  
    res.writeHead(200, { 'Content-Type': 'application/json' });   
    res.end(JSON.stringify({data: 'Hello World!'})); 
});  
server.listen(8000);

server.listen(port, [host], [callback]) // the callback fires once the server is listening

Generally, inside a container we do not specify a host, so the server accepts requests arriving on any network interface.
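A minimal sketch of the difference (the port number is just an example):

const http = require('http');
const server = http.createServer((req, res) => res.end('ok'));

// only reachable via the loopback interface:
// server.listen(8000, '127.0.0.1');

// no host given: accepts connections on every interface,
// which is usually what we want inside a container
server.listen(8000, () => console.log('listening on', server.address()));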

const https = require('node:https');
const fs = require('node:fs');

const options = {
  key: fs.readFileSync('test/fixtures/keys/agent2-key.pem'),
  cert: fs.readFileSync('test/fixtures/keys/agent2-cert.pem'),
};

https.createServer(options, (req, res) => {
  res.writeHead(200);
  res.end('hello world\n');
}).listen(8000);

2. http requests

const http = require('http');
// http.get(options,cb) or http.get(url, options,cb)
// http.request(options, [cb]) or http.request(url, [options],[cb])

const req = http.request({
    host: 'xxx',
    path: '/aaa?bb=1',
    method: 'POST',
    headers: {},
}, res => {
    console.log(`STATUS: ${res.statusCode}`);   
    console.log(`HEADERS: ${JSON.stringify(res.headers)}`);          
    res.setEncoding('utf8');  
    res.on('data', (chunk) => {     
        console.log(`BODY: ${chunk}`);   
    });   
    res.on('end', () => {     
        console.log('No more data in response.');   
    });
})

req.write(data)
req.end() // send the request

Note: if you use http.get, it calls req.end automatically.
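For comparison, a minimal http.get sketch (the URL is only a placeholder); there is no need to call req.end yourself:

const http = require('http');

http.get('http://example.com/aaa?bb=1', (res) => {
    let body = '';
    res.setEncoding('utf8');
    res.on('data', (chunk) => { body += chunk; });
    res.on('end', () => console.log(`STATUS: ${res.statusCode}, BODY: ${body}`));
}).on('error', (err) => console.error(err));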

II. How Koa works

1. Basic usage

const Koa = require('koa')
const app = new Koa();
app.use(async (ctx, next) => {
    try {
       await next()
    } catch(e) {
       report(e)
       ctx.body = {code: -1, error_msg: 'xxxx'}
    }
})
app.use(koaCompress(opt)) // gzip the response stream
app.use(koabody(opt)) // parses JSON bodies and file uploads; populates ctx.request.body (and ctx.request.files)
app.use(kcors(opt)) // CORS headers
app.use(router.routes()) // koa-router routes; route guards inside the router handle permission checks
app.use(router.allowedMethods()) // responds to OPTIONS requests and sets the Allow header

app.listen(port, () => {
    console.log(`server listening on ${port}`)
})

process.on('uncaughtException', (err) => {
    report(err)
});

2. Reading the source

● application.js

let http = require('http');
let compose = require('koa-compose'); // or the hand-rolled compose from the middleware section below
let context = require('./context');
let request = require('./request');
let response = require('./response');

class Application {
     constructor() {
         this.middleware = [];
         // create the prototypes; they are also the extension points exposed to users
         this.context = Object.create(context);
         this.request = Object.create(request);
         this.response = Object.create(response);
       }

      use(fn) {
           this.middleware.push(fn);
       }

      callback() {
           const fn = compose(this.middleware)  // see the middleware implementation below
           return (req, res) => {
               // a fresh ctx object is created for every incoming request
               const ctx = this.createContext(req, res);
               return this.handleRequest(ctx, fn);
            };
      }

      createContext(req, res) {
          // instantiate objects from the prototypes
          let ctx = Object.create(this.context);
          ctx.request = Object.create(this.request);
          ctx.response = Object.create(this.response);
          ctx.req = ctx.request.req = req;
          ctx.res = ctx.response.res = res;
          return ctx;
      }

      handleRequest(ctx, fn) {
          // run the composed middleware chain; respond(ctx) (Koa's helper, omitted here)
          // then writes ctx.body to the raw res, and errors go to ctx.onerror
          return fn(ctx).then(() => respond(ctx)).catch(ctx.onerror);
      }

     listen(port) {
          let server = http.createServer(this.callback());
          server.listen(port);
     }
 }

module.exports = Application;

● request.js, response.js, context.js

All three are prototype definitions; the key point is that they use delegation to proxy the methods and properties of the native Node req and res objects.

Because they are prototypes, we can set methods and properties on app.context, app.request and app.response to extend the prototypes of ctx, ctx.request and ctx.response.
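A minimal sketch of such an extension (the helper names ctx.ok and request.userAgent are made up for illustration):

const Koa = require('koa');
const app = new Koa();

// every ctx in every request now has a ctx.ok(...) helper (hypothetical)
app.context.ok = function (data) {
    this.body = { code: 0, data };
};

// every ctx.request now has a userAgent getter (hypothetical)
Object.defineProperty(app.request, 'userAgent', {
    get() { return this.get('User-Agent'); },
});

app.use(async (ctx) => {
    ctx.ok({ ua: ctx.request.userAgent });
});

app.listen(3000);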

In addition, in the context prototype Koa uses the delegates package to delegate to the native request and response objects.

When you access ctx.attachment you are actually accessing ctx.response.attachment. This bubbling delegation works roughly like this:

let proto = {};

function delegateGet(property, name) {
    proto.__defineGetter__(name, function () {
        return this[property][name];
    });
}

function delegateSet(property, name) {
   proto.__defineSetter__(name, function (val) {
       this[property][name] = val; 
   }); 
}  

// So
delegate(contextPrototype, 'response')
    .method('attachment')
// is roughly equivalent to
contextPrototype.attachment = function (...args) {
    return this.response.attachment(...args);
};
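For reference, this is roughly how Koa's own context.js uses the delegates package (a trimmed-down excerpt, not the full list):

const delegate = require('delegates');
const proto = {}; // Koa's context prototype

delegate(proto, 'response')
    .method('attachment')   // proto.attachment(...) calls this.response.attachment(...)
    .access('status')       // getter + setter forwarding to this.response.status
    .access('body');

delegate(proto, 'request')
    .method('get')          // proto.get(field) -> this.request.get(field)
    .access('query')
    .getter('ip');          // read-only delegation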

3. The middleware implementation -- the onion model

use pushes each middleware function into an array.

In handleRequest, the first function in the array is called; it calls into the second, and so on until the last one finishes, and then execution unwinds back out layer by layer (the onion). A usage sketch follows the implementation below.

So how do we implement the compose function? fn = compose(fnArr)

function compose (middlewareArr) {
  return function (context) {
    return dispatch(0)

    function dispatch (i) {
      if (i === middlewareArr.length) return Promise.resolve()
      let middlewareFn = middlewareArr[i]
      try {
        return Promise.resolve(
            middlewareFn(context, dispatch.bind(null, i + 1))
        );
      } catch (err) {
        return Promise.reject(err)
      }
    }
  }
}
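A quick sketch of the onion ordering with this compose (the log lines show how execution enters each layer and then unwinds):

const one = async (ctx, next) => {
  console.log('enter 1');
  await next();
  console.log('exit 1');
};
const two = async (ctx, next) => {
  console.log('enter 2');
  await next();
  console.log('exit 2');
};

compose([one, two])({}).then(() => console.log('done'));
// enter 1 -> enter 2 -> exit 2 -> exit 1 -> done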

III. Process management in Node

1. Child processes: the differences between spawn, exec and fork

spawn, exec and fork in Node's child_process module all create child processes, and all three return a ChildProcess instance, but they differ in the details:

● spawn: stream-based, so it suits child processes that produce a lot of output. The stdio option controls how the child's stdout and stderr are handled: pipe (the default), ignore, inherit, ipc (if the child is a Node process this is effectively fork, and messages can be exchanged via process.send and the message event), or file-descriptor numbers.

● exec: buffer-based, utf-8 by default; maxBuffer caps how much stdout/stderr is collected, which means it is not well suited to children that produce a lot of output.

● fork: a special case of child_process.spawn() dedicated to spawning new Node.js processes; it sets up an IPC channel between the child and the parent for communication.

Demos from the official docs:

const { spawn } = require('node:child_process');
const ls = spawn('ls', ['-lh', '/usr']);

ls.stdout.on('data', (data) => {
  console.log(`stdout: ${data}`);
});

ls.stderr.on('data', (data) => {
  console.error(`stderr: ${data}`);
});

ls.on('close', (code) => {
  console.log(`child process exited with code ${code}`);
});

const { exec } = require('node:child_process');
exec('cat *.js missing_file | wc -l', (error, stdout, stderr) => {
  if (error) {
    console.error(`exec error: ${error}`);
    return;
  }
  console.log(`stdout: ${stdout}`);
  console.error(`stderr: ${stderr}`);
});

if (process.argv[2] === 'child') {
  setTimeout(() => {
    console.log(`Hello from ${process.argv[2]}!`);
  }, 1_000);
} else {
  const { fork } = require('node:child_process');
  const controller = new AbortController();
  const { signal } = controller;
  const child = fork(__filename, ['child'], { signal });
  child.on('error', (err) => {
    // This will be called with err being an AbortError if the controller aborts
  });
  controller.abort(); // Stops the child process
}

2. spawnSync and execSync: synchronous child processes

spawn and exec create asynchronous child processes and do not block the parent's event loop; spawnSync and execSync create synchronous child processes and block the parent's event loop until the child exits.
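A minimal sketch of the synchronous variants (the commands are only examples); nothing else runs on the parent's event loop until each child exits:

const { execSync, spawnSync } = require('node:child_process');

// blocks until `ls -lh /usr` finishes, then returns the buffered stdout
const out = execSync('ls -lh /usr', { encoding: 'utf8' });
console.log(out);

// spawnSync returns an object with status, stdout, stderr, etc.
const result = spawnSync('node', ['--version'], { encoding: 'utf8' });
console.log(result.status, result.stdout.trim());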

3. The cluster module

import cluster from 'node:cluster';
import http from 'node:http';
import { availableParallelism } from 'node:os';
import process from 'node:process';

const numCPUs = availableParallelism();

if (cluster.isPrimary) {
  console.log(`Primary ${process.pid} is running`);

  // Fork workers.
  for (let i = 0; i < numCPUs; i++) {
    cluster.fork();
  }

  cluster.on('exit', (worker, code, signal) => {
    console.log(`worker ${worker.process.pid} died`);
    console.log('Starting a new worker'); 
    cluster.fork();
  });
} else {
  http.createServer((req, res) => {
    res.writeHead(200);
    res.end('hello world\n');
  }).listen(8000);

  console.log(`Worker ${process.pid} started`);
}

We usually use this module for process management of a Node server, mainly for stability: with the cluster module we can listen for a worker's exit event and fork a replacement, as the demo above does.

IV. How pm2 works

pm2 mainly builds on the cluster module to manage workers running the application's index.js, and its daemon creates a socket to listen for commands sent from the pm2 client in the terminal. pm2 also has a log module that wraps console.log and writes the output to log files split by date.
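A minimal sketch of the log-redirection idea (this is not pm2's actual code, just the general technique of wrapping console.log and appending to a per-date file; the file path is illustrative):

const fs = require('node:fs');
const path = require('node:path');

const rawLog = console.log;

console.log = (...args) => {
    // one file per day, e.g. out-2024-01-01.log
    const file = path.join(__dirname, `out-${new Date().toISOString().slice(0, 10)}.log`);
    fs.appendFileSync(file, args.join(' ') + '\n');
    rawLog(...args); // still echo to the original stdout
};

console.log('hello'); // printed and appended to the dated log file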

V. Streams

A stream is an abstract interface for working with streaming data. In Node.js there are four types of streams:

● Writable: streams to which data can be written (for example, fs.createWriteStream()).

● Readable: streams from which data can be read (for example, fs.createReadStream()).

● Duplex: streams that are both Readable and Writable (for example, net.Socket).

● Transform: Duplex streams that can modify or transform the data as it is written and read (for example, zlib.createDeflate()).

The stream module also has some commonly used helpers:

stream.pipeline(), stream.finished(), stream.Readable.from(), stream.addAbortSignal()

const { pipeline } = require('node:stream/promises');
const fs = require('node:fs');
const zlib = require('node:zlib');

async function run() {
  await pipeline(
    fs.createReadStream('archive.tar'),
    zlib.createGzip(),
    fs.createWriteStream('archive.tar.gz'),
  );
  console.log('Pipeline succeeded.');
}

run().catch(console.error);
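The pipeline demo above is from the official docs; here is a small sketch of two of the other helpers, Readable.from() and finished() (promises version):

const { Readable } = require('node:stream');
const { finished } = require('node:stream/promises');

async function demo() {
  // Readable.from turns an iterable (or async iterable) into a readable stream
  const readable = Readable.from(['a', 'b', 'c']);

  readable.on('data', (chunk) => console.log('chunk:', chunk));

  // finished resolves once the stream has ended (or rejects on error)
  await finished(readable);
  console.log('stream is done');
}

demo().catch(console.error);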