ovn-northd Source Code Analysis

2023-05-16

ovn-northd is the core daemon of OVN. It translates OVN's high-level configuration into the logical configuration consumed by the ovn-controller daemons; more concretely, it converts the conventional logical network configuration in the OVN northbound database into logical datapath flows in the OVN southbound database.

The following two options specify how to connect to the northbound and southbound databases:

--ovnnb-db=database
  Specifies the northbound database; the default is unix:/usr/local/var/run/ovn/ovnnb_db.sock
--ovnsb-db=database
  Specifies the southbound database; the default is unix:/usr/local/var/run/ovn/ovnsb_db.sock
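For example, to point ovn-northd at databases running on a central node (the addresses below are placeholders, not defaults):

ovn-northd --ovnnb-db=tcp:192.0.2.10:6641 --ovnsb-db=tcp:192.0.2.10:6642

Both options also accept a comma-separated list of remotes when the databases are clustered; see ovn-northd(8) for the full connection syntax.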

Now let's walk through the source code.

int
main(int argc, char *argv[])
    //Initialize the connection to the nbdb.
    //The first argument to ovsdb_idl_create, ovnnb_db, is the path used to connect to the nbdb.
    //The second argument, nbrec_idl_class, is the nbdb schema.
    //The third argument (monitor_everything_by_default) is true, meaning every table and column is monitored by default.
    /* We want to detect (almost) all changes to the ovn-nb db. */
    struct ovsdb_idl_loop ovnnb_idl_loop = OVSDB_IDL_LOOP_INITIALIZER(
        ovsdb_idl_create(ovnnb_db, &nbrec_idl_class, true, true));
    //Columns whose changes should not trigger change alerts.
    ovsdb_idl_omit_alert(ovnnb_idl_loop.idl,
                         &nbrec_nb_global_col_nb_cfg_timestamp);
    ovsdb_idl_omit_alert(ovnnb_idl_loop.idl, &nbrec_nb_global_col_sb_cfg);
    ovsdb_idl_omit_alert(ovnnb_idl_loop.idl,
                         &nbrec_nb_global_col_sb_cfg_timestamp);
    ovsdb_idl_omit_alert(ovnnb_idl_loop.idl, &nbrec_nb_global_col_hv_cfg);
    ovsdb_idl_omit_alert(ovnnb_idl_loop.idl,
                         &nbrec_nb_global_col_hv_cfg_timestamp);

    //Initialize the connection to the sbdb.
    //The first argument to ovsdb_idl_create, ovnsb_db, is the path used to connect to the sbdb.
    //The second argument, sbrec_idl_class, is the sbdb schema.
    //The third argument (monitor_everything_by_default) is false, meaning nothing is monitored by default.
    /* We want to detect only selected changes to the ovn-sb db. */
    struct ovsdb_idl_loop ovnsb_idl_loop = OVSDB_IDL_LOOP_INITIALIZER(
        ovsdb_idl_create(ovnsb_db, &sbrec_idl_class, false, true));
    //Selectively register only the columns of interest.
    //Monitor selected columns of the SB_Global table.
    ovsdb_idl_add_table(ovnsb_idl_loop.idl, &sbrec_table_sb_global);
    add_column_noalert(ovnsb_idl_loop.idl, &sbrec_sb_global_col_nb_cfg);
    add_column_noalert(ovnsb_idl_loop.idl, &sbrec_sb_global_col_options);
    add_column_noalert(ovnsb_idl_loop.idl, &sbrec_sb_global_col_ipsec);
    ...
    //Monitor selected columns of the Logical_Flow table.
    ovsdb_idl_add_table(ovnsb_idl_loop.idl, &sbrec_table_logical_flow);
    add_column_noalert(ovnsb_idl_loop.idl,
                       &sbrec_logical_flow_col_logical_datapath);
    add_column_noalert(ovnsb_idl_loop.idl,
                       &sbrec_logical_flow_col_logical_dp_group);
    add_column_noalert(ovnsb_idl_loop.idl, &sbrec_logical_flow_col_pipeline);
    add_column_noalert(ovnsb_idl_loop.idl, &sbrec_logical_flow_col_table_id);
    add_column_noalert(ovnsb_idl_loop.idl, &sbrec_logical_flow_col_priority);
    add_column_noalert(ovnsb_idl_loop.idl, &sbrec_logical_flow_col_match);
    add_column_noalert(ovnsb_idl_loop.idl, &sbrec_logical_flow_col_actions);
    ...
    //Monitor selected columns of the Port_Binding table.
    ovsdb_idl_add_table(ovnsb_idl_loop.idl, &sbrec_table_port_binding);
    add_column_noalert(ovnsb_idl_loop.idl, &sbrec_port_binding_col_datapath);
    add_column_noalert(ovnsb_idl_loop.idl,
                       &sbrec_port_binding_col_logical_port);
    add_column_noalert(ovnsb_idl_loop.idl,
                       &sbrec_port_binding_col_tunnel_key);
    add_column_noalert(ovnsb_idl_loop.idl,
                       &sbrec_port_binding_col_parent_port);
    ...
    //Monitor selected columns of the FDB table.
    ovsdb_idl_add_table(ovnsb_idl_loop.idl, &sbrec_table_fdb);
    add_column_noalert(ovnsb_idl_loop.idl, &sbrec_fdb_col_mac);
    add_column_noalert(ovnsb_idl_loop.idl, &sbrec_fdb_col_dp_key);
    add_column_noalert(ovnsb_idl_loop.idl, &sbrec_fdb_col_port_key);
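The add_column_noalert() helper used throughout this block is a small wrapper defined in ovn-northd itself: it registers a column with the IDL and then suppresses change alerts for it (ovn-northd is usually the writer of these columns, so it has no need to wake up when they change). It looks essentially like this:

static void
add_column_noalert(struct ovsdb_idl *idl,
                   const struct ovsdb_idl_column *column)
{
    ovsdb_idl_add_column(idl, column);   /* monitor and replicate the column */
    ovsdb_idl_omit_alert(idl, column);   /* but do not wake us on changes    */
}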

    while (!exiting) {
        //Recompute everything from the current DB contents.
        ovn_db_run(&ctx, sbrec_chassis_by_name, &ovnsb_idl_loop, ovn_internal_version);
            //Update the southbound DB from the northbound DB; see analysis 1 below.
            ovnnb_db_run(ctx, sbrec_chassis_by_name, ovnsb_idl_loop, &datapaths, &ports, &lr_list, start_time, ovn_internal_version);
            //Update the northbound DB from the southbound DB; see analysis 2 below.
            ovnsb_db_run(ctx, ovnsb_idl_loop, &ports, start_time);
    }
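The loop above is abridged. In the source, each iteration follows the standard OVSDB IDL loop pattern; roughly (exit handling, locking, and error paths omitted):

    while (!exiting) {
        /* Run both IDL loops; each returns the transaction (possibly NULL)
         * that writes made during this iteration will be added to. */
        struct northd_context ctx = {
            .ovnnb_idl = ovnnb_idl_loop.idl,
            .ovnnb_txn = ovsdb_idl_loop_run(&ovnnb_idl_loop),
            .ovnsb_idl = ovnsb_idl_loop.idl,
            .ovnsb_txn = ovsdb_idl_loop_run(&ovnsb_idl_loop),
        };

        ovn_db_run(&ctx, sbrec_chassis_by_name, &ovnsb_idl_loop,
                   ovn_internal_version);

        /* Commit whatever ovn_db_run() changed and register wakeup events. */
        ovsdb_idl_loop_commit_and_wait(&ovnnb_idl_loop);
        ovsdb_idl_loop_commit_and_wait(&ovnsb_idl_loop);
        poll_block();   /* Sleep until one of the databases changes. */
    }
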
1. ovnnb_db_run
static void
ovnnb_db_run(struct northd_context *ctx,
             struct ovsdb_idl_index *sbrec_chassis_by_name,
             struct ovsdb_idl_loop *sb_loop,
             struct hmap *datapaths, struct hmap *ports,
             struct ovs_list *lr_list,
             int64_t loop_start_time,
             const char *ovn_internal_version)
    //Fetch the NB_Global row first; its options drive most of what follows.
    const struct nbrec_nb_global *nb = nbrec_nb_global_first(ctx->ovnnb_idl);

    //Store the mac_prefix option in the global variable mac_prefix; if the option
    //is all zeros, a random prefix is generated.
    const char *mac_addr_prefix = set_mac_prefix(smap_get(&nb->options, "mac_prefix"));

    //svc_monitor_mac is the MAC address used for the sbdb Service_Monitor table.
    /* MAC allocated for service monitor usage. Just one mac is allocated
     * for this purpose and ovn-controller's on each chassis will make use
     * of this mac when sending out the packets to monitor the services
     * defined in Service_Monitor Southbound table. Since these packets
     * all locally handled, having just one mac is good enough. */
    static char svc_monitor_mac[ETH_ADDR_STRLEN + 1];
    static struct eth_addr svc_monitor_mac_ea;

    //If the nbdb NB_Global table sets svc_monitor_mac, use the configured value.
    const char *monitor_mac = smap_get(&nb->options, "svc_monitor_mac");
    if (monitor_mac) {
        if (eth_addr_from_string(monitor_mac, &svc_monitor_mac_ea)) {
            snprintf(svc_monitor_mac, sizeof svc_monitor_mac,
                     ETH_ADDR_FMT, ETH_ADDR_ARGS(svc_monitor_mac_ea));
        } else {
            monitor_mac = NULL;
        }
    }

    struct smap options;
    smap_clone(&options, &nb->options);
    //Save mac_prefix into options.
    smap_add(&options, "mac_prefix", mac_addr_prefix);

    //If the nbdb NB_Global table does not set svc_monitor_mac, generate a random one.
    if (!monitor_mac) {
        eth_addr_random(&svc_monitor_mac_ea);
        snprintf(svc_monitor_mac, sizeof svc_monitor_mac,
                 ETH_ADDR_FMT, ETH_ADDR_ARGS(svc_monitor_mac_ea));
        //Save svc_monitor_mac into options.
        smap_replace(&options, "svc_monitor_mac", svc_monitor_mac);
    }

    char *max_tunid = xasprintf("%d", get_ovn_max_dp_key_local(ctx));
    //Save max_tunid into options.
    smap_replace(&options, "max_tunid", max_tunid);
    free(max_tunid);
    //Save northd_internal_version into options.
    smap_replace(&options, "northd_internal_version", ovn_internal_version);

    nbrec_nb_global_verify_options(nb);
    //Finally write options back to the NB_Global table.
    nbrec_nb_global_set_options(nb, &options);
    
    //The build_xxx functions below are the core of ovn-northd. They all follow a similar pattern, updating southbound tables from the corresponding northbound tables. Several key paths are analyzed below.
    
    //Sync the latest logical switches and routers in the northbound DB into the southbound Datapath_Binding table;
    //also allocate a struct ovn_datapath for each datapath and add it to the hmap datapaths. See analysis 1.1 below.
    build_datapaths(ctx, datapaths, lr_list);
    //Update the sbdb Port_Binding table from the nbdb Logical_Switch_Port and Logical_Router_Port tables; see analysis 1.2 below.
    build_ports(ctx, sbrec_chassis_by_name, datapaths, ports);
    build_ovn_lbs(ctx, datapaths, ports, &lbs);
    build_ipam(datapaths, ports);
    build_port_group_lswitches(ctx, &port_groups, ports);
    build_lrouter_groups(ports, lr_list);
    build_ip_mcast(ctx, datapaths);
    build_mcast_groups(ctx, datapaths, ports, &mcast_groups, &igmp_groups);
    build_meter_groups(ctx, &meter_groups);
    build_bfd_table(ctx, &bfd_connections, ports);
    //Generate the sbdb Logical_Flow table from the nbdb datapaths, ports, and other configuration; see analysis 1.3 below.
    build_lflows(ctx, datapaths, ports, &port_groups, &mcast_groups, &igmp_groups, &meter_groups, &lbs, &bfd_connections);

1.1 build_datapaths

//Collect every datapath in the nbdb: each logical switch and each logical router is
//treated as one datapath, and each datapath is written to one row of the sbdb
//datapath_binding table.
build_datapaths(ctx, datapaths, lr_list);
    //Read all rows of the sbdb datapath_binding table, plus the nbdb
    //LOGICAL_SWITCH and LOGICAL_ROUTER tables.
    //Datapaths that exist only in the sbdb must eventually be deleted from datapath_binding.
    //Datapaths that exist only in the nbdb will be inserted into datapath_binding.
    //Datapaths present in both may need their datapath_binding rows updated.
    join_datapaths(ctx, datapaths, &sb_only, &nb_only, &both, lr_list);

    /* Assign explicitly requested tunnel ids first. */
    struct hmap dp_tnlids = HMAP_INITIALIZER(&dp_tnlids);
    struct ovn_datapath *od, *next;
    LIST_FOR_EACH (od, list, &both) {
        ovn_datapath_assign_requested_tnl_id(&dp_tnlids, od);
    }
    LIST_FOR_EACH (od, list, &nb_only) {
        ovn_datapath_assign_requested_tnl_id(&dp_tnlids, od);
    }

    //Record the tunnel_key of every already-existing datapath in dp_tnlids.
    /* Keep nonconflicting tunnel IDs that are already assigned. */
    LIST_FOR_EACH (od, list, &both) {
        if (!od->tunnel_key && ovn_add_tnlid(&dp_tnlids, od->sb->tunnel_key)) {
            od->tunnel_key = od->sb->tunnel_key;
        }
    }
    
    /* Assign new tunnel ids where needed. */
    uint32_t hint = 0;
    LIST_FOR_EACH_SAFE (od, next, list, &both) {
        ovn_datapath_allocate_key(ctx, datapaths, &dp_tnlids, od, &hint);
    }
    
    //Allocate a tunnel_key for each new datapath, counting up from 1.
    LIST_FOR_EACH_SAFE (od, next, list, &nb_only) {
        ovn_datapath_allocate_key(ctx, datapaths, &dp_tnlids, od, &hint);
    }
    
    /* Sync tunnel ids from nb to sb. */
    LIST_FOR_EACH (od, list, &both) {
        if (od->sb->tunnel_key != od->tunnel_key) {
            sbrec_datapath_binding_set_tunnel_key(od->sb, od->tunnel_key);
        }
        ovn_datapath_update_external_ids(od);
    }
    //Datapaths that exist only in the nbdb are inserted into datapath_binding.
    LIST_FOR_EACH (od, list, &nb_only) {
        od->sb = sbrec_datapath_binding_insert(ctx->ovnsb_txn);
        //Update the external_ids of the datapath_binding row.
        ovn_datapath_update_external_ids(od);
        //Update the tunnel_key of the datapath_binding row.
        sbrec_datapath_binding_set_tunnel_key(od->sb, od->tunnel_key);
    }
    ovn_destroy_tnlids(&dp_tnlids);

    //Datapaths that exist only in the sbdb are deleted from datapath_binding.
    /* Delete southbound records without northbound matches. */
    LIST_FOR_EACH_SAFE (od, next, list, &sb_only) {
        ovs_list_remove(&od->list);
        sbrec_datapath_binding_delete(od->sb);
        ovn_datapath_destroy(datapaths, od);
    }
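ovn_add_tnlid() and ovn_allocate_tnlid() implement a simple "reserve, or scan for the next free id" scheme over a set of used keys. Below is a minimal self-contained sketch of the same pattern; it uses a plain bitmap instead of the hmap the real code uses, and the helper names are illustrative, not OVN's API:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_KEY 4096              /* illustration only; real datapath keys are 24-bit */

static bool used[MAX_KEY + 1];    /* used[k] is true when key k is taken */

/* Reserve 'key' if it is in range and free; plays the role of ovn_add_tnlid(). */
static bool
reserve_key(uint32_t key)
{
    if (key == 0 || key > MAX_KEY || used[key]) {
        return false;
    }
    used[key] = true;
    return true;
}

/* Allocate the next free key after '*hint', wrapping around; plays the role
 * of ovn_allocate_tnlid().  Returns 0 when the key space is exhausted. */
static uint32_t
allocate_key(uint32_t *hint)
{
    for (uint32_t i = 0; i < MAX_KEY; i++) {
        uint32_t key = (*hint + i) % MAX_KEY + 1;  /* cycles through 1..MAX_KEY */
        if (reserve_key(key)) {
            *hint = key;
            return key;
        }
    }
    return 0;
}

int main(void)
{
    uint32_t hint = 0;
    reserve_key(3);                        /* an explicitly requested key */
    printf("%u\n", allocate_key(&hint));   /* 1 */
    printf("%u\n", allocate_key(&hint));   /* 2 */
    printf("%u\n", allocate_key(&hint));   /* 4, because 3 is already taken */
    return 0;
}

This is also why explicitly requested keys are assigned first: once they are in the set, the sequential allocator simply skips over them.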

1.2 build_ports

build_ports(ctx, sbrec_chassis_by_name, datapaths, ports);
    struct ovs_list sb_only, nb_only, both;
    struct hmap tag_alloc_table = HMAP_INITIALIZER(&tag_alloc_table);
    struct hmap chassis_qdisc_queues = HMAP_INITIALIZER(&chassis_qdisc_queues);

    /* sset which stores the set of ha chassis group names used. */
    struct sset active_ha_chassis_grps =
        SSET_INITIALIZER(&active_ha_chassis_grps);

    //Read all rows of the sbdb PORT_BINDING table.
    //Read all ports of each datapath: for a logical switch datapath this means the
    //logical_switch_port table, for a logical router datapath the logical_router_port table.
    //Put all ports into ports.
    //Ports that exist only in the sbdb port_binding table go into sb_only.
    //Ports that exist only in the nbdb logical_switch_port/logical_router_port tables go into nb_only.
    //Ports present in both the sbdb and the nbdb go into both.
    join_logical_ports(ctx, datapaths, ports, &chassis_qdisc_queues,
                       &tag_alloc_table, &sb_only, &nb_only, &both);

    //If any ports exist only in the sbdb, ports have been deleted from the nbdb,
    //so their stale Mac_Binding rows must be purged.
    /* Purge stale Mac_Bindings if ports are deleted. */
    bool remove_mac_bindings = !ovs_list_is_empty(&sb_only);
    
    //If a port has the requested-tnl-key option, assign the explicitly requested
    //tunnel key; on success the key is stored in op->tunnel_key.
    //Tunnel keys are assigned/updated for ports in both and nb_only, but not for
    //ports in sb_only: those ports no longer exist and will be deleted.
    /* Assign explicitly requested tunnel ids first. */
    struct ovn_port *op, *next;
    LIST_FOR_EACH (op, list, &both) {
        ovn_port_assign_requested_tnl_id(op);
    }
    LIST_FOR_EACH (op, list, &nb_only) {
        ovn_port_assign_requested_tnl_id(op);
    }

    /* Keep nonconflicting tunnel IDs that are already assigned. */
    LIST_FOR_EACH (op, list, &both) {
        if (!op->tunnel_key) {
            ovn_port_add_tnlid(op, op->sb->tunnel_key);
        }
    }

    //Ports with no explicitly requested tunnel key get one allocated automatically.
    /* Assign new tunnel ids where needed. */
    LIST_FOR_EACH_SAFE (op, next, list, &both) {
        ovn_port_allocate_key(ports, op);
    }
    LIST_FOR_EACH_SAFE (op, next, list, &nb_only) {
        ovn_port_allocate_key(ports, op);
    }

    //Insert the nbdb ports (both LSPs and LRPs) into the sbdb port_binding table and fill in the relevant columns.
    /* Add southbound record for each unmatched northbound record. */
    LIST_FOR_EACH_SAFE (op, next, list, &nb_only) {
        op->sb = sbrec_port_binding_insert(ctx->ovnsb_txn);
        ovn_port_update_sbrec(ctx, sbrec_chassis_by_name, op,
                              &chassis_qdisc_queues,
                              &active_ha_chassis_grps);
        sbrec_port_binding_set_logical_port(op->sb, op->key);
    }

    //A port that no longer exists in the nbdb must not exist in the sbdb either, so delete the sbdb-only ports.
    /* Delete southbound records without northbound matches. */
    if (!ovs_list_is_empty(&sb_only)) {
        LIST_FOR_EACH_SAFE (op, next, list, &sb_only) {
            ovs_list_remove(&op->list);
            sbrec_port_binding_delete(op->sb);
            ovn_port_destroy(ports, op);
        }
    }
    
    //Purge the stale Mac_Binding rows.
    if (remove_mac_bindings) {
        cleanup_mac_bindings(ctx, datapaths, ports);
    }   
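The explicitly requested tunnel keys handled in 1.1 and 1.2 come straight from the northbound database. As a usage sketch (the port and switch names are from this article's sample topology; the option names are documented in ovn-nb(5)):

ovn-nbctl set Logical_Switch ls1 other_config:requested-tnl-key=5000
ovn-nbctl set Logical_Switch_Port ls1-vm1 options:requested-tnl-key=33

On the next iteration, ovn_datapath_assign_requested_tnl_id() and ovn_port_assign_requested_tnl_id() pick these values up before any automatic allocation happens.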

1.3 build_lflows
This function first collects flows into the lflows hmap via ovn_lflow_add, then iterates over lflows and inserts each flow into the sbdb logical_flow table.

First, look at the implementation of ovn_lflow_add. As shown below, it is just a macro that forwards to the function ovn_lflow_add_at, whose parameters are:
LFLOW_MAP: the hmap that collects the flows
OD: a struct ovn_datapath describing the datapath
PRIORITY: the flow priority
MATCH: the flow match expression
ACTIONS: the flow actions
STAGE: a value of the enum ovn_stage type, which combines the datapath type, packet direction, and table id.

#define ovn_lflow_add(LFLOW_MAP, OD, STAGE, PRIORITY, MATCH, ACTIONS) \
    ovn_lflow_add_at(LFLOW_MAP, OD, STAGE, PRIORITY, MATCH, ACTIONS, true, \
                     NULL, OVS_SOURCE_LOCATOR)
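For instance, the drop-VLAN flow added later in this article:

ovn_lflow_add(lflows, od, S_SWITCH_IN_PORT_SEC_L2, 100, "vlan.present", "drop;");

expands to:

ovn_lflow_add_at(lflows, od, S_SWITCH_IN_PORT_SEC_L2, 100, "vlan.present",
                 "drop;", true, NULL, OVS_SOURCE_LOCATOR);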

//Structure describing one logical flow.
struct ovn_lflow {
    struct hmap_node hmap_node;

    struct ovn_datapath *od;     /* 'logical_datapath' in SB schema.  */
    struct hmapx od_group;       /* Hash map of 'struct ovn_datapath *'. */
    enum ovn_stage stage;
    uint16_t priority;
    char *match;
    char *actions;
    char *stage_hint;
    const char *where;
};

Next, focus on the enum ovn_stage type. As defined below, it encodes the datapath type (logical switch or logical router), the packet direction (ingress or egress), and the table id (each table implements a different function).

/* A stage within an OVN logical switch or router.
 *
 * An "enum ovn_stage" indicates whether the stage is part of a logical switch
 * or router, whether the stage is part of the ingress or egress pipeline, and
 * the table within that pipeline.  The first three components are combined to
 * form the stage's full name, e.g. S_SWITCH_IN_PORT_SEC_L2,
 * S_ROUTER_OUT_DELIVERY. */
enum ovn_stage {
#define PIPELINE_STAGES                                                   \
    /* Logical switch ingress stages. */                                  \
    PIPELINE_STAGE(SWITCH, IN,  PORT_SEC_L2,    0, "ls_in_port_sec_l2")   \
    PIPELINE_STAGE(SWITCH, IN,  PORT_SEC_IP,    1, "ls_in_port_sec_ip")   \
    PIPELINE_STAGE(SWITCH, IN,  PORT_SEC_ND,    2, "ls_in_port_sec_nd")   \
    PIPELINE_STAGE(SWITCH, IN,  LOOKUP_FDB ,    3, "ls_in_lookup_fdb")    \
    PIPELINE_STAGE(SWITCH, IN,  PUT_FDB,        4, "ls_in_put_fdb")       \
    PIPELINE_STAGE(SWITCH, IN,  PRE_ACL,        5, "ls_in_pre_acl")       \
    PIPELINE_STAGE(SWITCH, IN,  PRE_LB,         6, "ls_in_pre_lb")        \
    PIPELINE_STAGE(SWITCH, IN,  PRE_STATEFUL,   7, "ls_in_pre_stateful")  \
    PIPELINE_STAGE(SWITCH, IN,  ACL_HINT,       8, "ls_in_acl_hint")      \
    PIPELINE_STAGE(SWITCH, IN,  ACL,            9, "ls_in_acl")           \
    PIPELINE_STAGE(SWITCH, IN,  QOS_MARK,      10, "ls_in_qos_mark")      \
    PIPELINE_STAGE(SWITCH, IN,  QOS_METER,     11, "ls_in_qos_meter")     \
    PIPELINE_STAGE(SWITCH, IN,  LB,            12, "ls_in_lb")            \
    PIPELINE_STAGE(SWITCH, IN,  STATEFUL,      13, "ls_in_stateful")      \
    PIPELINE_STAGE(SWITCH, IN,  PRE_HAIRPIN,   14, "ls_in_pre_hairpin")   \
    PIPELINE_STAGE(SWITCH, IN,  NAT_HAIRPIN,   15, "ls_in_nat_hairpin")   \
    PIPELINE_STAGE(SWITCH, IN,  HAIRPIN,       16, "ls_in_hairpin")       \
    PIPELINE_STAGE(SWITCH, IN,  ARP_ND_RSP,    17, "ls_in_arp_rsp")       \
    PIPELINE_STAGE(SWITCH, IN,  DHCP_OPTIONS,  18, "ls_in_dhcp_options")  \
    PIPELINE_STAGE(SWITCH, IN,  DHCP_RESPONSE, 19, "ls_in_dhcp_response") \
    PIPELINE_STAGE(SWITCH, IN,  DNS_LOOKUP,    20, "ls_in_dns_lookup")    \
    PIPELINE_STAGE(SWITCH, IN,  DNS_RESPONSE,  21, "ls_in_dns_response")  \
    PIPELINE_STAGE(SWITCH, IN,  EXTERNAL_PORT, 22, "ls_in_external_port") \
    PIPELINE_STAGE(SWITCH, IN,  L2_LKUP,       23, "ls_in_l2_lkup")       \
    PIPELINE_STAGE(SWITCH, IN,  L2_UNKNOWN,    24, "ls_in_l2_unknown")    \
                                                                          \
    /* Logical switch egress stages. */                                   \
    PIPELINE_STAGE(SWITCH, OUT, PRE_LB,       0, "ls_out_pre_lb")         \
    PIPELINE_STAGE(SWITCH, OUT, PRE_ACL,      1, "ls_out_pre_acl")        \
    PIPELINE_STAGE(SWITCH, OUT, PRE_STATEFUL, 2, "ls_out_pre_stateful")   \
    PIPELINE_STAGE(SWITCH, OUT, LB,           3, "ls_out_lb")             \
    PIPELINE_STAGE(SWITCH, OUT, ACL_HINT,     4, "ls_out_acl_hint")       \
    PIPELINE_STAGE(SWITCH, OUT, ACL,          5, "ls_out_acl")            \
    PIPELINE_STAGE(SWITCH, OUT, QOS_MARK,     6, "ls_out_qos_mark")       \
    PIPELINE_STAGE(SWITCH, OUT, QOS_METER,    7, "ls_out_qos_meter")      \
    PIPELINE_STAGE(SWITCH, OUT, STATEFUL,     8, "ls_out_stateful")       \
    PIPELINE_STAGE(SWITCH, OUT, PORT_SEC_IP,  9, "ls_out_port_sec_ip")    \
    PIPELINE_STAGE(SWITCH, OUT, PORT_SEC_L2, 10, "ls_out_port_sec_l2")    \
                                                                      \
    /* Logical router ingress stages. */                              \
    PIPELINE_STAGE(ROUTER, IN,  ADMISSION,       0, "lr_in_admission")    \
    PIPELINE_STAGE(ROUTER, IN,  LOOKUP_NEIGHBOR, 1, "lr_in_lookup_neighbor") \
    PIPELINE_STAGE(ROUTER, IN,  LEARN_NEIGHBOR,  2, "lr_in_learn_neighbor") \
    PIPELINE_STAGE(ROUTER, IN,  IP_INPUT,        3, "lr_in_ip_input")     \
    PIPELINE_STAGE(ROUTER, IN,  DEFRAG,          4, "lr_in_defrag")       \
    PIPELINE_STAGE(ROUTER, IN,  UNSNAT,          5, "lr_in_unsnat")       \
    PIPELINE_STAGE(ROUTER, IN,  DNAT,            6, "lr_in_dnat")         \
    PIPELINE_STAGE(ROUTER, IN,  ECMP_STATEFUL,   7, "lr_in_ecmp_stateful") \
    PIPELINE_STAGE(ROUTER, IN,  ND_RA_OPTIONS,   8, "lr_in_nd_ra_options") \
    PIPELINE_STAGE(ROUTER, IN,  ND_RA_RESPONSE,  9, "lr_in_nd_ra_response") \
    PIPELINE_STAGE(ROUTER, IN,  IP_ROUTING,      10, "lr_in_ip_routing")   \
    PIPELINE_STAGE(ROUTER, IN,  IP_ROUTING_ECMP, 11, "lr_in_ip_routing_ecmp") \
    PIPELINE_STAGE(ROUTER, IN,  POLICY,          12, "lr_in_policy")       \
    PIPELINE_STAGE(ROUTER, IN,  POLICY_ECMP,     13, "lr_in_policy_ecmp")  \
    PIPELINE_STAGE(ROUTER, IN,  ARP_RESOLVE,     14, "lr_in_arp_resolve")  \
    PIPELINE_STAGE(ROUTER, IN,  CHK_PKT_LEN   ,  15, "lr_in_chk_pkt_len")  \
    PIPELINE_STAGE(ROUTER, IN,  LARGER_PKTS,     16, "lr_in_larger_pkts")  \
    PIPELINE_STAGE(ROUTER, IN,  GW_REDIRECT,     17, "lr_in_gw_redirect")  \
    PIPELINE_STAGE(ROUTER, IN,  ARP_REQUEST,     18, "lr_in_arp_request")  \
                                                                      \
    /* Logical router egress stages. */                               \
    PIPELINE_STAGE(ROUTER, OUT, UNDNAT,    0, "lr_out_undnat")        \
    PIPELINE_STAGE(ROUTER, OUT, SNAT,      1, "lr_out_snat")          \
    PIPELINE_STAGE(ROUTER, OUT, EGR_LOOP,  2, "lr_out_egr_loop")      \
    PIPELINE_STAGE(ROUTER, OUT, DELIVERY,  3, "lr_out_delivery")

#define PIPELINE_STAGE(DP_TYPE, PIPELINE, STAGE, TABLE, NAME)   \
    S_##DP_TYPE##_##PIPELINE##_##STAGE                          \
        = OVN_STAGE_BUILD(DP_##DP_TYPE, P_##PIPELINE, TABLE),
    PIPELINE_STAGES
#undef PIPELINE_STAGE
};

//The low 8 bits hold the table id, bit 8 the pipeline, and bit 9 the datapath type.
/* Returns an "enum ovn_stage" built from the arguments.
 *
 * (It's better to use ovn_stage_build() for type-safety reasons, but inline
 * functions can't be used in enums or switch cases.) */
#define OVN_STAGE_BUILD(DP_TYPE, PIPELINE, TABLE) \
    (((DP_TYPE) << 9) | ((PIPELINE) << 8) | (TABLE))

Expanding the macros above yields definitions like the following (datapath-type bit, pipeline bit, then the 8-bit table id):

enum ovn_stage {
    S_SWITCH_IN_PORT_SEC_L2 = OVN_STAGE_BUILD(DP_SWITCH, P_IN, 0),  /* 0 0 00000000 */
    S_SWITCH_IN_PORT_SEC_IP = OVN_STAGE_BUILD(DP_SWITCH, P_IN, 1),  /* 0 0 00000001 */
    S_SWITCH_IN_PORT_SEC_ND = OVN_STAGE_BUILD(DP_SWITCH, P_IN, 2),  /* 0 0 00000010 */
    S_SWITCH_IN_LOOKUP_FDB  = OVN_STAGE_BUILD(DP_SWITCH, P_IN, 3),  /* 0 0 00000011 */
    ....
}

Since an ovn_stage value packs these three pieces of information, the following helpers can extract each of them from a stage:

/* Returns the pipeline to which 'stage' belongs. */
static enum ovn_pipeline
ovn_stage_get_pipeline(enum ovn_stage stage)
{
    return (stage >> 8) & 1;
}

/* Returns the table to which 'stage' belongs. */
static uint8_t
ovn_stage_get_table(enum ovn_stage stage)
{
    return stage & 0xff;
}
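A short standalone example ties the encoding and the accessors together (DP_SWITCH/DP_ROUTER and P_IN/P_OUT are given the values implied by the bit layout above):

#include <stdio.h>

#define OVN_STAGE_BUILD(DP_TYPE, PIPELINE, TABLE) \
    (((DP_TYPE) << 9) | ((PIPELINE) << 8) | (TABLE))

enum ovn_datapath_type { DP_SWITCH, DP_ROUTER };   /* bit 9 */
enum ovn_pipeline { P_IN, P_OUT };                 /* bit 8 */

int main(void)
{
    /* lr_out_delivery: router datapath, egress pipeline, table 3. */
    unsigned stage = OVN_STAGE_BUILD(DP_ROUTER, P_OUT, 3);

    printf("stage    = 0x%03x\n", stage);           /* 0x303        */
    printf("dp type  = %u\n", (stage >> 9) & 1);    /* 1 = router   */
    printf("pipeline = %u\n", (stage >> 8) & 1);    /* 1 = egress   */
    printf("table    = %u\n", stage & 0xff);        /* 3            */
    return 0;
}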

build_lflows constructs struct ovn_lflow flows from the datapaths, ports, and other state, and inserts them into the sbdb logical_flow table.

build_lflows(ctx, datapaths, ports, &port_groups, &mcast_groups,
             &igmp_groups, &meter_groups, &lbs, &bfd_connections);
    build_lswitch_and_lrouter_flows(datapaths, ports,
                                    port_groups, &lflows, mcgroups,
                                    igmp_groups, meter_groups, lbs,
                                    bfd_connections);
        char *svc_check_match = xasprintf("eth.dst == %s", svc_monitor_mac);
        struct lswitch_flow_build_info lsi = {
            .datapaths = datapaths,
            .ports = ports,
            .port_groups = port_groups,
            .lflows = lflows,
            .mcgroups = mcgroups,
            .igmp_groups = igmp_groups,
            .meter_groups = meter_groups,
            .lbs = lbs,
            .bfd_connections = bfd_connections,
            .svc_check_match = svc_check_match,
            .match = DS_EMPTY_INITIALIZER,
            .actions = DS_EMPTY_INITIALIZER,
        };
        /* Combined build - all lflow generation from lswitch and lrouter
         * will move here and will be reogranized by iterator type.
         */
        HMAP_FOR_EACH (od, key_node, datapaths) {
            build_lswitch_and_lrouter_iterate_by_od(od, &lsi);
                /* Build Logical Switch Flows. */
                build_lswitch_lflows_pre_acl_and_acl(od, lsi->port_groups, lsi->lflows,
                                                     lsi->meter_groups, lsi->lbs);

                build_fwd_group_lflows(od, lsi->lflows);
                build_lswitch_lflows_admission_control(od, lsi->lflows);
                    /* Logical VLANs not supported. */
                    if (!is_vlan_transparent(od)) {
                        /* Block logical VLANs. */
                        ovn_lflow_add(lflows, od, S_SWITCH_IN_PORT_SEC_L2, 100,
                                      "vlan.present", "drop;");
                    }

                    /* Broadcast/multicast source address is invalid. */
                    ovn_lflow_add(lflows, od, S_SWITCH_IN_PORT_SEC_L2, 100, "eth.src[40]", "drop;");
                build_lswitch_input_port_sec_od(od, lsi->lflows);
                build_lswitch_learn_fdb_od(od, lsi->lflows);
                build_lswitch_arp_nd_responder_default(od, lsi->lflows);
                build_lswitch_dns_lookup_and_response(od, lsi->lflows);
                build_lswitch_dhcp_and_dns_defaults(od, lsi->lflows);
                build_lswitch_destination_lookup_bmcast(od, lsi->lflows, &lsi->actions);
                build_lswitch_output_port_sec_od(od, lsi->lflows);

                /* Build Logical Router Flows. */
                build_adm_ctrl_flows_for_lrouter(od, lsi->lflows);
                build_neigh_learning_flows_for_lrouter(od, lsi->lflows, &lsi->match,
                                                       &lsi->actions);
                build_ND_RA_flows_for_lrouter(od, lsi->lflows);
                build_static_route_flows_for_lrouter(od, lsi->lflows, lsi->ports,
                                                     lsi->bfd_connections);
                build_mcast_lookup_flows_for_lrouter(od, lsi->lflows, &lsi->match,
                                                     &lsi->actions);
                build_ingress_policy_flows_for_lrouter(od, lsi->lflows, lsi->ports);
                build_arp_resolve_flows_for_lrouter(od, lsi->lflows);
                build_check_pkt_len_flows_for_lrouter(od, lsi->lflows, lsi->ports,
                                                      &lsi->match, &lsi->actions);
                build_gateway_redirect_flows_for_lrouter(od, lsi->lflows, &lsi->match,
                                                         &lsi->actions);
                build_arp_request_flows_for_lrouter(od, lsi->lflows, &lsi->match,
                                                    &lsi->actions);
                build_misc_local_traffic_drop_flows_for_lrouter(od, lsi->lflows);
                build_lrouter_arp_nd_for_datapath(od, lsi->lflows);
                build_lrouter_nat_defrag_and_lb(od, lsi->lflows, lsi->meter_groups,
                                                lsi->lbs, &lsi->match, &lsi->actions);
        }
        HMAP_FOR_EACH (op, key_node, ports) {
            build_lswitch_and_lrouter_iterate_by_op(op, &lsi);
        }
        HMAP_FOR_EACH (lb, hmap_node, lbs) {
            build_lswitch_arp_nd_service_monitor(lb, lsi.lflows,
                                                 &lsi.actions,
                                                 &lsi.match);
        }
        build_lswitch_flows(datapaths, lflows);

    //Iterate over lflows, inserting each flow into the sbdb logical_flow table.
    struct ovn_lflow *next_lflow;
    HMAP_FOR_EACH_SAFE (lflow, next_lflow, hmap_node, &lflows) {
        //Extract the pipeline (ingress or egress) from lflow->stage.
        const char *pipeline = ovn_stage_get_pipeline_name(lflow->stage);
        //Extract the table id from lflow->stage.
        uint8_t table = ovn_stage_get_table(lflow->stage);

        //Insert one row into the sbdb.
        sbflow = sbrec_logical_flow_insert(ctx->ovnsb_txn);
        if (lflow->od) {
            //Set the logical_datapath column to the datapath_binding row lflow->od->sb.
            sbrec_logical_flow_set_logical_datapath(sbflow, lflow->od->sb);
        }
        //Set the logical_dp_group column from lflow->od_group.
        ovn_sb_set_lflow_logical_dp_group(ctx, &dp_groups, sbflow, &lflow->od_group);
        //Set the pipeline column.
        sbrec_logical_flow_set_pipeline(sbflow, pipeline);
        //Set the table id.
        sbrec_logical_flow_set_table_id(sbflow, table);
        //Set the priority.
        sbrec_logical_flow_set_priority(sbflow, lflow->priority);
        //Set the match expression.
        sbrec_logical_flow_set_match(sbflow, lflow->match);
        //Set the actions.
        sbrec_logical_flow_set_actions(sbflow, lflow->actions);

        /* Trim the source locator lflow->where, which looks something like
         * "ovn/northd/ovn-northd.c:1234", down to just the part following the
         * last slash, e.g. "ovn-northd.c:1234". */
        const char *slash = strrchr(lflow->where, '/');
        const char *where = slash ? slash + 1 : lflow->where;

        struct smap ids = SMAP_INITIALIZER(&ids);
        smap_add(&ids, "stage-name", ovn_stage_to_str(lflow->stage));
        smap_add(&ids, "source", where);
        if (lflow->stage_hint) {
            smap_add(&ids, "stage-hint", lflow->stage_hint);
        }
        //Record the remaining metadata in external_ids.
        sbrec_logical_flow_set_external_ids(sbflow, &ids);
        smap_destroy(&ids);

        ovn_lflow_destroy(&lflows, lflow);
    }
    hmap_destroy(&lflows);

For example, the following ovn_lflow_add call installs a flow that drops VLAN-tagged packets; the resulting logical flow is shown underneath:

ovn_lflow_add(lflows, od, S_SWITCH_IN_PORT_SEC_L2, 100, "vlan.present", "drop;");
//logical flow
table=0 (ls_in_port_sec_l2  ), priority=100  , match=(vlan.present), action=(drop;)
2. ovnsb_db_run

Check the chassis column of the sbdb Port_Binding table: when it is non-empty, the logical port already has a corresponding physical endpoint, so the matching logical port in the nbdb must be set up.
ovnsb_db_run(ctx, ovnsb_idl_loop, &ports, start_time);
    /* Handle changes to the 'chassis' column of the 'Port_Binding' table.  When
     * this column is not empty, it means we need to set the corresponding logical
     * port as 'up' in the northbound DB. */
    handle_port_binding_changes(ctx, ports, &ha_ref_chassis_map);
        const struct sbrec_port_binding *sb;
        SBREC_PORT_BINDING_FOR_EACH(sb, ctx->ovnsb_idl) {
            struct ovn_port *op = ovn_port_find(ports, sb->logical_port);

            //Skip ports that are not logical switch ports.
            if (!op || !op->nbsp) {
                /* The logical port doesn't exist for this port binding.  This can
                 * happen under normal circumstances when ovn-northd hasn't gotten
                 * around to pruning the Port_Binding yet. */
                continue;
            }

            //Defaults to false.
            bool up = false;

            //A router-type port peers with a logical router port and is always up.
            if (lsp_is_router(op->nbsp)) {
                up = true;
            //If the chassis column of the Port_Binding row is non-empty, set up to true.
            } else if (sb->chassis) {
                up = smap_get_bool(&sb->chassis->other_config,
                                   OVN_FEATURE_PORT_UP_NOTIF, false)
                     ? sb->n_up && sb->up[0]
                     : true;
            }

            //Set the up field of the logical switch port in the nbdb.
            if (!op->nbsp->up || *op->nbsp->up != up) {
                nbrec_logical_switch_port_set_up(op->nbsp, &up, 1);
            }

            if (build_ha_chassis_ref && ctx->ovnsb_txn && sb->chassis) {
                /* Check and add the chassis which has claimed this 'sb'
                 * to the ha chassis group's ref_chassis if required. */
                build_ha_chassis_group_ref_chassis(ctx, sb, op,
                                                   ha_ref_chassis_map);
            }
        }

Finally, here is the logical flow dump for logical switch ls1:

root@master:~# ovn-sbctl lflow-list
Datapath: "ls1" (845314a0-ad79-4ac8-ac44-9fe2421478c2)  Pipeline: ingress
  //eth.src[40] matches a multicast/broadcast source MAC
  table=0 (ls_in_port_sec_l2  ), priority=100  , match=(eth.src[40]), action=(drop;)
  table=0 (ls_in_port_sec_l2  ), priority=100  , match=(vlan.present), action=(drop;)
  table=0 (ls_in_port_sec_l2  ), priority=50   , match=(inport == "ls1-vm1" && eth.src == {00:00:00:00:00:01}), action=(next;)
  table=0 (ls_in_port_sec_l2  ), priority=50   , match=(inport == "ls1-vm2" && eth.src == {00:00:00:00:00:02}), action=(next;)
  table=0 (ls_in_port_sec_l2  ), priority=50   , match=(inport == "ls1-vm3" && eth.src == {00:00:00:00:00:03}), action=(next;)
  //a match of 1 matches every packet
  table=1 (ls_in_port_sec_ip  ), priority=0    , match=(1), action=(next;)
  table=2 (ls_in_port_sec_nd  ), priority=90   , match=(inport == "ls1-vm1" && eth.src == 00:00:00:00:00:01 && arp.sha == 00:00:00:00:00:01), action=(next;)
  table=2 (ls_in_port_sec_nd  ), priority=90   , match=(inport == "ls1-vm1" && eth.src == 00:00:00:00:00:01 && ip6 && nd && ((nd.sll == 00:00:00:00:00:00 || nd.sll == 00:00:00:00:00:01) || ((nd.tll == 00:00:00:00:00:00 || nd.tll == 00:00:00:00:00:01)))), action=(next;)
  table=2 (ls_in_port_sec_nd  ), priority=90   , match=(inport == "ls1-vm2" && eth.src == 00:00:00:00:00:02 && arp.sha == 00:00:00:00:00:02), action=(next;)
  table=2 (ls_in_port_sec_nd  ), priority=90   , match=(inport == "ls1-vm2" && eth.src == 00:00:00:00:00:02 && ip6 && nd && ((nd.sll == 00:00:00:00:00:00 || nd.sll == 00:00:00:00:00:02) || ((nd.tll == 00:00:00:00:00:00 || nd.tll == 00:00:00:00:00:02)))), action=(next;)
  table=2 (ls_in_port_sec_nd  ), priority=90   , match=(inport == "ls1-vm3" && eth.src == 00:00:00:00:00:03 && arp.sha == 00:00:00:00:00:03), action=(next;)
  table=2 (ls_in_port_sec_nd  ), priority=90   , match=(inport == "ls1-vm3" && eth.src == 00:00:00:00:00:03 && ip6 && nd && ((nd.sll == 00:00:00:00:00:00 || nd.sll == 00:00:00:00:00:03) || ((nd.tll == 00:00:00:00:00:00 || nd.tll == 00:00:00:00:00:03)))), action=(next;)
  table=2 (ls_in_port_sec_nd  ), priority=80   , match=(inport == "ls1-vm1" && (arp || nd)), action=(drop;)
  table=2 (ls_in_port_sec_nd  ), priority=80   , match=(inport == "ls1-vm2" && (arp || nd)), action=(drop;)
  table=2 (ls_in_port_sec_nd  ), priority=80   , match=(inport == "ls1-vm3" && (arp || nd)), action=(drop;)
  table=2 (ls_in_port_sec_nd  ), priority=0    , match=(1), action=(next;)
  table=3 (ls_in_lookup_fdb   ), priority=0    , match=(1), action=(next;)
  table=4 (ls_in_put_fdb      ), priority=0    , match=(1), action=(next;)
  table=5 (ls_in_pre_acl      ), priority=110  , match=(eth.dst == $svc_monitor_mac), action=(next;)
  table=5 (ls_in_pre_acl      ), priority=0    , match=(1), action=(next;)
  table=6 (ls_in_pre_lb       ), priority=110  , match=(eth.dst == $svc_monitor_mac), action=(next;)
  table=6 (ls_in_pre_lb       ), priority=110  , match=(nd || nd_rs || nd_ra || mldv1 || mldv2), action=(next;)
  table=6 (ls_in_pre_lb       ), priority=0    , match=(1), action=(next;)
  table=7 (ls_in_pre_stateful ), priority=100  , match=(reg0[0] == 1), action=(ct_next;)
  table=7 (ls_in_pre_stateful ), priority=0    , match=(1), action=(next;)
  table=8 (ls_in_acl_hint     ), priority=0    , match=(1), action=(next;)
  table=9 (ls_in_acl          ), priority=34000, match=(eth.dst == $svc_monitor_mac), action=(next;)
  table=9 (ls_in_acl          ), priority=0    , match=(1), action=(next;)
  table=10(ls_in_qos_mark     ), priority=0    , match=(1), action=(next;)
  table=11(ls_in_qos_meter    ), priority=0    , match=(1), action=(next;)
  table=12(ls_in_lb           ), priority=0    , match=(1), action=(next;)
  table=13(ls_in_stateful     ), priority=100  , match=(reg0[1] == 1), action=(ct_commit { ct_label.blocked = 0; }; next;)
  table=13(ls_in_stateful     ), priority=100  , match=(reg0[2] == 1 && ip4 && sctp), action=(reg1 = ip4.dst; reg2[0..15] = sctp.dst; ct_lb;)
  table=13(ls_in_stateful     ), priority=100  , match=(reg0[2] == 1 && ip4 && tcp), action=(reg1 = ip4.dst; reg2[0..15] = tcp.dst; ct_lb;)
  table=13(ls_in_stateful     ), priority=100  , match=(reg0[2] == 1 && ip4 && udp), action=(reg1 = ip4.dst; reg2[0..15] = udp.dst; ct_lb;)
  table=13(ls_in_stateful     ), priority=100  , match=(reg0[2] == 1 && ip6 && sctp), action=(xxreg1 = ip6.dst; reg2[0..15] = sctp.dst; ct_lb;)
  table=13(ls_in_stateful     ), priority=100  , match=(reg0[2] == 1 && ip6 && tcp), action=(xxreg1 = ip6.dst; reg2[0..15] = tcp.dst; ct_lb;)
  table=13(ls_in_stateful     ), priority=100  , match=(reg0[2] == 1 && ip6 && udp), action=(xxreg1 = ip6.dst; reg2[0..15] = udp.dst; ct_lb;)
  table=13(ls_in_stateful     ), priority=0    , match=(1), action=(next;)
  table=14(ls_in_pre_hairpin  ), priority=0    , match=(1), action=(next;)
  table=15(ls_in_nat_hairpin  ), priority=0    , match=(1), action=(next;)
  table=16(ls_in_hairpin      ), priority=0    , match=(1), action=(next;)
  table=17(ls_in_arp_rsp      ), priority=0    , match=(1), action=(next;)
  table=18(ls_in_dhcp_options ), priority=0    , match=(1), action=(next;)
  table=19(ls_in_dhcp_response), priority=0    , match=(1), action=(next;)
  table=20(ls_in_dns_lookup   ), priority=0    , match=(1), action=(next;)
  table=21(ls_in_dns_response ), priority=0    , match=(1), action=(next;)
  table=22(ls_in_external_port), priority=0    , match=(1), action=(next;)
  table=23(ls_in_l2_lkup      ), priority=110  , match=(eth.dst == $svc_monitor_mac), action=(handle_svc_check(inport);)
  table=23(ls_in_l2_lkup      ), priority=70   , match=(eth.mcast), action=(outport = "_MC_flood"; output;)
  table=23(ls_in_l2_lkup      ), priority=50   , match=(eth.dst == 00:00:00:00:00:01), action=(outport = "ls1-vm1"; output;)
  table=23(ls_in_l2_lkup      ), priority=50   , match=(eth.dst == 00:00:00:00:00:02), action=(outport = "ls1-vm2"; output;)
  table=23(ls_in_l2_lkup      ), priority=50   , match=(eth.dst == 00:00:00:00:00:03), action=(outport = "ls1-vm3"; output;)
  table=23(ls_in_l2_lkup      ), priority=0    , match=(1), action=(outport = get_fdb(eth.dst); next;)
  table=24(ls_in_l2_unknown   ), priority=50   , match=(outport == "none"), action=(drop;)
  table=24(ls_in_l2_unknown   ), priority=0    , match=(1), action=(output;)
  
Datapath: "ls1" (845314a0-ad79-4ac8-ac44-9fe2421478c2)  Pipeline: egress
  table=0 (ls_out_pre_lb      ), priority=110  , match=(eth.src == $svc_monitor_mac), action=(next;)
  table=0 (ls_out_pre_lb      ), priority=110  , match=(nd || nd_rs || nd_ra || mldv1 || mldv2), action=(next;)
  table=0 (ls_out_pre_lb      ), priority=0    , match=(1), action=(next;)
  table=1 (ls_out_pre_acl     ), priority=110  , match=(eth.src == $svc_monitor_mac), action=(next;)
  table=1 (ls_out_pre_acl     ), priority=0    , match=(1), action=(next;)
  table=2 (ls_out_pre_stateful), priority=100  , match=(reg0[0] == 1), action=(ct_next;)
  table=2 (ls_out_pre_stateful), priority=0    , match=(1), action=(next;)
  table=3 (ls_out_lb          ), priority=0    , match=(1), action=(next;)
  table=4 (ls_out_acl_hint    ), priority=0    , match=(1), action=(next;)
  table=5 (ls_out_acl         ), priority=34000, match=(eth.src == $svc_monitor_mac), action=(next;)
  table=5 (ls_out_acl         ), priority=0    , match=(1), action=(next;)
  table=6 (ls_out_qos_mark    ), priority=0    , match=(1), action=(next;)
  table=7 (ls_out_qos_meter   ), priority=0    , match=(1), action=(next;)
  table=8 (ls_out_stateful    ), priority=100  , match=(reg0[1] == 1), action=(ct_commit { ct_label.blocked = 0; }; next;)
  table=8 (ls_out_stateful    ), priority=100  , match=(reg0[2] == 1), action=(ct_lb;)
  table=8 (ls_out_stateful    ), priority=0    , match=(1), action=(next;)
  table=9 (ls_out_port_sec_ip ), priority=0    , match=(1), action=(next;)
  table=10(ls_out_port_sec_l2 ), priority=100  , match=(eth.mcast), action=(output;)
  table=10(ls_out_port_sec_l2 ), priority=50   , match=(outport == "ls1-vm1" && eth.dst == {00:00:00:00:00:01}), action=(output;)
  table=10(ls_out_port_sec_l2 ), priority=50   , match=(outport == "ls1-vm2" && eth.dst == {00:00:00:00:00:02}), action=(output;)
  table=10(ls_out_port_sec_l2 ), priority=50   , match=(outport == "ls1-vm3" && eth.dst == {00:00:00:00:00:03}), action=(output;)

See also: ovn-northd 源码分析 - 简书
