Multiple External Media Servers (IPs)

Greetings,
I am trying to set up a voice infrastructure with one main node and one external media node, located in different data centers, to handle specific traffic.

Public IPs have been replaced accordingly:
172.121.123.35 Main
142.121.120.42 HK Node

The Primary server runs Yeti-Admin, CDRs, Redis & SEMS (standalone).
Functionality: primary signaling & media, DB + admin.

PoP node RPC entries under System -> Components:

   Name   Node id   PoP id   RPC endpoint
   Main   5         5        127.0.0.1:7080
   HK     9         9        142.121.120.42:7080

sems.conf for the Primary server:

general {
    stderr = no
    syslog_loglevel = 2
    syslog_facility = LOCAL0

    node_id = 5

    shutdown_mode {
        code = 508
        reason = "Yeti node in shutdown mode"
        allow_uac = true
    }

    media_processor_threads = 4
    rtp_receiver_threads = 4
    session_processor_threads = 20
    sip_udp_server_threads = 2
    sip_tcp_server_threads = 2

    dead_rtp_time = 30

    default_bl_ttl = 0

    symmetric_rtp_mode = packets
    symmetric_rtp_packets = 20
}

signaling-interfaces {
    interface primary {
        default-media-interface = primary
        ip4 {
            sip-udp {
                address = 172.121.123.35
                port = 5060
                use-raw-sockets = off
            }
            sip-tcp {
                address = 172.121.123.35
                port = 5060
                connect-timeout = 2000
                static-client-port = on
                idle-timeout=900000
                use-raw-sockets = off
            }
        }
    }
}

media-interfaces {
    interface primary {
        ip4 {
            rtp {
                address = 172.121.123.35
                low-port = 16384
                high-port = 32769
                dscp = 46
                use-raw-sockets = off
            }
        }
    }
}

modules {
    module "di_log" {}

    module "mp3" {}
    module "opus" {}
    module "wav" {}
    module "gsm" {}
    module "ilbc" {}
    module "adpcm" {}
    module "l16" {}
    module "g722" {}
    module "g729bcg" {}
    module "registrar_client" {}
    module "postgresql" {}

    module "session_timer" {}
    module "jsonrpc" {
        listen{
            address = 127.0.0.1
            port = 7080
        }
        server_threads = 1
    }

    module "http_client" {}

    module-global "uac_auth" {}
    module "options_prober" {}

    module "radius_client" {}

    module "redis" {
        max_batch_size = 10
        batch_timeout = 100
        max_queue_size = 1000
    }

    module "registrar" {
        redis {
            use_functions = no
            write {
                hosts = { 127.0.0.1:6379 }
            }
            read {
                hosts = { 127.0.0.1:6379 }
            }
       }
    }

    module "yeti" {
        pop_id = 5
        lega_cdr_headers {
            header(p-charge-info, string)
            header(diversion, array)
        }

        auth {
            realm = yeti-switch
        }

        msg_logger_dir = /var/spool/sems/dump
        audio_recorder_dir = /var/spool/sems/records
        audio_recorder_compress = false

        db_refresh_interval = 5
        ip_auth_reject_if_no_matched = true

        routing {
            schema = switch21
            function = route_release
            init = init
            pass_input_interface_name = true
            master_pool {
                host = 127.0.0.1
                port = 5432
                name = master
                user =  whlsl_su
                pass = xxxxxxxxxxxx
                size = 4
                check_interval = 10
                max_exceptions = 0
                statement_timeout = 3000
            }
            failover_to_slave = false
            slave_pool {
              host = 127.0.0.1
                port = 5432
                name = master
                user =  whlsl_su
                pass = xxxxxxxxxxxx
                size = 4
                check_interval = 10
                max_exceptions = 0
                statement_timeout = 3000
            }
        }
        cdr {
            dir = /var/spool/sems/cdrs
            completed_dir = /var/spool/sems/cdrs/completed
            pool_size = 2
            auth_pool_size = 2
            schema = switch
            function = writecdr
            master {
                host = 127.0.0.1
                port = 5432
                name = cdr
                user = whlsl_su
                pass = xxxxxxxxxx
            }
            failover_to_slave = false
            slave {
                host = 127.0.0.1
                port = 5432
                name =  cdr
                user =  whlsl_su
                pass = xxxxxxxxxx
            }
            failover_requeue = true
            failover_to_file = false
            serialize_dynamic_fields = true
            batch_size = 1
            batch_timeout = 5000
            auth_batch_size = 1
            auth_batch_timeout = 20000
        }
        resources {
            reject_on_error = false
            reduce_operations = true
            write {
                hosts = { 127.0.0.1:6379 }
                timeout = 500
            }
            read {
                hosts = { 127.0.0.1:6379 }
                timeout = 1000
            }
        }
        registrations {
            check_interval = 5000
        }
        rpc {
            calls_show_limit = 10000
        }
        core_options_handling = yes
    }
}

routing {
    application = yeti
}

Node B is located in Hong Kong and runs pgqd, Redis, and SEMS.
pgqd is successfully connected to the Primary server's DB:

● pgqd.service - PgQ maintenance daemon
     Loaded: loaded (/lib/systemd/system/pgqd.service; enabled; preset: enabled)
     Active: active (running) since Sun 2025-07-13 04:49:50 GMT; 3 days ago
   Main PID: 3283 (pgqd)
      Tasks: 1 (limit: 9468)
     Memory: 2.7M
        CPU: 2min 47.178s
     CGroup: /system.slice/pgqd.service
             └─3283 /usr/bin/pgqd /etc/pgqd.ini

sems.conf for Node B:

general {
    stderr = no
    syslog_loglevel = 2
    syslog_facility = LOCAL0

    node_id = 9

    shutdown_mode {
        code = 508
        reason = "Yeti node in shutdown mode"
        allow_uac = true
    }

    media_processor_threads = 4
    rtp_receiver_threads = 4
    session_processor_threads = 20
    sip_udp_server_threads = 2
    sip_tcp_server_threads = 2

    dead_rtp_time = 30

    default_bl_ttl = 0

    symmetric_rtp_mode = packets
    symmetric_rtp_packets = 20
}

signaling-interfaces {
    interface primary {
        default-media-interface = primary
        ip4 {
            sip-udp {
                address = 142.121.120.42                        #Note: Changing this to 172.121.123.35 throws error: failed to find interface with address: 172.121.123.35
                port = 5060
                use-raw-sockets = off
            }
            sip-tcp {
                address = 142.121.120.42                       #Note: Changing this to 172.121.123.35 throws error: failed to find interface with address: 172.121.123.35
                port = 5060
                connect-timeout = 2000
                static-client-port = on
                idle-timeout=900000
                use-raw-sockets = off
            }
        }
    }
}

media-interfaces {
    interface primary {
        ip4 {
            rtp {
                address = 142.121.120.42
                low-port = 16384
                high-port = 32769
                dscp = 46
                use-raw-sockets = off
            }
        }
    }
}

modules {
    module "di_log" {}

    module "mp3" {}
    module "opus" {}
    module "wav" {}
    module "gsm" {}
    module "ilbc" {}
    module "adpcm" {}
    module "l16" {}
    module "g722" {}
    module "g729bcg" {}
    module "registrar_client" {}
    module "postgresql" {}

    module "session_timer" {}
    module "jsonrpc" {
        listen{
            address = 142.121.120.42
            port = 7080
        }
        server_threads = 1
    }

    module "http_client" {}

    module-global "uac_auth" {}
    module "options_prober" {}

    module "radius_client" {}

    module "redis" {
        max_batch_size = 10
        batch_timeout = 100
        max_queue_size = 1000
    }

    module "registrar" {
        redis {
            use_functions = no
            write {
                hosts = { 127.0.0.1:6379 }
            }
            read {
                hosts = { 127.0.0.1:6379 }
            }
       }
    }

    module "yeti" {
        pop_id = 9
        lega_cdr_headers {
            header(p-charge-info, string)
            header(diversion, array)
        }

        auth {
            realm = yeti-switch
        }

        msg_logger_dir = /var/spool/sems/dump
        audio_recorder_dir = /var/spool/sems/records
        audio_recorder_compress = false

        db_refresh_interval = 5
        ip_auth_reject_if_no_matched = true

        routing {
            schema = switch21
            function = route_release
            init = init
            pass_input_interface_name = true
            master_pool {
                host = 172.121.123.35
                port = 5432
                name = master
                user = whlsl_su
                pass = xxxxxxxxxxxxx
                size = 4
                check_interval = 10
                max_exceptions = 0
                statement_timeout = 3000
            }
            failover_to_slave = false
            slave_pool {
                host = 172.121.123.35
                port = 5432
                name = master
                user = whlsl_su
                pass = xxxxxxxxxxxxx
                size = 4
                check_interval = 10
                max_exceptions = 0
                statement_timeout = 3000
            }
        }
        cdr {
            dir = /var/spool/sems/cdrs
            completed_dir = /var/spool/sems/cdrs/completed
            pool_size = 2
            auth_pool_size = 2
            schema = switch
            function = writecdr
            master {
                host = 172.121.123.35
                port = 5432
                name = cdr
                user = whlsl_su
                pass = xxxxxxxxxxxxx
            }
            failover_to_slave = false
            slave {
                 host = 172.121.123.35
                port = 5432
                name = cdr
                user = whlsl_su
                pass = xxxxxxxxxxxxx
            }
            failover_requeue = true
            failover_to_file = false
            serialize_dynamic_fields = true
            batch_size = 1
            batch_timeout = 5000
            auth_batch_size = 1
            auth_batch_timeout = 20000
        }
        resources {
            reject_on_error = false
            reduce_operations = true
            write {
                hosts = { 127.0.0.1:6379 }
                timeout = 500
            }
            read {
                hosts = { 127.0.0.1:6379 }
                timeout = 1000
            }
        }
        registrations {
            check_interval = 5000
        }
        rpc {
            calls_show_limit = 10000
        }
        core_options_handling = yes
    }
}

routing {
    application = yeti
}

Result: The Primary server is not able to send/connect calls/media to Node B, and the dashboard on the Primary server just keeps looping/processing and cannot pull data.

Required:
What configuration changes in sems.conf are required to make this functional without overloading Yeti-Admin & Nginx on each of the nodes?

Objective
There are cases where telco partners require local media termination/origination.

Looking forward to your valuable input at the earliest.
Thanks

Important note: I added the following comment to Node B's sems.conf in this post only; the comment is not present in the real file, so it is not what is causing any error.

#Note: Changing this to 172.121.123.35 throws error: failed to find interface with address: 172.121.123.35

It is not clear why the primary server should send calls to node B, and what “not able” means.

It is not clear why the primary server should send calls to node B

There are operators who only allow media from local IPs instead of international IPs.

and what “not able” means

No RTP transactions are happening from A -> B with the current configuration.

Adding a second SEMS node will not cause any changes in RTP processing. So it is not clear why you expect one node to send RTP to another one. Where did you configure that?

This is some business requirement; it is not related to Yeti configuration. My question: according to what Yeti configuration do you expect node A to send calls (or just RTP?) to node B?

Adding a second SEMS node will not cause any changes in RTP processing. So it is not clear why you expect one node to send RTP to another one. Where did you configure that?

We are looking for a call flow under these conditions:
Calls originating/terminating for HK should only go through the selected POP under Gateway.

So the Primary should act as signaling and the media should be on Node B.

142.121.120.42 HK Node

This is not how it works. POP under Gateway has a completely different meaning, and selecting another POP will not route anything via another node. You can read about it here: Gateways — Yeti documentation

So the Primary should act as signaling and the media should be on Node B.

This is also not how it works. Media is always processed on the same node as signaling.

If you need a dedicated RTP IP for some gateway, you have to add an additional RTP interface, not a node, and use this interface in the gateway configuration.

Our scenario: the telco partner hands off a SIP trunk in a meet-me room, where we are required to set up our own switching/media to meet their compliance requirements. So this location will be our own setup, not a virtually connected interface. Our Primary server (standalone Yeti) is in Germany and Node B (SEMS) is in Hong Kong.

We need central access to signaling/billing/CDRs instead of multiple individual SBCs/media gateways; there will be multiple locations with such requirements, which would make management difficult.

So, as per your suggestion, we need only an RTP interface!
But how will this interface physically connect to the remote media/SEMS just by adding an interface? Where would the physical layer be, or how will the Primary SBC connect/route media to the remotely located SEMS for media relay?

I may sound like a novice here, but I really cannot understand how a virtual interface will interact with the physical layer that is connected in the remote DC/meet-me room.

Thanks

You could install a hardware router, a switch, or a software router on some server in your remote location, establish physical interconnections, and configure IP routing to deliver traffic to your SEMS node.

Okay, what if I want Yeti to work like this:
1.1.1.1 Master Server 1
2.2.2.2 Slave Server 2

I want 2.2.2.2's SEMS to connect to 1.1.1.1's DB and yeti-web.
So I will have 2 SEMS servers, 1.1.1.1 and 2.2.2.2, but everything would be managed by 1.1.1.1's PostgreSQL and yeti-web.

I only want 2.2.2.2 to handle the SIP flow and RTP,
and all other things would be managed by 1.1.1.1, e.g. auth checks, billing, CDRs, etc.

Of course you can install a second node and configure routing to send calls from node A to node B and from node B to some SIP trunk. But in this scenario you will have multiple CDRs, so your statistics will be incorrect and it will not be easy to manage such a configuration.

You already have this configuration.

Can you share or point to what sems.conf would look like with such a configuration?

You already have this configuration. There is nothing special in sems.conf.

so this is expected according to your routing configuration.

The Dashboard on Primary server goes in loop/procesing only & not able to pull data.

This may be caused by networking issues. The web interface has no access to the jsonrpc endpoint.
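
For reference, the relevant piece on Node B is its jsonrpc listener (already shown in the config above); here it is again with comments. The firewall remark is an assumption about a typical cross-DC setup, not something confirmed from the information in this thread.

module "jsonrpc" {
    listen{
        # Node B listens on its public IP. The yeti-web instance on the Main
        # server (172.121.123.35) must be able to open TCP connections to
        # 142.121.120.42:7080; if a firewall between the two data centers
        # blocks this, the dashboard will keep "processing" and pull no data.
        address = 142.121.120.42
        port = 7080
    }
    server_threads = 1
}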

Of course you can install a second node and configure routing to send calls from node A to node B and from node B to some SIP trunk. But in this scenario you will have multiple CDRs, so your statistics will be incorrect and it will not be easy to manage such a configuration.

There is only one CDR database, hosted on the Primary SBC; each remote SEMS handles its own data. Stats won't matter in this case, or we can pull GW stats if we really need them that way.

so this is expected according to your routing configuration

The SBC/signaling is not able to complete the call, as it is not able to find the configured remote RTP/SEMS.

Or should we consider that it is not technically possible to configure the SBC/signaling with multiple remote, geographically distributed SEMS/media/RTP nodes?

If this can be achieved, I would really appreciate the correct configuration/sems.conf profiles to manage such an infrastructure.

Thanks,

The SBC/signaling is not able to complete the call, as it is not able to find the configured remote RTP/SEMS.

Could you read the documentation about the POP parameter of a gateway? For some reason you have expectations there that are not linked to reality. There is no “Remote RTP/SEMS” and no configuration for it.

I proposed two possible solutions:

  1. Use IP routing to deliver traffic from HK to an additional RTP interface of the Main node. You have to add an additional RTP interface in sems.conf and configure IP routing on your network (a sketch follows below).
  2. Send calls from the Main node to the HK node. No sems.conf changes required.
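
For option 1, a rough sketch of what the Main node's media-interfaces block might look like. The interface name hk_rtp, the address 198.51.100.10, and the second port range are placeholders; you would replace the address with one that is actually routed to and configured on the Main server.

media-interfaces {
    interface primary {
        ip4 {
            rtp {
                address = 172.121.123.35
                low-port = 16384
                high-port = 32769
                dscp = 46
                use-raw-sockets = off
            }
        }
    }

    # Additional RTP interface (name and address are placeholders).
    # 198.51.100.10 must be an address that your network delivers to the
    # Main server via IP routing; this interface is then selected in the
    # gateway configuration in yeti-web, as described above.
    interface hk_rtp {
        ip4 {
            rtp {
                address = 198.51.100.10
                low-port = 32770        # separate range chosen arbitrarily here
                high-port = 49150
                dscp = 46
                use-raw-sockets = off
            }
        }
    }
}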

Thanks Dmitry, I will try it this way and update.