; /etc/my.cnf.d/vicidial.cnf
; MySQL/MariaDB tuning for a dedicated ViciDial database host (64 GB RAM).
; NOTE(review): this [mysqld] section appears multiple times in this file;
; duplicate-section handling is parser-dependent — keep a single copy.
[mysqld]
; ~75% of RAM on the 64 GB dedicated DB box (see hardware notes at end of file)
innodb_buffer_pool_size = 48G
; larger redo log reduces checkpoint churn under heavy dialer write load
innodb_log_file_size = 1G
; 2 = write log at commit, fsync once/second; trades up to ~1s of commits for throughput
innodb_flush_log_at_trx_commit = 2
; bypass the OS page cache to avoid double-buffering with the buffer pool
innodb_flush_method = O_DIRECT
; upper bound on simultaneous client connections
max_connections = 500
; number of table handles kept open across all threads
table_open_cache = 4096
; seconds an idle non-interactive connection is kept before being dropped
wait_timeout = 300
; /etc/my.cnf.d/vicidial.cnf
; NOTE(review): duplicate of the [mysqld] section earlier in this file —
; duplicate sections are parser-dependent (merge vs. last-wins); consolidate.
[mysqld]
; ~75% of RAM on the 64 GB dedicated DB box
innodb_buffer_pool_size = 48G
; larger redo log reduces checkpoint churn under heavy write load
innodb_log_file_size = 1G
; 2 = fsync once/second; up to ~1s of commits may be lost on OS crash
innodb_flush_log_at_trx_commit = 2
; bypass OS page cache; InnoDB buffer pool already caches pages
innodb_flush_method = O_DIRECT
; simultaneous client connection cap
max_connections = 500
; open table handle cache size
table_open_cache = 4096
; idle-connection timeout in seconds
wait_timeout = 300
; /etc/my.cnf.d/vicidial.cnf
; NOTE(review): duplicate of the [mysqld] section earlier in this file —
; duplicate sections are parser-dependent (merge vs. last-wins); consolidate.
[mysqld]
; ~75% of RAM on the 64 GB dedicated DB box
innodb_buffer_pool_size = 48G
; larger redo log reduces checkpoint churn under heavy write load
innodb_log_file_size = 1G
; 2 = fsync once/second; up to ~1s of commits may be lost on OS crash
innodb_flush_log_at_trx_commit = 2
; bypass OS page cache; InnoDB buffer pool already caches pages
innodb_flush_method = O_DIRECT
; simultaneous client connection cap
max_connections = 500
; open table handle cache size
table_open_cache = 4096
; idle-connection timeout in seconds
wait_timeout = 300
from faster_whisper import WhisperModel
import requests

# Load Whisper once at import time; int8 quantization keeps VRAM usage low.
model = WhisperModel("large-v3", device="cuda", compute_type="int8")


def process_recording(filepath):
    """Transcribe a call recording and return a short LLM-written summary.

    Args:
        filepath: Path to an audio file readable by faster-whisper.

    Returns:
        str: The 2-3 sentence summary produced by the llama3.2:8b model.

    Raises:
        requests.HTTPError: If the Ollama endpoint returns an error status.
        requests.Timeout: If the request exceeds the timeout below.
    """
    segments, info = model.transcribe(filepath, beam_size=5, language="en")
    transcript = " ".join([s.text for s in segments])
    resp = requests.post(
        "http://gpu1:11434/api/generate",
        json={
            "model": "llama3.2:8b",
            "prompt": f"Summarize this call in 2-3 sentences:\n\n{transcript}",
            "stream": False,
        },
        # Fail fast instead of hanging forever if the GPU box is unreachable.
        timeout=300,
    )
    # Surface HTTP errors explicitly rather than a confusing KeyError below.
    resp.raise_for_status()
    return resp.json()["response"]
from faster_whisper import WhisperModel
import requests

# Load Whisper once at import time; int8 quantization keeps VRAM usage low.
model = WhisperModel("large-v3", device="cuda", compute_type="int8")


def process_recording(filepath):
    """Transcribe a call recording and return a short LLM-written summary.

    Args:
        filepath: Path to an audio file readable by faster-whisper.

    Returns:
        str: The 2-3 sentence summary produced by the llama3.2:8b model.

    Raises:
        requests.HTTPError: If the Ollama endpoint returns an error status.
        requests.Timeout: If the request exceeds the timeout below.
    """
    segments, info = model.transcribe(filepath, beam_size=5, language="en")
    transcript = " ".join([s.text for s in segments])
    resp = requests.post(
        "http://gpu1:11434/api/generate",
        json={
            "model": "llama3.2:8b",
            "prompt": f"Summarize this call in 2-3 sentences:\n\n{transcript}",
            "stream": False,
        },
        # Fail fast instead of hanging forever if the GPU box is unreachable.
        timeout=300,
    )
    # Surface HTTP errors explicitly rather than a confusing KeyError below.
    resp.raise_for_status()
    return resp.json()["response"]
from faster_whisper import WhisperModel
import requests

# Load Whisper once at import time; int8 quantization keeps VRAM usage low.
model = WhisperModel("large-v3", device="cuda", compute_type="int8")


def process_recording(filepath):
    """Transcribe a call recording and return a short LLM-written summary.

    Args:
        filepath: Path to an audio file readable by faster-whisper.

    Returns:
        str: The 2-3 sentence summary produced by the llama3.2:8b model.

    Raises:
        requests.HTTPError: If the Ollama endpoint returns an error status.
        requests.Timeout: If the request exceeds the timeout below.
    """
    segments, info = model.transcribe(filepath, beam_size=5, language="en")
    transcript = " ".join([s.text for s in segments])
    resp = requests.post(
        "http://gpu1:11434/api/generate",
        json={
            "model": "llama3.2:8b",
            "prompt": f"Summarize this call in 2-3 sentences:\n\n{transcript}",
            "stream": False,
        },
        # Fail fast instead of hanging forever if the GPU box is unreachable.
        timeout=300,
    )
    # Surface HTTP errors explicitly rather than a confusing KeyError below.
    resp.raise_for_status()
    return resp.json()["response"]
; /etc/asterisk/sip.conf
; Outbound SIP trunk template for Telnyx (chan_sip).
; NOTE(review): this [telnyx](!) template appears multiple times in this file;
; keep a single definition to avoid ambiguous reloads.
; (!) marks this as a template to be inherited by concrete peer sections.
[telnyx](!)
type=peer
; Telnyx SIP edge; calls are sent to / matched against this host
host=sip.telnyx.com
fromdomain=sip.telnyx.com
; send periodic OPTIONS pings to monitor peer reachability
qualify=yes
; out-of-band DTMF per RFC 2833/4733
dtmfmode=rfc2833
; start from no codecs, then allow explicitly (order sets preference)
disallow=all
allow=ulaw
allow=g729
; force rport + symmetric RTP (comedia) for NAT traversal
nat=force_rport,comedia
; /etc/asterisk/sip.conf
; NOTE(review): duplicate of the [telnyx](!) template earlier in this file —
; consolidate to a single definition.
[telnyx](!)
type=peer
; Telnyx SIP edge; calls are sent to / matched against this host
host=sip.telnyx.com
fromdomain=sip.telnyx.com
; periodic OPTIONS pings to monitor peer reachability
qualify=yes
; out-of-band DTMF per RFC 2833/4733
dtmfmode=rfc2833
; deny all codecs first, then allow explicitly (order sets preference)
disallow=all
allow=ulaw
allow=g729
; force rport + symmetric RTP (comedia) for NAT traversal
nat=force_rport,comedia
; /etc/asterisk/sip.conf
; NOTE(review): duplicate of the [telnyx](!) template earlier in this file —
; consolidate to a single definition. The trailing nat= line of this copy is
; fused onto the hardware-notes text below it.
[telnyx](!)
type=peer
; Telnyx SIP edge; calls are sent to / matched against this host
host=sip.telnyx.com
fromdomain=sip.telnyx.com
; periodic OPTIONS pings to monitor peer reachability
qualify=yes
; out-of-band DTMF per RFC 2833/4733
dtmfmode=rfc2833
; deny all codecs first, then allow explicitly (order sets preference)
disallow=all
allow=ulaw
allow=g729
nat=force_rport,comedia
- Database: 8-16 cores, 64 GB RAM, 2x 1TB NVMe RAID1 (InnoDB buffer pool eats 48G)
- Dialer: 8 cores, 16-32 GB RAM, 500 GB NVMe, sub-5ms jitter to SIP provider
- Web/Admin: 4-8 cores, 16 GB RAM
- AI/GPU: 8-16 cores, 64 GB RAM, RTX 4090 or used RTX 3090 (~$700-1,400)