How do I create a template in my Chef recipe for two different configuration files?


I have two different configuration files for the Ganglia monitoring daemon, gmond.

The first one (this configuration file came with ganglia-gmond-3.1.7, the RPMforge package):

/* This configuration is as close to 2.5.x default behavior as possible.
   The values closely match ./gmond/metric.h definitions in 2.5.x */
globals {
  daemonize = yes
  setuid = yes
  user = ganglia
  debug_level = 0
  max_udp_msg_len = 1472
  mute = no
  deaf = yes
  allow_extra_data = yes
  host_dmax = 86400 /*secs */
  cleanup_threshold = 300 /*secs */
  gexec = no
  send_metadata_interval = 30 /*secs */
}

/*
 * The cluster attributes specified will be used as part of the <CLUSTER>
 * tag that will wrap all hosts collected by this instance.
 */
cluster {
  name = "datac1"
  owner = "valter"
  latlong = "unspecified"
  url = ""
}

/* The host section describes attributes of the host, like the location */
host {
  location = "unspecified"
}

/* Feel free to specify as many udp_send_channels as you like.  Gmond
   used to only support having a single channel */
udp_send_channel {
  host = datac1.ganglia.valter.com
  port = 8660
  ttl = 1
}

/* You can specify as many udp_recv_channels as you like as well. */
udp_recv_channel {
  port = 8660
}

/* You can specify as many tcp_accept_channels as you like to share
   an xml description of the state of the cluster */
tcp_accept_channel {
  port = 8660
}

/* Each metrics module that is referenced by gmond must be specified and
   loaded. If the module has been statically linked with gmond, it does
   not require a load path. However all dynamically loadable modules must
   include a load path. */
modules {
  module {
    name = "core_metrics"
  }
  module {
    name = "cpu_module"
    path = "modcpu.so"
  }
  module {
    name = "disk_module"
    path = "moddisk.so"
  }
  module {
    name = "load_module"
    path = "modload.so"
  }
  module {
    name = "mem_module"
    path = "modmem.so"
  }
  module {
    name = "net_module"
    path = "modnet.so"
  }
  module {
    name = "proc_module"
    path = "modproc.so"
  }
  module {
    name = "sys_module"
    path = "modsys.so"
  }
}

include ('/etc/ganglia/conf.d/*.conf')

/* The old internal 2.5.x metric array has been replaced by the following
   collection_group directives.  What follows is the default behavior for
   collecting and sending metrics that is as close to 2.5.x behavior as
   possible. */

/* This collection group will cause a heartbeat (or beacon) to be sent every
   20 seconds.  In the heartbeat is the gmond_started data which expresses
   the age of the running gmond. */
collection_group {
  collect_once = yes
  time_threshold = 20
  metric {
    name = "heartbeat"
  }
}

/* This collection group will send general info about this host every
   1200 secs.
   This information doesn't change between reboots and is only collected
   once. */
collection_group {
  collect_once = yes
  time_threshold = 1200
  metric {
    name = "cpu_num"
    title = "cpu count"
  }
  metric {
    name = "cpu_speed"
    title = "cpu speed"
  }
  metric {
    name = "mem_total"
    title = "memory total"
  }
  /* Should this be here? Swap can be added/removed between reboots. */
  metric {
    name = "swap_total"
    title = "swap space total"
  }
  metric {
    name = "boottime"
    title = "last boot time"
  }
  metric {
    name = "machine_type"
    title = "machine type"
  }
  metric {
    name = "os_name"
    title = "operating system"
  }
  metric {
    name = "os_release"
    title = "operating system release"
  }
  metric {
    name = "location"
    title = "location"
  }
}

/* This collection group will send the status of gexecd for this host
   every 300 secs. */
/* Unlike 2.5.x the default behavior is to report gexecd off. */
collection_group {
  collect_once = yes
  time_threshold = 300
  metric {
    name = "gexec"
    title = "gexec status"
  }
}

/* This collection group will collect the CPU status info every 20 secs.
   The time threshold is set to 90 seconds.  In honesty, this
   time_threshold could be set higher to reduce unnecessary
   network chatter. */
collection_group {
  collect_every = 20
  time_threshold = 90
  /* CPU status */
  metric {
    name = "cpu_user"
    value_threshold = "1.0"
    title = "cpu user"
  }
  metric {
    name = "cpu_system"
    value_threshold = "1.0"
    title = "cpu system"
  }
  metric {
    name = "cpu_idle"
    value_threshold = "5.0"
    title = "cpu idle"
  }
  metric {
    name = "cpu_nice"
    value_threshold = "1.0"
    title = "cpu nice"
  }
  metric {
    name = "cpu_aidle"
    value_threshold = "5.0"
    title = "cpu aidle"
  }
  metric {
    name = "cpu_wio"
    value_threshold = "1.0"
    title = "cpu wio"
  }
  /* The next two metrics are optional if you want more detail...
     ... since they are accounted for in cpu_system.
  metric {
    name = "cpu_intr"
    value_threshold = "1.0"
    title = "cpu intr"
  }
  metric {
    name = "cpu_sintr"
    value_threshold = "1.0"
    title = "cpu sintr"
  }
  */
}

collection_group {
  collect_every = 20
  time_threshold = 90
  /* Load averages */
  metric {
    name = "load_one"
    value_threshold = "1.0"
    title = "one minute load average"
  }
  metric {
    name = "load_five"
    value_threshold = "1.0"
    title = "five minute load average"
  }
  metric {
    name = "load_fifteen"
    value_threshold = "1.0"
    title = "fifteen minute load average"
  }
}

/* This group collects the number of running and total processes */
collection_group {
  collect_every = 80
  time_threshold = 950
  metric {
    name = "proc_run"
    value_threshold = "1.0"
    title = "total running processes"
  }
  metric {
    name = "proc_total"
    value_threshold = "1.0"
    title = "total processes"
  }
}

/* This collection group grabs the volatile memory metrics every 40 secs and
   sends them at least every 180 secs.  This time_threshold can be increased
   to reduce unneeded network traffic. */
collection_group {
  collect_every = 40
  time_threshold = 180
  metric {
    name = "mem_free"
    value_threshold = "1024.0"
    title = "free memory"
  }
  metric {
    name = "mem_shared"
    value_threshold = "1024.0"
    title = "shared memory"
  }
  metric {
    name = "mem_buffers"
    value_threshold = "1024.0"
    title = "memory buffers"
  }
  metric {
    name = "mem_cached"
    value_threshold = "1024.0"
    title = "cached memory"
  }
  metric {
    name = "swap_free"
    value_threshold = "1024.0"
    title = "free swap space"
  }
}

collection_group {
  collect_every = 40
  time_threshold = 300
  metric {
    name = "bytes_out"
    value_threshold = 4096
    title = "bytes sent"
  }
  metric {
    name = "bytes_in"
    value_threshold = 4096
    title = "bytes received"
  }
  metric {
    name = "pkts_in"
    value_threshold = 256
    title = "packets received"
  }
  metric {
    name = "pkts_out"
    value_threshold = 256
    title = "packets sent"
  }
}

/* Different from 2.5.x default since the old config made no sense */
collection_group {
  collect_every = 1800
  time_threshold = 3600
  metric {
    name = "disk_total"
    value_threshold = 1.0
    title = "total disk space"
  }
}

collection_group {
  collect_every = 40
  time_threshold = 180
  metric {
    name = "disk_free"
    value_threshold = 1.0
    title = "disk space available"
  }
  metric {
    name = "part_max_used"
    value_threshold = 1.0
    title = "maximum disk space used"
  }
}

The second one (this configuration also came with ganglia-gmond-3.1.7, but from the Amazon repository, which has its own ganglia-gmond package):

/* This configuration is as close to 2.5.x default behavior as possible.
   The values closely match ./gmond/metric.h definitions in 2.5.x */
globals {
  daemonize = yes
  setuid = yes
  user = ganglia
  debug_level = 0
  max_udp_msg_len = 1472
  mute = no
  deaf = yes
  allow_extra_data = yes
  host_dmax = 86400 /*secs */
  cleanup_threshold = 300 /*secs */
  gexec = no
  send_metadata_interval = 30 /*secs */
}

/*
 * The cluster attributes specified will be used as part of the <CLUSTER>
 * tag that will wrap all hosts collected by this instance.
 */
cluster {
  name = "datac2"
  owner = "valter"
  latlong = "unspecified"
  url = ""
}

/* The host section describes attributes of the host, like the location */
host {
  location = "unspecified"
}

/* Feel free to specify as many udp_send_channels as you like.  Gmond
   used to only support having a single channel */
udp_send_channel {
  host = datac2.ganglia.valter.com
  port = 8662
  ttl = 1
}

/* You can specify as many udp_recv_channels as you like as well. */
udp_recv_channel {
  port = 8662
}

/* You can specify as many tcp_accept_channels as you like to share
   an xml description of the state of the cluster */
tcp_accept_channel {
  port = 8662
}

/* Each metrics module that is referenced by gmond must be specified and
   loaded. If the module has been statically linked with gmond, it does
   not require a load path. However all dynamically loadable modules must
   include a load path. */
modules {
  module {
    name = "core_metrics"
  }
  module {
    name = "cpu_module"
    path = "modcpu.so"
  }
  module {
    name = "disk_module"
    path = "moddisk.so"
  }
  module {
    name = "load_module"
    path = "modload.so"
  }
  module {
    name = "mem_module"
    path = "modmem.so"
  }
  module {
    name = "net_module"
    path = "modnet.so"
  }
  module {
    name = "proc_module"
    path = "modproc.so"
  }
  module {
    name = "sys_module"
    path = "modsys.so"
  }
}

include ('/etc/ganglia/conf.d/*.conf')

/* The old internal 2.5.x metric array has been replaced by the following
   collection_group directives.  What follows is the default behavior for
   collecting and sending metrics that is as close to 2.5.x behavior as
   possible. */

/* This collection group will cause a heartbeat (or beacon) to be sent every
   20 seconds.  In the heartbeat is the gmond_started data which expresses
   the age of the running gmond. */
collection_group {
  collect_once = yes
  time_threshold = 20
  metric {
    name = "heartbeat"
  }
}

/* This collection group will send general info about this host every
   1200 secs.
   This information doesn't change between reboots and is only collected
   once. */
collection_group {
  collect_once = yes
  time_threshold = 1200
  metric {
    name = "cpu_num"
    title = "cpu count"
  }
  metric {
    name = "cpu_speed"
    title = "cpu speed"
  }
  metric {
    name = "mem_total"
    title = "memory total"
  }
  /* Should this be here? Swap can be added/removed between reboots. */
  metric {
    name = "swap_total"
    title = "swap space total"
  }
  metric {
    name = "boottime"
    title = "last boot time"
  }
  metric {
    name = "machine_type"
    title = "machine type"
  }
  metric {
    name = "os_name"
    title = "operating system"
  }
  metric {
    name = "os_release"
    title = "operating system release"
  }
  metric {
    name = "location"
    title = "location"
  }
}

/* This collection group will send the status of gexecd for this host
   every 300 secs. */
/* Unlike 2.5.x the default behavior is to report gexecd off. */
collection_group {
  collect_once = yes
  time_threshold = 300
  metric {
    name = "gexec"
    title = "gexec status"
  }
}

/* This collection group will collect the CPU status info every 20 secs.
   The time threshold is set to 90 seconds.  In honesty, this
   time_threshold could be set higher to reduce unnecessary
   network chatter. */
collection_group {
  collect_every = 20
  time_threshold = 90
  /* CPU status */
  metric {
    name = "cpu_user"
    value_threshold = "1.0"
    title = "cpu user"
  }
  metric {
    name = "cpu_system"
    value_threshold = "1.0"
    title = "cpu system"
  }
  metric {
    name = "cpu_idle"
    value_threshold = "5.0"
    title = "cpu idle"
  }
  metric {
    name = "cpu_nice"
    value_threshold = "1.0"
    title = "cpu nice"
  }
  metric {
    name = "cpu_aidle"
    value_threshold = "5.0"
    title = "cpu aidle"
  }
  metric {
    name = "cpu_wio"
    value_threshold = "1.0"
    title = "cpu wio"
  }
  /* The next two metrics are optional if you want more detail...
     ... since they are accounted for in cpu_system.
  metric {
    name = "cpu_intr"
    value_threshold = "1.0"
    title = "cpu intr"
  }
  metric {
    name = "cpu_sintr"
    value_threshold = "1.0"
    title = "cpu sintr"
  }
  */
}

collection_group {
  collect_every = 20
  time_threshold = 90
  /* Load averages */
  metric {
    name = "load_one"
    value_threshold = "1.0"
    title = "one minute load average"
  }
  metric {
    name = "load_five"
    value_threshold = "1.0"
    title = "five minute load average"
  }
  metric {
    name = "load_fifteen"
    value_threshold = "1.0"
    title = "fifteen minute load average"
  }
}

/* This group collects the number of running and total processes */
collection_group {
  collect_every = 80
  time_threshold = 950
  metric {
    name = "proc_run"
    value_threshold = "1.0"
    title = "total running processes"
  }
  metric {
    name = "proc_total"
    value_threshold = "1.0"
    title = "total processes"
  }
}

/* This collection group grabs the volatile memory metrics every 40 secs and
   sends them at least every 180 secs.  This time_threshold can be increased
   to reduce unneeded network traffic. */
collection_group {
  collect_every = 40
  time_threshold = 180
  metric {
    name = "mem_free"
    value_threshold = "1024.0"
    title = "free memory"
  }
  metric {
    name = "mem_shared"
    value_threshold = "1024.0"
    title = "shared memory"
  }
  metric {
    name = "mem_buffers"
    value_threshold = "1024.0"
    title = "memory buffers"
  }
  metric {
    name = "mem_cached"
    value_threshold = "1024.0"
    title = "cached memory"
  }
  metric {
    name = "swap_free"
    value_threshold = "1024.0"
    title = "free swap space"
  }
}

collection_group {
  collect_every = 40
  time_threshold = 300
  metric {
    name = "bytes_out"
    value_threshold = 4096
    title = "bytes sent"
  }
  metric {
    name = "bytes_in"
    value_threshold = 4096
    title = "bytes received"
  }
  metric {
    name = "pkts_in"
    value_threshold = 256
    title = "packets received"
  }
  metric {
    name = "pkts_out"
    value_threshold = 256
    title = "packets sent"
  }
}

/* Different from 2.5.x default since the old config made no sense */
collection_group {
  collect_every = 1800
  time_threshold = 3600
  metric {
    name = "disk_total"
    value_threshold = 1.0
    title = "total disk space"
  }
}

collection_group {
  collect_every = 40
  time_threshold = 180
  metric {
    name = "disk_free"
    value_threshold = 1.0
    title = "disk space available"
  }
  metric {
    name = "part_max_used"
    value_threshold = 1.0
    title = "maximum disk space used"
  }
}

Using meld I saw that the differences aren't big, but they are enough to keep the application from running, which is why I can't use the same .conf file for both. I would like to use a template for this. How can I do that? It's not only a variable value that differs from one file to the other; there are also added/removed lines, and that's where I'm stuck.

Any ideas or suggestions?

Let's start with the diff:

--- gmond.amazon    2013-05-14 23:00:23.534421793 +0400
+++ gmond.rpmforge  2013-05-14 22:59:21.614420963 +0400
@@ -20,7 +20,7 @@
  * tag that will wrap all hosts collected by this instance.
  */
 cluster {
-  name = "datac2"
+  name = "datac1"
   owner = "valter"
   latlong = "unspecified"
   url = ""
@@ -34,20 +34,20 @@
 /* Feel free to specify as many udp_send_channels as you like.  Gmond
    used to only support having a single channel */
 udp_send_channel {
-  host = datac2.ganglia.valter.com
-  port = 8662
+  host = datac1.ganglia.valter.com
+  port = 8660
   ttl = 1
 }
 
 /* You can specify as many udp_recv_channels as you like as well. */
 udp_recv_channel {
-  port = 8662
+  port = 8660
 }
 
 /* You can specify as many tcp_accept_channels as you like to share
    an xml description of the state of the cluster */
 tcp_accept_channel {
-  port = 8662
+  port = 8660
 }
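
The diff shows that only four values differ between the two files: the cluster name, the udp_send_channel host, and the ports. Everything else is identical, so those four values are all the template needs as variables. One way to keep them out of the recipe code is to define per-cluster defaults in the cookbook's attributes file and override them per node, role or environment. This is only a sketch with made-up attribute names:

# attributes/default.rb -- attribute names are made up, adjust to your cookbook
default['ganglia']['cluster_name']               = 'datac1'
default['ganglia']['udp_send_channel']['host']   = 'datac1.ganglia.valter.com'
default['ganglia']['udp_send_channel']['port']   = 8660
default['ganglia']['udp_recv_channel']['port']   = 8660
default['ganglia']['tcp_accept_channel']['port'] = 8662

Nodes in the second datacenter would then override these with "datac2" and 8662, for example in a role or environment.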

So the gmond.conf.erb template (omitting the parts that are identical in both files) would look like this:

cluster {
  name = "<%= @cluster[:name] %>"
  owner = "valter"
  latlong = "unspecified"
  url = ""
}

/* The host section describes attributes of the host, like the location */
host {
  location = "unspecified"
}

/* Feel free to specify as many udp_send_channels as you like.  Gmond
   used to only support having a single channel */
udp_send_channel {
  host = <%= @udp_send_channel[:host] %>
  port = <%= @udp_send_channel[:port] %>
  ttl = 1
}

/* You can specify as many udp_recv_channels as you like as well. */
udp_recv_channel {
  port = <%= @udp_recv_channel[:port] %>
}

/* You can specify as many tcp_accept_channels as you like to share
   an xml description of the state of the cluster */
tcp_accept_channel {
  port = <%= @tcp_accept_channel[:port] %>
}
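
If the two files ever differ by more than plain values, i.e. whole lines or blocks that exist in only one of them, the same template can cover that with an ERB conditional instead of a second template. A minimal sketch, assuming the recipe passes a boolean variable (the name include_gexec is made up):

<% if @include_gexec %>
/* this block is only rendered when :include_gexec => true is passed in */
collection_group {
  collect_once = yes
  time_threshold = 300
  metric {
    name = "gexec"
    title = "gexec status"
  }
}
<% end %>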

The template is rendered from the recipe with a template resource like this:

template "/etc/whatever/gmond.conf"   source "gmond.conf.erb"   mode 0644   owner "root"   group "root"   variables({     :cluster => {       :name => "something"     },     :udp_send_channel => {       :name => "name",       :port => 1234     },     :udp_recv_channel => {       :port => 2345     },     :tcp_accept_channel => {       :port => 3456     }   }) end 
