Import rest of modules.

Timo Mäkinen 2009-08-20 00:24:14 +03:00
parent 02fa10f33c
commit 3f225ced9b
39 changed files with 2056 additions and 0 deletions

14
apcupsd/manifests/init.pp Normal file

@ -0,0 +1,14 @@
class apcupsd {
package { "apcupsd":
ensure => installed,
}
service { "apcupsd":
ensure => running,
enable => true,
require => Package["apcupsd"],
}
}


@ -0,0 +1 @@

56
backuppc/files/hosts.in Normal file

@ -0,0 +1,56 @@
#============================================================= -*-perl-*-
#
# Host file list for BackupPC.
#
# DESCRIPTION
#
# This file lists all the hosts that should be backed up by
# BackupPC.
#
# Each line in the hosts file contains three fields, separated
# by white space:
#
# - The host name. If this host is a static IP address this
# must be the machine's IP host name (ie: something that can
# be looked up using nslookup or DNS). If this is a DHCP
# host then the host name must be the netbios name of the
# machine. It is possible to have a host name that contains
# spaces, but that is discouraged. Escape a space with "\", eg:
#
# craigs\ pc
#
# - DHCP flag. Set to 0 if this is a static IP address host
# or if the machine can be found using nmblookup. Otherwise,
# if the client can only be found by looking through the DHCP
# pool then set this to 1.
#
# - User name (unix login/email name) of the user who "owns"
# or uses this machine. This is the user who will be sent
# email about this machine, and this user will have permission
# to stop/start/browse/restore backups for this host. This
# user name must match the name the user authenticates with
# via apache.
#
# - Optional additional user names (comma separated, no white space) of
# users who are also allowed to stop/start/browse/restore backups
# for this client via the CGI interface. These users are not sent
# email. These do not need to be valid email names; they simply
# need to match the name the user authenticates with via apache.
#
# AUTHOR
# Craig Barratt <craig@arraycomm.com>
#
# COPYRIGHT
# Copyright (C) 2001 Craig Barratt
#
# See http://backuppc.sourceforge.net.
#
#========================================================================
#
# The first non-comment non-empty line gives the field names and should
# not be edited!!
#
host dhcp user moreUsers # <--- do not edit this line
#farside 0 craig jill,jeff # <--- example static IP host entry
#larson 1 bill # <--- example DHCP host entry

102
backuppc/manifests/init.pp Normal file

@ -0,0 +1,102 @@
define backuppc::manualclient($ensure = "present", $operatingsystem = "default") {
@@file { "/etc/BackupPC/pc/${name}.pl":
ensure => "${ensure}",
source => [ "puppet:///files/backuppc/${fqdn}.pl",
"puppet:///files/backuppc/${operatingsystem}.pl",
"puppet:///files/backuppc/default.pl",
"puppet:///backuppc/default.pl", ],
mode => 0640,
owner => root,
group => backuppc,
tag => "backuppc",
require => File["/etc/BackupPC/pc"],
notify => Exec["generate-backuppc-hosts"],
}
}
class backuppc::client {
backuppc::manualclient { "${fqdn}":
ensure => present,
operatingsystem => "${operatingsystem}",
}
}
class backuppc::server {
package { "BackupPC":
ensure => installed,
}
file { [ "/export/backuppc",
"/export/backuppc/cpool",
"/export/backuppc/pc",
"/export/backuppc/pool",
"/export/backuppc/trash", ]:
ensure => directory,
mode => 0750,
owner => backuppc,
group => root,
require => Package["BackupPC"],
}
file { "/srv/backuppc":
ensure => "/export/backuppc",
require => File["/export/backuppc"],
}
file { "/var/lib/BackupPC":
ensure => "/srv/backuppc",
force => true,
require => File["/srv/backuppc"],
}
file { "/etc/BackupPC/config.pl":
ensure => present,
source => "puppet:///files/backuppc/config.pl",
mode => 0640,
owner => root,
group => backuppc,
require => Package["BackupPC"],
notify => Service["backuppc"],
}
file { "/etc/BackupPC/hosts.in":
ensure => present,
source => [ "puppet:///files/backuppc/hosts.in",
"puppet:///backuppc/hosts.in", ],
mode => 0644,
owner => root,
group => backuppc,
require => Package["BackupPC"],
notify => Exec["generate-backuppc-hosts"],
}
file { "/etc/BackupPC/pc":
ensure => directory,
purge => true,
mode => 0750,
owner => root,
group => backuppc,
require => Package["BackupPC"],
}
exec { "generate-backuppc-hosts":
command => '(cat /etc/BackupPC/hosts.in ; find /etc/BackupPC/pc/ -name \*.pl -exec basename {} .pl \; | sed -e "s/$/ 0 adm/") > /etc/BackupPC/hosts',
path => "/bin:/usr/bin:/sbin:/usr/sbin",
refreshonly => true,
require => File["/etc/BackupPC/hosts.in"],
notify => Service["backuppc"],
}
File <<| tag == "backuppc" |>>
service { "backuppc":
ensure => running,
enable => true,
require => Package["BackupPC"],
}
}

20
custom/manifests/init.pp Normal file

@ -0,0 +1,20 @@
class custom {
file { "/srv":
ensure => directory,
mode => 0755,
owner => root,
group => $operatingsystem ? {
OpenBSD => wheel,
default => root,
},
}
if $kernel == OpenBSD {
Service {
provider => openbsd,
}
}
}


@ -0,0 +1,8 @@
require 'puppet'
Facter.add('puppet_ssldir') do
setcode do
Puppet.parse_config
Puppet.settings.value('ssldir')
end
end


@ -0,0 +1,106 @@
# Manage OpenBSD services. Enable/disable using /etc/rc.local, /etc/rc.conf.local
Puppet::Type.type(:service).provide :openbsd, :parent => :base do
desc "OpenBSD service management."
defaultfor :operatingsystem => [:openbsd]
@@rclocal = "/etc/rc.local"
@@rcconf = "/etc/rc.conf"
@@rcconflocal = "/etc/rc.conf.local"
def getrcconf
File.readlines(@@rcconf).each { |line|
if line =~ /^#{@resource[:name]}_flags=.*/
return "#{@resource[:name]}_flags"
elsif line =~ /^#{@resource[:name]}=.*/
return @resource[:name]
end
}
return false
end
def enabled?
if @resource[:start].nil? and @resource[:binary].nil?
raise Puppet::Error,
"Services must specify a start command or a binary"
end
flag = getrcconf()
if flag
File.readlines(@@rcconflocal).each { |line|
line = line.strip.split(/=/, 2)
next unless line[0] == flag
if line[1] == "NO"
return :false
end
return :true
}
return :false
else
inlocal = false
File.readlines(@@rclocal).each { |line|
line = line.strip
if not inlocal
next unless \
line == "# Add your local startup actions here."
inlocal = true
else
if line == "echo '.'"
inlocal = false
break
end
next unless line =~ /^echo -n \" #{@resource[:name]}\" ; .*/
return :true
end
}
return :false
end
end
def enable
flag = getrcconf()
if flag
newdata = ""
File.readlines(@@rcconflocal).each { |line|
if line.strip.split(/=/, 2)[0] == flag
next
else
newdata += line
end
}
if flag == @resource[:name]
newdata += "%s=YES\n" % flag
elsif flag == "#{@resource[:name]}_flags"
if @resource[:start] != nil and @resource[:binary] != nil
args = @resource[:start][/^#{@resource[:binary]} (.*)/, 1]
end
newdata += "%s=\"%s\"\n" % [flag, args]
end
f = File.open(@@rcconflocal, "w")
f.write(newdata)
f.close
else
newdata = ""
inlocal = false
File.readlines(@@rclocal).each { |line|
if line == "# Add your local startup actions here.\n"
newdata += line
newdata += "echo -n \" %s\" ; %s\n" % [@resource[:name],
@resource[:start]]
next
end
newdata += line
}
f = File.open(@@rclocal, "w")
f.write(newdata)
f.close
end
return :true
end
def disable
print "disabling #{@resource[:name]}\n"
return :true
end
end
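The provider above derives rc.conf.local flags from a resource's start command, or appends an "echo -n" line to rc.local when no flag variable exists, so managed services need an explicit binary/start pair. A minimal sketch of a manifest using it (the ntpd paths and flags are illustrative assumptions, not part of this commit):

service { "ntpd":
    ensure   => running,
    enable   => true,
    provider => openbsd,
    binary   => "/usr/sbin/ntpd",
    start    => "/usr/sbin/ntpd -s",
}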

52
dhcp/files/dhcpdump.py Executable file

@ -0,0 +1,52 @@
#!/usr/bin/env python
import re
import sys
from subprocess import Popen, PIPE
def main():
if len(sys.argv) < 2:
print >>sys.stderr, 'Usage: %s <template>' % sys.argv[0]
sys.exit(1)
for template in sys.argv[1:]:
template = open(template, 'r')
for line in template.readlines():
m = re.match('([ \t]*)--(.+)--[ \t]*$', line)
if m is not None:
indent = m.group(1)
for entry in ldapsearch(m.group(2)):
print '%s%s' % (indent, entry)
else:
sys.stdout.write(line)
template.close()
def ldapsearch(filter):
p = Popen(['ldapsearch', '-x', '-LLL', filter, 'cn', 'macAddress', 'ipHostNumber'],
bufsize=1024, stdout=PIPE, close_fds=True)
ret = []
cur = {}
for l in p.stdout.readlines():
l = l.strip()
if l == '':
try:
ret.append('host %s { option host-name "%s"; hardware ethernet %s; fixed-address %s; }' % (
cur["cn"], cur["cn"].split('.')[0], cur["macAddress"], cur["ipHostNumber"]))
except KeyError:
print "foo"
cur = {}
continue
l = l.split()
if l[0] in ('cn:', 'macAddress:', 'ipHostNumber:'):
cur[l[0][0:-1]] = l[1]
return ret
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
sys.exit()

75
dhcp/manifests/init.pp Normal file

@ -0,0 +1,75 @@
class dhcp::server {
package { "dhcp":
name => $operatingsystem ? {
OpenBSD => "isc-dhcp-server",
default => "dhcp",
},
ensure => installed,
}
file { "/usr/local/sbin/dhcpdump.py":
ensure => present,
source => "puppet:///dhcp/dhcpdump.py",
mode => 0755,
owner => root,
group => $operatingsystem ? {
OpenBSD => wheel,
default => root,
},
}
file { "/etc/dhcpd.conf.in":
ensure => present,
source => [ "puppet:///files/dhcp/dhcpd.conf.in.${hostname}",
"puppet:///files/dhcp/dhcpd.conf.in", ],
mode => 0644,
owner => root,
group => $operatingsystem ? {
OpenBSD => wheel,
default => root,
},
require => Package["dhcp"],
}
file { "dhcpd.leases":
name => $operatingsystem ? {
OpenBSD => "/var/db/dhcpd.leases",
default => "/var/lib/dhcpd/dhcpd.leases",
},
ensure => present,
owner => root,
group => $operatingsystem ? {
OpenBSD => wheel,
default => root,
},
require => Package["dhcp"],
before => Service["dhcpd"],
}
service { "dhcpd":
ensure => running,
enable => true,
require => Package["dhcp"],
}
case $operatingsystem {
OpenBSD: {
Service["dhcpd"] {
name => "isc-dhcpd",
binary => "/usr/local/sbin/dhcpd",
start => "/usr/local/sbin/dhcpd > /dev/null",
}
}
}
exec { "generate-dhcp-conf":
path => "/bin:/sbin:/usr/bin:/usr/sbin:/usr/local/bin:/usr/local/sbin",
command => "dhcpdump.py /etc/dhcpd.conf.in* > /etc/dhcpd.conf",
onlyif => "! dhcpdump.py /etc/dhcpd.conf.in* | diff /etc/dhcpd.conf -",
require => [ File["/etc/dhcpd.conf.in"],
File["/usr/local/sbin/dhcpdump.py"], ],
notify => Service["dhcpd"],
}
}


@ -0,0 +1,3 @@
# directory containing git repositories
$projectroot = "/srv/git"

37
git/manifests/init.pp Normal file

@ -0,0 +1,37 @@
class git::client {
package { "git":
ensure => installed,
}
}
class git::server inherits git::client {
include inetd::server
package { ["git-daemon", "gitweb"]:
ensure => installed,
}
file { "/git":
ensure => "/srv/git",
}
file { "/var/www/git/gitweb_config.perl":
ensure => present,
source => "puppet:///git/gitweb_config.perl",
mode => 0644,
owner => root,
group => root,
require => Package["gitweb"],
}
inetd::service { "git":
ensure => present,
require => Package["git-daemon"],
}
}

91
inetd/manifests/init.pp Normal file

@ -0,0 +1,91 @@
# Install inetd server.
#
# This class is a wrapper for installing an inetd superserver.
#
class inetd::server {
case $operatingsystem {
centos,fedora: {
include inetd::server::xinetd
}
openbsd: {
include inetd::server::inetd
}
default: {
fail("Inetd module not supported in ${operatingsystem}")
}
}
}
# Install xinetd server.
#
# This class should not be invoked directly. Instead use
# inetd::server which installs xinetd or normal inetd
# depending on the operating system in use.
#
class inetd::server::xinetd {
package { "xinetd":
ensure => installed,
}
service { "xinetd":
ensure => running,
enable => true,
require => Package["xinetd"],
}
}
# Install inetd server.
#
# This class should not be invoked directly. Instead use
# inetd::server which installs xinetd or normal inetd
# depending on the operating system in use.
#
class inetd::server::inetd {
service { "inetd":
ensure => running,
enable => true,
}
}
# Configure inetd service.
#
# === Parameters
#
# $name:
# Service name
# $ensure:
# Set to present to enable service and absent to disable.
#
# === Sample usage
#
# inetd::service { "time-stream":
# ensure => present,
# }
#
define inetd::service($ensure = present) {
case $operatingsystem {
centos,fedora: {
service { "${name}":
enable => $ensure ? {
present => true,
absent => false,
},
notify => Service["xinetd"],
}
}
default: {
fail("Inetd module not supported in ${operatingsystem}")
}
}
}

5
iscsi/files/targets.conf Normal file

@ -0,0 +1,5 @@
#
# TGTD targets file
#
include /etc/tgt/target.d/*.conf

98
iscsi/manifests/init.pp Normal file

@ -0,0 +1,98 @@
class iscsi::server {
package { "scsi-target-utils":
ensure => installed,
}
service { "tgtd":
ensure => running,
enable => true,
require => Package["scsi-target-utils"],
}
file { "/etc/tgt/targets.conf":
ensure => present,
source => [ "puppet:///files/iscsi/targets.conf.${fqdn}",
"puppet:///files/iscsi/targets.conf",
"puppet:///iscsi/targets.conf", ],
mode => 0600,
owner => root,
group => root,
require => Package["scsi-target-utils"],
}
file { "/etc/tgt/target.d":
ensure => directory,
mode => 0700,
owner => root,
group => root,
require => Package["scsi-target-utils"],
}
exec { "iscsi-refresh":
command => "tgt-admin -e",
path => "/bin:/usr/bin:/sbin:/usr/sbin",
onlyif => "tgt-admin -e -p | egrep '^tgtadm '",
require => Service["tgtd"],
}
}
define iscsi::target($tid, $initiator = "ALL", $ensure = "present") {
include iscsi::server
$iqn = sprintf("iqn.2005-08.tgt:%s%s", $hostname, regsubst($name, "/", ".", "G"))
case $ensure {
"present": {
file { "/etc/tgt/target.d/${tid}.conf":
ensure => present,
content => template("iscsi/tid.conf.erb"),
mode => 0600,
owner => root,
group => root,
require => File["/etc/tgt/target.d"],
before => Exec["iscsi-refresh"],
}
}
"absent": {
file { "/etc/tgt/target.d/${tid}.conf":
ensure => absent,
before => Exec["iscsi-refresh"],
}
}
}
}
class iscsi::initiator {
package { "iscsi-initiator-utils":
ensure => installed,
}
service { "iscsid":
ensure => running,
enable => true,
require => Package["iscsi-initiator-utils"],
}
}
define iscsi::connect($portal) {
include iscsi::initiator
exec { "iscsi-connect-${name}":
path => "/bin:/usr/bin:/sbin:/usr/sbin",
command => "iscsiadm --mode discovery --type sendtargets --portal ${portal} && iscsiadm --mode node --targetname ${name} --portal ${portal} --login",
onlyif => "! iscsiadm --mode session | egrep '${portal}:[0-9]*,[0-9]* ${name}'\$",
require => Service["iscsid"],
}
}
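Neither define above carries a usage comment; as a hedged sketch (the volume path, tid, and addresses are made-up examples), a target host would export a block device and an initiator would log in to the resulting IQN like this:

# on the storage server (hostname "storage1")
iscsi::target { "/dev/vg0/backup":
    tid       => 1,
    initiator => "192.168.0.10",
}

# on the client
iscsi::connect { "iqn.2005-08.tgt:storage1.dev.vg0.backup":
    portal => "192.168.0.5",
}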


@ -0,0 +1,4 @@
<target <%= iqn -%>>
backing-store <%= name %>
initiator-address <%= initiator %>
</target>


@ -0,0 +1,44 @@
class kerberos::client {
case $operatingsystem {
centos,fedora: {
package { ["krb5-workstation", "pam-krb5"]:
ensure => installed,
}
}
}
file { "/etc/krb5.conf":
ensure => present,
mode => 0644,
owner => root,
group => $operatingsystem ? {
openbsd => wheel,
default => root,
},
}
}
class kerberos::server inherits kerberos::client {
package { "heimdal-server":
ensure => installed,
}
}
define kerberos::keytab($principals = [], $ensure = present, $owner = "root", $group = "root", $mode = "0600") {
file { "${name}":
ensure => $ensure,
content => template("kerberos/keytab.erb"),
mode => "${mode}",
owner => "${owner}",
group => "${group}",
}
}
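The keytab define above renders its content from the ERB template that follows, which additionally expects $kerberos_user and $kerberos_pass to be resolvable in scope for the kadmin call. A hedged usage sketch (the principal name and admin credentials are assumptions):

$kerberos_user = "puppet/admin"
$kerberos_pass = "secret"

kerberos::keytab { "/etc/krb5.keytab":
    principals => [ "host/${fqdn}" ],
    owner      => "root",
    group      => "root",
    mode       => "0600",
}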


@ -0,0 +1,73 @@
<%
require 'digest/md5'
require 'expect'
require 'tempfile'
require 'pty'
config = {}
config['cachedir'] = '/var/cache/puppet'
config['kadmin'] = '/opt/heimdal/sbin/kadmin'
config['klist'] = '/usr/kerberos/bin/klist'
# set global vars
cachefile = File.join(config['cachedir'],
fqdn + '.' + Digest::MD5.hexdigest(name))
# function to check if keytab contains required principals
def check_keytab(config, keytab, principals)
entries = []
IO.popen(sprintf('%s -k %s', config['klist'], keytab), mode='r') { |f|
f.readlines.each do |l|
next unless l =~ / \d+ .*/
entries << l.split()[1]
end
}
t = principals & entries.uniq
if t.size != principals.size
return false
else
return true
end
end
# check if we have cached keytab up to date
cached = true
if File.exists?(cachefile)
if not check_keytab(config, cachefile, principals)
cached = false
File.unlink(cachefile)
end
else
cached = false
end
# create new keytab if cache is not up to date
if not cached
cmd = sprintf('%s -p %s ext_keytab --keytab=%s %s', config['kadmin'],
kerberos_user, cachefile, principals.join(' '))
retval = nil
PTY.getpty(cmd) do |r,w,pid|
r.expect(/^.*'s Password:\s+/)
w.puts kerberos_pass + "\n"
begin
pid, retval = Process.wait2(pid)
rescue
nil
end
end
if not File.exists?(cachefile)
raise 'Failed to create keytab ' + name
elsif not check_keytab(config, cachefile, principals)
raise 'Invalid keytab ' + name + ' created'
end
end
# read keytab into memory
data = File.open(cachefile).read
-%><%= data -%>

78
munin/files/munin.conf.in Normal file

@ -0,0 +1,78 @@
# Example configuration file for Munin, generated by 'make build'
# The next three variables specify where the RRD databases, the HTML
# output, and the logs are stored, respectively. They must all be
# writable by the user running munin-cron.
dbdir /var/lib/munin
htmldir /var/www/html/munin
logdir /var/log/munin
rundir /var/run/munin
# Where to look for the HTML templates
tmpldir /etc/munin/templates
# Make graphs show values per minute instead of per second
#graph_period minute
# Graphics files are normally generated by munin-graph, no matter if
# the graphs are used or not. You can change this to
# on-demand-graphing by following the instructions in
# http://munin.projects.linpro.no/wiki/CgiHowto
#
#graph_strategy cgi
# Drop somejuser@fnord.comm and anotheruser@blibb.comm an email every time
# something changes (OK -> WARNING, CRITICAL -> OK, etc)
#contact.someuser.command mail -s "Munin notification" somejuser@fnord.comm
#contact.anotheruser.command mail -s "Munin notification" anotheruser@blibb.comm
#
# For those with Nagios, the following might come in handy. In addition,
# the services must be defined in the Nagios server as well.
#contact.nagios.command /usr/sbin/send_nsca -H nagios.host.com -c /etc/send_nsca.cfg
# a simple host tree
#[localhost]
# address 127.0.0.1
# use_node_name yes
#
# A more complex example of a host tree
#
## First our "normal" host.
# [fii.foo.com]
# address foo
#
## Then our other host...
# [fay.foo.com]
# address fay
#
## Then we want totals...
# [foo.com;Totals] #Force it into the "foo.com"-domain...
# update no # Turn off data-fetching for this "host".
#
# # The graph "load1". We want to see the loads of both machines...
# # "fii=fii.foo.com:load.load" means "label=machine:graph.field"
# load1.graph_title Loads side by side
# load1.graph_order fii=fii.foo.com:load.load fay=fay.foo.com:load.load
#
# # The graph "load2". Now we want them stacked on top of each other.
# load2.graph_title Loads on top of each other
# load2.dummy_field.stack fii=fii.foo.com:load.load fay=fay.foo.com:load.load
# load2.dummy_field.draw AREA # We want area instead of the default LINE2.
# load2.dummy_field.label dummy # This is needed. Silly, really.
#
# # The graph "load3". Now we want them summarised into one field
# load3.graph_title Loads summarised
# load3.combined_loads.sum fii.foo.com:load.load fay.foo.com:load.load
# load3.combined_loads.label Combined loads # Must be set, as this is
# # not a dummy field!
#
## ...and on a side note, I want them listed in another order (default is
## alphabetically)
#
# # Since [foo.com] would be interpreted as a host in the domain "com", we
# # specify that this is a domain by adding a semicolon.
# [foo.com;]
# node_order Totals fii.foo.com fay.foo.com
#


@ -0,0 +1,2 @@
[vmware*]
user root

56
munin/files/plugins/vmware_vms Executable file

@ -0,0 +1,56 @@
#!/bin/sh
#
# Plugin to monitor running and registered virtual machines in the system.
#
# Parameters:
#
# config (required)
# autoconf (optional - used by munin-config)
#
#%# family=auto
#%# capabilities=autoconf
if [ "$1" = "autoconf" ]; then
if [ -x /usr/bin/vmware-vim-cmd ]; then
echo yes
exit 0
else
echo no
exit 1
fi
fi
if [ "$1" = "config" ]; then
echo 'graph_title VMware virtual machines'
echo 'graph_vlabel number of virtual machines'
echo 'graph_category vmware'
echo 'graph_info This graph monitors registered and running virtual machines.'
echo 'running.label running'
echo 'running.info Running virtual machines.'
echo 'registered.label registered'
echo 'registered.info Registered virtual machines.'
exit 0
fi
vmware-vim-cmd vmsvc/getallvms | awk '
BEGIN {
registered = 0;
running = 0;
}
{
if (/^[0-9]+/) {
registered++;
("vmware-vim-cmd vmsvc/power.getstate " $1 " | grep Powered") | getline state;
if (state == "Powered on") {
running++;
}
}
}
END {
print "registered.value " registered
print "running.value " running
}
'


@ -0,0 +1,79 @@
head 1.1;
access;
symbols;
locks; strict;
comment @# @;
1.1
date 2009.05.28.10.27.06; author root; state Exp;
branches;
next ;
desc
@@
1.1
log
@Initial revision
@
text
@#!/bin/sh
#
# Plugin to monitor running and registered virtual machines in the system.
#
# Parameters:
#
# config (required)
# autoconf (optional - used by munin-config)
#
#%# family=auto
#%# capabilities=autoconf
if [ "$1" = "autoconf" ]; then
if [ -x /usr/bin/vmware-vim-cmd ]; then
echo yes
exit 0
else
echo no
exit 1
fi
fi
if [ "$1" = "config" ]; then
echo 'graph_title VMware virtual machines'
echo 'graph_vlabel number of virtual machines'
echo 'graph_category vmware'
echo 'graph_info This graph monitors registered and running virtual machines.'
echo 'running.label running'
echo 'running.info Running virtual machines.'
echo 'registered.label registered'
echo 'registered.info Registered virtual machines.'
exit 0
fi
vmware-vim-cmd vmsvc/getallvms | awk '
BEGIN {
registered = 0;
running = 0;
}
{
if (/^[0-9]+/) {
registered++;
("vmware-vim-cmd vmsvc/power.getstate " $1 " | grep Powered") | getline state;
if (state == "Powered on") {
running++;
}
}
}
END {
print "registered.value " registered
print "running.value " running
}
'
@

141
munin/manifests/init.pp Normal file

@ -0,0 +1,141 @@
# Install and configure munin node.
#
class munin::node {
package { "munin-node":
ensure => installed,
}
service { "munin-node":
ensure => running,
enable => true,
require => Package["munin-node"],
}
file { "/etc/munin/munin-node.conf":
ensure => present,
content => template("munin/munin-node.conf.erb"),
owner => root,
group => root,
mode => 0644,
require => Package["munin-node"],
notify => Service["munin-node"],
}
@@file { "/etc/munin/nodes.d/${fqdn}.conf":
content => "[${fqdn}]\n address ${ipaddress}\n use_node_name yes\n",
ensure => present,
tag => "munin",
notify => Exec["generate-munin-conf"],
}
exec { "munin-node-configure":
command => "munin-node-configure ; true",
path => "/bin:/usr/bin:/sbin:/usr/sbin",
user => root,
refreshonly => true,
require => Package["munin-node"],
subscribe => Package["munin-node"],
notify => Service["munin-node"],
}
munin::plugin { "vmware_vms": config => "vmware" }
}
# Add new custom munin plugin.
#
# === Parameters
#
# $name:
# Plugin name to install.
# $config:
# Configuration file name associated with plugin. Defaults to none.
#
# === Sample usage
#
# munin::plugin { "vmware_vms":
# config => "vmware",
# }
#
define munin::plugin($config = "") {
file { "/usr/share/munin/plugins/${name}":
ensure => present,
source => "puppet:///munin/plugins/${name}",
owner => root,
group => root,
mode => 0755,
require => Package["munin-node"],
}
if ($config) {
file { "/etc/munin/plugin-conf.d/${config}":
ensure => present,
source => [ "puppet:///files/munin/plugin-conf/${config}.${fqdn}",
"puppet:///files/munin/plugin-conf/${config}",
"puppet:///munin/plugin-conf/${config}", ],
owner => root,
group => root,
mode => 0644,
notify => Service["munin-node"],
require => File["/usr/share/munin/plugins/${name}"],
}
}
exec { "munin-enable-${name}":
command => "ln -s /usr/share/munin/plugins/${name} /etc/munin/plugins/${name}",
path => "/bin:/usr/bin:/sbin:/usr/sbin",
user => root,
onlyif => [ "! test -h /etc/munin/plugins/${name}",
"/usr/share/munin/plugins/${name} autoconf", ],
notify => Service["munin-node"],
require => File["/usr/share/munin/plugins/${name}"],
}
}
# Install and configure munin server.
#
# === Requires
#
# * Storedconfigs
#
class munin::server {
package { "munin":
ensure => installed,
}
file { "/etc/munin/nodes.d":
ensure => directory,
owner => root,
group => root,
mode => 0755,
require => Package["munin"],
}
file { "/etc/munin/munin.conf.in":
ensure => present,
source => "puppet:///munin/munin.conf.in",
owner => root,
group => root,
mode => 0644,
require => Package["munin"],
notify => Exec["generate-munin-conf"],
}
exec { "generate-munin-conf":
command => "cat /etc/munin/munin.conf.in /etc/munin/nodes.d/*.conf > /etc/munin/munin.conf",
path => "/bin:/usr/bin:/sbin:/usr/sbin",
user => root,
refreshonly => true,
require => File["/etc/munin/munin.conf.in"],
}
File <<| tag == "munin" |>>
}
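Because the node class exports its /etc/munin/nodes.d fragment and the server collects everything tagged "munin" via storedconfigs, wiring the two together is only a matter of class placement. A sketch of the node declarations (host names and the $munin_allow regexps are illustrative; munin-node.conf.erb below requires $munin_allow to be set):

node "monitor.example.com" {
    $munin_allow = '^192\.168\.0\..*$'
    include munin::node
    include munin::server
}

node default {
    $munin_allow = '^192\.168\.0\.10$'
    include munin::node
}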


@ -0,0 +1,44 @@
#
# Example config-file for munin-node
#
log_level 4
log_file /var/log/munin/munin-node.log
pid_file /var/run/munin/munin-node.pid
background 1
setseid 1
user root
group root
setsid yes
# Regexps for files to ignore
ignore_file ~$
ignore_file \.bak$
ignore_file %$
ignore_file \.dpkg-(tmp|new|old|dist)$
ignore_file \.rpm(save|new)$
ignore_file \.pod$
# Set this if the client doesn't report the correct hostname when
# telnetting to localhost, port 4949
#
#host_name ppc3.fedora.redhat.com
host_name <%= fqdn %>
# A list of addresses that are allowed to connect. This must be a
# regular expression, due to brain damage in Net::Server, which
# doesn't understand CIDR-style network notation. You may repeat
# the allow line as many times as you'd like
allow <%= munin_allow %>
# Which address to bind to;
host <%= ipaddress %>
# host 127.0.0.1
# And which port
port 4949

23
ssh/manifests/init.pp Normal file

@ -0,0 +1,23 @@
# Class: ssh::known_hosts
#
# Install a global ssh_known_hosts file generated from the LDAP directory.
#
# === Dependencies
#
# Template file generation requires the Ruby LDAP bindings[http://ruby-ldap.sourceforge.net/] on the puppet server.
#
class ssh::known_hosts {
file { "/etc/ssh/ssh_known_hosts":
ensure => present,
content => template("ssh/ssh_known_hosts.erb"),
mode => 0644,
owner => root,
group => $operatingsystem ? {
OpenBSD => wheel,
default => root,
},
}
}


@ -0,0 +1,66 @@
<%
require 'ldap'
require 'uri'
basedn = ''
conn = ''
f = File.new('/etc/openldap/ldap.conf', 'r')
f.readlines.each do |line|
line = line.strip
next if line =~ /^#/
next if line == ''
line = line.split
if line[0] == 'BASE'
basedn = line[1]
elsif line[0] == 'URI'
line.shift
line.each do |uri|
uri = URI.parse(uri)
begin
if uri.scheme == 'ldaps'
if ! uri.port
uri.port = 636
end
conn = LDAP::SSLConn.new(uri.host, uri.port)
else
if ! uri.port
uri.port = 389
end
conn = LDAP::Conn.new(uri.host, uri.port)
end
conn.bind
break
rescue LDAP::ResultError
next
end
end
end
end
f.close
filter = '(&(objectClass=ipHost)(sshPublicKey=*))'
attrs = ['cn', 'sshPublicKey', 'ipHostNumber']
data = []
conn.search(basedn, LDAP::LDAP_SCOPE_SUBTREE, filter, attrs) { |entry|
names = []
entry.vals('cn').each do |v|
names.push(v)
names.push(v.split('.')[0])
end
names.push(entry.vals('ipHostNumber')[0])
names = names.uniq.sort
data.push(names.join(',') + ' ' + entry.vals('sshPublicKey')[0])
}
data.sort!
data.each do |line|
%><%= line %>
<%
end
%>

31
sysctl/manifests/init.pp Normal file

@ -0,0 +1,31 @@
# Set sysctl value
#
# === Parameters
#
# $name:
# Sysctl key to set.
# $value:
# Value for given key.
#
# === Sample usage
#
# sysctl { "vm.swappinesss":
# value => "100",
# }
#
define sysctl::set($value) {
exec { "sysctl-${name}":
command => "sysctl -w ${name}='${value}'",
path => "/bin:/usr/bin:/sbin:/usr/sbin",
unless => "sysctl -n ${name} | egrep '^${value}'",
}
exec { "sysctl-${name}-save":
path => "/bin:/usr/bin:/sbin:/usr/sbin",
command => "echo '${name}=${value}' >> /etc/sysctl.conf",
unless => "egrep '^${name}=' /etc/sysctl.conf",
}
}

11
time/manifests/init.pp Normal file

@ -0,0 +1,11 @@
# Install time (time-stream) server.
#
class time::server {
include inetd::server
inetd::service { "time-stream":
ensure => present,
}
}


@ -0,0 +1,93 @@
#!/bin/sh
. /usr/local/lib/vmware.sh
get_pipe_file() {
vmware-vim-cmd vmsvc/device.getdevices $1 | awk '
/backing = \(vim.vm.device.VirtualSerialPort.PipeBackingInfo\)/ {
section = 1;
}
section == 1 {
if (/},/) {
if (pipe) {
print pipe
}
section = 0;
} else if (/pipeName = /) {
pipe = $3;
}
}
' | sed -n 's/^"\(.*\)",/\1/p'
}
usage() {
echo "Usage: `basename $0` [-g] <vm>" 1>&2
exit 1
}
SOCAT="`which socat 2> /dev/null`"
if [ $# -gt 2 ]; then
usage
elif [ $# -eq 2 ]; then
case $1 in
-g)
serial=0
;;
-s)
if [ "${SOCAT}" = "" ]; then
echo "Serial console not avaible, socat is missing" 1>&2
exit 1
fi
serial=2
;;
*)
usage
esac
vm="$2"
elif [ $# -eq 1 ]; then
vm="$1"
else
usage
fi
vmid="`vmid \"${vm}\"`"
if [ "${vmid}" = "" ]; then
echo "Cannot find virtual machine ${vm}" 1>&2
exit 1
fi
if [ "${serial}" != "0" -a "${SOCAT}" != "" ]; then
pipe="`get_pipe_file ${vmid}`"
if [ "${pipe}" != "" ]; then
echo ${pipe} | egrep -q "^/"
if [ $? -ne 0 ]; then
vmpath="`abspath ${vmid}`"
pipe="`dirname \"${vmpath}\"`/${pipe}"
fi
screen ${SOCAT} unix-connect:${pipe} stdio,echo=0,raw
exit $?
elif [ "${serial}" = "2" ]; then
echo "Serial console not available for virtual machine ${vm}" 1>&2
exit 1
fi
fi
platform="`uname -i`"
case ${platform} in
x86_64)
platform=x64
;;
*)
platform=x86
;;
esac
xpifile="`find /usr/lib/vmware/webAccess/tomcat/apache-tomcat-*/webapps/ui/plugin/vmware-vmrc-linux-${platform}.xpi`"
tmpdir="`mktemp -d /tmp/vmware-vmrc-${LOGNAME}-XXXXXXXXXX`" && {
cd ${tmpdir}
unzip -q ${xpifile}
vmware-vim-cmd vmsvc/acquiremksticket ${vmid}
./plugins/vmware-vmrc -u root -h localhost:8333 -M ${vmid}
rm -rf ${tmpdir}
}


@ -0,0 +1,28 @@
#!/bin/sh
. /usr/local/lib/vmware.sh
if [ "$1" != "-h" ]; then
echo "Name ID Mem(MiB) VCPUs State"
fi
list_vms | while read vm ; do
vmname="`echo ${vm} | cut -d '|' -f 2`"
vmid=`echo ${vm} | cut -d '|' -f 1`
printf '%-25s %5s' ${vmname} ${vmid}
vmware-vim-cmd vmsvc/get.summary ${vmid} | \
sed -n 's/^[ ]*\(powerState\|memorySizeMB\|numCpu\) = \(.*\),[ ]*$/\1 \2/p' | \
awk '
{
if ($1 == "powerState") {
power=substr($2, 2, length($2)-2)
} else if ($1 == "memorySizeMB") {
memory=$2
} else if ($1 == "numCpu") {
cpus=$2
}
}
END {
printf "%9s %5s %s\n", memory, cpus, power
}
'
done


@ -0,0 +1,16 @@
#!/bin/sh
. /usr/local/lib/vmware.sh
if [ $# -ne 1 ]; then
echo "Usage: `basename $0` <vmx>" 1>&2
exit 1
fi
vmx="`abspath \"${1}\"`"
if [ ! -f "${vmx}" ]; then
echo "Cannot find vmx file ${vmx}" 1>&2
exit 1
fi
vmware-vim-cmd solo/registervm "${vmx}"


@ -0,0 +1,18 @@
#!/bin/sh
. /usr/local/lib/vmware.sh
if [ $# -ne 1 ]; then
echo "Usage: `basename $0` <vm>" 1>&2
exit 1
fi
vm="$1"
vmid="`vmid \"${vm}\"`"
if [ "${vmid}" = "" ]; then
echo "Cannot find virtual machine ${vm}" 1>&2
exit 1
fi
vmware-vim-cmd vmsvc/power.on ${vmid}


@ -0,0 +1,33 @@
#!/bin/sh
. /usr/local/lib/vmware.sh
if [ $# -eq 1 ]; then
mode="power.shutdown"
elif [ $# -eq 2 ]; then
case $2 in
hard)
mode="power.off"
;;
soft)
mode="power.shutdown"
;;
*)
echo "Invalid power mode $2" 1>&2
exit 1
;;
esac
else
echo "Usage: `basename $0` <vm> [hard|soft]" 1>&2
exit 1
fi
vm="$1"
vmid="`vmid \"${vm}\"`"
if [ "${vmid}" = "" ]; then
echo "Cannot find virtual machine ${vm}" 1>&2
exit 1
fi
vmware-vim-cmd vmsvc/${mode} ${vmid}


@ -0,0 +1,18 @@
#!/bin/sh
. /usr/local/lib/vmware.sh
if [ $# -ne 1 ]; then
echo "Usage: `basename $0` <vm>" 1>&2
exit 1
fi
vm="$1"
vmid="`vmid \"${vm}\"`"
if [ "${vmid}" = "" ]; then
echo "Cannot find virtual machine ${vm}" 1>&2
exit 1
fi
vmware-vim-cmd vmsvc/power.suspend ${vmid}


@ -0,0 +1,18 @@
#!/bin/sh
. /usr/local/lib/vmware.sh
if [ $# -ne 1 ]; then
echo "Usage: `basename $0` <vm>" 1>&2
exit 1
fi
vm="$1"
vmid="`vmid \"$1\"`"
if [ "${vmid}" == "" ]; then
echo "Cannot find virtual machine ${vm}" 1>&2
exit 1
fi
vmware-vim-cmd vmsvc/unregister ${vmid}


@ -0,0 +1,113 @@
#!/bin/bash
#
# $Id: vmware.sh,v 1.4 2009/07/29 20:04:19 root Exp $
#
# List all datastores on server
list_datastores() {
vmware-vim-cmd hostsvc/storage/fs_info | awk '
/^[ ]*path = / {
path = substr($3, 2, length($3)-3)
}
/^[ ]*name = / {
print substr($3, 2, length($3)-3) " " path
}
'
}
# Get list of all registered virtual machines.
#
# Returns list in format:
#
# vmid|displayName|path
#
list_vms() {
vmware-vim-cmd vmsvc/getallvms | sed -n \
's/^\([0-9][0-9]*\)[ ]*\([^ ]*\)[ ]*\(\[[^ ]*\] [^ ]*\).*/\1|\2|\3/p'
}
# Convert given path into datastore format.
#
# Eg. dspath /vmfs/volumes/mystore/foo.vmx returns
# [mystore] foo.vmx
#
dspath() {
case "$1" in
/*)
list_datastores | while read n p ; do
echo "$1" | egrep -q "^${p}" || continue
echo -n "[${n}] "
echo "$1" | cut -c `echo ${p} | wc -m`- | cut -c 2-
break
done
;;
[*)
echo "$1"
;;
esac
}
# Convert given path into filesystem format.
#
# Eg. abspath [mystore] foo.vmx returns
# /vmfs/volumes/mystore/foo.vmx
#
abspath() {
case "$1" in
/*)
echo "$1"
;;
[*)
ds=`echo "$1" | sed -e 's/^\[\(.*\)\] .*$/\1/'`
vmware-vim-cmd hostsvc/datastore/info ds-local | \
sed -n 's/^[ ]*path = \"\(.*\)\",[ ]*/\1/p' | uniq | tr '\n' '/'
echo "$1" | sed -e 's/^\[.*\] \(.*\)$/\1/'
;;
[0-9]*)
abspath "`list_vms | sed -n \"s/^$1|.*|\(\[.*\] .*\)$/\1/p\"`"
;;
esac
}
# Get id for given virtual machine
#
vmid() {
case "$1" in
/*)
ds="`dspath "\${1}\"`"
if [ "${ds}" == "" ]; then
exit
fi
list_vms | awk -F'|' '{print $1 " " $3}' | while read vmid vmpath ; do
if [ "${vmpath}" = "${ds}" ]; then
echo ${vmid}
break
fi
done
;;
[*)
list_vms | awk -F'|' '{print $1 " " $3}' | while read vmid vmpath ; do
if [ "${vmpath}" = "${1}" ]; then
echo ${vmid}
break
fi
done
;;
*)
list_vms | awk -F'|' '{print $1 " " $2}' | while read vmid vmname ; do
if [ "${vmid}" = "${1}" ]; then
echo ${vmid}
break
elif [ "${vmname}" = "${1}" ]; then
echo ${vmid}
break
fi
done
;;
esac
}

34
vmware/manifests/guest.pp Normal file

@ -0,0 +1,34 @@
class vmware::guest {
case $virtual {
vmware: {
case $kernel {
Linux: { include vmware::guest::linux }
}
}
}
}
class vmware::guest::linux {
package { "VMwareTools":
ensure => installed,
}
exec { "vmware-config-tools.pl":
command => "unset DISPLAY REMOTEHOST SSH_CONNECTION ; perl /usr/bin/vmware-config-tools.pl -d",
path => "/bin:/usr/bin:/sbin:/usr/sbin",
environment => [ "PAGER=/bin/cat", ],
unless => "test -f /lib/modules/$kernelrelease/misc/vmci.ko",
require => Package["VMwareTools"],
}
service { "vmware-tools":
enable => true,
ensure => running,
subscribe => Exec["vmware-config-tools.pl"],
require => Exec["vmware-config-tools.pl"],
}
}

3
vmware/manifests/init.pp Normal file

@ -0,0 +1,3 @@
import "guest.pp"
import "server.pp"

280
vmware/manifests/server.pp Normal file

@ -0,0 +1,280 @@
class vmware::server {
package { "VMware-server":
ensure => installed,
}
service { "vmware":
ensure => running,
enable => true,
hasstatus => true,
start => "pkill 'vmnet-' ; /sbin/service vmware stop ; rm -f /etc/vmware/not_configured ; /sbin/service vmware start && sleep 5",
stop => "pkill 'vmnet-' ; /sbin/service vmware stop",
require => [ Package["VMware-server"],
Exec["vmware-config.pl"], ],
}
# seems that vmware init script fails if pid files are missing for vmnet
# processes, so kill them by force first
exec { "vmware-config.pl":
command => "pkill 'vmnet-' ; perl /usr/bin/vmware-config.pl --default EULA_AGREED=yes && rm -f /etc/vmware/not_configured",
path => "/bin:/usr/bin:/sbin:/usr/sbin",
environment => [ "PAGER=/bin/cat", ],
unless => "test ! -f /etc/vmware/not_configured -a -f /lib/modules/$kernelrelease/misc/vmci.ko",
require => Package["VMware-server"],
notify => Service["vmware"],
}
if $vmware_serial {
exec { "vmware-set-serial":
command => "/usr/lib/vmware/bin/vmware-vmx --new-sn ${vmware_serial}",
path => "/bin:/usr/bin:/sbin:/usr/sbin",
user => root,
creates => "/etc/vmware/license.vs.1.0-00",
require => Package["VMware-server"],
before => Exec["vmware-config.pl"],
}
}
}
# Install the puppet certificate and key into VMware.
#
# === Dependencies
#
# * Class["puppet::client"]
#
class vmware::server::certs {
file { "/etc/vmware/ssl":
ensure => directory,
mode => 0755,
owner => root,
group => root,
require => Package["VMware-server"],
}
file { "/etc/vmware/ssl/rui.crt":
ensure => present,
source => "${puppet_ssldir}/certs/${fqdn}.pem",
mode => 0644,
owner => root,
group => root,
require => [ File["/etc/vmware/ssl"],
Class["puppet::client"], ],
before => Service["vmware"],
notify => Service["vmware"],
}
file { "/etc/vmware/ssl/rui.key":
ensure => present,
source => "${puppet_ssldir}/private_keys/${fqdn}.pem",
mode => 0600,
owner => root,
group => root,
require => [ File["/etc/vmware/ssl"],
Class["puppet::client"], ],
before => Service["vmware"],
notify => Service["vmware"],
}
}
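The certs class above copies the agent's own puppet certificate and key into /etc/vmware/ssl, relying on the puppet_ssldir fact added earlier in this commit and on a puppet::client class that is not part of this import. A hedged node-level sketch (the host name and the puppet::client class are assumptions):

node "vmhost1.example.com" {
    include puppet::client
    include vmware::server
    include vmware::server::certs
    include vmware::server::scripts
}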
# Install custom VMware support scripts
#
class vmware::server::scripts {
define vmware::server::scripts::file {
file { "/usr/local/sbin/${name}":
ensure => present,
source => "puppet:///vmware/scripts/${name}",
mode => 0755,
owner => root,
group => root,
}
}
file { "/usr/local/lib/vmware.sh":
ensure => present,
source => "puppet:///vmware/scripts/vmware.sh",
mode => 0644,
owner => root,
group => root,
}
vmware::server::scripts::file { "vmware-console": }
vmware::server::scripts::file { "vmware-list": }
vmware::server::scripts::file { "vmware-register": }
vmware::server::scripts::file { "vmware-start": }
vmware::server::scripts::file { "vmware-stop": }
vmware::server::scripts::file { "vmware-suspend": }
vmware::server::scripts::file { "vmware-unregister": }
}
# Create the /vmfs directory hierarchy.
#
# === Dependencies
#
# * Package["VMware-server"]
#
class vmware::server::vmfs {
include vmware::server
file { "/vmfs":
ensure => directory,
mode => 0755,
owner => root,
group => root,
require => Package["VMware-server"],
}
file { "/vmfs/volumes":
ensure => directory,
mode => 0755,
owner => root,
group => root,
require => File["/vmfs"],
}
}
# Modify VMware datastores.
#
# === Parameters
#
# $name:
# Datastore name.
# $device:
# Backing device for the datastore: an NFS export (server:/path),
# a block device, or a local directory to bind mount.
# $type:
# Filesystem type of datastore.
# $options:
# Filesystem mount options.
#
# === Sample usage
#
# vmware::server::datastore { "ds-001":
# device => "its1:/export/vmware/ds-001",
# options => "hard,intr,rw,nosuid,nodev,tcp,rsize=1048576,wsize=1048576",
# }
#
define vmware::server::datastore($device, $type = "auto", $options = "defaults") {
include vmware::server::vmfs
if $type == "auto" {
$server = regsubst($device, '^([a-zA-Z0-9\-]+):(/.+)$', '\1')
if $server == $device {
$fstype = $type
if $device == regsubst($device, '^(/dev/).+$', '\1') {
$mountopts = "bind"
}
} else {
$fstype = "nfs"
$path = regsubst($device, '^([a-zA-Z0-9\-]+):(/.+)$', '\2')
}
}
if !$mountopts {
$mountopts = $options
}
file { "/vmfs/volumes/${name}":
ensure => directory,
mode => 0755,
owner => root,
group => root,
require => File["/vmfs/volumes"],
}
mount { "/vmfs/volumes/${name}":
ensure => mounted,
device => "${device}",
fstype => "${fstype}",
options => "${mountopts}",
require => File["/vmfs/volumes/${name}"],
}
exec { "vmware-create-datastore-${name}":
command => $fstype ? {
nfs => "vmware-vim-cmd hostsvc/datastore/nas_create ${name} ${server} ${path} 0",
default => "vmware-vim-cmd hostsvc/datastore/localds_create ${name} /vmfs/volumes/${name}",
},
path => "/bin:/usr/bin:/sbin:/usr/sbin",
user => root,
unless => "vmware-vim-cmd hostsvc/datastore/summary ${name}",
require => [ Mount["/vmfs/volumes/${name}"],
Service["vmware"], ],
notify => Exec["vmware-refresh-datastore-${name}"],
}
exec { "vmware-refresh-datastore-${name}":
command => "vmware-vim-cmd hostsvc/datastore/refresh ${name}",
path => "/bin:/usr/bin:/sbin:/usr/sbin",
user => root,
refreshonly => true,
}
}
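Besides the NFS case shown in the sample usage above, the auto-detection in this define also accepts a plain local directory, which gets bind mounted under /vmfs/volumes and registered with localds_create. A hedged example (the path is made up):

vmware::server::datastore { "ds-scratch":
    device => "/export/vmware/ds-scratch",
}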
# Modify VMware bridge interfaces.
#
# === Parameters
#
# $name:
# vmnet device name.
# $description:
# Interface description.
# $device:
# Physical network device to bridge.
# $ensure:
# Set to present to enable bridge and absent to disable it.
#
# === Sample usage
#
# vmware::server::bridge { "vmnet0":
# ensure => present,
# device => "eth0",
# description => "Bunker",
# }
#
define vmware::server::bridge($description, $device, $ensure = "present") {
$vmnet = regsubst($name, '^vmnet([0-9]+)$', '\1')
if $vmnet == $name {
fail("Invalid vmnet device name.")
}
service { "${name}-bridge":
ensure => $ensure ? {
"present" => running,
"absent" => stopped,
},
pattern => "/usr/bin/vmnet-bridge -d .* -n ${vmnet}",
start => "/usr/bin/vmnet-bridge -d /var/run/vmnet-bridge-${vmnet}.pid -n ${vmnet} -i ${device}",
stop => "pkill -f '/usr/bin/vmnet-bridge -d .* -n ${vmnet}'",
provider => base,
require => Exec["vmware-config.pl"],
}
vmware_config { "VNET_${vmnet}_NAME":
ensure => $ensure ? {
absent => absent,
present => "${description}",
},
require => Exec["vmware-config.pl"],
}
vmware_config { "VNET_${vmnet}_INTERFACE":
ensure => $ensure ? {
absent => absent,
present => "${device}",
},
notify => Service["${name}-bridge"],
require => Exec["vmware-config.pl"],
}
}


@ -0,0 +1,82 @@
Puppet::Type.newtype(:vmware_config) do
@doc = "Modify /etc/vmware/locations file."
@@locations = "/etc/vmware/locations"
@@netmap = "/etc/vmware/netmap.conf"
def bucket
filebucket = Puppet::Type.type(:filebucket)
(filebucket["puppet"] || filebucket.mkdefaultbucket).bucket
end
def netmap
config = parse()
data = "# This file is automatically generated.\n"
data += "# Hand-editing this file is not recommended.\n"
data += "\n"
id = 0
(0..254).each do |n|
if name = config["VNET_#{n}_NAME"]
data += "network#{id}.name = \"#{name}\"\n"
data += "network#{id}.device = \"vmnet#{n}\"\n"
id += 1
end
end
current = File.open(@@netmap).read
if data != current
bucket.backup(@@netmap)
File.open(@@netmap, "w").print data
end
end
def parse
config = {}
File.open(@@locations).each { |line|
if m = /^answer ([A-Z0-9_]+) (.+)$/.match(line)
config[m[1]] = m[2]
elsif m = /^remove_answer ([A-Z0-9_]+)$/.match(line)
config.delete(m[1])
end
}
return config
end
newparam(:name) do
desc "Configuration key name"
newvalues(/^[A-Z0-9_]+$/)
end
newproperty(:ensure) do
def retrieve
config = @resource.parse()
if config.has_key?(@resource.name)
config[@resource.name]
else
:absent
end
end
newvalue(:absent) do
@resource.bucket.backup(@@locations) if File.exists?(@@locations)
open(@@locations, File::WRONLY|File::APPEND) do |f|
f.print "remove_answer %s\n" % @resource.name
end
if @resource.name =~ /^VNET_/
@resource.netmap()
end
end
newvalue(/.+/) do
@resource.bucket.backup(@@locations) if File.exists?(@@locations)
open(@@locations, File::WRONLY|File::APPEND) do |f|
f.print "remove_answer %s\n" % @resource.name
f.print "answer %s %s\n" % [@resource.name, value]
end
if @resource.name =~ /^VNET_/
@resource.netmap()
end
end
end
end