Fixes

master
Pierre Coimbra 2020-03-11 19:56:35 +01:00
parent af4a2edfcc
commit ede1c1f8f4
5 changed files with 363 additions and 10 deletions


@@ -1,7 +1,26 @@
# NGINX reverse proxy on the CTF network
## Container specification
This service is not redundant because it is not vital; its IP is 10.0.2.5 on the CTF network.
This service is not redundant because it is not vital. It will be numbered 145.
#### Two interfaces
- eth0 : vmbr1 / VLAN 40 / IP 10.0.3.3 / GW 10.0.2.254
- eth1 : vmbr2 / VLAN 100 / IP 10.1.0.145 / GW 10.1.0.254
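As a sketch, the two interfaces could be declared from the hypervisor with `pct`. The netmasks are not given in this document, so they are assumptions here (a /16 on eth0 so that the listed gateway is on-link):

```
# Hypothetical commands run on the Proxmox node hosting container 145
pct set 145 -net0 name=eth0,bridge=vmbr1,tag=40,ip=10.0.3.3/16,gw=10.0.2.254
pct set 145 -net1 name=eth1,bridge=vmbr2,tag=100,ip=10.1.0.145/24,gw=10.1.0.254
```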
### The proxy
#### /root/.wgetrc
```
http_proxy = http://10.0.3.252:3128/
https_proxy = http://10.0.3.252:3128/
use_proxy = on
```
#### /etc/apt/apt.conf.d/01proxy
```
Acquire::http {
Proxy "http://10.0.3.252:9999";
};
```
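To check that apt really goes through the proxy, one can watch the proxy logs, or enable apt's standard acquire debugging:

```
apt-get -o Debug::Acquire::http=true update
```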
## Objective
It must route the requests arriving from HAProxy to the right container according to the hostname. For this we will use Nginx HTTP web servers (a vhost sketch is given under the setup heading below).
@@ -11,6 +30,8 @@ It must route the requests arriving from HAProxy to the right container based on the hostname.
apt-get update
apt-get install -y nginx
systemctl enable nginx.service
rm /etc/nginx/sites-enabled/default
rm /etc/nginx/sites-available/default
```
## Setting up a server acting as a reverse proxy
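As an illustration, a minimal sketch of what one of these vhosts could look like; the hostname and backend address are invented for the example (the real ones depend on the challenges deployed):

```
# /etc/nginx/sites-available/chall1 -- one server block per hostname
server {
    listen 80;
    server_name chall1.krhacken.org;

    location / {
        # Pass the original Host header along so the backend can tell vhosts apart
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_pass http://10.0.3.10:80;
    }
}
```

Enable it with a symlink into `/etc/nginx/sites-enabled/`, then `nginx -t && systemctl reload nginx`.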


@@ -4,7 +4,7 @@
Number 121
#### Two interfaces
- eth0 : vmbr1 / VLAN 30 / IP 10.0.2.21 / GW 10.0.2.254
- eth1 : vmbr2 / VLAN 100 / IP 10.0.2.121 / GW 10.1.0.254
- eth1 : vmbr2 / VLAN 100 / IP 10.1.0.121 / GW 10.1.0.254
### The proxy
@@ -120,7 +120,6 @@ apt-get update
apt-get install -y git postgresql sudo
wget -O gitea https://dl.gitea.io/gitea/1.11.1/gitea-1.11.1-linux-amd64
```
pg_ctlcluster 11 main start
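The hunk does not show it, but the downloaded binary presumably still has to be made executable and put on the PATH; a sketch, with assumed paths:

```
chmod +x gitea
mv gitea /usr/local/bin/gitea
```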
## Nginx configuration
@@ -241,7 +240,6 @@ Check only
- Enable offline mode
- Disable Gravatar
- Disable the registration form
- Require signing in to view pages
- Hide email addresses by default
- Enable time tracking by default
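For reference, these installer checkboxes correspond to keys in Gitea's `app.ini`; a rough sketch, to be checked against the Gitea 1.11 documentation:

```
[server]
OFFLINE_MODE = true

[picture]
DISABLE_GRAVATAR = true

[service]
DISABLE_REGISTRATION = true
DEFAULT_KEEP_EMAIL_PRIVATE = true
DEFAULT_ENABLE_TIMETRACKING = true
```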
Then


@@ -6,7 +6,7 @@ Setting up the container for NextCloud and integrating it with the LDAP directory.
Number 120
#### Two interfaces
- eth0 : vmbr1 / VLAN 30 / IP 10.0.2.20 / GW 10.0.2.254
- eth1 : vmbr2 / VLAN 100 / IP 10.0.2.120 / GW 10.1.0.254
- eth1 : vmbr2 / VLAN 100 / IP 10.1.0.120 / GW 10.1.0.254
### The proxy
@@ -350,8 +350,7 @@ Settings / LDAP/AD integration
#### Advanced
- Check `Configuration active`
- User "display name" field: `cn`
- Second attribute for the display name: `sn`
- User "display name" field: `displayName`
- Base DN of the users tree: `ou=people,dc=krhacken,dc=org`
- Group "display name" field: `cn`
- Base DN of the groups tree: `cn=cloud,ou=people,dc=krhacken,dc=org`
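Before wiring these DNs into NextCloud, a quick query can confirm they exist; the LDAP host below is a placeholder, since it is not given in this excerpt:

```
ldapsearch -x -H ldap://<ldap-host> -b "ou=people,dc=krhacken,dc=org" "(objectClass=*)" cn displayName
```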


@@ -11,7 +11,7 @@ This document only gathers unordered notes and advice; some
- The IP addresses and VLANs to follow are in mise_en_place.md
- To connect to a container: SSH into the hypervisor, then lxc-attach <number>
Here is the order to follow
installation_hyperviseurs.md
@@ -136,13 +136,13 @@ are accessible and working.
proxy_interne.md
Nothing too hard about the setup
For usage:
- Every container in a zone other than the DMZ must use, as its gateway, the proxy's address in that zone
- wget and apt must imperatively be configured to point at the proxy's address
- Set up an interface in each zone with the .252 address, with .254 as gateway (OPNSense)
nginx_principal.md
Creating the two containers and connecting them to the internal proxy
#### /root/.wgetrc
```

deploiement/proxmox.py Normal file

@@ -0,0 +1,335 @@
#!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import absolute_import, division, print_function
__metaclass__ = type

ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}
import os
import time
import traceback

try:
    from proxmoxer import ProxmoxAPI
    HAS_PROXMOXER = True
except ImportError:
    HAS_PROXMOXER = False

from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native

VZ_TYPE = None
def get_nextvmid(module, proxmox):
    try:
        vmid = proxmox.cluster.nextid.get()
        return vmid
    except Exception as e:
        module.fail_json(msg="Unable to get next vmid. Failed with exception: %s" % to_native(e),
                         exception=traceback.format_exc())


def get_vmid(proxmox, hostname):
    return [vm['vmid'] for vm in proxmox.cluster.resources.get(type='vm') if 'name' in vm and vm['name'] == hostname]


def get_instance(proxmox, vmid):
    return [vm for vm in proxmox.cluster.resources.get(type='vm') if vm['vmid'] == int(vmid)]


def content_check(proxmox, node, ostemplate, template_store):
    return [True for cnt in proxmox.nodes(node).storage(template_store).content.get() if cnt['volid'] == ostemplate]


def node_check(proxmox, node):
    return [True for nd in proxmox.nodes.get() if nd['node'] == node]
def create_instance(module, proxmox, vmid, node, disk, storage, cpus, memory, swap, timeout, **kwargs):
    proxmox_node = proxmox.nodes(node)
    kwargs = dict((k, v) for k, v in kwargs.items() if v is not None)
    if VZ_TYPE == 'lxc':
        kwargs['cpulimit'] = cpus
        kwargs['rootfs'] = disk
        if 'netif' in kwargs:
            kwargs.update(kwargs['netif'])
            del kwargs['netif']
        if 'mounts' in kwargs:
            kwargs.update(kwargs['mounts'])
            del kwargs['mounts']
        if 'pubkey' in kwargs:
            # The Proxmox version is hardcoded to 6.0 in this vendored copy (upstream
            # queries the API for it), so this branch is always taken.
            if float(6.0) >= 4.2:
                kwargs['ssh-public-keys'] = kwargs['pubkey']
            del kwargs['pubkey']
    else:
        kwargs['cpus'] = cpus
        kwargs['disk'] = disk
    taskid = getattr(proxmox_node, VZ_TYPE).create(vmid=vmid, storage=storage, memory=memory, swap=swap, **kwargs)
    while timeout:
        if (proxmox_node.tasks(taskid).status.get()['status'] == 'stopped' and
                proxmox_node.tasks(taskid).status.get()['exitstatus'] == 'OK'):
            return True
        timeout -= 1
        if timeout == 0:
            module.fail_json(msg='Reached timeout while waiting for creating VM. Last line in task before timeout: %s' %
                             proxmox_node.tasks(taskid).log.get()[:1])
        time.sleep(1)
    return False
def start_instance(module, proxmox, vm, vmid, timeout):
    taskid = getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.start.post()
    while timeout:
        if (proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['status'] == 'stopped' and
                proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['exitstatus'] == 'OK'):
            return True
        timeout -= 1
        if timeout == 0:
            module.fail_json(msg='Reached timeout while waiting for starting VM. Last line in task before timeout: %s' %
                             proxmox.nodes(vm[0]['node']).tasks(taskid).log.get()[:1])
        time.sleep(1)
    return False


def stop_instance(module, proxmox, vm, vmid, timeout, force):
    if force:
        taskid = getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.shutdown.post(forceStop=1)
    else:
        taskid = getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.shutdown.post()
    while timeout:
        if (proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['status'] == 'stopped' and
                proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['exitstatus'] == 'OK'):
            return True
        timeout -= 1
        if timeout == 0:
            module.fail_json(msg='Reached timeout while waiting for stopping VM. Last line in task before timeout: %s' %
                             proxmox.nodes(vm[0]['node']).tasks(taskid).log.get()[:1])
        time.sleep(1)
    return False


def umount_instance(module, proxmox, vm, vmid, timeout):
    taskid = getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.umount.post()
    while timeout:
        if (proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['status'] == 'stopped' and
                proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['exitstatus'] == 'OK'):
            return True
        timeout -= 1
        if timeout == 0:
            module.fail_json(msg='Reached timeout while waiting for unmounting VM. Last line in task before timeout: %s' %
                             proxmox.nodes(vm[0]['node']).tasks(taskid).log.get()[:1])
        time.sleep(1)
    return False
def main():
    module = AnsibleModule(
        argument_spec=dict(
            api_host=dict(required=True),
            api_user=dict(required=True),
            api_password=dict(no_log=True),
            vmid=dict(required=False),
            validate_certs=dict(type='bool', default='no'),
            node=dict(),
            pool=dict(),
            password=dict(no_log=True),
            hostname=dict(),
            ostemplate=dict(),
            disk=dict(type='str', default='3'),
            cores=dict(type='int', default=1),
            cpus=dict(type='int', default=1),
            memory=dict(type='int', default=512),
            swap=dict(type='int', default=0),
            netif=dict(type='dict'),
            mounts=dict(type='dict'),
            ip_address=dict(),
            onboot=dict(type='bool', default='no'),
            storage=dict(default='local'),
            cpuunits=dict(type='int', default=1000),
            nameserver=dict(),
            searchdomain=dict(),
            timeout=dict(type='int', default=30),
            force=dict(type='bool', default='no'),
            state=dict(default='present', choices=['present', 'absent', 'stopped', 'started', 'restarted']),
            pubkey=dict(type='str', default=None),
            unprivileged=dict(type='bool', default='no')
        )
    )
    if not HAS_PROXMOXER:
        module.fail_json(msg='proxmoxer is required for this module')

    state = module.params['state']
    api_user = module.params['api_user']
    api_host = module.params['api_host']
    api_password = module.params['api_password']
    vmid = module.params['vmid']
    validate_certs = module.params['validate_certs']
    node = module.params['node']
    disk = module.params['disk']
    cpus = module.params['cpus']
    memory = module.params['memory']
    swap = module.params['swap']
    storage = module.params['storage']
    hostname = module.params['hostname']
    if module.params['ostemplate'] is not None:
        template_store = module.params['ostemplate'].split(":")[0]
    timeout = module.params['timeout']

    # If the password is not set, get it from the PROXMOX_PASSWORD env variable
    if not api_password:
        try:
            api_password = os.environ['PROXMOX_PASSWORD']
        except KeyError:
            module.fail_json(msg='You should set the api_password param or use the PROXMOX_PASSWORD environment variable')

    try:
        proxmox = ProxmoxAPI(api_host, user=api_user, password=api_password, verify_ssl=validate_certs)
        global VZ_TYPE
        # Proxmox version hardcoded to 6.0 in this vendored copy, so VZ_TYPE is always 'lxc'
        VZ_TYPE = 'openvz' if float(6.0) < 4.0 else 'lxc'
    except Exception as e:
        module.fail_json(msg='authorization on proxmox cluster failed with exception: %s' % e)

    # If vmid is not set, get the next VM id from the Proxmox API;
    # if hostname is set, resolve the VM id from the Proxmox API
    if not vmid and state == 'present':
        vmid = get_nextvmid(module, proxmox)
    elif not vmid and hostname:
        hosts = get_vmid(proxmox, hostname)
        if len(hosts) == 0:
            module.fail_json(msg="Vmid could not be fetched => hostname doesn't exist (action: %s)" % state)
        vmid = hosts[0]
    elif not vmid:
        module.exit_json(changed=False, msg="Vmid could not be fetched for the following action: %s" % state)
    if state == 'present':
        try:
            if get_instance(proxmox, vmid) and not module.params['force']:
                module.exit_json(changed=False, msg="VM with vmid = %s already exists" % vmid)
            # If no vmid was passed, there cannot be another VM named 'hostname'
            if not module.params['vmid'] and get_vmid(proxmox, hostname) and not module.params['force']:
                module.exit_json(changed=False, msg="VM with hostname %s already exists and has ID number %s" % (hostname, get_vmid(proxmox, hostname)[0]))
            # Fixed: the original wrapped these conditions in a tuple, which is always truthy
            elif not (node and module.params['hostname'] and module.params['password'] and module.params['ostemplate']):
                module.fail_json(msg='node, hostname, password and ostemplate are mandatory for creating a vm')
            elif not node_check(proxmox, node):
                module.fail_json(msg="node '%s' does not exist in cluster" % node)
            elif not content_check(proxmox, node, module.params['ostemplate'], template_store):
                module.fail_json(msg="ostemplate '%s' does not exist on node %s and storage %s"
                                 % (module.params['ostemplate'], node, template_store))

            create_instance(module, proxmox, vmid, node, disk, storage, cpus, memory, swap, timeout,
                            cores=module.params['cores'],
                            pool=module.params['pool'],
                            password=module.params['password'],
                            hostname=module.params['hostname'],
                            ostemplate=module.params['ostemplate'],
                            netif=module.params['netif'],
                            mounts=module.params['mounts'],
                            ip_address=module.params['ip_address'],
                            onboot=int(module.params['onboot']),
                            cpuunits=module.params['cpuunits'],
                            nameserver=module.params['nameserver'],
                            searchdomain=module.params['searchdomain'],
                            force=int(module.params['force']),
                            pubkey=module.params['pubkey'],
                            unprivileged=int(module.params['unprivileged']))

            module.exit_json(changed=True, msg="deployed VM %s from template %s" % (vmid, module.params['ostemplate']))
        except Exception as e:
            module.fail_json(msg="creation of %s VM %s failed with exception: %s" % (VZ_TYPE, vmid, e))
    elif state == 'started':
        try:
            vm = get_instance(proxmox, vmid)
            if not vm:
                module.fail_json(msg='VM with vmid = %s does not exist in cluster' % vmid)
            if getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'running':
                module.exit_json(changed=False, msg="VM %s is already running" % vmid)

            if start_instance(module, proxmox, vm, vmid, timeout):
                module.exit_json(changed=True, msg="VM %s started" % vmid)
        except Exception as e:
            module.fail_json(msg="starting of VM %s failed with exception: %s" % (vmid, e))

    elif state == 'stopped':
        try:
            vm = get_instance(proxmox, vmid)
            if not vm:
                module.fail_json(msg='VM with vmid = %s does not exist in cluster' % vmid)

            if getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'mounted':
                if module.params['force']:
                    if umount_instance(module, proxmox, vm, vmid, timeout):
                        module.exit_json(changed=True, msg="VM %s is shutting down" % vmid)
                else:
                    module.exit_json(changed=False, msg=("VM %s is already shut down, but mounted. "
                                                         "You can use the force option to umount it.") % vmid)

            if getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'stopped':
                module.exit_json(changed=False, msg="VM %s is already shut down" % vmid)

            if stop_instance(module, proxmox, vm, vmid, timeout, force=module.params['force']):
                module.exit_json(changed=True, msg="VM %s is shutting down" % vmid)
        except Exception as e:
            module.fail_json(msg="stopping of VM %s failed with exception: %s" % (vmid, e))

    elif state == 'restarted':
        try:
            vm = get_instance(proxmox, vmid)
            if not vm:
                module.fail_json(msg='VM with vmid = %s does not exist in cluster' % vmid)
            if (getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'stopped' or
                    getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'mounted'):
                module.exit_json(changed=False, msg="VM %s is not running" % vmid)

            if (stop_instance(module, proxmox, vm, vmid, timeout, force=module.params['force']) and
                    start_instance(module, proxmox, vm, vmid, timeout)):
                module.exit_json(changed=True, msg="VM %s is restarted" % vmid)
        except Exception as e:
            module.fail_json(msg="restarting of VM %s failed with exception: %s" % (vmid, e))
    elif state == 'absent':
        try:
            vm = get_instance(proxmox, vmid)
            if not vm:
                module.exit_json(changed=False, msg="VM %s does not exist" % vmid)

            if getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'running':
                module.exit_json(changed=False, msg="VM %s is running. Stop it before deletion." % vmid)

            if getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'mounted':
                module.exit_json(changed=False, msg="VM %s is mounted. Stop it with the force option before deletion." % vmid)

            taskid = getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE).delete(vmid)
            while timeout:
                if (proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['status'] == 'stopped' and
                        proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['exitstatus'] == 'OK'):
                    module.exit_json(changed=True, msg="VM %s removed" % vmid)
                timeout -= 1
                if timeout == 0:
                    module.fail_json(msg='Reached timeout while waiting for removing VM. Last line in task before timeout: %s'
                                     % proxmox.nodes(vm[0]['node']).tasks(taskid).log.get()[:1])
                time.sleep(1)
        except Exception as e:
            module.fail_json(msg="deletion of VM %s failed with exception: %s" % (vmid, to_native(e)))


if __name__ == '__main__':
    main()
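
For reference, a hypothetical playbook task using this vendored module; every value below is illustrative (host, node, template and vault variables are not defined in this excerpt):

```
# deploiement/exemple.yml -- illustrative only, assuming the module is on the module path
- hosts: localhost
  tasks:
    - name: Create the CTF reverse proxy container
      proxmox:
        api_host: <proxmox-host>
        api_user: root@pam
        api_password: "{{ vault_proxmox_password }}"
        node: <node-name>
        vmid: 145
        hostname: nginx-ctf
        password: "{{ vault_container_password }}"
        ostemplate: 'local:vztmpl/debian-10-standard_10.3-1_amd64.tar.gz'
        netif:
          net0: 'name=eth0,bridge=vmbr1,tag=40,ip=10.0.3.3/16,gw=10.0.2.254'
        state: present
```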