Signed-off-by: Dominick Grift <[email protected]>
---
:100644 100644 2ecdde8... 7a1b5de... M policy/modules/kernel/corenetwork.te.in
:000000 100644 0000000... d88b5ff... A policy/modules/services/hadoop.fc
:000000 100644 0000000... 6cc0049... A policy/modules/services/hadoop.if
:000000 100644 0000000... 53a242b... A policy/modules/services/hadoop.te
policy/modules/kernel/corenetwork.te.in | 4 +
policy/modules/services/hadoop.fc | 40 ++++
policy/modules/services/hadoop.if | 247 ++++++++++++++++++++++
policy/modules/services/hadoop.te | 347 +++++++++++++++++++++++++++++++
4 files changed, 638 insertions(+), 0 deletions(-)
diff --git a/policy/modules/kernel/corenetwork.te.in b/policy/modules/kernel/corenetwork.te.in
index 2ecdde8..7a1b5de 100644
--- a/policy/modules/kernel/corenetwork.te.in
+++ b/policy/modules/kernel/corenetwork.te.in
@@ -105,6 +105,7 @@ network_port(giftd, tcp,1213,s0)
network_port(git, tcp,9418,s0, udp,9418,s0)
network_port(gopher, tcp,70,s0, udp,70,s0)
network_port(gpsd, tcp,2947,s0)
+network_port(hadoop_namenode, tcp,8020,s0)
network_port(hddtemp, tcp,7634,s0)
network_port(howl, tcp,5335,s0, udp,5353,s0)
network_port(hplip, tcp,1782,s0, tcp,2207,s0, tcp,2208,s0, tcp, 8290,s0, tcp,50000,s0, tcp,50002,s0, tcp,8292,s0, tcp,9100,s0, tcp,9101,s0, tcp,9102,s0, tcp,9220,s0, tcp,9221,s0, tcp,9222,s0, tcp,9280,s0, tcp,9281,s0, tcp,9282,s0, tcp,9290,s0, tcp,9291,s0, tcp,9292,s0)
@@ -211,6 +212,9 @@ network_port(xdmcp, udp,177,s0, tcp,177,s0)
network_port(xen, tcp,8002,s0)
network_port(xfs, tcp,7100,s0)
network_port(xserver, tcp,6000-6020,s0)
network_port(zebra, tcp,2600-2604,s0, tcp,2606,s0, udp,2600-2604,s0, udp,2606,s0)
+network_port(zookeeper_client, tcp,2181,s0)
+network_port(zookeeper_election, tcp,3888,s0)
+network_port(zookeeper_leader, tcp,2888,s0)
network_port(zope, tcp,8021,s0)
diff --git a/policy/modules/services/hadoop.fc b/policy/modules/services/hadoop.fc
new file mode 100644
index 0000000..d88b5ff
--- /dev/null
+++ b/policy/modules/services/hadoop.fc
@@ -0,0 +1,40 @@
+/etc/hadoop.*(/.*)? gen_context(system_u:object_r:hadoop_etc_t,s0)
+
+/etc/rc\.d/init\.d/hadoop-(.*)?-datanode -- gen_context(system_u:object_r:hadoop_datanode_initrc_exec_t,s0)
+/etc/rc\.d/init\.d/hadoop-(.*)?-jobtracker -- gen_context(system_u:object_r:hadoop_jobtracker_initrc_exec_t,s0)
+/etc/rc\.d/init\.d/hadoop-(.*)?-namenode -- gen_context(system_u:object_r:hadoop_namenode_initrc_exec_t,s0)
+/etc/rc\.d/init\.d/hadoop-(.*)?-secondarynamenode -- gen_context(system_u:object_r:hadoop_secondarynamenode_initrc_exec_t,s0)
+/etc/rc\.d/init\.d/hadoop-(.*)?-tasktracker -- gen_context(system_u:object_r:hadoop_tasktracker_initrc_exec_t,s0)
+/etc/rc\.d/init\.d/hadoop-zookeeper -- gen_context(system_u:object_r:zookeeper_server_initrc_exec_t,s0)
+
+/etc/zookeeper(/.*)? gen_context(system_u:object_r:zookeeper_etc_t,s0)
+/etc/zookeeper\.dist(/.*)? gen_context(system_u:object_r:zookeeper_etc_t,s0)
+
+/usr/lib/hadoop(.*)?/bin/hadoop -- gen_context(system_u:object_r:hadoop_exec_t,s0)
+
+/usr/bin/zookeeper-client -- gen_context(system_u:object_r:zookeeper_exec_t,s0)
+/usr/bin/zookeeper-server -- gen_context(system_u:object_r:zookeeper_server_exec_t,s0)
+
+/var/zookeeper(/.*)? gen_context(system_u:object_r:zookeeper_server_var_t,s0)
+
+/var/lib/hadoop(.*)? gen_context(system_u:object_r:hadoop_var_lib_t,s0)
+/var/lib/hadoop(.*)?/cache/hadoop/dfs/data(/.*)? gen_context(system_u:object_r:hadoop_datanode_var_lib_t,s0)
+/var/lib/hadoop(.*)?/cache/hadoop/mapred/local/jobTracker(/.*)? gen_context(system_u:object_r:hadoop_jobtracker_var_lib_t,s0)
+/var/lib/hadoop(.*)?/cache/hadoop/dfs/name(/.*)? gen_context(system_u:object_r:hadoop_namenode_var_lib_t,s0)
+/var/lib/hadoop(.*)?/cache/hadoop/dfs/namesecondary(/.*)? gen_context(system_u:object_r:hadoop_secondarynamenode_var_lib_t,s0)
+/var/lib/hadoop(.*)?/cache/hadoop/mapred/local/taskTracker(/.*)? gen_context(system_u:object_r:hadoop_tasktracker_var_lib_t,s0)
+
+/var/lock/subsys/hadoop-datanode -- gen_context(system_u:object_r:hadoop_datanode_initrc_lock_t,s0)
+/var/lock/subsys/hadoop-namenode -- gen_context(system_u:object_r:hadoop_namenode_initrc_lock_t,s0)
+
+/var/log/hadoop(.*)? gen_context(system_u:object_r:hadoop_log_t,s0)
+/var/log/hadoop(.*)?/hadoop-hadoop-datanode-(.*)? gen_context(system_u:object_r:hadoop_datanode_log_t,s0)
+/var/log/hadoop(.*)?/hadoop-hadoop-jobtracker-(.*)? gen_context(system_u:object_r:hadoop_jobtracker_log_t,s0)
+/var/log/hadoop(.*)?/hadoop-hadoop-namenode-(.*)? gen_context(system_u:object_r:hadoop_namenode_log_t,s0)
+/var/log/hadoop(.*)?/hadoop-hadoop-secondarynamenode-(.*)? gen_context(system_u:object_r:hadoop_secondarynamenode_log_t,s0)
+/var/log/hadoop(.*)?/hadoop-hadoop-tasktracker-(.*)? gen_context(system_u:object_r:hadoop_tasktracker_log_t,s0)
+/var/log/zookeeper(/.*)? gen_context(system_u:object_r:zookeeper_log_t,s0)
+
+/var/run/hadoop -d gen_context(system_u:object_r:hadoop_var_run_t,s0)
+/var/run/hadoop/hadoop-hadoop-datanode.pid -- gen_context(system_u:object_r:hadoop_datanode_var_run_t,s0)
+/var/run/hadoop/hadoop-hadoop-namenode.pid -- gen_context(system_u:object_r:hadoop_namenode_var_run_t,s0)
diff --git a/policy/modules/services/hadoop.if b/policy/modules/services/hadoop.if
new file mode 100644
index 0000000..6cc0049
--- /dev/null
+++ b/policy/modules/services/hadoop.if
@@ -0,0 +1,247 @@
+## <summary>Software for reliable, scalable, distributed computing.</summary>
+
+#######################################
+## <summary>
+## The template to define a hadoop domain.
+## </summary>
+## <param name="domain_prefix">
+## <summary>
+## Domain prefix to be used.
+## </summary>
+## </param>
+#
+template(`hadoop_domain_template',`
+ gen_require(`
+ attribute hadoop_domain;
+ type hadoop_log_t, hadoop_var_lib_t, hadoop_var_run_t;
+ type hadoop_exec_t;
+ ')
+
+ ########################################
+ #
+ # Shared declarations.
+ #
+
+ type hadoop_$1_t, hadoop_domain;
+ domain_type(hadoop_$1_t)
+ domain_entry_file(hadoop_$1_t, hadoop_exec_t)
+
+ type hadoop_$1_initrc_t;
+ type hadoop_$1_initrc_exec_t;
+ init_script_domain(hadoop_$1_initrc_t, hadoop_$1_initrc_exec_t)
+
+ role system_r types { hadoop_$1_initrc_t hadoop_$1_t };
+
+ # This will need a file context specification.
+ type hadoop_$1_initrc_lock_t;
+ files_lock_file(hadoop_$1_initrc_lock_t)
+
+ type hadoop_$1_log_t;
+ logging_log_file(hadoop_$1_log_t)
+
+ type hadoop_$1_var_lib_t;
+ files_type(hadoop_$1_var_lib_t)
+
+ # This will need a file context specification.
+ type hadoop_$1_var_run_t;
+ files_pid_file(hadoop_$1_var_run_t)
+
+ type hadoop_$1_tmp_t;
+ files_tmp_file(hadoop_$1_tmp_t)
+
+ # permissive hadoop_$1_t;
+ # permissive hadoop_$1_initrc_t;
+
+ ####################################
+ #
+ # Shared hadoop_$1 initrc policy.
+ #
+
+ allow hadoop_$1_initrc_t self:capability { setuid setgid };
+ dontaudit hadoop_$1_initrc_t self:capability sys_tty_config;
+
+ allow hadoop_$1_initrc_t hadoop_$1_initrc_lock_t:file manage_file_perms;
+ files_lock_filetrans(hadoop_$1_initrc_t, hadoop_$1_initrc_lock_t, file)
+
+ append_files_pattern(hadoop_$1_initrc_t, hadoop_$1_log_t, hadoop_$1_log_t)
+ create_files_pattern(hadoop_$1_initrc_t, hadoop_$1_log_t, hadoop_$1_log_t)
+ read_files_pattern(hadoop_$1_initrc_t, hadoop_$1_log_t, hadoop_$1_log_t)
+ setattr_files_pattern(hadoop_$1_initrc_t, hadoop_$1_log_t, hadoop_$1_log_t)
+ filetrans_pattern(hadoop_$1_initrc_t, hadoop_log_t, hadoop_$1_log_t, file)
+ logging_search_logs(hadoop_$1_initrc_t)
+
+ allow hadoop_$1_initrc_t hadoop_$1_var_run_t:file manage_file_perms;
+ filetrans_pattern(hadoop_$1_initrc_t, hadoop_var_run_t, hadoop_$1_var_run_t, file)
+ files_search_pids(hadoop_$1_initrc_t)
+
+ allow hadoop_$1_initrc_t hadoop_$1_t:process { signal signull };
+
+ domtrans_pattern(hadoop_$1_initrc_t, hadoop_exec_t, hadoop_$1_t)
+
+ kernel_read_kernel_sysctls(hadoop_$1_initrc_t)
+ kernel_read_sysctl(hadoop_$1_initrc_t)
+
+ corecmd_exec_all_executables(hadoop_$1_initrc_t)
+
+ init_rw_utmp(hadoop_$1_initrc_t)
+
+ # This can be removed on anything post-el5
+ libs_use_ld_so(hadoop_$1_initrc_t)
+ libs_use_shared_libs(hadoop_$1_initrc_t)
+
+ logging_send_audit_msgs(hadoop_$1_initrc_t)
+ logging_send_syslog_msg(hadoop_$1_initrc_t)
+
+ ####################################
+ #
+ # Shared hadoop_$1 policy.
+ #
+
+ allow hadoop_$1_t hadoop_domain:process signull;
+
+ # This can be removed on anything post-el5
+ libs_use_ld_so(hadoop_$1_t)
+ libs_use_shared_libs(hadoop_$1_t)
+')
+
+########################################
+## <summary>
+## Execute hadoop in the
+## hadoop domain.
+## </summary>
+## <param name="domain">
+## <summary>
+## Domain allowed to transition.
+## </summary>
+## </param>
+#
+interface(`hadoop_domtrans',`
+ gen_require(`
+ type hadoop_t, hadoop_exec_t;
+ ')
+
+ files_search_usr($1)
+ libs_search_lib($1)
+ domtrans_pattern($1, hadoop_exec_t, hadoop_t)
+')
+
+########################################
+## <summary>
+## Execute hadoop in the hadoop domain,
+## and allow the specified role the
+## hadoop domain.
+## </summary>
+## <param name="domain">
+## <summary>
+## Domain allowed to transition.
+## </summary>
+## </param>
+## <param name="role">
+## <summary>
+## Role allowed access.
+## </summary>
+## </param>
+## <rolecap/>
+#
+interface(`hadoop_run',`
+ gen_require(`
+ type hadoop_t;
+ ')
+
+ hadoop_domtrans($1)
+ role $2 types hadoop_t;
+
+ allow $1 hadoop_t:process { ptrace signal_perms };
+ ps_process_pattern($1, hadoop_t)
+')
+
+########################################
+## <summary>
+## Execute zookeeper client in the
+## zookeeper client domain.
+## </summary>
+## <param name="domain">
+## <summary>
+## Domain allowed to transition.
+## </summary>
+## </param>
+#
+interface(`zookeeper_domtrans_client',`
+ gen_require(`
+ type zookeeper_t, zookeeper_exec_t;
+ ')
+
+ corecmd_search_bin($1)
+ files_search_usr($1)
+ domtrans_pattern($1, zookeeper_exec_t, zookeeper_t)
+')
+
+########################################
+## <summary>
+## Execute zookeeper server in the
+## zookeeper server domain.
+## </summary>
+## <param name="domain">
+## <summary>
+## Domain allowed to transition.
+## </summary>
+## </param>
+#
+interface(`zookeeper_domtrans_server',`
+ gen_require(`
+ type zookeeper_server_t, zookeeper_server_exec_t;
+ ')
+
+ corecmd_search_bin($1)
+ files_search_usr($1)
+ domtrans_pattern($1, zookeeper_server_exec_t, zookeeper_server_t)
+')
+
+########################################
+## <summary>
+## Execute zookeeper server in the
+## zookeeper domain.
+## </summary>
+## <param name="domain">
+## <summary>
+## Domain allowed to transition.
+## </summary>
+## </param>
+#
+interface(`zookeeper_initrc_domtrans_server',`
+ gen_require(`
+ type zookeeper_server_initrc_exec_t;
+ ')
+
+ init_labeled_script_domtrans($1, zookeeper_server_initrc_exec_t)
+')
+
+########################################
+## <summary>
+## Execute zookeeper client in the
+## zookeeper client domain, and allow the
+## specified role the zookeeper client domain.
+## </summary>
+## <param name="domain">
+## <summary>
+## Domain allowed to transition.
+## </summary>
+## </param>
+## <param name="role">
+## <summary>
+## Role allowed access.
+## </summary>
+## </param>
+## <rolecap/>
+#
+interface(`zookeeper_run_client',`
+ gen_require(`
+ type zookeeper_t;
+ ')
+
+ zookeeper_domtrans_client($1)
+ role $2 types zookeeper_t;
+
+ allow $1 zookeeper_t:process { ptrace signal_perms };
+ ps_process_pattern($1, zookeeper_t)
+')
diff --git a/policy/modules/services/hadoop.te b/policy/modules/services/hadoop.te
new file mode 100644
index 0000000..53a242b
--- /dev/null
+++ b/policy/modules/services/hadoop.te
@@ -0,0 +1,347 @@
+policy_module(hadoop, 1.0.0)
+
+########################################
+#
+# Hadoop declarations.
+#
+
+attribute hadoop_domain;
+
+# What or who runs this?
+type hadoop_t;
+type hadoop_exec_t;
+domain_type(hadoop_t)
+domain_entry_file(hadoop_t, hadoop_exec_t)
+
+type hadoop_etc_t;
+files_config_file(hadoop_etc_t)
+
+type hadoop_var_lib_t;
+files_type(hadoop_var_lib_t)
+
+type hadoop_log_t;
+logging_log_file(hadoop_log_t)
+
+type hadoop_var_run_t;
+files_pid_file(hadoop_var_run_t)
+
+type hadoop_tmp_t;
+files_tmp_file(hadoop_tmp_t)
+
+# permissive hadoop_t;
+
+hadoop_domain_template(datanode)
+hadoop_domain_template(jobtracker)
+hadoop_domain_template(namenode)
+hadoop_domain_template(secondarynamenode)
+hadoop_domain_template(tasktracker)
+
+########################################
+#
+# Hadoop zookeeper client declarations.
+#
+
+type zookeeper_t;
+type zookeeper_exec_t;
+application_domain(zookeeper_t, zookeeper_exec_t)
+ubac_constrained(zookeeper_t)
+
+type zookeeper_etc_t;
+files_config_file(zookeeper_etc_t)
+
+type zookeeper_log_t;
+logging_log_file(zookeeper_log_t)
+
+type zookeeper_tmp_t;
+files_tmp_file(zookeeper_tmp_t)
+ubac_constrained(zookeeper_tmp_t)
+
+# permissive zookeeper_t;
+
+########################################
+#
+# Hadoop zookeeper server declarations.
+#
+
+type zookeeper_server_t;
+type zookeeper_server_exec_t;
+init_daemon_domain(zookeeper_server_t, zookeeper_server_exec_t)
+
+type zookeeper_server_initrc_exec_t;
+init_script_file(zookeeper_server_initrc_exec_t)
+
+type zookeeper_server_var_t;
+files_type(zookeeper_server_var_t)
+
+# This will need a file context specification.
+type zookeeper_server_var_run_t;
+files_pid_file(zookeeper_server_var_run_t)
+
+type zookeeper_server_tmp_t;
+files_tmp_file(zookeeper_server_tmp_t)
+
+# permissive zookeeper_server_t;
+
+########################################
+#
+# Hadoop policy.
+#
+
+allow hadoop_t self:capability sys_resource;
+allow hadoop_t self:process { getsched setsched signal signull setrlimit };
+allow hadoop_t self:fifo_file rw_fifo_file_perms;
+allow hadoop_t self:key write;
+# This probably needs to be allowed.
+dontaudit hadoop_t self:netlink_route_socket rw_netlink_socket_perms;
+allow hadoop_t self:tcp_socket create_stream_socket_perms;
+allow hadoop_t self:udp_socket create_socket_perms;
+
+read_files_pattern(hadoop_t, hadoop_etc_t, hadoop_etc_t)
+read_lnk_files_pattern(hadoop_t, hadoop_etc_t, hadoop_etc_t)
+can_exec(hadoop_t, hadoop_etc_t)
+
+manage_dirs_pattern(hadoop_t, hadoop_var_lib_t, hadoop_var_lib_t)
+manage_files_pattern(hadoop_t, hadoop_var_lib_t, hadoop_var_lib_t)
+
+manage_dirs_pattern(hadoop_t, hadoop_log_t, hadoop_log_t)
+
+# Who or what creates /var/run/hadoop?
+getattr_dirs_pattern(hadoop_t, hadoop_var_run_t, hadoop_var_run_t)
+
+manage_dirs_pattern(hadoop_t, hadoop_tmp_t, hadoop_tmp_t)
+manage_files_pattern(hadoop_t, hadoop_tmp_t, hadoop_tmp_t)
+files_tmp_filetrans(hadoop_t, hadoop_tmp_t, { dir file })
+
+allow hadoop_t hadoop_domain:process signull;
+
+kernel_read_network_state(hadoop_t)
+kernel_read_system_state(hadoop_t)
+
+corecmd_exec_bin(hadoop_t)
+corecmd_exec_shell(hadoop_t)
+
+corenet_all_recvfrom_unlabeled(hadoop_t)
+corenet_all_recvfrom_netlabel(hadoop_t)
+corenet_sendrecv_hadoop_namenode_client_packets(hadoop_t)
+corenet_sendrecv_portmap_client_packets(hadoop_t)
+corenet_sendrecv_zope_client_packets(hadoop_t)
+corenet_tcp_bind_all_nodes(hadoop_t)
+corenet_tcp_connect_hadoop_namenode_port(hadoop_t)
+corenet_tcp_connect_portmap_port(hadoop_t)
+corenet_tcp_connect_zope_port(hadoop_t)
+corenet_tcp_sendrecv_all_nodes(hadoop_t)
+corenet_tcp_sendrecv_all_ports(hadoop_t)
+corenet_tcp_sendrecv_generic_if(hadoop_t)
+corenet_udp_bind_all_nodes(hadoop_t)
+corenet_udp_sendrecv_all_nodes(hadoop_t)
+corenet_udp_sendrecv_all_ports(hadoop_t)
+corenet_udp_sendrecv_generic_if(hadoop_t)
+
+dev_read_rand(hadoop_t)
+dev_read_sysfs(hadoop_t)
+dev_read_urand(hadoop_t)
+
+files_dontaudit_search_spool(hadoop_t)
+files_read_usr_files(hadoop_t)
+files_read_all_files(hadoop_t)
+
+fs_getattr_xattr_fs(hadoop_t)
+
+# This can be removed on anything post-el5
+libs_use_ld_so(hadoop_t)
+libs_use_shared_libs(hadoop_t)
+
+miscfiles_read_localization(hadoop_t)
+
+userdom_dontaudit_search_user_home_dirs(hadoop_t)
+
+optional_policy(`
+ # Java might not be optional
+ java_exec(hadoop_t)
+')
+
+optional_policy(`
+ nis_use_ypbind(hadoop_t)
+')
+
+optional_policy(`
+ nscd_socket_use(hadoop_t)
+')
+
+########################################
+#
+# Hadoop datanode policy.
+#
+
+########################################
+#
+# Hadoop jobtracker policy.
+#
+
+########################################
+#
+# Hadoop namenode policy.
+#
+
+########################################
+#
+# Hadoop secondary namenode policy.
+#
+
+########################################
+#
+# Hadoop tasktracker policy.
+#
+
+########################################
+#
+# Hadoop zookeeper client policy.
+#
+
+allow zookeeper_t self:process { getsched sigkill signal signull };
+allow zookeeper_t self:fifo_file rw_fifo_file_perms;
+allow zookeeper_t self:tcp_socket create_stream_socket_perms;
+allow zookeeper_t self:udp_socket create_socket_perms;
+
+read_files_pattern(zookeeper_t, zookeeper_etc_t, zookeeper_etc_t)
+read_lnk_files_pattern(zookeeper_t, zookeeper_etc_t, zookeeper_etc_t)
+
+setattr_dirs_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
+append_files_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
+create_files_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
+read_files_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
+setattr_files_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
+logging_log_filetrans(zookeeper_t, zookeeper_log_t, file)
+
+manage_files_pattern(zookeeper_t, zookeeper_tmp_t, zookeeper_tmp_t)
+files_tmp_filetrans(zookeeper_t, zookeeper_tmp_t, file)
+
+allow zookeeper_t zookeeper_server_t:process signull;
+
+can_exec(zookeeper_t, zookeeper_exec_t)
+
+kernel_read_network_state(zookeeper_t)
+kernel_read_system_state(zookeeper_t)
+
+corecmd_exec_bin(zookeeper_t)
+corecmd_exec_shell(zookeeper_t)
+
+corenet_all_recvfrom_unlabeled(zookeeper_t)
+corenet_all_recvfrom_netlabel(zookeeper_t)
+corenet_sendrecv_zookeeper_client_client_packets(zookeeper_t)
+corenet_tcp_bind_all_nodes(zookeeper_t)
+corenet_tcp_connect_zookeeper_client_port(zookeeper_t)
+corenet_tcp_sendrecv_all_nodes(zookeeper_t)
+corenet_tcp_sendrecv_all_ports(zookeeper_t)
+corenet_tcp_sendrecv_generic_if(zookeeper_t)
+corenet_udp_bind_all_nodes(zookeeper_t)
+corenet_udp_sendrecv_all_nodes(zookeeper_t)
+corenet_udp_sendrecv_all_ports(zookeeper_t)
+corenet_udp_sendrecv_generic_if(zookeeper_t)
+
+dev_read_rand(zookeeper_t)
+dev_read_sysfs(zookeeper_t)
+dev_read_urand(zookeeper_t)
+
+files_read_etc_files(zookeeper_t)
+files_read_usr_files(zookeeper_t)
+
+# This can be removed on anything post-el5
+libs_use_ld_so(zookeeper_t)
+libs_use_shared_libs(zookeeper_t)
+
+miscfiles_read_localization(zookeeper_t)
+
+sysnet_read_config(zookeeper_t)
+
+userdom_dontaudit_search_user_home_dirs(zookeeper_t)
+userdom_use_user_terminals(zookeeper_t)
+
+optional_policy(`
+ # Java might not be optional
+ java_exec(zookeeper_t)
+')
+
+optional_policy(`
+ nscd_socket_use(zookeeper_t)
+')
+
+########################################
+#
+# Hadoop zookeeper server policy.
+#
+
+allow zookeeper_server_t self:capability kill;
+allow zookeeper_server_t self:process { execmem getsched sigkill signal signull };
+allow zookeeper_server_t self:fifo_file rw_fifo_file_perms;
+allow zookeeper_server_t self:netlink_route_socket rw_netlink_socket_perms;
+allow zookeeper_server_t self:tcp_socket create_stream_socket_perms;
+
+read_files_pattern(zookeeper_server_t, zookeeper_etc_t, zookeeper_etc_t)
+read_lnk_files_pattern(zookeeper_server_t, zookeeper_etc_t, zookeeper_etc_t)
+
+manage_dirs_pattern(zookeeper_server_t, zookeeper_server_var_t, zookeeper_server_var_t)
+manage_files_pattern(zookeeper_server_t, zookeeper_server_var_t, zookeeper_server_var_t)
+files_var_lib_filetrans(zookeeper_server_t, zookeeper_server_var_t, { dir file })
+
+setattr_dirs_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
+append_files_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
+create_files_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
+read_files_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
+setattr_files_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
+logging_log_filetrans(zookeeper_server_t, zookeeper_log_t, file)
+
+manage_files_pattern(zookeeper_server_t, zookeeper_server_var_run_t, zookeeper_server_var_run_t)
+files_pid_filetrans(zookeeper_server_t, zookeeper_server_var_run_t, file)
+
+manage_files_pattern(zookeeper_server_t, zookeeper_server_tmp_t, zookeeper_server_tmp_t)
+files_tmp_filetrans(zookeeper_server_t, zookeeper_server_tmp_t, file)
+
+can_exec(zookeeper_server_t, zookeeper_server_exec_t)
+
+kernel_read_network_state(zookeeper_server_t)
+kernel_read_system_state(zookeeper_server_t)
+
+corecmd_exec_bin(zookeeper_server_t)
+corecmd_exec_shell(zookeeper_server_t)
+
+corenet_all_recvfrom_unlabeled(zookeeper_server_t)
+corenet_all_recvfrom_netlabel(zookeeper_server_t)
+corenet_sendrecv_zookeeper_election_client_packets(zookeeper_server_t)
+corenet_sendrecv_zookeeper_leader_client_packets(zookeeper_server_t)
+corenet_sendrecv_zookeeper_client_server_packets(zookeeper_server_t)
+corenet_sendrecv_zookeeper_election_server_packets(zookeeper_server_t)
+corenet_sendrecv_zookeeper_leader_server_packets(zookeeper_server_t)
+corenet_tcp_bind_all_nodes(zookeeper_server_t)
+corenet_tcp_bind_zookeeper_client_port(zookeeper_server_t)
+corenet_tcp_bind_zookeeper_election_port(zookeeper_server_t)
+corenet_tcp_bind_zookeeper_leader_port(zookeeper_server_t)
+corenet_tcp_connect_zookeeper_election_port(zookeeper_server_t)
+corenet_tcp_connect_zookeeper_leader_port(zookeeper_server_t)
+corenet_tcp_sendrecv_generic_if(zookeeper_server_t)
+corenet_tcp_sendrecv_generic_node(zookeeper_server_t)
+corenet_tcp_sendrecv_all_ports(zookeeper_server_t)
+
+dev_read_rand(zookeeper_server_t)
+dev_read_sysfs(zookeeper_server_t)
+dev_read_urand(zookeeper_server_t)
+
+files_read_etc_files(zookeeper_server_t)
+files_read_usr_files(zookeeper_server_t)
+
+fs_getattr_xattr_fs(zookeeper_server_t)
+
+# This can be removed on anything post-el5
+libs_use_ld_so(zookeeper_server_t)
+libs_use_shared_libs(zookeeper_server_t)
+
+logging_send_syslog_msg(zookeeper_server_t)
+
+miscfiles_read_localization(zookeeper_server_t)
+
+sysnet_read_config(zookeeper_server_t)
+
+optional_policy(`
+ # Java might not be optional
+ java_exec(zookeeper_server_t)
+')
--
1.7.2.3
-------------- next part --------------
A non-text attachment was scrubbed...
Name: not available
Type: application/pgp-signature
Size: 198 bytes
Desc: not available
Url : http://oss.tresys.com/pipermail/refpolicy/attachments/20100921/31440ada/attachment.bin
On Tue, 2010-09-21 at 21:57 +0200, Dominick Grift wrote:
> Signed-off-by: Dominick Grift <[email protected]>
Thanks Dominick.
Paul, are you going to include all of his changes in your patch set?
> ---
> :100644 100644 2ecdde8... 7a1b5de... M policy/modules/kernel/corenetwork.te.in
> :000000 100644 0000000... d88b5ff... A policy/modules/services/hadoop.fc
> :000000 100644 0000000... 6cc0049... A policy/modules/services/hadoop.if
> :000000 100644 0000000... 53a242b... A policy/modules/services/hadoop.te
> policy/modules/kernel/corenetwork.te.in | 4 +
> policy/modules/services/hadoop.fc | 40 ++++
> policy/modules/services/hadoop.if | 247 ++++++++++++++++++++++
> policy/modules/services/hadoop.te | 347 +++++++++++++++++++++++++++++++
> 4 files changed, 638 insertions(+), 0 deletions(-)
>
--
Jeremy J. Solt
Tresys Technology, LLC
410-290-1411 x122
On 09/21/2010 04:04 PM, Jeremy Solt wrote:
> On Tue, 2010-09-21 at 21:57 +0200, Dominick Grift wrote:
>> Signed-off-by: Dominick Grift <[email protected]>
> Thanks Dominick.
>
> Paul, are you going to include all of his changes in your patch set?
I guess it depends how we want to structure the patch. Do we want to upstream 9 different
modules or one monolithic one?
>> ---
>> :100644 100644 2ecdde8... 7a1b5de... M policy/modules/kernel/corenetwork.te.in
>> :000000 100644 0000000... d88b5ff... A policy/modules/services/hadoop.fc
>> :000000 100644 0000000... 6cc0049... A policy/modules/services/hadoop.if
>> :000000 100644 0000000... 53a242b... A policy/modules/services/hadoop.te
>> policy/modules/kernel/corenetwork.te.in | 4 +
>> policy/modules/services/hadoop.fc | 40 ++++
>> policy/modules/services/hadoop.if | 247 ++++++++++++++++++++++
>> policy/modules/services/hadoop.te | 347 +++++++++++++++++++++++++++++++
>> 4 files changed, 638 insertions(+), 0 deletions(-)
>>
>
>
On Thu, 2010-09-23 at 09:13 -0400, Paul Nuzzi wrote:
> On 09/21/2010 04:04 PM, Jeremy Solt wrote:
> > On Tue, 2010-09-21 at 21:57 +0200, Dominick Grift wrote:
> >> Signed-off-by: Dominick Grift <[email protected]>
> > Thanks Dominick.
> >
> > Paul, are you going to include all of his changes in your patch set?
>
> I guess it depends how we want to structure the patch. Do we want to upstream 9 different
> modules or one monolithic one?
>
Does it make sense to have any of these modules without the rest of
hadoop? I see that zookeeper is a subproject of hadoop. Could it be used
separately or is it only used with hadoop systems? If they're all
dependent on each other, then I think they should be in one module.
> >> ---
> >> :100644 100644 2ecdde8... 7a1b5de... M policy/modules/kernel/corenetwork.te.in
> >> :000000 100644 0000000... d88b5ff... A policy/modules/services/hadoop.fc
> >> :000000 100644 0000000... 6cc0049... A policy/modules/services/hadoop.if
> >> :000000 100644 0000000... 53a242b... A policy/modules/services/hadoop.te
> >> policy/modules/kernel/corenetwork.te.in | 4 +
> >> policy/modules/services/hadoop.fc | 40 ++++
> >> policy/modules/services/hadoop.if | 247 ++++++++++++++++++++++
> >> policy/modules/services/hadoop.te | 347 +++++++++++++++++++++++++++++++
> >> 4 files changed, 638 insertions(+), 0 deletions(-)
> >>
--
Jeremy J. Solt
Tresys Technology, LLC
410-290-1411 x122
On 09/24/2010 10:20 AM, Jeremy Solt wrote:
> On Thu, 2010-09-23 at 09:13 -0400, Paul Nuzzi wrote:
>> On 09/21/2010 04:04 PM, Jeremy Solt wrote:
>>> On Tue, 2010-09-21 at 21:57 +0200, Dominick Grift wrote:
>>>> Signed-off-by: Dominick Grift <[email protected]>
>>> Thanks Dominick.
>>>
>>> Paul, are you going to include all of his changes in your patch set?
>>
>> I guess it depends how we want to structure the patch. Do we want to upstream 9 different
>> modules or one monolithic one?
>>
>
> Does it make sense to have any of these modules without the rest of
> hadoop? I see that zookeeper is a subproject of hadoop. Could it be used
> separately or is it only used with hadoop systems? If they're all
> dependent on each other, then I think they should be in one module.
Keeping it all together is fine. The module could be split if a sysadmin
decides to run HDFS without zookeeper. Not a big deal. I will continue to
port it to one monolithic patch.
>>>> ---
>>>> :100644 100644 2ecdde8... 7a1b5de... M policy/modules/kernel/corenetwork.te.in
>>>> :000000 100644 0000000... d88b5ff... A policy/modules/services/hadoop.fc
>>>> :000000 100644 0000000... 6cc0049... A policy/modules/services/hadoop.if
>>>> :000000 100644 0000000... 53a242b... A policy/modules/services/hadoop.te
>>>> policy/modules/kernel/corenetwork.te.in | 4 +
>>>> policy/modules/services/hadoop.fc | 40 ++++
>>>> policy/modules/services/hadoop.if | 247 ++++++++++++++++++++++
>>>> policy/modules/services/hadoop.te | 347 +++++++++++++++++++++++++++++++
>>>> 4 files changed, 638 insertions(+), 0 deletions(-)
>>>>
>
>
>
I updated the patch based on recommendations from the mailing list.
All of hadoop's services are included in one module instead of
individual ones. Unconfined and sysadm roles are given access to
hadoop and zookeeper client domain transitions. The services are started
using run_init. Let me know what you think.
Signed-off-by: Paul Nuzzi <[email protected]>
---
policy/modules/kernel/corenetwork.te.in | 4
policy/modules/roles/sysadm.te | 8
policy/modules/services/hadoop.fc | 53 ++++
policy/modules/services/hadoop.if | 336 +++++++++++++++++++++++++++++
policy/modules/services/hadoop.te | 367 ++++++++++++++++++++++++++++++++
policy/modules/system/unconfined.te | 8
6 files changed, 776 insertions(+)
diff --git a/policy/modules/kernel/corenetwork.te.in b/policy/modules/kernel/corenetwork.te.in
index 2ecdde8..7a1b5de 100644
--- a/policy/modules/kernel/corenetwork.te.in
+++ b/policy/modules/kernel/corenetwork.te.in
@@ -105,6 +105,7 @@ network_port(giftd, tcp,1213,s0)
network_port(git, tcp,9418,s0, udp,9418,s0)
network_port(gopher, tcp,70,s0, udp,70,s0)
network_port(gpsd, tcp,2947,s0)
+network_port(hadoop_namenode, tcp,8020,s0)
network_port(hddtemp, tcp,7634,s0)
network_port(howl, tcp,5335,s0, udp,5353,s0)
network_port(hplip, tcp,1782,s0, tcp,2207,s0, tcp,2208,s0, tcp, 8290,s0, tcp,50000,s0, tcp,50002,s0, tcp,8292,s0, tcp,9100,s0, tcp,9101,s0, tcp,9102,s0, tcp,9220,s0, tcp,9221,s0, tcp,9222,s0, tcp,9280,s0, tcp,9281,s0, tcp,9282,s0, tcp,9290,s0, tcp,9291,s0, tcp,9292,s0)
@@ -211,6 +212,9 @@ network_port(xdmcp, udp,177,s0, tcp,177,s0)
network_port(xen, tcp,8002,s0)
network_port(xfs, tcp,7100,s0)
network_port(xserver, tcp,6000-6020,s0)
network_port(zebra, tcp,2600-2604,s0, tcp,2606,s0, udp,2600-2604,s0, udp,2606,s0)
+network_port(zookeeper_client, tcp,2181,s0)
+network_port(zookeeper_election, tcp,3888,s0)
+network_port(zookeeper_leader, tcp,2888,s0)
network_port(zope, tcp,8021,s0)
diff --git a/policy/modules/roles/sysadm.te b/policy/modules/roles/sysadm.te
index 2a19751..7954580 100644
--- a/policy/modules/roles/sysadm.te
+++ b/policy/modules/roles/sysadm.te
@@ -152,6 +152,10 @@ optional_policy(`
')
optional_policy(`
+ hadoop_run(sysadm_t, sysadm_r)
+')
+
+optional_policy(`
# allow system administrator to use the ipsec script to look
# at things (e.g., ipsec auto --status)
# probably should create an ipsec_admin role for this kind of thing
@@ -397,6 +401,10 @@ optional_policy(`
yam_run(sysadm_t, sysadm_r)
')
+optional_policy(`
+ zookeeper_run_client(sysadm_t, sysadm_r)
+')
+
ifndef(`distro_redhat',`
optional_policy(`
auth_role(sysadm_r, sysadm_t)
diff --git a/policy/modules/services/hadoop.fc b/policy/modules/services/hadoop.fc
new file mode 100644
index 0000000..5bdd554
--- /dev/null
+++ b/policy/modules/services/hadoop.fc
@@ -0,0 +1,53 @@
+/etc/hadoop.*(/.*)? gen_context(system_u:object_r:hadoop_etc_t,s0)
+
+/etc/rc\.d/init\.d/hadoop-(.*)?-datanode -- gen_context(system_u:object_r:hadoop_datanode_initrc_exec_t,s0)
+/etc/rc\.d/init\.d/hadoop-(.*)?-jobtracker -- gen_context(system_u:object_r:hadoop_jobtracker_initrc_exec_t,s0)
+/etc/rc\.d/init\.d/hadoop-(.*)?-namenode -- gen_context(system_u:object_r:hadoop_namenode_initrc_exec_t,s0)
+/etc/rc\.d/init\.d/hadoop-(.*)?-secondarynamenode -- gen_context(system_u:object_r:hadoop_secondarynamenode_initrc_exec_t,s0)
+/etc/rc\.d/init\.d/hadoop-(.*)?-tasktracker -- gen_context(system_u:object_r:hadoop_tasktracker_initrc_exec_t,s0)
+/etc/rc\.d/init\.d/hadoop-zookeeper -- gen_context(system_u:object_r:zookeeper_server_initrc_exec_t,s0)
+/etc/init\.d/hadoop-datanode -- gen_context(system_u:object_r:hadoop_datanode_initrc_exec_t,s0)
+/etc/init\.d/hadoop-jobtracker -- gen_context(system_u:object_r:hadoop_jobtracker_initrc_exec_t,s0)
+/etc/init\.d/hadoop-namenode -- gen_context(system_u:object_r:hadoop_namenode_initrc_exec_t,s0)
+/etc/init\.d/hadoop-secondarynamenode -- gen_context(system_u:object_r:hadoop_secondarynamenode_initrc_exec_t,s0)
+/etc/init\.d/hadoop-tasktracker -- gen_context(system_u:object_r:hadoop_tasktracker_initrc_exec_t,s0)
+/etc/init\.d/zookeeper -- gen_context(system_u:object_r:zookeeper_server_initrc_exec_t,s0)
+
+/etc/zookeeper(/.*)? gen_context(system_u:object_r:zookeeper_etc_t,s0)
+/etc/zookeeper\.dist(/.*)? gen_context(system_u:object_r:zookeeper_etc_t,s0)
+
+/usr/lib/hadoop(.*)?/bin/hadoop -- gen_context(system_u:object_r:hadoop_exec_t,s0)
+
+/usr/bin/zookeeper-client -- gen_context(system_u:object_r:zookeeper_exec_t,s0)
+/usr/bin/zookeeper-server -- gen_context(system_u:object_r:zookeeper_server_exec_t,s0)
+
+/var/zookeeper(/.*)? gen_context(system_u:object_r:zookeeper_server_var_t,s0)
+/var/lib/zookeeper(/.*)? gen_context(system_u:object_r:zookeeper_server_var_t,s0)
+
+/var/lib/hadoop(.*)? gen_context(system_u:object_r:hadoop_var_lib_t,s0)
+/var/lib/hadoop(.*)?/cache/hadoop/dfs/data(/.*)? gen_context(system_u:object_r:hadoop_datanode_var_lib_t,s0)
+/var/lib/hadoop(.*)?/cache/hadoop/dfs/name(/.*)? gen_context(system_u:object_r:hadoop_namenode_var_lib_t,s0)
+/var/lib/hadoop(.*)?/cache/hadoop/dfs/namesecondary(/.*)? gen_context(system_u:object_r:hadoop_secondarynamenode_var_lib_t,s0)
+/var/lib/hadoop(.*)?/cache/hadoop/mapred/local/jobTracker(/.*)? gen_context(system_u:object_r:hadoop_jobtracker_var_lib_t,s0)
+/var/lib/hadoop(.*)?/cache/hadoop/mapred/local/taskTracker(/.*)? gen_context(system_u:object_r:hadoop_tasktracker_var_lib_t,s0)
+
+/var/lock/subsys/hadoop-datanode -- gen_context(system_u:object_r:hadoop_datanode_lock_t,s0)
+/var/lock/subsys/hadoop-namenode -- gen_context(system_u:object_r:hadoop_namenode_lock_t,s0)
+/var/lock/subsys/hadoop-jobtracker -- gen_context(system_u:object_r:hadoop_jobtracker_lock_t,s0)
+/var/lock/subsys/hadoop-tasktracker -- gen_context(system_u:object_r:hadoop_tasktracker_lock_t,s0)
+/var/lock/subsys/hadoop-secondarynamenode -- gen_context(system_u:object_r:hadoop_secondarynamenode_lock_t,s0)
+
+/var/log/hadoop(.*)? gen_context(system_u:object_r:hadoop_log_t,s0)
+/var/log/hadoop(.*)?/hadoop-hadoop-datanode-(.*)? gen_context(system_u:object_r:hadoop_datanode_log_t,s0)
+/var/log/hadoop(.*)?/hadoop-hadoop-jobtracker-(.*)? gen_context(system_u:object_r:hadoop_jobtracker_log_t,s0)
+/var/log/hadoop(.*)?/hadoop-hadoop-namenode-(.*)? gen_context(system_u:object_r:hadoop_namenode_log_t,s0)
+/var/log/hadoop(.*)?/hadoop-hadoop-secondarynamenode-(.*)? gen_context(system_u:object_r:hadoop_secondarynamenode_log_t,s0)
+/var/log/hadoop(.*)?/hadoop-hadoop-tasktracker-(.*)? gen_context(system_u:object_r:hadoop_tasktracker_log_t,s0)
+/var/log/zookeeper(/.*)? gen_context(system_u:object_r:zookeeper_log_t,s0)
+
+/var/run/hadoop(.*)? -d gen_context(system_u:object_r:hadoop_var_run_t,s0)
+/var/run/hadoop(.*)?/hadoop-hadoop-datanode.pid -- gen_context(system_u:object_r:hadoop_datanode_initrc_var_run_t,s0)
+/var/run/hadoop(.*)?/hadoop-hadoop-namenode.pid -- gen_context(system_u:object_r:hadoop_namenode_initrc_var_run_t,s0)
+/var/run/hadoop(.*)?/hadoop-hadoop-jobtracker.pid -- gen_context(system_u:object_r:hadoop_jobtracker_initrc_var_run_t,s0)
+/var/run/hadoop(.*)?/hadoop-hadoop-tasktracker.pid -- gen_context(system_u:object_r:hadoop_tasktracker_initrc_var_run_t,s0)
+/var/run/hadoop(.*)?/hadoop-hadoop-secondarynamenode.pid -- gen_context(system_u:object_r:hadoop_secondarynamenode_initrc_var_run_t,s0)
diff --git a/policy/modules/services/hadoop.if b/policy/modules/services/hadoop.if
new file mode 100644
index 0000000..0e5bb28
--- /dev/null
+++ b/policy/modules/services/hadoop.if
@@ -0,0 +1,336 @@
+## <summary>Software for reliable, scalable, distributed computing.</summary>
+
+#######################################
+## <summary>
+## The template to define a hadoop domain.
+## </summary>
+## <param name="domain_prefix">
+## <summary>
+## Domain prefix to be used.
+## </summary>
+## </param>
+#
+template(`hadoop_domain_template',`
+ gen_require(`
+ attribute hadoop_domain;
+ type hadoop_log_t, hadoop_var_lib_t, hadoop_var_run_t;
+ type hadoop_exec_t;
+ ')
+
+ ########################################
+ #
+ # Shared declarations.
+ #
+
+ type hadoop_$1_t, hadoop_domain;
+ domain_type(hadoop_$1_t)
+ domain_entry_file(hadoop_$1_t, hadoop_exec_t)
+
+ type hadoop_$1_initrc_t;
+ type hadoop_$1_initrc_exec_t;
+ init_script_domain(hadoop_$1_initrc_t, hadoop_$1_initrc_exec_t)
+
+ role system_r types { hadoop_$1_initrc_t hadoop_$1_t };
+
+ type hadoop_$1_lock_t;
+ files_lock_file(hadoop_$1_lock_t)
+ files_lock_filetrans(hadoop_$1_initrc_t, hadoop_$1_lock_t, file)
+
+ type hadoop_$1_log_t;
+ logging_log_file(hadoop_$1_log_t)
+ filetrans_pattern(hadoop_$1_initrc_t, hadoop_log_t, hadoop_$1_log_t, {dir file})
+ filetrans_pattern(hadoop_$1_t, hadoop_log_t, hadoop_$1_log_t, {dir file})
+
+ type hadoop_$1_var_lib_t;
+ files_type(hadoop_$1_var_lib_t)
+ type_transition hadoop_$1_t hadoop_var_lib_t:file hadoop_$1_var_lib_t;
+
+ type hadoop_$1_initrc_var_run_t;
+ files_pid_file(hadoop_$1_initrc_var_run_t)
+ type_transition hadoop_$1_initrc_t hadoop_var_run_t:file hadoop_$1_initrc_var_run_t;
+
+ type hadoop_$1_tmp_t;
+ files_tmp_file(hadoop_$1_tmp_t)
+ files_tmp_filetrans(hadoop_$1_t, hadoop_$1_tmp_t, file)
+
+ ####################################
+ #
+ # Shared hadoop_$1 initrc policy.
+ #
+
+ allow hadoop_$1_initrc_t hadoop_$1_t:process { signal signull };
+ allow hadoop_$1_initrc_t self:capability { setuid setgid };
+ allow hadoop_$1_initrc_t self:fifo_file { read write getattr ioctl };
+ allow hadoop_$1_initrc_t self:process setsched;
+
+ consoletype_exec(hadoop_$1_initrc_t)
+ corecmd_exec_bin(hadoop_$1_initrc_t)
+ corecmd_exec_shell(hadoop_$1_initrc_t)
+
+ domtrans_pattern(hadoop_$1_initrc_t, hadoop_exec_t, hadoop_$1_t)
+ dontaudit hadoop_$1_initrc_t self:capability sys_tty_config;
+
+ files_read_etc_files(hadoop_$1_initrc_t)
+ files_read_usr_files(hadoop_$1_initrc_t)
+ files_manage_generic_tmp_dirs(hadoop_$1_initrc_t)
+ files_manage_generic_tmp_files(hadoop_$1_initrc_t)
+ fs_getattr_xattr_fs(hadoop_$1_initrc_t)
+
+ hadoop_rx_etc(hadoop_$1_initrc_t)
+
+ init_rw_utmp(hadoop_$1_initrc_t)
+ init_use_script_ptys(hadoop_$1_initrc_t)
+
+ kernel_read_kernel_sysctls(hadoop_$1_initrc_t)
+ kernel_read_sysctl(hadoop_$1_initrc_t)
+ kernel_read_system_state(hadoop_$1_initrc_t)
+
+ logging_send_syslog_msg(hadoop_$1_initrc_t)
+ logging_send_audit_msgs(hadoop_$1_initrc_t)
+ logging_search_logs(hadoop_$1_initrc_t)
+
+ manage_files_pattern(hadoop_$1_initrc_t, hadoop_$1_lock_t, hadoop_$1_lock_t)
+ manage_files_pattern(hadoop_$1_initrc_t, hadoop_$1_initrc_var_run_t, hadoop_$1_initrc_var_run_t)
+ manage_files_pattern(hadoop_$1_initrc_t, hadoop_$1_log_t, hadoop_$1_log_t)
+ manage_dirs_pattern(hadoop_$1_initrc_t, hadoop_var_run_t, hadoop_var_run_t)
+ manage_files_pattern(hadoop_$1_initrc_t, hadoop_var_run_t, hadoop_var_run_t)
+
+ miscfiles_read_localization(hadoop_$1_initrc_t)
+
+ optional_policy(`
+ nscd_socket_use(hadoop_$1_initrc_t)
+ ')
+
+ term_use_generic_ptys(hadoop_$1_initrc_t)
+
+ userdom_dontaudit_search_user_home_dirs(hadoop_$1_initrc_t)
+
+ # This can be removed on anything post-el5
+ libs_use_ld_so(hadoop_$1_initrc_t)
+ libs_use_shared_libs(hadoop_$1_initrc_t)
+
+ ####################################
+ #
+ # Shared hadoop_$1 policy.
+ #
+
+ allow hadoop_$1_t hadoop_domain:process signull;
+ allow hadoop_$1_t self:fifo_file { read write getattr ioctl };
+ allow hadoop_$1_t self:process execmem;
+ allow hadoop_$1_t hadoop_var_run_t:dir getattr;
+
+ corecmd_exec_bin(hadoop_$1_t)
+ corecmd_exec_shell(hadoop_$1_t)
+
+ dev_read_rand(hadoop_$1_t)
+ dev_read_urand(hadoop_$1_t)
+ dev_read_sysfs(hadoop_$1_t)
+ dontaudit hadoop_$1_t self:netlink_route_socket rw_netlink_socket_perms;
+
+ files_manage_generic_tmp_files(hadoop_$1_t)
+ files_manage_generic_tmp_dirs(hadoop_$1_t)
+ files_read_etc_files(hadoop_$1_t)
+ files_read_var_lib_files(hadoop_$1_t)
+ files_search_pids(hadoop_$1_t)
+
+ hadoop_rx_etc(hadoop_$1_t)
+
+ java_exec(hadoop_$1_t)
+
+ kernel_read_network_state(hadoop_$1_t)
+ kernel_read_system_state(hadoop_$1_t)
+
+ logging_search_logs(hadoop_$1_t)
+
+ manage_dirs_pattern(hadoop_$1_t, hadoop_$1_var_lib_t, hadoop_$1_var_lib_t)
+ manage_files_pattern(hadoop_$1_t, hadoop_$1_log_t, hadoop_$1_log_t)
+ manage_files_pattern(hadoop_$1_t, hadoop_$1_var_lib_t, hadoop_$1_var_lib_t)
+ manage_files_pattern(hadoop_$1_t, hadoop_$1_tmp_t, hadoop_$1_tmp_t)
+ miscfiles_read_localization(hadoop_$1_t)
+
+ optional_policy(`
+ nscd_socket_use(hadoop_$1_t)
+ ')
+
+ sysnet_read_config(hadoop_$1_t)
+
+ allow hadoop_$1_t self:tcp_socket create_stream_socket_perms;
+ corenet_all_recvfrom_unlabeled(hadoop_$1_t)
+ corenet_all_recvfrom_netlabel(hadoop_$1_t)
+ corenet_tcp_bind_all_nodes(hadoop_$1_t)
+ corenet_tcp_sendrecv_generic_if(hadoop_$1_t)
+ corenet_tcp_sendrecv_generic_node(hadoop_$1_t)
+ corenet_tcp_sendrecv_all_ports(hadoop_$1_t)
+ corenet_tcp_connect_generic_port(hadoop_$1_t)
+
+ allow hadoop_$1_t self:udp_socket create_socket_perms;
+ corenet_udp_sendrecv_generic_if(hadoop_$1_t)
+ corenet_udp_sendrecv_all_nodes(hadoop_$1_t)
+ corenet_udp_bind_all_nodes(hadoop_$1_t)
+
+ # This can be removed on anything post-el5
+ libs_use_ld_so(hadoop_$1_t)
+ libs_use_shared_libs(hadoop_$1_t)
+')
+
+########################################
+## <summary>
+## Execute hadoop in the
+## hadoop domain.
+## </summary>
+## <param name="domain">
+## <summary>
+## Domain allowed to transition.
+## </summary>
+## </param>
+#
+interface(`hadoop_domtrans',`
+ gen_require(`
+ type hadoop_t, hadoop_exec_t;
+ ')
+
+ files_search_usr($1)
+ libs_search_lib($1)
+ domtrans_pattern($1, hadoop_exec_t, hadoop_t)
+')
+
+########################################
+## <summary>
+## Execute hadoop in the hadoop domain,
+## and allow the specified role the
+## hadoop domain.
+## </summary>
+## <param name="domain">
+## <summary>
+## Domain allowed to transition.
+## </summary>
+## </param>
+## <param name="role">
+## <summary>
+## Role allowed access.
+## </summary>
+## </param>
+## <rolecap/>
+#
+interface(`hadoop_run',`
+ gen_require(`
+ type hadoop_t;
+ ')
+
+ hadoop_domtrans($1)
+ role $2 types hadoop_t;
+
+ allow $1 hadoop_t:process { ptrace signal_perms };
+ ps_process_pattern($1, hadoop_t)
+')
+
+########################################
+## <summary>
+## Execute zookeeper client in the
+## zookeeper client domain.
+## </summary>
+## <param name="domain">
+## <summary>
+## Domain allowed to transition.
+## </summary>
+## </param>
+#
+interface(`zookeeper_domtrans_client',`
+ gen_require(`
+ type zookeeper_t, zookeeper_exec_t;
+ ')
+
+ corecmd_search_bin($1)
+ files_search_usr($1)
+ domtrans_pattern($1, zookeeper_exec_t, zookeeper_t)
+')
+
+########################################
+## <summary>
+## Execute zookeeper server in the
+## zookeeper server domain.
+## </summary>
+## <param name="domain">
+## <summary>
+## Domain allowed to transition.
+## </summary>
+## </param>
+#
+interface(`zookeeper_domtrans_server',`
+ gen_require(`
+ type zookeeper_server_t, zookeeper_server_exec_t;
+ ')
+
+ corecmd_search_bin($1)
+ files_search_usr($1)
+ domtrans_pattern($1, zookeeper_server_exec_t, zookeeper_server_t)
+')
+
+########################################
+## <summary>
+## Execute zookeeper server in the
+## zookeeper domain.
+## </summary>
+## <param name="domain">
+## <summary>
+## Domain allowed to transition.
+## </summary>
+## </param>
+#
+interface(`zookeeper_initrc_domtrans_server',`
+ gen_require(`
+ type zookeeper_server_initrc_exec_t;
+ ')
+
+ init_labeled_script_domtrans($1, zookeeper_server_initrc_exec_t)
+')
+
+########################################
+## <summary>
+## Execute zookeeper client in the
+## zookeeper client domain, and allow the
+## specified role the zookeeper client domain.
+## </summary>
+## <param name="domain">
+## <summary>
+## Domain allowed to transition.
+## </summary>
+## </param>
+## <param name="role">
+## <summary>
+## Role allowed access.
+## </summary>
+## </param>
+## <rolecap/>
+#
+interface(`zookeeper_run_client',`
+ gen_require(`
+ type zookeeper_t;
+ ')
+
+ zookeeper_domtrans_client($1)
+ role $2 types zookeeper_t;
+
+ allow $1 zookeeper_t:process { ptrace signal_perms };
+ ps_process_pattern($1, zookeeper_t)
+')
+
+########################################
+## <summary>
+## Give permission to a domain to access hadoop_etc_t
+## </summary>
+## <param name="domain">
+## <summary>
+## Domain needing read and execute permission
+## </summary>
+## </param>
+#
+interface(`hadoop_rx_etc', `
+ gen_require(`
+ type hadoop_etc_t;
+ ')
+
+ allow $1 hadoop_etc_t:dir search_dir_perms;
+ allow $1 hadoop_etc_t:lnk_file { read getattr };
+ allow $1 hadoop_etc_t:file { read_file_perms execute execute_no_trans};
+')
diff --git a/policy/modules/services/hadoop.te b/policy/modules/services/hadoop.te
new file mode 100644
index 0000000..1a573ea
--- /dev/null
+++ b/policy/modules/services/hadoop.te
@@ -0,0 +1,367 @@
+policy_module(hadoop, 1.0.0)
+
+########################################
+#
+# Hadoop declarations.
+#
+
+attribute hadoop_domain;
+
+type hadoop_t;
+type hadoop_exec_t;
+application_domain(hadoop_t, hadoop_exec_t)
+ubac_constrained(hadoop_t)
+
+type hadoop_etc_t;
+files_config_file(hadoop_etc_t)
+
+type hadoop_var_lib_t;
+files_type(hadoop_var_lib_t)
+
+type hadoop_log_t;
+logging_log_file(hadoop_log_t)
+
+type hadoop_var_run_t;
+files_pid_file(hadoop_var_run_t)
+
+type hadoop_tmp_t;
+files_tmp_file(hadoop_tmp_t)
+ubac_constrained(hadoop_tmp_t)
+
+hadoop_domain_template(datanode)
+hadoop_domain_template(jobtracker)
+hadoop_domain_template(namenode)
+hadoop_domain_template(secondarynamenode)
+hadoop_domain_template(tasktracker)
+
+########################################
+#
+# Hadoop zookeeper client declarations.
+#
+
+type zookeeper_t;
+type zookeeper_exec_t;
+application_domain(zookeeper_t, zookeeper_exec_t)
+ubac_constrained(zookeeper_t)
+
+type zookeeper_etc_t;
+files_config_file(zookeeper_etc_t)
+
+type zookeeper_log_t;
+logging_log_file(zookeeper_log_t)
+
+type zookeeper_tmp_t;
+files_tmp_file(zookeeper_tmp_t)
+ubac_constrained(zookeeper_tmp_t)
+
+########################################
+#
+# Hadoop zookeeper server declarations.
+#
+
+type zookeeper_server_t;
+type zookeeper_server_exec_t;
+init_daemon_domain(zookeeper_server_t, zookeeper_server_exec_t)
+
+type zookeeper_server_initrc_exec_t;
+init_script_file(zookeeper_server_initrc_exec_t)
+
+type zookeeper_server_var_t;
+files_type(zookeeper_server_var_t)
+
+# This will need a file context specification.
+type zookeeper_server_var_run_t;
+files_pid_file(zookeeper_server_var_run_t)
+
+type zookeeper_server_tmp_t;
+files_tmp_file(zookeeper_server_tmp_t)
+
+########################################
+#
+# Hadoop policy.
+#
+
+allow hadoop_t self:capability sys_resource;
+allow hadoop_t self:process { getsched setsched signal signull setrlimit execmem };
+allow hadoop_t self:fifo_file rw_fifo_file_perms;
+allow hadoop_t self:key write;
+allow hadoop_t self:tcp_socket create_stream_socket_perms;
+allow hadoop_t self:udp_socket create_socket_perms;
+allow hadoop_t hadoop_domain:process signull;
+
+dontaudit hadoop_t self:netlink_route_socket rw_netlink_socket_perms;
+
+read_files_pattern(hadoop_t, hadoop_etc_t, hadoop_etc_t)
+read_lnk_files_pattern(hadoop_t, hadoop_etc_t, hadoop_etc_t)
+can_exec(hadoop_t, hadoop_etc_t)
+
+manage_dirs_pattern(hadoop_t, hadoop_var_lib_t, hadoop_var_lib_t)
+manage_dirs_pattern(hadoop_t, hadoop_log_t, hadoop_log_t)
+manage_dirs_pattern(hadoop_t, hadoop_tmp_t, hadoop_tmp_t)
+manage_files_pattern(hadoop_t, hadoop_var_lib_t, hadoop_var_lib_t)
+manage_files_pattern(hadoop_t, hadoop_tmp_t, hadoop_tmp_t)
+
+getattr_dirs_pattern(hadoop_t, hadoop_var_run_t, hadoop_var_run_t)
+
+files_tmp_filetrans(hadoop_t, hadoop_tmp_t, file)
+files_manage_generic_tmp_dirs(hadoop_t)
+
+kernel_read_network_state(hadoop_t)
+kernel_read_system_state(hadoop_t)
+
+corecmd_exec_bin(hadoop_t)
+corecmd_exec_shell(hadoop_t)
+
+corenet_all_recvfrom_unlabeled(hadoop_t)
+corenet_all_recvfrom_netlabel(hadoop_t)
+corenet_sendrecv_hadoop_namenode_client_packets(hadoop_t)
+corenet_sendrecv_portmap_client_packets(hadoop_t)
+corenet_sendrecv_zope_client_packets(hadoop_t)
+corenet_tcp_bind_all_nodes(hadoop_t)
+corenet_tcp_connect_hadoop_namenode_port(hadoop_t)
+corenet_tcp_connect_portmap_port(hadoop_t)
+corenet_tcp_connect_zope_port(hadoop_t)
+corenet_tcp_sendrecv_all_nodes(hadoop_t)
+corenet_tcp_sendrecv_all_ports(hadoop_t)
+corenet_tcp_sendrecv_generic_if(hadoop_t)
+corenet_tcp_connect_generic_port(hadoop_t)
+corenet_udp_bind_all_nodes(hadoop_t)
+corenet_udp_sendrecv_all_nodes(hadoop_t)
+corenet_udp_sendrecv_all_ports(hadoop_t)
+corenet_udp_sendrecv_generic_if(hadoop_t)
+
+dev_read_rand(hadoop_t)
+dev_read_sysfs(hadoop_t)
+dev_read_urand(hadoop_t)
+
+files_dontaudit_search_spool(hadoop_t)
+files_read_usr_files(hadoop_t)
+files_read_all_files(hadoop_t)
+
+fs_getattr_xattr_fs(hadoop_t)
+
+java_exec(hadoop_t)
+
+# This can be removed on anything post-el5
+libs_use_ld_so(hadoop_t)
+libs_use_shared_libs(hadoop_t)
+
+miscfiles_read_localization(hadoop_t)
+
+userdom_dontaudit_search_user_home_dirs(hadoop_t)
+userdom_use_user_terminals(hadoop_t)
+
+optional_policy(`
+ nis_use_ypbind(hadoop_t)
+')
+
+optional_policy(`
+ nscd_socket_use(hadoop_t)
+')
+
+########################################
+#
+# Hadoop datanode policy.
+#
+
+corenet_tcp_connect_hadoop_namenode_port(hadoop_datanode_t)
+fs_getattr_xattr_fs(hadoop_datanode_t)
+manage_dirs_pattern(hadoop_datanode_t, hadoop_var_lib_t, hadoop_var_lib_t)
+
+########################################
+#
+# Hadoop jobtracker policy.
+#
+
+corenet_tcp_bind_zope_port(hadoop_jobtracker_t)
+corenet_tcp_connect_hadoop_namenode_port(hadoop_jobtracker_t)
+create_dirs_pattern(hadoop_jobtracker_t, hadoop_jobtracker_log_t, hadoop_jobtracker_log_t)
+manage_dirs_pattern(hadoop_jobtracker_t, hadoop_var_lib_t, hadoop_var_lib_t)
+
+########################################
+#
+# Hadoop namenode policy.
+#
+
+corenet_tcp_bind_hadoop_namenode_port(hadoop_namenode_t)
+corenet_tcp_connect_hadoop_namenode_port(hadoop_namenode_t)
+manage_dirs_pattern(hadoop_namenode_t, hadoop_var_lib_t, hadoop_var_lib_t)
+manage_files_pattern(hadoop_namenode_t, hadoop_var_lib_t, hadoop_var_lib_t)
+
+########################################
+#
+# Hadoop secondary namenode policy.
+#
+
+corenet_tcp_connect_hadoop_namenode_port(hadoop_secondarynamenode_t)
+manage_dirs_pattern(hadoop_secondarynamenode_t, hadoop_var_lib_t, hadoop_var_lib_t)
+
+########################################
+#
+# Hadoop tasktracker policy.
+#
+
+allow hadoop_tasktracker_t self:process signal;
+
+corenet_tcp_connect_zope_port(hadoop_tasktracker_t)
+corenet_tcp_connect_hadoop_namenode_port(hadoop_tasktracker_t)
+
+filetrans_pattern(hadoop_tasktracker_t, hadoop_log_t, hadoop_tasktracker_log_t, dir)
+fs_getattr_xattr_fs(hadoop_tasktracker_t)
+fs_associate(hadoop_tasktracker_t)
+
+manage_dirs_pattern(hadoop_tasktracker_t, hadoop_var_lib_t, hadoop_var_lib_t)
+manage_dirs_pattern(hadoop_tasktracker_t, hadoop_tasktracker_log_t, hadoop_tasktracker_log_t);
+
+########################################
+#
+# Hadoop zookeeper client policy.
+#
+
+allow zookeeper_t self:process { getsched sigkill signal signull execmem };
+allow zookeeper_t self:fifo_file rw_fifo_file_perms;
+allow zookeeper_t self:tcp_socket create_stream_socket_perms;
+allow zookeeper_t self:udp_socket create_socket_perms;
+allow zookeeper_t zookeeper_server_t:process signull;
+
+read_files_pattern(zookeeper_t, zookeeper_etc_t, zookeeper_etc_t)
+read_lnk_files_pattern(zookeeper_t, zookeeper_etc_t, zookeeper_etc_t)
+
+setattr_dirs_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
+append_files_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
+create_files_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
+read_files_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
+setattr_files_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
+logging_log_filetrans(zookeeper_t, zookeeper_log_t, file)
+
+manage_files_pattern(zookeeper_t, zookeeper_tmp_t, zookeeper_tmp_t)
+files_tmp_filetrans(zookeeper_t, zookeeper_tmp_t, file)
+files_manage_generic_tmp_dirs(zookeeper_t)
+
+can_exec(zookeeper_t, zookeeper_exec_t)
+
+kernel_read_network_state(zookeeper_t)
+kernel_read_system_state(zookeeper_t)
+
+corecmd_exec_bin(zookeeper_t)
+corecmd_exec_shell(zookeeper_t)
+
+dontaudit zookeeper_t self:netlink_route_socket rw_netlink_socket_perms;
+
+corenet_all_recvfrom_unlabeled(zookeeper_t)
+corenet_all_recvfrom_netlabel(zookeeper_t)
+corenet_sendrecv_zookeeper_client_client_packets(zookeeper_t)
+corenet_tcp_bind_all_nodes(zookeeper_t)
+corenet_tcp_connect_zookeeper_client_port(zookeeper_t)
+corenet_tcp_sendrecv_all_nodes(zookeeper_t)
+corenet_tcp_sendrecv_all_ports(zookeeper_t)
+corenet_tcp_sendrecv_generic_if(zookeeper_t)
+corenet_tcp_connect_generic_port(zookeeper_t)
+corenet_udp_bind_all_nodes(zookeeper_t)
+corenet_udp_sendrecv_all_nodes(zookeeper_t)
+corenet_udp_sendrecv_all_ports(zookeeper_t)
+corenet_udp_sendrecv_generic_if(zookeeper_t)
+
+dev_read_rand(zookeeper_t)
+dev_read_sysfs(zookeeper_t)
+dev_read_urand(zookeeper_t)
+
+files_read_etc_files(zookeeper_t)
+files_read_usr_files(zookeeper_t)
+
+# This can be removed on anything post-el5
+libs_use_ld_so(zookeeper_t)
+libs_use_shared_libs(zookeeper_t)
+
+miscfiles_read_localization(zookeeper_t)
+
+sysnet_read_config(zookeeper_t)
+
+userdom_dontaudit_search_user_home_dirs(zookeeper_t)
+userdom_use_user_terminals(zookeeper_t)
+
+java_exec(zookeeper_t)
+
+optional_policy(`
+ nscd_socket_use(zookeeper_t)
+')
+
+########################################
+#
+# Hadoop zookeeper server policy.
+#
+
+allow zookeeper_server_t self:capability kill;
+allow zookeeper_server_t self:process { execmem getsched sigkill signal signull };
+allow zookeeper_server_t self:fifo_file rw_fifo_file_perms;
+allow zookeeper_server_t self:netlink_route_socket rw_netlink_socket_perms;
+allow zookeeper_server_t self:tcp_socket create_stream_socket_perms;
+
+read_files_pattern(zookeeper_server_t, zookeeper_etc_t, zookeeper_etc_t)
+read_lnk_files_pattern(zookeeper_server_t, zookeeper_etc_t, zookeeper_etc_t)
+
+manage_dirs_pattern(zookeeper_server_t, zookeeper_server_var_t, zookeeper_server_var_t)
+manage_files_pattern(zookeeper_server_t, zookeeper_server_var_t, zookeeper_server_var_t)
+files_var_lib_filetrans(zookeeper_server_t, zookeeper_server_var_t, { dir file })
+
+setattr_dirs_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
+append_files_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
+create_files_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
+read_files_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
+setattr_files_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
+logging_log_filetrans(zookeeper_server_t, zookeeper_log_t, file)
+
+manage_files_pattern(zookeeper_server_t, zookeeper_server_var_run_t, zookeeper_server_var_run_t)
+files_pid_filetrans(zookeeper_server_t, zookeeper_server_var_run_t, file)
+
+manage_files_pattern(zookeeper_server_t, zookeeper_server_tmp_t, zookeeper_server_tmp_t)
+files_tmp_filetrans(zookeeper_server_t, zookeeper_server_tmp_t, file)
+files_manage_generic_tmp_files(zookeeper_server_t)
+files_manage_generic_tmp_dirs(zookeeper_server_t)
+
+can_exec(zookeeper_server_t, zookeeper_server_exec_t)
+
+kernel_read_network_state(zookeeper_server_t)
+kernel_read_system_state(zookeeper_server_t)
+
+corecmd_exec_bin(zookeeper_server_t)
+corecmd_exec_shell(zookeeper_server_t)
+
+corenet_all_recvfrom_unlabeled(zookeeper_server_t)
+corenet_all_recvfrom_netlabel(zookeeper_server_t)
+corenet_sendrecv_zookeeper_election_client_packets(zookeeper_server_t)
+corenet_sendrecv_zookeeper_leader_client_packets(zookeeper_server_t)
+corenet_sendrecv_zookeeper_client_server_packets(zookeeper_server_t)
+corenet_sendrecv_zookeeper_election_server_packets(zookeeper_server_t)
+corenet_sendrecv_zookeeper_leader_server_packets(zookeeper_server_t)
+corenet_tcp_bind_all_nodes(zookeeper_server_t)
+corenet_tcp_bind_zookeeper_client_port(zookeeper_server_t)
+corenet_tcp_bind_zookeeper_election_port(zookeeper_server_t)
+corenet_tcp_bind_zookeeper_leader_port(zookeeper_server_t)
+corenet_tcp_connect_zookeeper_election_port(zookeeper_server_t)
+corenet_tcp_connect_zookeeper_leader_port(zookeeper_server_t)
+corenet_tcp_sendrecv_generic_if(zookeeper_server_t)
+corenet_tcp_sendrecv_generic_node(zookeeper_server_t)
+corenet_tcp_sendrecv_all_ports(zookeeper_server_t)
+corenet_tcp_connect_generic_port(zookeeper_server_t)
+
+dev_read_rand(zookeeper_server_t)
+dev_read_sysfs(zookeeper_server_t)
+dev_read_urand(zookeeper_server_t)
+
+files_read_etc_files(zookeeper_server_t)
+files_read_usr_files(zookeeper_server_t)
+
+fs_getattr_xattr_fs(zookeeper_server_t)
+
+# This can be removed on anything post-el5
+libs_use_ld_so(zookeeper_server_t)
+libs_use_shared_libs(zookeeper_server_t)
+
+logging_send_syslog_msg(zookeeper_server_t)
+
+miscfiles_read_localization(zookeeper_server_t)
+
+sysnet_read_config(zookeeper_server_t)
+
+java_exec(zookeeper_server_t)
diff --git a/policy/modules/system/unconfined.te b/policy/modules/system/unconfined.te
index f976344..ac27264 100644
--- a/policy/modules/system/unconfined.te
+++ b/policy/modules/system/unconfined.te
@@ -118,6 +118,10 @@ optional_policy(`
')
optional_policy(`
+ hadoop_run(unconfined_t, unconfined_r)
+')
+
+optional_policy(`
inn_domtrans(unconfined_t)
')
@@ -210,6 +214,10 @@ optional_policy(`
xserver_domtrans(unconfined_t)
')
+optional_policy(`
+ zookeeper_run_client(unconfined_t, unconfined_r)
+')
+
########################################
#
# Unconfined Execmem Local policy
On Thu, Sep 30, 2010 at 03:39:40PM -0400, Paul Nuzzi wrote:
> I updated the patch based on recommendations from the mailing list.
> All of hadoop's services are included in one module instead of
> individual ones. Unconfined and sysadm roles are given access to
> hadoop and zookeeper client domain transitions. The services are started
> using run_init. Let me know what you think.
Why do some hadoop domains need to manage generic tmp?
files_manage_generic_tmp_dirs(zookeeper_t)
files_manage_generic_tmp_dirs(hadoop_t)
files_manage_generic_tmp_dirs(hadoop_$1_initrc_t)
files_manage_generic_tmp_files(hadoop_$1_initrc_t)
files_manage_generic_tmp_files(hadoop_$1_t)
files_manage_generic_tmp_dirs(hadoop_$1_t)
You probably need:
files_search_pids() and files_search_locks() for hadoop_$1_initrc_t
because it needs to traverse /var/run and /var/lock/subsys to be able to manage its objects there.
Can use rw_fifo_file_perms here:
allow hadoop_$1_initrc_t self:fifo_file { read write getattr ioctl };
Might want to split this into hadoop_read_config_files and hadoop_exec_config_files.
hadoop_rx_etc(hadoop_$1_initrc_t)
This seems wrong. Why does it need that? Use files_search_var_lib() if possible:
files_read_var_lib_files(hadoop_$1_t)
This is not a declaration and might want to use filetrans_pattern() instead:
type_transition hadoop_$1_initrc_t hadoop_var_run_t:file hadoop_$1_initrc_var_run_t;
Other than the above, there are some style issues:
http://oss.tresys.com/projects/refpolicy/wiki/StyleGuide
But I can help clean that up once the above issues are resolved.
>
>
> Signed-off-by: Paul Nuzzi <[email protected]>
>
> ---
> policy/modules/kernel/corenetwork.te.in | 4
> policy/modules/roles/sysadm.te | 8
> policy/modules/services/hadoop.fc | 53 ++++
> policy/modules/services/hadoop.if | 336 +++++++++++++++++++++++++++++
> policy/modules/services/hadoop.te | 367 ++++++++++++++++++++++++++++++++
> policy/modules/system/unconfined.te | 8
> 6 files changed, 776 insertions(+)
>
> diff --git a/policy/modules/kernel/corenetwork.te.in b/policy/modules/kernel/corenetwork.te.in
> index 2ecdde8..7a1b5de 100644
> --- a/policy/modules/kernel/corenetwork.te.in
> +++ b/policy/modules/kernel/corenetwork.te.in
> @@ -105,6 +105,7 @@ network_port(giftd, tcp,1213,s0)
> network_port(git, tcp,9418,s0, udp,9418,s0)
> network_port(gopher, tcp,70,s0, udp,70,s0)
> network_port(gpsd, tcp,2947,s0)
> +network_port(hadoop_namenode, tcp, 8020,s0)
> network_port(hddtemp, tcp,7634,s0)
> network_port(howl, tcp,5335,s0, udp,5353,s0)
> network_port(hplip, tcp,1782,s0, tcp,2207,s0, tcp,2208,s0, tcp, 8290,s0, tcp,50000,s0, tcp,50002,s0, tcp,8292,s0, tcp,9100,s0, tcp,9101,s0, tcp,9102,s0, tcp,9220,s0, tcp,9221,s0, tcp,9222,s0, tcp,9280,s0, tcp,9281,s0, tcp,9282,s0, tcp,9290,s0, tcp,9291,s0, tcp,9292,s0)
> @@ -211,6 +212,9 @@ network_port(xdmcp, udp,177,s0, tcp,177,s0)
> network_port(xen, tcp,8002,s0)
> network_port(xfs, tcp,7100,s0)
> network_port(xserver, tcp,6000-6020,s0)
> +network_port(zookeeper_client, tcp, 2181,s0)
> +network_port(zookeeper_election, tcp, 3888,s0)
> +network_port(zookeeper_leader, tcp, 2888,s0)
> network_port(zebra, tcp,2600-2604,s0, tcp,2606,s0, udp,2600-2604,s0, udp,2606,s0)
> network_port(zope, tcp,8021,s0)
>
> diff --git a/policy/modules/roles/sysadm.te b/policy/modules/roles/sysadm.te
> index 2a19751..7954580 100644
> --- a/policy/modules/roles/sysadm.te
> +++ b/policy/modules/roles/sysadm.te
> @@ -152,6 +152,10 @@ optional_policy(`
> ')
>
> optional_policy(`
> + hadoop_run(sysadm_t, sysadm_r)
> +')
> +
> +optional_policy(`
> # allow system administrator to use the ipsec script to look
> # at things (e.g., ipsec auto --status)
> # probably should create an ipsec_admin role for this kind of thing
> @@ -397,6 +401,10 @@ optional_policy(`
> yam_run(sysadm_t, sysadm_r)
> ')
>
> +optional_policy(`
> + zookeeper_run_client(sysadm_t, sysadm_r)
> +')
> +
> ifndef(`distro_redhat',`
> optional_policy(`
> auth_role(sysadm_r, sysadm_t)
> diff --git a/policy/modules/services/hadoop.fc b/policy/modules/services/hadoop.fc
> new file mode 100644
> index 0000000..5bdd554
> --- /dev/null
> +++ b/policy/modules/services/hadoop.fc
> @@ -0,0 +1,53 @@
> +/etc/hadoop.*(/.*)? gen_context(system_u:object_r:hadoop_etc_t,s0)
> +
> +/etc/rc\.d/init\.d/hadoop-(.*)?-datanode -- gen_context(system_u:object_r:hadoop_datanode_initrc_exec_t,s0)
> +/etc/rc\.d/init\.d/hadoop-(.*)?-jobtracker -- gen_context(system_u:object_r:hadoop_jobtracker_initrc_exec_t,s0)
> +/etc/rc\.d/init\.d/hadoop-(.*)?-namenode -- gen_context(system_u:object_r:hadoop_namenode_initrc_exec_t,s0)
> +/etc/rc\.d/init\.d/hadoop-(.*)?-secondarynamenode -- gen_context(system_u:object_r:hadoop_secondarynamenode_initrc_exec_t,s0)
> +/etc/rc\.d/init\.d/hadoop-(.*)?-tasktracker -- gen_context(system_u:object_r:hadoop_tasktracker_initrc_exec_t,s0)
> +/etc/rc\.d/init\.d/hadoop-zookeeper -- gen_context(system_u:object_r:zookeeper_server_initrc_exec_t,s0)
> +/etc/init\.d/hadoop-datanode -- gen_context(system_u:object_r:hadoop_datanode_initrc_exec_t,s0)
> +/etc/init\.d/hadoop-jobtracker -- gen_context(system_u:object_r:hadoop_jobtracker_initrc_exec_t,s0)
> +/etc/init\.d/hadoop-namenode -- gen_context(system_u:object_r:hadoop_namenode_initrc_exec_t,s0)
> +/etc/init\.d/hadoop-secondarynamenode -- gen_context(system_u:object_r:hadoop_secondarynamenode_initrc_exec_t,s0)
> +/etc/init\.d/hadoop-tasktracker -- gen_context(system_u:object_r:hadoop_tasktracker_initrc_exec_t,s0)
> +/etc/init\.d/zookeeper -- gen_context(system_u:object_r:zookeeper_server_initrc_exec_t,s0)
> +
> +/etc/zookeeper(/.*)? gen_context(system_u:object_r:zookeeper_etc_t,s0)
> +/etc/zookeeper\.dist(/.*)? gen_context(system_u:object_r:zookeeper_etc_t,s0)
> +
> +/usr/lib/hadoop(.*)?/bin/hadoop -- gen_context(system_u:object_r:hadoop_exec_t,s0)
> +
> +/usr/bin/zookeeper-client -- gen_context(system_u:object_r:zookeeper_exec_t,s0)
> +/usr/bin/zookeeper-server -- gen_context(system_u:object_r:zookeeper_server_exec_t,s0)
> +
> +/var/zookeeper(/.*)? gen_context(system_u:object_r:zookeeper_server_var_t,s0)
> +/var/lib/zookeeper(/.*)? gen_context(system_u:object_r:zookeeper_server_var_t,s0)
> +
> +/var/lib/hadoop(.*)? gen_context(system_u:object_r:hadoop_var_lib_t,s0)
> +/var/lib/hadoop(.*)?/cache/hadoop/dfs/data(/.*)? gen_context(system_u:object_r:hadoop_datanode_var_lib_t,s0)
> +/var/lib/hadoop(.*)?/cache/hadoop/dfs/name(/.*)? gen_context(system_u:object_r:hadoop_namenode_var_lib_t,s0)
> +/var/lib/hadoop(.*)?/cache/hadoop/dfs/namesecondary(/.*)? gen_context(system_u:object_r:hadoop_secondarynamenode_var_lib_t,s0)
> +/var/lib/hadoop(.*)?/cache/hadoop/mapred/local/jobTracker(/.*)? gen_context(system_u:object_r:hadoop_jobtracker_var_lib_t,s0)
> +/var/lib/hadoop(.*)?/cache/hadoop/mapred/local/taskTracker(/.*)? gen_context(system_u:object_r:hadoop_tasktracker_var_lib_t,s0)
> +
> +/var/lock/subsys/hadoop-datanode -- gen_context(system_u:object_r:hadoop_datanode_lock_t,s0)
> +/var/lock/subsys/hadoop-namenode -- gen_context(system_u:object_r:hadoop_namenode_lock_t,s0)
> +/var/lock/subsys/hadoop-jobtracker -- gen_context(system_u:object_r:hadoop_jobtracker_lock_t,s0)
> +/var/lock/subsys/hadoop-tasktracker -- gen_context(system_u:object_r:hadoop_tasktracker_lock_t,s0)
> +/var/lock/subsys/hadoop-secondarynamenode -- gen_context(system_u:object_r:hadoop_secondarynamenode_lock_t,s0)
> +
> +/var/log/hadoop(.*)? gen_context(system_u:object_r:hadoop_log_t,s0)
> +/var/log/hadoop(.*)?/hadoop-hadoop-datanode-(.*)? gen_context(system_u:object_r:hadoop_datanode_log_t,s0)
> +/var/log/hadoop(.*)?/hadoop-hadoop-jobtracker-(.*)? gen_context(system_u:object_r:hadoop_jobtracker_log_t,s0)
> +/var/log/hadoop(.*)?/hadoop-hadoop-namenode-(.*)? gen_context(system_u:object_r:hadoop_namenode_log_t,s0)
> +/var/log/hadoop(.*)?/hadoop-hadoop-secondarynamenode-(.*)? gen_context(system_u:object_r:hadoop_secondarynamenode_log_t,s0)
> +/var/log/hadoop(.*)?/hadoop-hadoop-tasktracker-(.*)? gen_context(system_u:object_r:hadoop_tasktracker_log_t,s0)
> +/var/log/zookeeper(/.*)? gen_context(system_u:object_r:zookeeper_log_t,s0)
> +
> +/var/run/hadoop(.*)? -d gen_context(system_u:object_r:hadoop_var_run_t,s0)
> +/var/run/hadoop(.*)?/hadoop-hadoop-datanode.pid -- gen_context(system_u:object_r:hadoop_datanode_initrc_var_run_t,s0)
> +/var/run/hadoop(.*)?/hadoop-hadoop-namenode.pid -- gen_context(system_u:object_r:hadoop_namenode_initrc_var_run_t,s0)
> +/var/run/hadoop(.*)?/hadoop-hadoop-jobtracker.pid -- gen_context(system_u:object_r:hadoop_jobtracker_initrc_var_run_t,s0)
> +/var/run/hadoop(.*)?/hadoop-hadoop-tasktracker.pid -- gen_context(system_u:object_r:hadoop_tasktracker_initrc_var_run_t,s0)
> +/var/run/hadoop(.*)?/hadoop-hadoop-secondarynamenode.pid -- gen_context(system_u:object_r:hadoop_secondarynamenode_initrc_var_run_t,s0)
> diff --git a/policy/modules/services/hadoop.if b/policy/modules/services/hadoop.if
> new file mode 100644
> index 0000000..0e5bb28
> --- /dev/null
> +++ b/policy/modules/services/hadoop.if
> @@ -0,0 +1,336 @@
> +## <summary>Software for reliable, scalable, distributed computing.</summary>
> +
> +#######################################
> +## <summary>
> +## The template to define a hadoop domain.
> +## </summary>
> +## <param name="domain_prefix">
> +## <summary>
> +## Domain prefix to be used.
> +## </summary>
> +## </param>
> +#
> +template(`hadoop_domain_template',`
> + gen_require(`
> + attribute hadoop_domain;
> + type hadoop_log_t, hadoop_var_lib_t, hadoop_var_run_t;
> + type hadoop_exec_t;
> + ')
> +
> + ########################################
> + #
> + # Shared declarations.
> + #
> +
> + type hadoop_$1_t, hadoop_domain;
> + domain_type(hadoop_$1_t)
> + domain_entry_file(hadoop_$1_t, hadoop_exec_t)
> +
> + type hadoop_$1_initrc_t;
> + type hadoop_$1_initrc_exec_t;
> + init_script_domain(hadoop_$1_initrc_t, hadoop_$1_initrc_exec_t)
> +
> + role system_r types { hadoop_$1_initrc_t hadoop_$1_t };
> +
> + type hadoop_$1_lock_t;
> + files_lock_file(hadoop_$1_lock_t)
> + files_lock_filetrans(hadoop_$1_initrc_t, hadoop_$1_lock_t, file)
> +
> + type hadoop_$1_log_t;
> + logging_log_file(hadoop_$1_log_t)
> + filetrans_pattern(hadoop_$1_initrc_t, hadoop_log_t, hadoop_$1_log_t, {dir file})
> + filetrans_pattern(hadoop_$1_t, hadoop_log_t, hadoop_$1_log_t, {dir file})
> +
> + type hadoop_$1_var_lib_t;
> + files_type(hadoop_$1_var_lib_t)
> + type_transition hadoop_$1_t hadoop_var_lib_t:file hadoop_$1_var_lib_t;
> +
> + type hadoop_$1_initrc_var_run_t;
> + files_pid_file(hadoop_$1_initrc_var_run_t)
> + type_transition hadoop_$1_initrc_t hadoop_var_run_t:file hadoop_$1_initrc_var_run_t;
> +
> + type hadoop_$1_tmp_t;
> + files_tmp_file(hadoop_$1_tmp_t)
> + files_tmp_filetrans(hadoop_$1_t, hadoop_$1_tmp_t, file)
> +
> + ####################################
> + #
> + # Shared hadoop_$1 initrc policy.
> + #
> +
> + allow hadoop_$1_initrc_t hadoop_$1_t:process { signal signull };
> + allow hadoop_$1_initrc_t self:capability { setuid setgid };
> + allow hadoop_$1_initrc_t self:fifo_file { read write getattr ioctl };
> + allow hadoop_$1_initrc_t self:process setsched;
> +
> + consoletype_exec(hadoop_$1_initrc_t)
> + corecmd_exec_bin(hadoop_$1_initrc_t)
> + corecmd_exec_shell(hadoop_$1_initrc_t)
> +
> + domtrans_pattern(hadoop_$1_initrc_t, hadoop_exec_t, hadoop_$1_t)
> + dontaudit hadoop_$1_initrc_t self:capability sys_tty_config;
> +
> + files_read_etc_files(hadoop_$1_initrc_t)
> + files_read_usr_files(hadoop_$1_initrc_t)
> + files_manage_generic_tmp_dirs(hadoop_$1_initrc_t)
> + files_manage_generic_tmp_files(hadoop_$1_initrc_t)
> + fs_getattr_xattr_fs(hadoop_$1_initrc_t)
> +
> + hadoop_rx_etc(hadoop_$1_initrc_t)
> +
> + init_rw_utmp(hadoop_$1_initrc_t)
> + init_use_script_ptys(hadoop_$1_initrc_t)
> +
> + kernel_read_kernel_sysctls(hadoop_$1_initrc_t)
> + kernel_read_sysctl(hadoop_$1_initrc_t)
> + kernel_read_system_state(hadoop_$1_initrc_t)
> +
> + logging_send_syslog_msg(hadoop_$1_initrc_t)
> + logging_send_audit_msgs(hadoop_$1_initrc_t)
> + logging_search_logs(hadoop_$1_initrc_t)
> +
> + manage_files_pattern(hadoop_$1_initrc_t, hadoop_$1_lock_t, hadoop_$1_lock_t)
> + manage_files_pattern(hadoop_$1_initrc_t, hadoop_$1_initrc_var_run_t, hadoop_$1_initrc_var_run_t)
> + manage_files_pattern(hadoop_$1_initrc_t, hadoop_$1_log_t, hadoop_$1_log_t)
> + manage_dirs_pattern(hadoop_$1_initrc_t, hadoop_var_run_t, hadoop_var_run_t)
> + manage_files_pattern(hadoop_$1_initrc_t, hadoop_var_run_t, hadoop_var_run_t)
> +
> + miscfiles_read_localization(hadoop_$1_initrc_t)
> +
> + optional_policy(`
> + nscd_socket_use(hadoop_$1_initrc_t)
> + ')
> +
> + term_use_generic_ptys(hadoop_$1_initrc_t)
> +
> + userdom_dontaudit_search_user_home_dirs(hadoop_$1_initrc_t)
> +
> + # This can be removed on anything post-el5
> + libs_use_ld_so(hadoop_$1_initrc_t)
> + libs_use_shared_libs(hadoop_$1_initrc_t)
> +
> + ####################################
> + #
> + # Shared hadoop_$1 policy.
> + #
> +
> + allow hadoop_$1_t hadoop_domain:process signull;
> + allow hadoop_$1_t self:fifo_file { read write getattr ioctl };
> + allow hadoop_$1_t self:process execmem;
> + allow hadoop_$1_t hadoop_var_run_t:dir getattr;
> +
> + corecmd_exec_bin(hadoop_$1_t)
> + corecmd_exec_shell(hadoop_$1_t)
> +
> + dev_read_rand(hadoop_$1_t)
> + dev_read_urand(hadoop_$1_t)
> + dev_read_sysfs(hadoop_$1_t)
> + dontaudit hadoop_$1_t self:netlink_route_socket rw_netlink_socket_perms;
> +
> + files_manage_generic_tmp_files(hadoop_$1_t)
> + files_manage_generic_tmp_dirs(hadoop_$1_t)
> + files_read_etc_files(hadoop_$1_t)
> + files_read_var_lib_files(hadoop_$1_t)
> + files_search_pids(hadoop_$1_t)
> +
> + hadoop_rx_etc(hadoop_$1_t)
> +
> + java_exec(hadoop_$1_t)
> +
> + kernel_read_network_state(hadoop_$1_t)
> + kernel_read_system_state(hadoop_$1_t)
> +
> + logging_search_logs(hadoop_$1_t)
> +
> + manage_dirs_pattern(hadoop_$1_t, hadoop_$1_var_lib_t, hadoop_$1_var_lib_t)
> + manage_files_pattern(hadoop_$1_t, hadoop_$1_log_t, hadoop_$1_log_t)
> + manage_files_pattern(hadoop_$1_t, hadoop_$1_var_lib_t, hadoop_$1_var_lib_t)
> + manage_files_pattern(hadoop_$1_t, hadoop_$1_tmp_t, hadoop_$1_tmp_t)
> + miscfiles_read_localization(hadoop_$1_t)
> +
> + optional_policy(`
> + nscd_socket_use(hadoop_$1_t)
> + ')
> +
> + sysnet_read_config(hadoop_$1_t)
> +
> + allow hadoop_$1_t self:tcp_socket create_stream_socket_perms;
> + corenet_all_recvfrom_unlabeled(hadoop_$1_t)
> + corenet_all_recvfrom_netlabel(hadoop_$1_t)
> + corenet_tcp_bind_all_nodes(hadoop_$1_t)
> + corenet_tcp_sendrecv_generic_if(hadoop_$1_t)
> + corenet_tcp_sendrecv_generic_node(hadoop_$1_t)
> + corenet_tcp_sendrecv_all_ports(hadoop_$1_t)
> + corenet_tcp_connect_generic_port(hadoop_$1_t)
> +
> + allow hadoop_$1_t self:udp_socket create_socket_perms;
> + corenet_udp_sendrecv_generic_if(hadoop_$1_t)
> + corenet_udp_sendrecv_all_nodes(hadoop_$1_t)
> + corenet_udp_bind_all_nodes(hadoop_$1_t)
> +
> + # This can be removed on anything post-el5
> + libs_use_ld_so(hadoop_$1_t)
> + libs_use_shared_libs(hadoop_$1_t)
> +')
> +
> +########################################
> +## <summary>
> +## Execute hadoop in the
> +## hadoop domain.
> +## </summary>
> +## <param name="domain">
> +## <summary>
> +## Domain allowed to transition.
> +## </summary>
> +## </param>
> +#
> +interface(`hadoop_domtrans',`
> + gen_require(`
> + type hadoop_t, hadoop_exec_t;
> + ')
> +
> + files_search_usr($1)
> + libs_search_lib($1)
> + domtrans_pattern($1, hadoop_exec_t, hadoop_t)
> +')
> +
> +########################################
> +## <summary>
> +## Execute hadoop in the hadoop domain,
> +## and allow the specified role the
> +## hadoop domain.
> +## </summary>
> +## <param name="domain">
> +## <summary>
> +## Domain allowed to transition.
> +## </summary>
> +## </param>
> +## <param name="role">
> +## <summary>
> +## Role allowed access.
> +## </summary>
> +## </param>
> +## <rolecap/>
> +#
> +interface(`hadoop_run',`
> + gen_require(`
> + type hadoop_t;
> + ')
> +
> + hadoop_domtrans($1)
> + role $2 types hadoop_t;
> +
> + allow $1 hadoop_t:process { ptrace signal_perms };
> + ps_process_pattern($1, hadoop_t)
> +')
> +
> +########################################
> +## <summary>
> +## Execute zookeeper client in the
> +## zookeeper client domain.
> +## </summary>
> +## <param name="domain">
> +## <summary>
> +## Domain allowed to transition.
> +## </summary>
> +## </param>
> +#
> +interface(`zookeeper_domtrans_client',`
> + gen_require(`
> + type zookeeper_t, zookeeper_exec_t;
> + ')
> +
> + corecmd_search_bin($1)
> + files_search_usr($1)
> + domtrans_pattern($1, zookeeper_exec_t, zookeeper_t)
> +')
> +
> +########################################
> +## <summary>
> +## Execute zookeeper server in the
> +## zookeeper server domain.
> +## </summary>
> +## <param name="domain">
> +## <summary>
> +## Domain allowed to transition.
> +## </summary>
> +## </param>
> +#
> +interface(`zookeeper_domtrans_server',`
> + gen_require(`
> + type zookeeper_server_t, zookeeper_server_exec_t;
> + ')
> +
> + corecmd_search_bin($1)
> + files_search_usr($1)
> + domtrans_pattern($1, zookeeper_server_exec_t, zookeeper_server_t)
> +')
> +
> +########################################
> +## <summary>
> +## Execute zookeeper server in the
> +## zookeeper domain.
> +## </summary>
> +## <param name="domain">
> +## <summary>
> +## Domain allowed to transition.
> +## </summary>
> +## </param>
> +#
> +interface(`zookeeper_initrc_domtrans_server',`
> + gen_require(`
> + type zookeeper_server_initrc_exec_t;
> + ')
> +
> + init_labeled_script_domtrans($1, zookeeper_server_initrc_exec_t)
> +')
> +
> +########################################
> +## <summary>
> +## Execute zookeeper client in the
> +## zookeeper client domain, and allow the
> +## specified role the zookeeper client domain.
> +## </summary>
> +## <param name="domain">
> +## <summary>
> +## Domain allowed to transition.
> +## </summary>
> +## </param>
> +## <param name="role">
> +## <summary>
> +## Role allowed access.
> +## </summary>
> +## </param>
> +## <rolecap/>
> +#
> +interface(`zookeeper_run_client',`
> + gen_require(`
> + type zookeeper_t;
> + ')
> +
> + zookeeper_domtrans_client($1)
> + role $2 types zookeeper_t;
> +
> + allow $1 zookeeper_t:process { ptrace signal_perms };
> + ps_process_pattern($1, zookeeper_t)
> +')
> +
> +########################################
> +## <summary>
> +## Give permission to a domain to access hadoop_etc_t
> +## </summary>
> +## <param name="domain">
> +## <summary>
> +## Domain needing read and execute permission
> +## </summary>
> +## </param>
> +#
> +interface(`hadoop_rx_etc', `
> + gen_require(`
> + type hadoop_etc_t;
> + ')
> +
> + allow $1 hadoop_etc_t:dir search_dir_perms;
> + allow $1 hadoop_etc_t:lnk_file { read getattr };
> + allow $1 hadoop_etc_t:file { read_file_perms execute execute_no_trans};
> +')
> diff --git a/policy/modules/services/hadoop.te b/policy/modules/services/hadoop.te
> new file mode 100644
> index 0000000..1a573ea
> --- /dev/null
> +++ b/policy/modules/services/hadoop.te
> @@ -0,0 +1,367 @@
> +policy_module(hadoop, 1.0.0)
> +
> +########################################
> +#
> +# Hadoop declarations.
> +#
> +
> +attribute hadoop_domain;
> +
> +type hadoop_t;
> +type hadoop_exec_t;
> +application_domain(hadoop_t, hadoop_exec_t)
> +ubac_constrained(hadoop_t)
> +
> +type hadoop_etc_t;
> +files_config_file(hadoop_etc_t)
> +
> +type hadoop_var_lib_t;
> +files_type(hadoop_var_lib_t)
> +
> +type hadoop_log_t;
> +logging_log_file(hadoop_log_t)
> +
> +type hadoop_var_run_t;
> +files_pid_file(hadoop_var_run_t)
> +
> +type hadoop_tmp_t;
> +files_tmp_file(hadoop_tmp_t)
> +ubac_constrained(hadoop_tmp_t)
> +
> +hadoop_domain_template(datanode)
> +hadoop_domain_template(jobtracker)
> +hadoop_domain_template(namenode)
> +hadoop_domain_template(secondarynamenode)
> +hadoop_domain_template(tasktracker)
> +
> +########################################
> +#
> +# Hadoop zookeeper client declarations.
> +#
> +
> +type zookeeper_t;
> +type zookeeper_exec_t;
> +application_domain(zookeeper_t, zookeeper_exec_t)
> +ubac_constrained(zookeeper_t)
> +
> +type zookeeper_etc_t;
> +files_config_file(zookeeper_etc_t)
> +
> +type zookeeper_log_t;
> +logging_log_file(zookeeper_log_t)
> +
> +type zookeeper_tmp_t;
> +files_tmp_file(zookeeper_tmp_t)
> +ubac_constrained(zookeeper_tmp_t)
> +
> +########################################
> +#
> +# Hadoop zookeeper server declarations.
> +#
> +
> +type zookeeper_server_t;
> +type zookeeper_server_exec_t;
> +init_daemon_domain(zookeeper_server_t, zookeeper_server_exec_t)
> +
> +type zookeeper_server_initrc_exec_t;
> +init_script_file(zookeeper_server_initrc_exec_t)
> +
> +type zookeeper_server_var_t;
> +files_type(zookeeper_server_var_t)
> +
> +# This will need a file context specification.
> +type zookeeper_server_var_run_t;
> +files_pid_file(zookeeper_server_var_run_t)
> +
> +type zookeeper_server_tmp_t;
> +files_tmp_file(zookeeper_server_tmp_t)
> +
> +########################################
> +#
> +# Hadoop policy.
> +#
> +
> +allow hadoop_t self:capability sys_resource;
> +allow hadoop_t self:process { getsched setsched signal signull setrlimit execmem };
> +allow hadoop_t self:fifo_file rw_fifo_file_perms;
> +allow hadoop_t self:key write;
> +allow hadoop_t self:tcp_socket create_stream_socket_perms;
> +allow hadoop_t self:udp_socket create_socket_perms;
> +allow hadoop_t hadoop_domain:process signull;
> +
> +dontaudit hadoop_t self:netlink_route_socket rw_netlink_socket_perms;
> +
> +read_files_pattern(hadoop_t, hadoop_etc_t, hadoop_etc_t)
> +read_lnk_files_pattern(hadoop_t, hadoop_etc_t, hadoop_etc_t)
> +can_exec(hadoop_t, hadoop_etc_t)
> +
> +manage_dirs_pattern(hadoop_t, hadoop_var_lib_t, hadoop_var_lib_t)
> +manage_dirs_pattern(hadoop_t, hadoop_log_t, hadoop_log_t)
> +manage_dirs_pattern(hadoop_t, hadoop_tmp_t, hadoop_tmp_t)
> +manage_files_pattern(hadoop_t, hadoop_var_lib_t, hadoop_var_lib_t)
> +manage_files_pattern(hadoop_t, hadoop_tmp_t, hadoop_tmp_t)
> +
> +getattr_dirs_pattern(hadoop_t, hadoop_var_run_t, hadoop_var_run_t)
> +
> +files_tmp_filetrans(hadoop_t, hadoop_tmp_t, file)
> +files_manage_generic_tmp_dirs(hadoop_t)
> +
> +kernel_read_network_state(hadoop_t)
> +kernel_read_system_state(hadoop_t)
> +
> +corecmd_exec_bin(hadoop_t)
> +corecmd_exec_shell(hadoop_t)
> +
> +corenet_all_recvfrom_unlabeled(hadoop_t)
> +corenet_all_recvfrom_netlabel(hadoop_t)
> +corenet_sendrecv_hadoop_namenode_client_packets(hadoop_t)
> +corenet_sendrecv_portmap_client_packets(hadoop_t)
> +corenet_sendrecv_zope_client_packets(hadoop_t)
> +corenet_tcp_bind_all_nodes(hadoop_t)
> +corenet_tcp_connect_hadoop_namenode_port(hadoop_t)
> +corenet_tcp_connect_portmap_port(hadoop_t)
> +corenet_tcp_connect_zope_port(hadoop_t)
> +corenet_tcp_sendrecv_all_nodes(hadoop_t)
> +corenet_tcp_sendrecv_all_ports(hadoop_t)
> +corenet_tcp_sendrecv_generic_if(hadoop_t)
> +corenet_tcp_connect_generic_port(hadoop_t)
> +corenet_udp_bind_all_nodes(hadoop_t)
> +corenet_udp_sendrecv_all_nodes(hadoop_t)
> +corenet_udp_sendrecv_all_ports(hadoop_t)
> +corenet_udp_sendrecv_generic_if(hadoop_t)
> +
> +dev_read_rand(hadoop_t)
> +dev_read_sysfs(hadoop_t)
> +dev_read_urand(hadoop_t)
> +
> +files_dontaudit_search_spool(hadoop_t)
> +files_read_usr_files(hadoop_t)
> +files_read_all_files(hadoop_t)
> +
> +fs_getattr_xattr_fs(hadoop_t)
> +
> +java_exec(hadoop_t)
> +
> +# This can be removed on anything post-el5
> +libs_use_ld_so(hadoop_t)
> +libs_use_shared_libs(hadoop_t)
> +
> +miscfiles_read_localization(hadoop_t)
> +
> +userdom_dontaudit_search_user_home_dirs(hadoop_t)
> +userdom_use_user_terminals(hadoop_t)
> +
> +optional_policy(`
> + nis_use_ypbind(hadoop_t)
> +')
> +
> +optional_policy(`
> + nscd_socket_use(hadoop_t)
> +')
> +
> +########################################
> +#
> +# Hadoop datanode policy.
> +#
> +
> +corenet_tcp_connect_hadoop_namenode_port(hadoop_datanode_t)
> +fs_getattr_xattr_fs(hadoop_datanode_t)
> +manage_dirs_pattern(hadoop_datanode_t, hadoop_var_lib_t, hadoop_var_lib_t)
> +
> +########################################
> +#
> +# Hadoop jobtracker policy.
> +#
> +
> +corenet_tcp_bind_zope_port(hadoop_jobtracker_t)
> +corenet_tcp_connect_hadoop_namenode_port(hadoop_jobtracker_t)
> +create_dirs_pattern(hadoop_jobtracker_t, hadoop_jobtracker_log_t, hadoop_jobtracker_log_t)
> +manage_dirs_pattern(hadoop_jobtracker_t, hadoop_var_lib_t, hadoop_var_lib_t)
> +
> +########################################
> +#
> +# Hadoop namenode policy.
> +#
> +
> +corenet_tcp_bind_hadoop_namenode_port(hadoop_namenode_t)
> +corenet_tcp_connect_hadoop_namenode_port(hadoop_namenode_t)
> +manage_dirs_pattern(hadoop_namenode_t, hadoop_var_lib_t, hadoop_var_lib_t)
> +manage_files_pattern(hadoop_namenode_t, hadoop_var_lib_t, hadoop_var_lib_t)
> +
> +########################################
> +#
> +# Hadoop secondary namenode policy.
> +#
> +
> +corenet_tcp_connect_hadoop_namenode_port(hadoop_secondarynamenode_t)
> +manage_dirs_pattern(hadoop_secondarynamenode_t, hadoop_var_lib_t, hadoop_var_lib_t)
> +
> +########################################
> +#
> +# Hadoop tasktracker policy.
> +#
> +
> +allow hadoop_tasktracker_t self:process signal;
> +
> +corenet_tcp_connect_zope_port(hadoop_tasktracker_t)
> +corenet_tcp_connect_hadoop_namenode_port(hadoop_tasktracker_t)
> +
> +filetrans_pattern(hadoop_tasktracker_t, hadoop_log_t, hadoop_tasktracker_log_t, dir)
> +fs_getattr_xattr_fs(hadoop_tasktracker_t)
> +fs_associate(hadoop_tasktracker_t)
> +
> +manage_dirs_pattern(hadoop_tasktracker_t, hadoop_var_lib_t, hadoop_var_lib_t)
> +manage_dirs_pattern(hadoop_tasktracker_t, hadoop_tasktracker_log_t, hadoop_tasktracker_log_t);
> +
> +########################################
> +#
> +# Hadoop zookeeper client policy.
> +#
> +
> +allow zookeeper_t self:process { getsched sigkill signal signull execmem };
> +allow zookeeper_t self:fifo_file rw_fifo_file_perms;
> +allow zookeeper_t self:tcp_socket create_stream_socket_perms;
> +allow zookeeper_t self:udp_socket create_socket_perms;
> +allow zookeeper_t zookeeper_server_t:process signull;
> +
> +read_files_pattern(zookeeper_t, zookeeper_etc_t, zookeeper_etc_t)
> +read_lnk_files_pattern(zookeeper_t, zookeeper_etc_t, zookeeper_etc_t)
> +
> +setattr_dirs_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
> +append_files_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
> +create_files_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
> +read_files_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
> +setattr_files_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
> +logging_log_filetrans(zookeeper_t, zookeeper_log_t, file)
> +
> +manage_files_pattern(zookeeper_t, zookeeper_tmp_t, zookeeper_tmp_t)
> +files_tmp_filetrans(zookeeper_t, zookeeper_tmp_t, file)
> +files_manage_generic_tmp_dirs(zookeeper_t)
> +
> +can_exec(zookeeper_t, zookeeper_exec_t)
> +
> +kernel_read_network_state(zookeeper_t)
> +kernel_read_system_state(zookeeper_t)
> +
> +corecmd_exec_bin(zookeeper_t)
> +corecmd_exec_shell(zookeeper_t)
> +
> +dontaudit zookeeper_t self:netlink_route_socket rw_netlink_socket_perms;
> +
> +corenet_all_recvfrom_unlabeled(zookeeper_t)
> +corenet_all_recvfrom_netlabel(zookeeper_t)
> +corenet_sendrecv_zookeeper_client_client_packets(zookeeper_t)
> +corenet_tcp_bind_all_nodes(zookeeper_t)
> +corenet_tcp_connect_zookeeper_client_port(zookeeper_t)
> +corenet_tcp_sendrecv_all_nodes(zookeeper_t)
> +corenet_tcp_sendrecv_all_ports(zookeeper_t)
> +corenet_tcp_sendrecv_generic_if(zookeeper_t)
> +corenet_tcp_connect_generic_port(zookeeper_t)
> +corenet_udp_bind_all_nodes(zookeeper_t)
> +corenet_udp_sendrecv_all_nodes(zookeeper_t)
> +corenet_udp_sendrecv_all_ports(zookeeper_t)
> +corenet_udp_sendrecv_generic_if(zookeeper_t)
> +
> +dev_read_rand(zookeeper_t)
> +dev_read_sysfs(zookeeper_t)
> +dev_read_urand(zookeeper_t)
> +
> +files_read_etc_files(zookeeper_t)
> +files_read_usr_files(zookeeper_t)
> +
> +# This can be removed on anything post-el5
> +libs_use_ld_so(zookeeper_t)
> +libs_use_shared_libs(zookeeper_t)
> +
> +miscfiles_read_localization(zookeeper_t)
> +
> +sysnet_read_config(zookeeper_t)
> +
> +userdom_dontaudit_search_user_home_dirs(zookeeper_t)
> +userdom_use_user_terminals(zookeeper_t)
> +
> +java_exec(zookeeper_t)
> +
> +optional_policy(`
> + nscd_socket_use(zookeeper_t)
> +')
> +
> +########################################
> +#
> +# Hadoop zookeeper server policy.
> +#
> +
> +allow zookeeper_server_t self:capability kill;
> +allow zookeeper_server_t self:process { execmem getsched sigkill signal signull };
> +allow zookeeper_server_t self:fifo_file rw_fifo_file_perms;
> +allow zookeeper_server_t self:netlink_route_socket rw_netlink_socket_perms;
> +allow zookeeper_server_t self:tcp_socket create_stream_socket_perms;
> +
> +read_files_pattern(zookeeper_server_t, zookeeper_etc_t, zookeeper_etc_t)
> +read_lnk_files_pattern(zookeeper_server_t, zookeeper_etc_t, zookeeper_etc_t)
> +
> +manage_dirs_pattern(zookeeper_server_t, zookeeper_server_var_t, zookeeper_server_var_t)
> +manage_files_pattern(zookeeper_server_t, zookeeper_server_var_t, zookeeper_server_var_t)
> +files_var_lib_filetrans(zookeeper_server_t, zookeeper_server_var_t, { dir file })
> +
> +setattr_dirs_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
> +append_files_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
> +create_files_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
> +read_files_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
> +setattr_files_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
> +logging_log_filetrans(zookeeper_server_t, zookeeper_log_t, file)
> +
> +manage_files_pattern(zookeeper_server_t, zookeeper_server_var_run_t, zookeeper_server_var_run_t)
> +files_pid_filetrans(zookeeper_server_t, zookeeper_server_var_run_t, file)
> +
> +manage_files_pattern(zookeeper_server_t, zookeeper_server_tmp_t, zookeeper_server_tmp_t)
> +files_tmp_filetrans(zookeeper_server_t, zookeeper_server_tmp_t, file)
> +files_manage_generic_tmp_files(zookeeper_server_t)
> +files_manage_generic_tmp_dirs(zookeeper_server_t)
> +
> +can_exec(zookeeper_server_t, zookeeper_server_exec_t)
> +
> +kernel_read_network_state(zookeeper_server_t)
> +kernel_read_system_state(zookeeper_server_t)
> +
> +corecmd_exec_bin(zookeeper_server_t)
> +corecmd_exec_shell(zookeeper_server_t)
> +
> +corenet_all_recvfrom_unlabeled(zookeeper_server_t)
> +corenet_all_recvfrom_netlabel(zookeeper_server_t)
> +corenet_sendrecv_zookeeper_election_client_packets(zookeeper_server_t)
> +corenet_sendrecv_zookeeper_leader_client_packets(zookeeper_server_t)
> +corenet_sendrecv_zookeeper_client_server_packets(zookeeper_server_t)
> +corenet_sendrecv_zookeeper_election_server_packets(zookeeper_server_t)
> +corenet_sendrecv_zookeeper_leader_server_packets(zookeeper_server_t)
> +corenet_tcp_bind_all_nodes(zookeeper_server_t)
> +corenet_tcp_bind_zookeeper_client_port(zookeeper_server_t)
> +corenet_tcp_bind_zookeeper_election_port(zookeeper_server_t)
> +corenet_tcp_bind_zookeeper_leader_port(zookeeper_server_t)
> +corenet_tcp_connect_zookeeper_election_port(zookeeper_server_t)
> +corenet_tcp_connect_zookeeper_leader_port(zookeeper_server_t)
> +corenet_tcp_sendrecv_generic_if(zookeeper_server_t)
> +corenet_tcp_sendrecv_generic_node(zookeeper_server_t)
> +corenet_tcp_sendrecv_all_ports(zookeeper_server_t)
> +corenet_tcp_connect_generic_port(zookeeper_server_t)
> +
> +dev_read_rand(zookeeper_server_t)
> +dev_read_sysfs(zookeeper_server_t)
> +dev_read_urand(zookeeper_server_t)
> +
> +files_read_etc_files(zookeeper_server_t)
> +files_read_usr_files(zookeeper_server_t)
> +
> +fs_getattr_xattr_fs(zookeeper_server_t)
> +
> +# This can be removed on anything post-el5
> +libs_use_ld_so(zookeeper_server_t)
> +libs_use_shared_libs(zookeeper_server_t)
> +
> +logging_send_syslog_msg(zookeeper_server_t)
> +
> +miscfiles_read_localization(zookeeper_server_t)
> +
> +sysnet_read_config(zookeeper_server_t)
> +
> +java_exec(zookeeper_server_t)
> diff --git a/policy/modules/system/unconfined.te b/policy/modules/system/unconfined.te
> index f976344..ac27264 100644
> --- a/policy/modules/system/unconfined.te
> +++ b/policy/modules/system/unconfined.te
> @@ -118,6 +118,10 @@ optional_policy(`
> ')
>
> optional_policy(`
> + hadoop_run(unconfined_t, unconfined_r)
> +')
> +
> +optional_policy(`
> inn_domtrans(unconfined_t)
> ')
>
> @@ -210,6 +214,10 @@ optional_policy(`
> xserver_domtrans(unconfined_t)
> ')
>
> +optional_policy(`
> + zookeeper_run_client(unconfined_t, unconfined_r)
> +')
> +
> ########################################
> #
> # Unconfined Execmem Local policy
> _______________________________________________
> refpolicy mailing list
> refpolicy at oss.tresys.com
> http://oss.tresys.com/mailman/listinfo/refpolicy
-------------- next part --------------
A non-text attachment was scrubbed...
Name: not available
Type: application/pgp-signature
Size: 198 bytes
Desc: not available
Url : http://oss.tresys.com/pipermail/refpolicy/attachments/20101001/232fbbe6/attachment-0001.bin
On 10/01/2010 08:02 AM, Dominick Grift wrote:
> On Thu, Sep 30, 2010 at 03:39:40PM -0400, Paul Nuzzi wrote:
>> I updated the patch based on recommendations from the mailing list.
>> All of hadoop's services are included in one module instead of
>> individual ones. Unconfined and sysadm roles are given access to
>> hadoop and zookeeper client domain transitions. The services are started
>> using run_init. Let me know what you think.
>
> Why do some hadoop domains need to manage generic tmp?
>
> files_manage_generic_tmp_dirs(zookeeper_t)
> files_manage_generic_tmp_dirs(hadoop_t)
> files_manage_generic_tmp_dirs(hadoop_$1_initrc_t)
> files_manage_generic_tmp_files(hadoop_$1_initrc_t)
> files_manage_generic_tmp_files(hadoop_$1_t)
> files_manage_generic_tmp_dirs(hadoop_$1_t)
This has to be done for Java JMX to work. All of the files are written to
/tmp/hsperfdata_(hadoop/zookeeper). /tmp/hsperfdata_ is labeled tmp_t while
all the files for each service are labeled with hadoop_*_tmp_t. The first service
will end up owning the directory if it is not labeled tmp_t.
> You probably need:
>
> files_search_pids() and files_search_locks() for hadoop_$1_initrc_t
> because it needs to traverse /var/run and /var/lock/subsys to be able to manage its objects there.
> Can use rw_fifo_file_perms here:
>
> allow hadoop_$1_initrc_t self:fifo_file { read write getattr ioctl };
>
> Might want to split this into hadoop_read_config_files and hadoop_exec_config_files.
>
> hadoop_rx_etc(hadoop_$1_initrc_t)
>
> This seems wrong. Why does it need that? Use files_search_var_lib() if possible:
>
> files_read_var_lib_files(hadoop_$1_t)
>
> This is not a declaration and might want to use filetrans_pattern() instead:
>
> type_transition hadoop_$1_initrc_t hadoop_var_run_t:file hadoop_$1_initrc_var_run_t;
Changed. Thanks for the comments.
> Other than the above, there are some style issues:
>
> http://oss.tresys.com/projects/refpolicy/wiki/StyleGuide
>
> But I can help clean that up once the above issues are resolved.
>
Is there a style checking script for refpolicy patches similar to the Linux kernel?
Signed-off-by: Paul Nuzzi <[email protected]>
---
policy/modules/kernel/corenetwork.te.in | 4
policy/modules/roles/sysadm.te | 8
policy/modules/services/hadoop.fc | 53 ++++
policy/modules/services/hadoop.if | 360 ++++++++++++++++++++++++++++++++
policy/modules/services/hadoop.te | 360 ++++++++++++++++++++++++++++++++
policy/modules/system/unconfined.te | 8
6 files changed, 793 insertions(+)
diff --git a/policy/modules/kernel/corenetwork.te.in b/policy/modules/kernel/corenetwork.te.in
index 2ecdde8..7a1b5de 100644
--- a/policy/modules/kernel/corenetwork.te.in
+++ b/policy/modules/kernel/corenetwork.te.in
@@ -105,6 +105,7 @@ network_port(giftd, tcp,1213,s0)
network_port(git, tcp,9418,s0, udp,9418,s0)
network_port(gopher, tcp,70,s0, udp,70,s0)
network_port(gpsd, tcp,2947,s0)
+network_port(hadoop_namenode, tcp, 8020,s0)
network_port(hddtemp, tcp,7634,s0)
network_port(howl, tcp,5335,s0, udp,5353,s0)
network_port(hplip, tcp,1782,s0, tcp,2207,s0, tcp,2208,s0, tcp, 8290,s0, tcp,50000,s0, tcp,50002,s0, tcp,8292,s0, tcp,9100,s0, tcp,9101,s0, tcp,9102,s0, tcp,9220,s0, tcp,9221,s0, tcp,9222,s0, tcp,9280,s0, tcp,9281,s0, tcp,9282,s0, tcp,9290,s0, tcp,9291,s0, tcp,9292,s0)
@@ -211,6 +212,9 @@ network_port(xdmcp, udp,177,s0, tcp,177,s0)
network_port(xen, tcp,8002,s0)
network_port(xfs, tcp,7100,s0)
network_port(xserver, tcp,6000-6020,s0)
+network_port(zookeeper_client, tcp, 2181,s0)
+network_port(zookeeper_election, tcp, 3888,s0)
+network_port(zookeeper_leader, tcp, 2888,s0)
network_port(zebra, tcp,2600-2604,s0, tcp,2606,s0, udp,2600-2604,s0, udp,2606,s0)
network_port(zope, tcp,8021,s0)
diff --git a/policy/modules/roles/sysadm.te b/policy/modules/roles/sysadm.te
index cad05ff..b46b28b 100644
--- a/policy/modules/roles/sysadm.te
+++ b/policy/modules/roles/sysadm.te
@@ -152,6 +152,10 @@ optional_policy(`
')
optional_policy(`
+ hadoop_run(sysadm_t, sysadm_r)
+')
+
+optional_policy(`
# allow system administrator to use the ipsec script to look
# at things (e.g., ipsec auto --status)
# probably should create an ipsec_admin role for this kind of thing
@@ -392,6 +396,10 @@ optional_policy(`
yam_run(sysadm_t, sysadm_r)
')
+optional_policy(`
+ zookeeper_run_client(sysadm_t, sysadm_r)
+')
+
ifndef(`distro_redhat',`
optional_policy(`
auth_role(sysadm_r, sysadm_t)
diff --git a/policy/modules/services/hadoop.fc b/policy/modules/services/hadoop.fc
new file mode 100644
index 0000000..5bdd554
--- /dev/null
+++ b/policy/modules/services/hadoop.fc
@@ -0,0 +1,53 @@
+/etc/hadoop.*(/.*)? gen_context(system_u:object_r:hadoop_etc_t,s0)
+
+/etc/rc\.d/init\.d/hadoop-(.*)?-datanode -- gen_context(system_u:object_r:hadoop_datanode_initrc_exec_t,s0)
+/etc/rc\.d/init\.d/hadoop-(.*)?-jobtracker -- gen_context(system_u:object_r:hadoop_jobtracker_initrc_exec_t,s0)
+/etc/rc\.d/init\.d/hadoop-(.*)?-namenode -- gen_context(system_u:object_r:hadoop_namenode_initrc_exec_t,s0)
+/etc/rc\.d/init\.d/hadoop-(.*)?-secondarynamenode -- gen_context(system_u:object_r:hadoop_secondarynamenode_initrc_exec_t,s0)
+/etc/rc\.d/init\.d/hadoop-(.*)?-tasktracker -- gen_context(system_u:object_r:hadoop_tasktracker_initrc_exec_t,s0)
+/etc/rc\.d/init\.d/hadoop-zookeeper -- gen_context(system_u:object_r:zookeeper_server_initrc_exec_t,s0)
+/etc/init\.d/hadoop-datanode -- gen_context(system_u:object_r:hadoop_datanode_initrc_exec_t,s0)
+/etc/init\.d/hadoop-jobtracker -- gen_context(system_u:object_r:hadoop_jobtracker_initrc_exec_t,s0)
+/etc/init\.d/hadoop-namenode -- gen_context(system_u:object_r:hadoop_namenode_initrc_exec_t,s0)
+/etc/init\.d/hadoop-secondarynamenode -- gen_context(system_u:object_r:hadoop_secondarynamenode_initrc_exec_t,s0)
+/etc/init\.d/hadoop-tasktracker -- gen_context(system_u:object_r:hadoop_tasktracker_initrc_exec_t,s0)
+/etc/init\.d/zookeeper -- gen_context(system_u:object_r:zookeeper_server_initrc_exec_t,s0)
+
+/etc/zookeeper(/.*)? gen_context(system_u:object_r:zookeeper_etc_t,s0)
+/etc/zookeeper\.dist(/.*)? gen_context(system_u:object_r:zookeeper_etc_t,s0)
+
+/usr/lib/hadoop(.*)?/bin/hadoop -- gen_context(system_u:object_r:hadoop_exec_t,s0)
+
+/usr/bin/zookeeper-client -- gen_context(system_u:object_r:zookeeper_exec_t,s0)
+/usr/bin/zookeeper-server -- gen_context(system_u:object_r:zookeeper_server_exec_t,s0)
+
+/var/zookeeper(/.*)? gen_context(system_u:object_r:zookeeper_server_var_t,s0)
+/var/lib/zookeeper(/.*)? gen_context(system_u:object_r:zookeeper_server_var_t,s0)
+
+/var/lib/hadoop(.*)? gen_context(system_u:object_r:hadoop_var_lib_t,s0)
+/var/lib/hadoop(.*)?/cache/hadoop/dfs/data(/.*)? gen_context(system_u:object_r:hadoop_datanode_var_lib_t,s0)
+/var/lib/hadoop(.*)?/cache/hadoop/dfs/name(/.*)? gen_context(system_u:object_r:hadoop_namenode_var_lib_t,s0)
+/var/lib/hadoop(.*)?/cache/hadoop/dfs/namesecondary(/.*)? gen_context(system_u:object_r:hadoop_secondarynamenode_var_lib_t,s0)
+/var/lib/hadoop(.*)?/cache/hadoop/mapred/local/jobTracker(/.*)? gen_context(system_u:object_r:hadoop_jobtracker_var_lib_t,s0)
+/var/lib/hadoop(.*)?/cache/hadoop/mapred/local/taskTracker(/.*)? gen_context(system_u:object_r:hadoop_tasktracker_var_lib_t,s0)
+
+/var/lock/subsys/hadoop-datanode -- gen_context(system_u:object_r:hadoop_datanode_lock_t,s0)
+/var/lock/subsys/hadoop-namenode -- gen_context(system_u:object_r:hadoop_namenode_lock_t,s0)
+/var/lock/subsys/hadoop-jobtracker -- gen_context(system_u:object_r:hadoop_jobtracker_lock_t,s0)
+/var/lock/subsys/hadoop-tasktracker -- gen_context(system_u:object_r:hadoop_tasktracker_lock_t,s0)
+/var/lock/subsys/hadoop-secondarynamenode -- gen_context(system_u:object_r:hadoop_secondarynamenode_lock_t,s0)
+
+/var/log/hadoop(.*)? gen_context(system_u:object_r:hadoop_log_t,s0)
+/var/log/hadoop(.*)?/hadoop-hadoop-datanode-(.*)? gen_context(system_u:object_r:hadoop_datanode_log_t,s0)
+/var/log/hadoop(.*)?/hadoop-hadoop-jobtracker-(.*)? gen_context(system_u:object_r:hadoop_jobtracker_log_t,s0)
+/var/log/hadoop(.*)?/hadoop-hadoop-namenode-(.*)? gen_context(system_u:object_r:hadoop_namenode_log_t,s0)
+/var/log/hadoop(.*)?/hadoop-hadoop-secondarynamenode-(.*)? gen_context(system_u:object_r:hadoop_secondarynamenode_log_t,s0)
+/var/log/hadoop(.*)?/hadoop-hadoop-tasktracker-(.*)? gen_context(system_u:object_r:hadoop_tasktracker_log_t,s0)
+/var/log/zookeeper(/.*)? gen_context(system_u:object_r:zookeeper_log_t,s0)
+
+/var/run/hadoop(.*)? -d gen_context(system_u:object_r:hadoop_var_run_t,s0)
+/var/run/hadoop(.*)?/hadoop-hadoop-datanode.pid -- gen_context(system_u:object_r:hadoop_datanode_initrc_var_run_t,s0)
+/var/run/hadoop(.*)?/hadoop-hadoop-namenode.pid -- gen_context(system_u:object_r:hadoop_namenode_initrc_var_run_t,s0)
+/var/run/hadoop(.*)?/hadoop-hadoop-jobtracker.pid -- gen_context(system_u:object_r:hadoop_jobtracker_initrc_var_run_t,s0)
+/var/run/hadoop(.*)?/hadoop-hadoop-tasktracker.pid -- gen_context(system_u:object_r:hadoop_tasktracker_initrc_var_run_t,s0)
+/var/run/hadoop(.*)?/hadoop-hadoop-secondarynamenode.pid -- gen_context(system_u:object_r:hadoop_secondarynamenode_initrc_var_run_t,s0)
diff --git a/policy/modules/services/hadoop.if b/policy/modules/services/hadoop.if
new file mode 100644
index 0000000..051e68c
--- /dev/null
+++ b/policy/modules/services/hadoop.if
@@ -0,0 +1,360 @@
+## <summary>Software for reliable, scalable, distributed computing.</summary>
+
+#######################################
+## <summary>
+## The template to define a hadoop domain.
+## </summary>
+## <param name="domain_prefix">
+## <summary>
+## Domain prefix to be used.
+## </summary>
+## </param>
+#
+template(`hadoop_domain_template',`
+ gen_require(`
+ attribute hadoop_domain;
+ type hadoop_log_t, hadoop_var_lib_t, hadoop_var_run_t;
+ type hadoop_exec_t;
+ ')
+
+ ########################################
+ #
+ # Shared declarations.
+ #
+
+ type hadoop_$1_t, hadoop_domain;
+ domain_type(hadoop_$1_t)
+ domain_entry_file(hadoop_$1_t, hadoop_exec_t)
+
+ type hadoop_$1_initrc_t;
+ type hadoop_$1_initrc_exec_t;
+ init_script_domain(hadoop_$1_initrc_t, hadoop_$1_initrc_exec_t)
+
+ role system_r types { hadoop_$1_initrc_t hadoop_$1_t };
+
+ type hadoop_$1_lock_t;
+ files_lock_file(hadoop_$1_lock_t)
+ files_lock_filetrans(hadoop_$1_initrc_t, hadoop_$1_lock_t, file)
+
+ type hadoop_$1_log_t;
+ logging_log_file(hadoop_$1_log_t)
+ filetrans_pattern(hadoop_$1_initrc_t, hadoop_log_t, hadoop_$1_log_t, {dir file})
+ filetrans_pattern(hadoop_$1_t, hadoop_log_t, hadoop_$1_log_t, {dir file})
+
+ type hadoop_$1_var_lib_t;
+ files_type(hadoop_$1_var_lib_t)
+ filetrans_pattern(hadoop_$1_t, hadoop_var_lib_t, hadoop_$1_var_lib_t, file)
+
+ type hadoop_$1_initrc_var_run_t;
+ files_pid_file(hadoop_$1_initrc_var_run_t)
+ filetrans_pattern(hadoop_$1_initrc_t, hadoop_var_run_t, hadoop_$1_initrc_var_run_t, file)
+
+ type hadoop_$1_tmp_t;
+ files_tmp_file(hadoop_$1_tmp_t)
+ files_tmp_filetrans(hadoop_$1_t, hadoop_$1_tmp_t, file)
+
+ ####################################
+ #
+ # Shared hadoop_$1 initrc policy.
+ #
+
+ allow hadoop_$1_initrc_t hadoop_$1_t:process { signal signull };
+ allow hadoop_$1_initrc_t self:capability { setuid setgid };
+ allow hadoop_$1_initrc_t self:fifo_file rw_fifo_file_perms;
+ allow hadoop_$1_initrc_t self:process setsched;
+
+ consoletype_exec(hadoop_$1_initrc_t)
+ corecmd_exec_bin(hadoop_$1_initrc_t)
+ corecmd_exec_shell(hadoop_$1_initrc_t)
+
+ domtrans_pattern(hadoop_$1_initrc_t, hadoop_exec_t, hadoop_$1_t)
+ dontaudit hadoop_$1_initrc_t self:capability sys_tty_config;
+
+ files_read_etc_files(hadoop_$1_initrc_t)
+ files_read_usr_files(hadoop_$1_initrc_t)
+ files_manage_generic_tmp_dirs(hadoop_$1_initrc_t)
+ files_manage_generic_tmp_files(hadoop_$1_initrc_t)
+ files_search_pids(hadoop_$1_initrc_t)
+ files_search_locks(hadoop_$1_initrc_t)
+ fs_getattr_xattr_fs(hadoop_$1_initrc_t)
+
+ hadoop_exec_config_files(hadoop_$1_initrc_t)
+
+ init_rw_utmp(hadoop_$1_initrc_t)
+ init_use_script_ptys(hadoop_$1_initrc_t)
+
+ kernel_read_kernel_sysctls(hadoop_$1_initrc_t)
+ kernel_read_sysctl(hadoop_$1_initrc_t)
+ kernel_read_system_state(hadoop_$1_initrc_t)
+
+ logging_send_syslog_msg(hadoop_$1_initrc_t)
+ logging_send_audit_msgs(hadoop_$1_initrc_t)
+ logging_search_logs(hadoop_$1_initrc_t)
+
+ manage_files_pattern(hadoop_$1_initrc_t, hadoop_$1_lock_t, hadoop_$1_lock_t)
+ manage_files_pattern(hadoop_$1_initrc_t, hadoop_$1_initrc_var_run_t, hadoop_$1_initrc_var_run_t)
+ manage_files_pattern(hadoop_$1_initrc_t, hadoop_$1_log_t, hadoop_$1_log_t)
+ manage_dirs_pattern(hadoop_$1_initrc_t, hadoop_var_run_t, hadoop_var_run_t)
+ manage_files_pattern(hadoop_$1_initrc_t, hadoop_var_run_t, hadoop_var_run_t)
+
+ miscfiles_read_localization(hadoop_$1_initrc_t)
+
+ optional_policy(`
+ nscd_socket_use(hadoop_$1_initrc_t)
+ ')
+
+ term_use_generic_ptys(hadoop_$1_initrc_t)
+
+ userdom_dontaudit_search_user_home_dirs(hadoop_$1_initrc_t)
+
+ # This can be removed on anything post-el5
+ libs_use_ld_so(hadoop_$1_initrc_t)
+ libs_use_shared_libs(hadoop_$1_initrc_t)
+
+ ####################################
+ #
+ # Shared hadoop_$1 policy.
+ #
+
+ allow hadoop_$1_t hadoop_domain:process signull;
+ allow hadoop_$1_t self:fifo_file rw_fifo_file_perms;
+ allow hadoop_$1_t self:process execmem;
+ allow hadoop_$1_t hadoop_var_run_t:dir getattr;
+
+ corecmd_exec_bin(hadoop_$1_t)
+ corecmd_exec_shell(hadoop_$1_t)
+
+ dev_read_rand(hadoop_$1_t)
+ dev_read_urand(hadoop_$1_t)
+ dev_read_sysfs(hadoop_$1_t)
+ dontaudit hadoop_$1_t self:netlink_route_socket rw_netlink_socket_perms;
+
+ files_manage_generic_tmp_files(hadoop_$1_t)
+ files_manage_generic_tmp_dirs(hadoop_$1_t)
+ files_read_etc_files(hadoop_$1_t)
+ files_search_pids(hadoop_$1_t)
+ files_search_var_lib(hadoop_$1_t)
+
+ hadoop_exec_config_files(hadoop_$1_t)
+
+ java_exec(hadoop_$1_t)
+
+ kernel_read_network_state(hadoop_$1_t)
+ kernel_read_system_state(hadoop_$1_t)
+
+ logging_search_logs(hadoop_$1_t)
+
+ manage_dirs_pattern(hadoop_$1_t, hadoop_$1_var_lib_t, hadoop_$1_var_lib_t)
+ manage_files_pattern(hadoop_$1_t, hadoop_$1_log_t, hadoop_$1_log_t)
+ manage_files_pattern(hadoop_$1_t, hadoop_$1_var_lib_t, hadoop_$1_var_lib_t)
+ manage_files_pattern(hadoop_$1_t, hadoop_$1_tmp_t, hadoop_$1_tmp_t)
+ miscfiles_read_localization(hadoop_$1_t)
+
+ optional_policy(`
+ nscd_socket_use(hadoop_$1_t)
+ ')
+
+ sysnet_read_config(hadoop_$1_t)
+
+ allow hadoop_$1_t self:tcp_socket create_stream_socket_perms;
+ corenet_all_recvfrom_unlabeled(hadoop_$1_t)
+ corenet_all_recvfrom_netlabel(hadoop_$1_t)
+ corenet_tcp_bind_all_nodes(hadoop_$1_t)
+ corenet_tcp_sendrecv_generic_if(hadoop_$1_t)
+ corenet_tcp_sendrecv_generic_node(hadoop_$1_t)
+ corenet_tcp_sendrecv_all_ports(hadoop_$1_t)
+ corenet_tcp_connect_generic_port(hadoop_$1_t)
+
+ allow hadoop_$1_t self:udp_socket create_socket_perms;
+ corenet_udp_sendrecv_generic_if(hadoop_$1_t)
+ corenet_udp_sendrecv_all_nodes(hadoop_$1_t)
+ corenet_udp_bind_all_nodes(hadoop_$1_t)
+
+ # This can be removed on anything post-el5
+ libs_use_ld_so(hadoop_$1_t)
+ libs_use_shared_libs(hadoop_$1_t)
+')
+
+########################################
+## <summary>
+## Execute hadoop in the
+## hadoop domain.
+## </summary>
+## <param name="domain">
+## <summary>
+## Domain allowed to transition.
+## </summary>
+## </param>
+#
+interface(`hadoop_domtrans',`
+ gen_require(`
+ type hadoop_t, hadoop_exec_t;
+ ')
+
+ files_search_usr($1)
+ libs_search_lib($1)
+ domtrans_pattern($1, hadoop_exec_t, hadoop_t)
+')
+
+########################################
+## <summary>
+## Execute hadoop in the hadoop domain,
+## and allow the specified role the
+## hadoop domain.
+## </summary>
+## <param name="domain">
+## <summary>
+## Domain allowed to transition.
+## </summary>
+## </param>
+## <param name="role">
+## <summary>
+## Role allowed access.
+## </summary>
+## </param>
+## <rolecap/>
+#
+interface(`hadoop_run',`
+ gen_require(`
+ type hadoop_t;
+ ')
+
+ hadoop_domtrans($1)
+ role $2 types hadoop_t;
+
+ allow $1 hadoop_t:process { ptrace signal_perms };
+ ps_process_pattern($1, hadoop_t)
+')
+
+########################################
+## <summary>
+## Execute zookeeper client in the
+## zookeeper client domain.
+## </summary>
+## <param name="domain">
+## <summary>
+## Domain allowed to transition.
+## </summary>
+## </param>
+#
+interface(`zookeeper_domtrans_client',`
+ gen_require(`
+ type zookeeper_t, zookeeper_exec_t;
+ ')
+
+ corecmd_search_bin($1)
+ files_search_usr($1)
+ domtrans_pattern($1, zookeeper_exec_t, zookeeper_t)
+')
+
+########################################
+## <summary>
+## Execute zookeeper server in the
+## zookeeper server domain.
+## </summary>
+## <param name="domain">
+## <summary>
+## Domain allowed to transition.
+## </summary>
+## </param>
+#
+interface(`zookeeper_domtrans_server',`
+ gen_require(`
+ type zookeeper_server_t, zookeeper_server_exec_t;
+ ')
+
+ corecmd_search_bin($1)
+ files_search_usr($1)
+ domtrans_pattern($1, zookeeper_server_exec_t, zookeeper_server_t)
+')
+
+########################################
+## <summary>
+## Execute zookeeper server in the
+## zookeeper domain.
+## </summary>
+## <param name="domain">
+## <summary>
+## Domain allowed to transition.
+## </summary>
+## </param>
+#
+interface(`zookeeper_initrc_domtrans_server',`
+ gen_require(`
+ type zookeeper_server_initrc_exec_t;
+ ')
+
+ init_labeled_script_domtrans($1, zookeeper_server_initrc_exec_t)
+')
+
+########################################
+## <summary>
+## Execute zookeeper client in the
+## zookeeper client domain, and allow the
+## specified role the zookeeper client domain.
+## </summary>
+## <param name="domain">
+## <summary>
+## Domain allowed to transition.
+## </summary>
+## </param>
+## <param name="role">
+## <summary>
+## Role allowed access.
+## </summary>
+## </param>
+## <rolecap/>
+#
+interface(`zookeeper_run_client',`
+ gen_require(`
+ type zookeeper_t;
+ ')
+
+ zookeeper_domtrans_client($1)
+ role $2 types zookeeper_t;
+
+ allow $1 zookeeper_t:process { ptrace signal_perms };
+ ps_process_pattern($1, zookeeper_t)
+')
+
+########################################
+## <summary>
+## Give permission to a domain to read
+## hadoop_etc_t
+## </summary>
+## <param name="domain">
+## <summary>
+## Domain needing read permission
+## </summary>
+## </param>
+#
+interface(`hadoop_read_config_files', `
+ gen_require(`
+ type hadoop_etc_t;
+ ')
+
+ allow $1 hadoop_etc_t:dir search_dir_perms;
+ allow $1 hadoop_etc_t:lnk_file { read getattr };
+ allow $1 hadoop_etc_t:file read_file_perms;
+')
+
+########################################
+## <summary>
+## Give permission to a domain to
+## execute hadoop_etc_t
+## </summary>
+## <param name="domain">
+## <summary>
+## Domain needing read and execute
+## permission
+## </summary>
+## </param>
+#
+interface(`hadoop_exec_config_files', `
+ gen_require(`
+ type hadoop_etc_t;
+ ')
+
+ hadoop_read_config_files($1)
+ allow $1 hadoop_etc_t:file { execute execute_no_trans};
+')
diff --git a/policy/modules/services/hadoop.te b/policy/modules/services/hadoop.te
new file mode 100644
index 0000000..6a66962
--- /dev/null
+++ b/policy/modules/services/hadoop.te
@@ -0,0 +1,360 @@
+policy_module(hadoop, 1.0.0)
+
+########################################
+#
+# Hadoop declarations.
+#
+
+attribute hadoop_domain;
+
+type hadoop_t;
+type hadoop_exec_t;
+application_domain(hadoop_t, hadoop_exec_t)
+ubac_constrained(hadoop_t)
+
+type hadoop_etc_t;
+files_config_file(hadoop_etc_t)
+
+type hadoop_var_lib_t;
+files_type(hadoop_var_lib_t)
+
+type hadoop_log_t;
+logging_log_file(hadoop_log_t)
+
+type hadoop_var_run_t;
+files_pid_file(hadoop_var_run_t)
+
+type hadoop_tmp_t;
+files_tmp_file(hadoop_tmp_t)
+ubac_constrained(hadoop_tmp_t)
+
+hadoop_domain_template(datanode)
+hadoop_domain_template(jobtracker)
+hadoop_domain_template(namenode)
+hadoop_domain_template(secondarynamenode)
+hadoop_domain_template(tasktracker)
+
+########################################
+#
+# Hadoop zookeeper client declarations.
+#
+
+type zookeeper_t;
+type zookeeper_exec_t;
+application_domain(zookeeper_t, zookeeper_exec_t)
+ubac_constrained(zookeeper_t)
+
+type zookeeper_etc_t;
+files_config_file(zookeeper_etc_t)
+
+type zookeeper_log_t;
+logging_log_file(zookeeper_log_t)
+
+type zookeeper_tmp_t;
+files_tmp_file(zookeeper_tmp_t)
+ubac_constrained(zookeeper_tmp_t)
+
+########################################
+#
+# Hadoop zookeeper server declarations.
+#
+
+type zookeeper_server_t;
+type zookeeper_server_exec_t;
+init_daemon_domain(zookeeper_server_t, zookeeper_server_exec_t)
+
+type zookeeper_server_initrc_exec_t;
+init_script_file(zookeeper_server_initrc_exec_t)
+
+type zookeeper_server_var_t;
+files_type(zookeeper_server_var_t)
+
+# This will need a file context specification.
+type zookeeper_server_var_run_t;
+files_pid_file(zookeeper_server_var_run_t)
+
+type zookeeper_server_tmp_t;
+files_tmp_file(zookeeper_server_tmp_t)
+
+########################################
+#
+# Hadoop policy.
+#
+
+allow hadoop_t self:capability sys_resource;
+allow hadoop_t self:process { getsched setsched signal signull setrlimit execmem };
+allow hadoop_t self:fifo_file rw_fifo_file_perms;
+allow hadoop_t self:key write;
+allow hadoop_t self:tcp_socket create_stream_socket_perms;
+allow hadoop_t self:udp_socket create_socket_perms;
+allow hadoop_t hadoop_domain:process signull;
+
+dontaudit hadoop_t self:netlink_route_socket rw_netlink_socket_perms;
+
+read_files_pattern(hadoop_t, hadoop_etc_t, hadoop_etc_t)
+read_lnk_files_pattern(hadoop_t, hadoop_etc_t, hadoop_etc_t)
+can_exec(hadoop_t, hadoop_etc_t)
+
+manage_dirs_pattern(hadoop_t, hadoop_var_lib_t, hadoop_var_lib_t)
+manage_dirs_pattern(hadoop_t, hadoop_log_t, hadoop_log_t)
+manage_dirs_pattern(hadoop_t, hadoop_tmp_t, hadoop_tmp_t)
+manage_files_pattern(hadoop_t, hadoop_var_lib_t, hadoop_var_lib_t)
+manage_files_pattern(hadoop_t, hadoop_tmp_t, hadoop_tmp_t)
+
+getattr_dirs_pattern(hadoop_t, hadoop_var_run_t, hadoop_var_run_t)
+
+files_tmp_filetrans(hadoop_t, hadoop_tmp_t, file)
+files_manage_generic_tmp_dirs(hadoop_t)
+
+kernel_read_network_state(hadoop_t)
+kernel_read_system_state(hadoop_t)
+
+corecmd_exec_bin(hadoop_t)
+corecmd_exec_shell(hadoop_t)
+
+corenet_all_recvfrom_unlabeled(hadoop_t)
+corenet_all_recvfrom_netlabel(hadoop_t)
+corenet_sendrecv_portmap_client_packets(hadoop_t)
+corenet_sendrecv_zope_client_packets(hadoop_t)
+corenet_tcp_bind_all_nodes(hadoop_t)
+corenet_tcp_connect_hadoop_namenode_port(hadoop_t)
+corenet_tcp_connect_portmap_port(hadoop_t)
+corenet_tcp_connect_zope_port(hadoop_t)
+corenet_tcp_sendrecv_all_nodes(hadoop_t)
+corenet_tcp_sendrecv_all_ports(hadoop_t)
+corenet_tcp_sendrecv_generic_if(hadoop_t)
+corenet_tcp_connect_generic_port(hadoop_t)
+corenet_udp_bind_all_nodes(hadoop_t)
+corenet_udp_sendrecv_all_nodes(hadoop_t)
+corenet_udp_sendrecv_all_ports(hadoop_t)
+corenet_udp_sendrecv_generic_if(hadoop_t)
+
+dev_read_rand(hadoop_t)
+dev_read_sysfs(hadoop_t)
+dev_read_urand(hadoop_t)
+
+files_dontaudit_search_spool(hadoop_t)
+files_read_usr_files(hadoop_t)
+files_read_all_files(hadoop_t)
+
+fs_getattr_xattr_fs(hadoop_t)
+
+java_exec(hadoop_t)
+
+# This can be removed on anything post-el5
+libs_use_ld_so(hadoop_t)
+libs_use_shared_libs(hadoop_t)
+
+miscfiles_read_localization(hadoop_t)
+
+userdom_dontaudit_search_user_home_dirs(hadoop_t)
+userdom_use_user_terminals(hadoop_t)
+
+optional_policy(`
+ nis_use_ypbind(hadoop_t)
+')
+
+optional_policy(`
+ nscd_socket_use(hadoop_t)
+')
+
+########################################
+#
+# Hadoop datanode policy.
+#
+
+corenet_tcp_connect_hadoop_namenode_port(hadoop_datanode_t)
+fs_getattr_xattr_fs(hadoop_datanode_t)
+manage_dirs_pattern(hadoop_datanode_t, hadoop_var_lib_t, hadoop_var_lib_t)
+
+########################################
+#
+# Hadoop jobtracker policy.
+#
+
+corenet_tcp_bind_zope_port(hadoop_jobtracker_t)
+corenet_tcp_connect_hadoop_namenode_port(hadoop_jobtracker_t)
+create_dirs_pattern(hadoop_jobtracker_t, hadoop_jobtracker_log_t, hadoop_jobtracker_log_t)
+manage_dirs_pattern(hadoop_jobtracker_t, hadoop_var_lib_t, hadoop_var_lib_t)
+
+########################################
+#
+# Hadoop namenode policy.
+#
+
+corenet_tcp_bind_hadoop_namenode_port(hadoop_namenode_t)
+corenet_tcp_connect_hadoop_namenode_port(hadoop_namenode_t)
+manage_dirs_pattern(hadoop_namenode_t, hadoop_var_lib_t, hadoop_var_lib_t)
+manage_files_pattern(hadoop_namenode_t, hadoop_var_lib_t, hadoop_var_lib_t)
+
+########################################
+#
+# Hadoop secondary namenode policy.
+#
+
+corenet_tcp_connect_hadoop_namenode_port(hadoop_secondarynamenode_t)
+manage_dirs_pattern(hadoop_secondarynamenode_t, hadoop_var_lib_t, hadoop_var_lib_t)
+
+########################################
+#
+# Hadoop tasktracker policy.
+#
+
+allow hadoop_tasktracker_t self:process signal;
+
+corenet_tcp_connect_zope_port(hadoop_tasktracker_t)
+corenet_tcp_connect_hadoop_namenode_port(hadoop_tasktracker_t)
+
+filetrans_pattern(hadoop_tasktracker_t, hadoop_log_t, hadoop_tasktracker_log_t, dir)
+fs_getattr_xattr_fs(hadoop_tasktracker_t)
+fs_associate(hadoop_tasktracker_t)
+
+manage_dirs_pattern(hadoop_tasktracker_t, hadoop_var_lib_t, hadoop_var_lib_t)
+manage_dirs_pattern(hadoop_tasktracker_t, hadoop_tasktracker_log_t, hadoop_tasktracker_log_t);
+
+########################################
+#
+# Hadoop zookeeper client policy.
+#
+
+allow zookeeper_t self:process { getsched sigkill signal signull execmem };
+allow zookeeper_t self:fifo_file rw_fifo_file_perms;
+allow zookeeper_t self:tcp_socket create_stream_socket_perms;
+allow zookeeper_t self:udp_socket create_socket_perms;
+allow zookeeper_t zookeeper_server_t:process signull;
+
+read_files_pattern(zookeeper_t, zookeeper_etc_t, zookeeper_etc_t)
+read_lnk_files_pattern(zookeeper_t, zookeeper_etc_t, zookeeper_etc_t)
+
+setattr_dirs_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
+append_files_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
+create_files_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
+read_files_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
+setattr_files_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
+logging_log_filetrans(zookeeper_t, zookeeper_log_t, file)
+
+manage_files_pattern(zookeeper_t, zookeeper_tmp_t, zookeeper_tmp_t)
+files_tmp_filetrans(zookeeper_t, zookeeper_tmp_t, file)
+files_manage_generic_tmp_dirs(zookeeper_t)
+
+can_exec(zookeeper_t, zookeeper_exec_t)
+
+kernel_read_network_state(zookeeper_t)
+kernel_read_system_state(zookeeper_t)
+
+corecmd_exec_bin(zookeeper_t)
+corecmd_exec_shell(zookeeper_t)
+
+dontaudit zookeeper_t self:netlink_route_socket rw_netlink_socket_perms;
+
+corenet_all_recvfrom_unlabeled(zookeeper_t)
+corenet_all_recvfrom_netlabel(zookeeper_t)
+corenet_tcp_bind_all_nodes(zookeeper_t)
+corenet_tcp_connect_zookeeper_client_port(zookeeper_t)
+corenet_tcp_sendrecv_all_nodes(zookeeper_t)
+corenet_tcp_sendrecv_all_ports(zookeeper_t)
+corenet_tcp_sendrecv_generic_if(zookeeper_t)
+corenet_tcp_connect_generic_port(zookeeper_t)
+corenet_udp_bind_all_nodes(zookeeper_t)
+corenet_udp_sendrecv_all_nodes(zookeeper_t)
+corenet_udp_sendrecv_all_ports(zookeeper_t)
+corenet_udp_sendrecv_generic_if(zookeeper_t)
+
+dev_read_rand(zookeeper_t)
+dev_read_sysfs(zookeeper_t)
+dev_read_urand(zookeeper_t)
+
+files_read_etc_files(zookeeper_t)
+files_read_usr_files(zookeeper_t)
+
+# This can be removed on anything post-el5
+libs_use_ld_so(zookeeper_t)
+libs_use_shared_libs(zookeeper_t)
+
+miscfiles_read_localization(zookeeper_t)
+
+sysnet_read_config(zookeeper_t)
+
+userdom_dontaudit_search_user_home_dirs(zookeeper_t)
+userdom_use_user_terminals(zookeeper_t)
+
+java_exec(zookeeper_t)
+
+optional_policy(`
+ nscd_socket_use(zookeeper_t)
+')
+
+########################################
+#
+# Hadoop zookeeper server policy.
+#
+
+allow zookeeper_server_t self:capability kill;
+allow zookeeper_server_t self:process { execmem getsched sigkill signal signull };
+allow zookeeper_server_t self:fifo_file rw_fifo_file_perms;
+allow zookeeper_server_t self:netlink_route_socket rw_netlink_socket_perms;
+allow zookeeper_server_t self:tcp_socket create_stream_socket_perms;
+
+read_files_pattern(zookeeper_server_t, zookeeper_etc_t, zookeeper_etc_t)
+read_lnk_files_pattern(zookeeper_server_t, zookeeper_etc_t, zookeeper_etc_t)
+
+manage_dirs_pattern(zookeeper_server_t, zookeeper_server_var_t, zookeeper_server_var_t)
+manage_files_pattern(zookeeper_server_t, zookeeper_server_var_t, zookeeper_server_var_t)
+files_var_lib_filetrans(zookeeper_server_t, zookeeper_server_var_t, { dir file })
+
+setattr_dirs_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
+append_files_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
+create_files_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
+read_files_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
+setattr_files_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
+logging_log_filetrans(zookeeper_server_t, zookeeper_log_t, file)
+
+manage_files_pattern(zookeeper_server_t, zookeeper_server_var_run_t, zookeeper_server_var_run_t)
+files_pid_filetrans(zookeeper_server_t, zookeeper_server_var_run_t, file)
+
+manage_files_pattern(zookeeper_server_t, zookeeper_server_tmp_t, zookeeper_server_tmp_t)
+files_tmp_filetrans(zookeeper_server_t, zookeeper_server_tmp_t, file)
+files_manage_generic_tmp_files(zookeeper_server_t)
+files_manage_generic_tmp_dirs(zookeeper_server_t)
+
+can_exec(zookeeper_server_t, zookeeper_server_exec_t)
+
+kernel_read_network_state(zookeeper_server_t)
+kernel_read_system_state(zookeeper_server_t)
+
+corecmd_exec_bin(zookeeper_server_t)
+corecmd_exec_shell(zookeeper_server_t)
+
+corenet_all_recvfrom_unlabeled(zookeeper_server_t)
+corenet_all_recvfrom_netlabel(zookeeper_server_t)
+corenet_tcp_bind_all_nodes(zookeeper_server_t)
+corenet_tcp_bind_zookeeper_client_port(zookeeper_server_t)
+corenet_tcp_bind_zookeeper_election_port(zookeeper_server_t)
+corenet_tcp_bind_zookeeper_leader_port(zookeeper_server_t)
+corenet_tcp_connect_zookeeper_election_port(zookeeper_server_t)
+corenet_tcp_connect_zookeeper_leader_port(zookeeper_server_t)
+corenet_tcp_sendrecv_generic_if(zookeeper_server_t)
+corenet_tcp_sendrecv_generic_node(zookeeper_server_t)
+corenet_tcp_sendrecv_all_ports(zookeeper_server_t)
+corenet_tcp_connect_generic_port(zookeeper_server_t)
+
+dev_read_rand(zookeeper_server_t)
+dev_read_sysfs(zookeeper_server_t)
+dev_read_urand(zookeeper_server_t)
+
+files_read_etc_files(zookeeper_server_t)
+files_read_usr_files(zookeeper_server_t)
+
+fs_getattr_xattr_fs(zookeeper_server_t)
+
+# This can be removed on anything post-el5
+libs_use_ld_so(zookeeper_server_t)
+libs_use_shared_libs(zookeeper_server_t)
+
+logging_send_syslog_msg(zookeeper_server_t)
+
+miscfiles_read_localization(zookeeper_server_t)
+
+sysnet_read_config(zookeeper_server_t)
+
+java_exec(zookeeper_server_t)
diff --git a/policy/modules/system/unconfined.te b/policy/modules/system/unconfined.te
index f976344..ac27264 100644
--- a/policy/modules/system/unconfined.te
+++ b/policy/modules/system/unconfined.te
@@ -118,6 +118,10 @@ optional_policy(`
')
optional_policy(`
+ hadoop_run(unconfined_t, unconfined_r)
+')
+
+optional_policy(`
inn_domtrans(unconfined_t)
')
@@ -210,6 +214,10 @@ optional_policy(`
xserver_domtrans(unconfined_t)
')
+optional_policy(`
+ zookeeper_run_client(unconfined_t, unconfined_r)
+')
+
########################################
#
# Unconfined Execmem Local policy
On 10/01/10 11:17, Paul Nuzzi wrote:
> On 10/01/2010 08:02 AM, Dominick Grift wrote:
>> On Thu, Sep 30, 2010 at 03:39:40PM -0400, Paul Nuzzi wrote:
>>> I updated the patch based on recommendations from the mailing list.
>>> All of hadoop's services are included in one module instead of
>>> individual ones. Unconfined and sysadm roles are given access to
>>> hadoop and zookeeper client domain transitions. The services are started
>>> using run_init. Let me know what you think.
>>
>> Why do some hadoop domain need to manage generic tmp?
>>
>> files_manage_generic_tmp_dirs(zookeeper_t)
>> files_manage_generic_tmp_dirs(hadoop_t)
>> files_manage_generic_tmp_dirs(hadoop_$1_initrc_t)
>> files_manage_generic_tmp_files(hadoop_$1_initrc_t)
>> files_manage_generic_tmp_files(hadoop_$1_t)
>> files_manage_generic_tmp_dirs(hadoop_$1_t)
>
> This has to be done for Java JMX to work. All of the files are written to
> /tmp/hsperfdata_(hadoop/zookeeper). /tmp/hsperfdata_ is labeled tmp_t while
> all the files for each service are labeled with hadoop_*_tmp_t. The first service
> will end up owning the directory if it is not labeled tmp_t.
The hsperfdata dir in /tmp is certainly the bane of policy writers. Based
on a quick look through the policy, it looks like the only dir they
create in /tmp is this hsperfdata dir. I suggest you do something like
files_tmp_filetrans(hadoop_t, hadoop_hsperfdata_t, dir)
files_tmp_filetrans(zookeeper_t, hadoop_hsperfdata_t, dir)
filetrans_pattern(hadoop_t, hadoop_hsperfdata_t, hadoop_tmp_t, file)
filetrans_pattern(zookeeper_t, hadoop_hsperfdata_t, zookeeper_tmp_t, file)
--
Chris PeBenito
Tresys Technology, LLC
http://www.tresys.com | oss.tresys.com
On Fri, Oct 01, 2010 at 11:17:27AM -0400, Paul Nuzzi wrote:
> On 10/01/2010 08:02 AM, Dominick Grift wrote:
> > On Thu, Sep 30, 2010 at 03:39:40PM -0400, Paul Nuzzi wrote:
> >> I updated the patch based on recommendations from the mailing list.
> >> All of hadoop's services are included in one module instead of
> >> individual ones. Unconfined and sysadm roles are given access to
> >> hadoop and zookeeper client domain transitions. The services are started
> >> using run_init. Let me know what you think.
> >
> > Why do some hadoop domain need to manage generic tmp?
> >
> > files_manage_generic_tmp_dirs(zookeeper_t)
> > files_manage_generic_tmp_dirs(hadoop_t)
> > files_manage_generic_tmp_dirs(hadoop_$1_initrc_t)
> > files_manage_generic_tmp_files(hadoop_$1_initrc_t)
> > files_manage_generic_tmp_files(hadoop_$1_t)
> > files_manage_generic_tmp_dirs(hadoop_$1_t)
>
> This has to be done for Java JMX to work. All of the files are written to
> /tmp/hsperfdata_(hadoop/zookeeper). /tmp/hsperfdata_ is labeled tmp_t while
> all the files for each service are labeled with hadoop_*_tmp_t. The first service
> will end up owning the directory if it is not labeled tmp_t.
>
> > You probably need:
> >
> > files_search_pids() and files_search_locks() for hadoop_$1_initrc_t
> > becuase it needs to traverse /var/run and /var/lock/subsys to be able to manage its objects there.
>
> > Can use rw_fifo_file_perms here:
> >
> > allow hadoop_$1_initrc_t self:fifo_file { read write getattr ioctl };
> >
> > Might want to split this into hadoop_read_config_files and hadoop_exec_config_files.
> >
> > hadoop_rx_etc(hadoop_$1_initrc_t)
> >
> > This seems wrong. Why does it need that? use files_search_var_lib() if possible:
> >
> > files_read_var_lib_files(hadoop_$1_t)
> >
> > This is not a declaration and might want to use filetrans_pattern() instead:
> >
> > type_transition hadoop_$1_initrc_t hadoop_var_run_t:file hadoop_$1_initrc_var_run_t;
>
> Changed. Thanks for the comments.
>
> > Other then the above, there are some style issues:
> >
> > http://oss.tresys.com/projects/refpolicy/wiki/StyleGuide
> >
> > But i can help clean that up once above issues are resolved.
> >
>
> Is there a style checking script for refpolicy patches similar to the Linux kernel?
Not that I am aware of.
Are you sure that your entries in hadoop.fc work? You could check by intentionally mislabeling the paths and their children with chcon, and then seeing whether restorecon restores everything properly.
>
>
> Signed-off-by: Paul Nuzzi <[email protected]>
>
> ---
> policy/modules/kernel/corenetwork.te.in | 4
> policy/modules/roles/sysadm.te | 8
> policy/modules/services/hadoop.fc | 53 ++++
> policy/modules/services/hadoop.if | 360 ++++++++++++++++++++++++++++++++
> policy/modules/services/hadoop.te | 360 ++++++++++++++++++++++++++++++++
> policy/modules/system/unconfined.te | 8
> 6 files changed, 793 insertions(+)
>
> diff --git a/policy/modules/kernel/corenetwork.te.in b/policy/modules/kernel/corenetwork.te.in
> index 2ecdde8..7a1b5de 100644
> --- a/policy/modules/kernel/corenetwork.te.in
> +++ b/policy/modules/kernel/corenetwork.te.in
> @@ -105,6 +105,7 @@ network_port(giftd, tcp,1213,s0)
> network_port(git, tcp,9418,s0, udp,9418,s0)
> network_port(gopher, tcp,70,s0, udp,70,s0)
> network_port(gpsd, tcp,2947,s0)
> +network_port(hadoop_namenode, tcp, 8020,s0)
> network_port(hddtemp, tcp,7634,s0)
> network_port(howl, tcp,5335,s0, udp,5353,s0)
> network_port(hplip, tcp,1782,s0, tcp,2207,s0, tcp,2208,s0, tcp, 8290,s0, tcp,50000,s0, tcp,50002,s0, tcp,8292,s0, tcp,9100,s0, tcp,9101,s0, tcp,9102,s0, tcp,9220,s0, tcp,9221,s0, tcp,9222,s0, tcp,9280,s0, tcp,9281,s0, tcp,9282,s0, tcp,9290,s0, tcp,9291,s0, tcp,9292,s0)
> @@ -211,6 +212,9 @@ network_port(xdmcp, udp,177,s0, tcp,177,s0)
> network_port(xen, tcp,8002,s0)
> network_port(xfs, tcp,7100,s0)
> network_port(xserver, tcp,6000-6020,s0)
> +network_port(zookeeper_client, tcp, 2181,s0)
> +network_port(zookeeper_election, tcp, 3888,s0)
> +network_port(zookeeper_leader, tcp, 2888,s0)
> network_port(zebra, tcp,2600-2604,s0, tcp,2606,s0, udp,2600-2604,s0, udp,2606,s0)
> network_port(zope, tcp,8021,s0)
>
> diff --git a/policy/modules/roles/sysadm.te b/policy/modules/roles/sysadm.te
> index cad05ff..b46b28b 100644
> --- a/policy/modules/roles/sysadm.te
> +++ b/policy/modules/roles/sysadm.te
> @@ -152,6 +152,10 @@ optional_policy(`
> ')
>
> optional_policy(`
> + hadoop_run(sysadm_t, sysadm_r)
> +')
> +
> +optional_policy(`
> # allow system administrator to use the ipsec script to look
> # at things (e.g., ipsec auto --status)
> # probably should create an ipsec_admin role for this kind of thing
> @@ -392,6 +396,10 @@ optional_policy(`
> yam_run(sysadm_t, sysadm_r)
> ')
>
> +optional_policy(`
> + zookeeper_run_client(sysadm_t, sysadm_r)
> +')
> +
> ifndef(`distro_redhat',`
> optional_policy(`
> auth_role(sysadm_r, sysadm_t)
> diff --git a/policy/modules/services/hadoop.fc b/policy/modules/services/hadoop.fc
> new file mode 100644
> index 0000000..5bdd554
> --- /dev/null
> +++ b/policy/modules/services/hadoop.fc
> @@ -0,0 +1,53 @@
> +/etc/hadoop.*(/.*)? gen_context(system_u:object_r:hadoop_etc_t,s0)
> +
> +/etc/rc\.d/init\.d/hadoop-(.*)?-datanode -- gen_context(system_u:object_r:hadoop_datanode_initrc_exec_t,s0)
> +/etc/rc\.d/init\.d/hadoop-(.*)?-jobtracker -- gen_context(system_u:object_r:hadoop_jobtracker_initrc_exec_t,s0)
> +/etc/rc\.d/init\.d/hadoop-(.*)?-namenode -- gen_context(system_u:object_r:hadoop_namenode_initrc_exec_t,s0)
> +/etc/rc\.d/init\.d/hadoop-(.*)?-secondarynamenode -- gen_context(system_u:object_r:hadoop_secondarynamenode_initrc_exec_t,s0)
> +/etc/rc\.d/init\.d/hadoop-(.*)?-tasktracker -- gen_context(system_u:object_r:hadoop_tasktracker_initrc_exec_t,s0)
> +/etc/rc\.d/init\.d/hadoop-zookeeper -- gen_context(system_u:object_r:zookeeper_server_initrc_exec_t,s0)
> +/etc/init\.d/hadoop-datanode -- gen_context(system_u:object_r:hadoop_datanode_initrc_exec_t,s0)
> +/etc/init\.d/hadoop-jobtracker -- gen_context(system_u:object_r:hadoop_jobtracker_initrc_exec_t,s0)
> +/etc/init\.d/hadoop-namenode -- gen_context(system_u:object_r:hadoop_namenode_initrc_exec_t,s0)
> +/etc/init\.d/hadoop-secondarynamenode -- gen_context(system_u:object_r:hadoop_secondarynamenode_initrc_exec_t,s0)
> +/etc/init\.d/hadoop-tasktracker -- gen_context(system_u:object_r:hadoop_tasktracker_initrc_exec_t,s0)
> +/etc/init\.d/zookeeper -- gen_context(system_u:object_r:zookeeper_server_initrc_exec_t,s0)
> +
> +/etc/zookeeper(/.*)? gen_context(system_u:object_r:zookeeper_etc_t,s0)
> +/etc/zookeeper\.dist(/.*)? gen_context(system_u:object_r:zookeeper_etc_t,s0)
> +
> +/usr/lib/hadoop(.*)?/bin/hadoop -- gen_context(system_u:object_r:hadoop_exec_t,s0)
> +
> +/usr/bin/zookeeper-client -- gen_context(system_u:object_r:zookeeper_exec_t,s0)
> +/usr/bin/zookeeper-server -- gen_context(system_u:object_r:zookeeper_server_exec_t,s0)
> +
> +/var/zookeeper(/.*)? gen_context(system_u:object_r:zookeeper_server_var_t,s0)
> +/var/lib/zookeeper(/.*)? gen_context(system_u:object_r:zookeeper_server_var_t,s0)
> +
> +/var/lib/hadoop(.*)? gen_context(system_u:object_r:hadoop_var_lib_t,s0)
> +/var/lib/hadoop(.*)?/cache/hadoop/dfs/data(/.*)? gen_context(system_u:object_r:hadoop_datanode_var_lib_t,s0)
> +/var/lib/hadoop(.*)?/cache/hadoop/dfs/name(/.*)? gen_context(system_u:object_r:hadoop_namenode_var_lib_t,s0)
> +/var/lib/hadoop(.*)?/cache/hadoop/dfs/namesecondary(/.*)? gen_context(system_u:object_r:hadoop_secondarynamenode_var_lib_t,s0)
> +/var/lib/hadoop(.*)?/cache/hadoop/mapred/local/jobTracker(/.*)? gen_context(system_u:object_r:hadoop_jobtracker_var_lib_t,s0)
> +/var/lib/hadoop(.*)?/cache/hadoop/mapred/local/taskTracker(/.*)? gen_context(system_u:object_r:hadoop_tasktracker_var_lib_t,s0)
> +
> +/var/lock/subsys/hadoop-datanode -- gen_context(system_u:object_r:hadoop_datanode_lock_t,s0)
> +/var/lock/subsys/hadoop-namenode -- gen_context(system_u:object_r:hadoop_namenode_lock_t,s0)
> +/var/lock/subsys/hadoop-jobtracker -- gen_context(system_u:object_r:hadoop_jobtracker_lock_t,s0)
> +/var/lock/subsys/hadoop-tasktracker -- gen_context(system_u:object_r:hadoop_tasktracker_lock_t,s0)
> +/var/lock/subsys/hadoop-secondarynamenode -- gen_context(system_u:object_r:hadoop_secondarynamenode_lock_t,s0)
> +
> +/var/log/hadoop(.*)? gen_context(system_u:object_r:hadoop_log_t,s0)
> +/var/log/hadoop(.*)?/hadoop-hadoop-datanode-(.*)? gen_context(system_u:object_r:hadoop_datanode_log_t,s0)
> +/var/log/hadoop(.*)?/hadoop-hadoop-jobtracker-(.*)? gen_context(system_u:object_r:hadoop_jobtracker_log_t,s0)
> +/var/log/hadoop(.*)?/hadoop-hadoop-namenode-(.*)? gen_context(system_u:object_r:hadoop_namenode_log_t,s0)
> +/var/log/hadoop(.*)?/hadoop-hadoop-secondarynamenode-(.*)? gen_context(system_u:object_r:hadoop_secondarynamenode_log_t,s0)
> +/var/log/hadoop(.*)?/hadoop-hadoop-tasktracker-(.*)? gen_context(system_u:object_r:hadoop_tasktracker_log_t,s0)
> +/var/log/zookeeper(/.*)? gen_context(system_u:object_r:zookeeper_log_t,s0)
> +
> +/var/run/hadoop(.*)? -d gen_context(system_u:object_r:hadoop_var_run_t,s0)
> +/var/run/hadoop(.*)?/hadoop-hadoop-datanode.pid -- gen_context(system_u:object_r:hadoop_datanode_initrc_var_run_t,s0)
> +/var/run/hadoop(.*)?/hadoop-hadoop-namenode.pid -- gen_context(system_u:object_r:hadoop_namenode_initrc_var_run_t,s0)
> +/var/run/hadoop(.*)?/hadoop-hadoop-jobtracker.pid -- gen_context(system_u:object_r:hadoop_jobtracker_initrc_var_run_t,s0)
> +/var/run/hadoop(.*)?/hadoop-hadoop-tasktracker.pid -- gen_context(system_u:object_r:hadoop_tasktracker_initrc_var_run_t,s0)
> +/var/run/hadoop(.*)?/hadoop-hadoop-secondarynamenode.pid -- gen_context(system_u:object_r:hadoop_secondarynamenode_initrc_var_run_t,s0)
> diff --git a/policy/modules/services/hadoop.if b/policy/modules/services/hadoop.if
> new file mode 100644
> index 0000000..051e68c
> --- /dev/null
> +++ b/policy/modules/services/hadoop.if
> @@ -0,0 +1,360 @@
> +## <summary>Software for reliable, scalable, distributed computing.</summary>
> +
> +#######################################
> +## <summary>
> +## The template to define a hadoop domain.
> +## </summary>
> +## <param name="domain_prefix">
> +## <summary>
> +## Domain prefix to be used.
> +## </summary>
> +## </param>
> +#
> +template(`hadoop_domain_template',`
> + gen_require(`
> + attribute hadoop_domain;
> + type hadoop_log_t, hadoop_var_lib_t, hadoop_var_run_t;
> + type hadoop_exec_t;
> + ')
> +
> + ########################################
> + #
> + # Shared declarations.
> + #
> +
> + type hadoop_$1_t, hadoop_domain;
> + domain_type(hadoop_$1_t)
> + domain_entry_file(hadoop_$1_t, hadoop_exec_t)
> +
> + type hadoop_$1_initrc_t;
> + type hadoop_$1_initrc_exec_t;
> + init_script_domain(hadoop_$1_initrc_t, hadoop_$1_initrc_exec_t)
> +
> + role system_r types { hadoop_$1_initrc_t hadoop_$1_t };
> +
> + type hadoop_$1_lock_t;
> + files_lock_file(hadoop_$1_lock_t)
> + files_lock_filetrans(hadoop_$1_initrc_t, hadoop_$1_lock_t, file)
> +
> + type hadoop_$1_log_t;
> + logging_log_file(hadoop_$1_log_t)
> + filetrans_pattern(hadoop_$1_initrc_t, hadoop_log_t, hadoop_$1_log_t, {dir file})
> + filetrans_pattern(hadoop_$1_t, hadoop_log_t, hadoop_$1_log_t, {dir file})
> +
> + type hadoop_$1_var_lib_t;
> + files_type(hadoop_$1_var_lib_t)
> + filetrans_pattern(hadoop_$1_t, hadoop_var_lib_t, hadoop_$1_var_lib_t, file)
> +
> + type hadoop_$1_initrc_var_run_t;
> + files_pid_file(hadoop_$1_initrc_var_run_t)
> + filetrans_pattern(hadoop_$1_initrc_t, hadoop_var_run_t, hadoop_$1_initrc_var_run_t, file)
> +
> + type hadoop_$1_tmp_t;
> + files_tmp_file(hadoop_$1_tmp_t)
> + files_tmp_filetrans(hadoop_$1_t, hadoop_$1_tmp_t, file)
> +
> + ####################################
> + #
> + # Shared hadoop_$1 initrc policy.
> + #
> +
> + allow hadoop_$1_initrc_t hadoop_$1_t:process { signal signull };
> + allow hadoop_$1_initrc_t self:capability { setuid setgid };
> + allow hadoop_$1_initrc_t self:fifo_file rw_fifo_file_perms;
> + allow hadoop_$1_initrc_t self:process setsched;
> +
> + consoletype_exec(hadoop_$1_initrc_t)
> + corecmd_exec_bin(hadoop_$1_initrc_t)
> + corecmd_exec_shell(hadoop_$1_initrc_t)
> +
> + domtrans_pattern(hadoop_$1_initrc_t, hadoop_exec_t, hadoop_$1_t)
> + dontaudit hadoop_$1_initrc_t self:capability sys_tty_config;
> +
> + files_read_etc_files(hadoop_$1_initrc_t)
> + files_read_usr_files(hadoop_$1_initrc_t)
> + files_manage_generic_tmp_dirs(hadoop_$1_initrc_t)
> + files_manage_generic_tmp_files(hadoop_$1_initrc_t)
> + files_search_pids(hadoop_$1_initrc_t)
> + files_search_locks(hadoop_$1_initrc_t)
> + fs_getattr_xattr_fs(hadoop_$1_initrc_t)
> +
> + hadoop_exec_config_files(hadoop_$1_initrc_t)
> +
> + init_rw_utmp(hadoop_$1_initrc_t)
> + init_use_script_ptys(hadoop_$1_initrc_t)
> +
> + kernel_read_kernel_sysctls(hadoop_$1_initrc_t)
> + kernel_read_sysctl(hadoop_$1_initrc_t)
> + kernel_read_system_state(hadoop_$1_initrc_t)
> +
> + logging_send_syslog_msg(hadoop_$1_initrc_t)
> + logging_send_audit_msgs(hadoop_$1_initrc_t)
> + logging_search_logs(hadoop_$1_initrc_t)
> +
> + manage_files_pattern(hadoop_$1_initrc_t, hadoop_$1_lock_t, hadoop_$1_lock_t)
> + manage_files_pattern(hadoop_$1_initrc_t, hadoop_$1_initrc_var_run_t, hadoop_$1_initrc_var_run_t)
> + manage_files_pattern(hadoop_$1_initrc_t, hadoop_$1_log_t, hadoop_$1_log_t)
> + manage_dirs_pattern(hadoop_$1_initrc_t, hadoop_var_run_t, hadoop_var_run_t)
> + manage_files_pattern(hadoop_$1_initrc_t, hadoop_var_run_t, hadoop_var_run_t)
> +
> + miscfiles_read_localization(hadoop_$1_initrc_t)
> +
> + optional_policy(`
> + nscd_socket_use(hadoop_$1_initrc_t)
> + ')
> +
> + term_use_generic_ptys(hadoop_$1_initrc_t)
> +
> + userdom_dontaudit_search_user_home_dirs(hadoop_$1_initrc_t)
> +
> + # This can be removed on anything post-el5
> + libs_use_ld_so(hadoop_$1_initrc_t)
> + libs_use_shared_libs(hadoop_$1_initrc_t)
> +
> + ####################################
> + #
> + # Shared hadoop_$1 policy.
> + #
> +
> + allow hadoop_$1_t hadoop_domain:process signull;
> + allow hadoop_$1_t self:fifo_file rw_fifo_file_perms;
> + allow hadoop_$1_t self:process execmem;
> + allow hadoop_$1_t hadoop_var_run_t:dir getattr;
> +
> + corecmd_exec_bin(hadoop_$1_t)
> + corecmd_exec_shell(hadoop_$1_t)
> +
> + dev_read_rand(hadoop_$1_t)
> + dev_read_urand(hadoop_$1_t)
> + dev_read_sysfs(hadoop_$1_t)
> + dontaudit hadoop_$1_t self:netlink_route_socket rw_netlink_socket_perms;
> +
> + files_manage_generic_tmp_files(hadoop_$1_t)
> + files_manage_generic_tmp_dirs(hadoop_$1_t)
> + files_read_etc_files(hadoop_$1_t)
> + files_search_pids(hadoop_$1_t)
> + files_search_var_lib(hadoop_$1_t)
> +
> + hadoop_exec_config_files(hadoop_$1_t)
> +
> + java_exec(hadoop_$1_t)
> +
> + kernel_read_network_state(hadoop_$1_t)
> + kernel_read_system_state(hadoop_$1_t)
> +
> + logging_search_logs(hadoop_$1_t)
> +
> + manage_dirs_pattern(hadoop_$1_t, hadoop_$1_var_lib_t, hadoop_$1_var_lib_t)
> + manage_files_pattern(hadoop_$1_t, hadoop_$1_log_t, hadoop_$1_log_t)
> + manage_files_pattern(hadoop_$1_t, hadoop_$1_var_lib_t, hadoop_$1_var_lib_t)
> + manage_files_pattern(hadoop_$1_t, hadoop_$1_tmp_t, hadoop_$1_tmp_t)
> + miscfiles_read_localization(hadoop_$1_t)
> +
> + optional_policy(`
> + nscd_socket_use(hadoop_$1_t)
> + ')
> +
> + sysnet_read_config(hadoop_$1_t)
> +
> + allow hadoop_$1_t self:tcp_socket create_stream_socket_perms;
> + corenet_all_recvfrom_unlabeled(hadoop_$1_t)
> + corenet_all_recvfrom_netlabel(hadoop_$1_t)
> + corenet_tcp_bind_all_nodes(hadoop_$1_t)
> + corenet_tcp_sendrecv_generic_if(hadoop_$1_t)
> + corenet_tcp_sendrecv_generic_node(hadoop_$1_t)
> + corenet_tcp_sendrecv_all_ports(hadoop_$1_t)
> + corenet_tcp_connect_generic_port(hadoop_$1_t)
> +
> + allow hadoop_$1_t self:udp_socket create_socket_perms;
> + corenet_udp_sendrecv_generic_if(hadoop_$1_t)
> + corenet_udp_sendrecv_all_nodes(hadoop_$1_t)
> + corenet_udp_bind_all_nodes(hadoop_$1_t)
> +
> + # This can be removed on anything post-el5
> + libs_use_ld_so(hadoop_$1_t)
> + libs_use_shared_libs(hadoop_$1_t)
> +')
> +
> +########################################
> +## <summary>
> +## Execute hadoop in the
> +## hadoop domain.
> +## </summary>
> +## <param name="domain">
> +## <summary>
> +## Domain allowed to transition.
> +## </summary>
> +## </param>
> +#
> +interface(`hadoop_domtrans',`
> + gen_require(`
> + type hadoop_t, hadoop_exec_t;
> + ')
> +
> + files_search_usr($1)
> + libs_search_lib($1)
> + domtrans_pattern($1, hadoop_exec_t, hadoop_t)
> +')
> +
> +########################################
> +## <summary>
> +## Execute hadoop in the hadoop domain,
> +## and allow the specified role the
> +## hadoop domain.
> +## </summary>
> +## <param name="domain">
> +## <summary>
> +## Domain allowed to transition.
> +## </summary>
> +## </param>
> +## <param name="role">
> +## <summary>
> +## Role allowed access.
> +## </summary>
> +## </param>
> +## <rolecap/>
> +#
> +interface(`hadoop_run',`
> + gen_require(`
> + type hadoop_t;
> + ')
> +
> + hadoop_domtrans($1)
> + role $2 types hadoop_t;
> +
> + allow $1 hadoop_t:process { ptrace signal_perms };
> + ps_process_pattern($1, hadoop_t)
> +')
> +
> +########################################
> +## <summary>
> +## Execute zookeeper client in the
> +## zookeeper client domain.
> +## </summary>
> +## <param name="domain">
> +## <summary>
> +## Domain allowed to transition.
> +## </summary>
> +## </param>
> +#
> +interface(`zookeeper_domtrans_client',`
> + gen_require(`
> + type zookeeper_t, zookeeper_exec_t;
> + ')
> +
> + corecmd_search_bin($1)
> + files_search_usr($1)
> + domtrans_pattern($1, zookeeper_exec_t, zookeeper_t)
> +')
> +
> +########################################
> +## <summary>
> +## Execute zookeeper server in the
> +## zookeeper server domain.
> +## </summary>
> +## <param name="domain">
> +## <summary>
> +## Domain allowed to transition.
> +## </summary>
> +## </param>
> +#
> +interface(`zookeeper_domtrans_server',`
> + gen_require(`
> + type zookeeper_server_t, zookeeper_server_exec_t;
> + ')
> +
> + corecmd_search_bin($1)
> + files_search_usr($1)
> + domtrans_pattern($1, zookeeper_server_exec_t, zookeeper_server_t)
> +')
> +
> +########################################
> +## <summary>
> +## Execute zookeeper server in the
> +## zookeeper domain.
> +## </summary>
> +## <param name="domain">
> +## <summary>
> +## Domain allowed to transition.
> +## </summary>
> +## </param>
> +#
> +interface(`zookeeper_initrc_domtrans_server',`
> + gen_require(`
> + type zookeeper_server_initrc_exec_t;
> + ')
> +
> + init_labeled_script_domtrans($1, zookeeper_server_initrc_exec_t)
> +')
> +
> +########################################
> +## <summary>
> +## Execute zookeeper client in the
> +## zookeeper client domain, and allow the
> +## specified role the zookeeper client domain.
> +## </summary>
> +## <param name="domain">
> +## <summary>
> +## Domain allowed to transition.
> +## </summary>
> +## </param>
> +## <param name="role">
> +## <summary>
> +## Role allowed access.
> +## </summary>
> +## </param>
> +## <rolecap/>
> +#
> +interface(`zookeeper_run_client',`
> + gen_require(`
> + type zookeeper_t;
> + ')
> +
> + zookeeper_domtrans_client($1)
> + role $2 types zookeeper_t;
> +
> + allow $1 zookeeper_t:process { ptrace signal_perms };
> + ps_process_pattern($1, zookeeper_t)
> +')
> +
> +########################################
> +## <summary>
> +## Give permission to a domain to read
> +## hadoop_etc_t
> +## </summary>
> +## <param name="domain">
> +## <summary>
> +## Domain needing read permission
> +## </summary>
> +## </param>
> +#
> +interface(`hadoop_read_config_files', `
> + gen_require(`
> + type hadoop_etc_t;
> + ')
> +
> + allow $1 hadoop_etc_t:dir search_dir_perms;
> + allow $1 hadoop_etc_t:lnk_file { read getattr };
> + allow $1 hadoop_etc_t:file read_file_perms;
> +')
> +
> +########################################
> +## <summary>
> +## Give permission to a domain to
> +## execute hadoop_etc_t
> +## </summary>
> +## <param name="domain">
> +## <summary>
> +## Domain needing read and execute
> +## permission
> +## </summary>
> +## </param>
> +#
> +interface(`hadoop_exec_config_files', `
> + gen_require(`
> + type hadoop_etc_t;
> + ')
> +
> + hadoop_read_config_files($1)
> + allow $1 hadoop_etc_t:file { execute execute_no_trans};
> +')
> diff --git a/policy/modules/services/hadoop.te b/policy/modules/services/hadoop.te
> new file mode 100644
> index 0000000..6a66962
> --- /dev/null
> +++ b/policy/modules/services/hadoop.te
> @@ -0,0 +1,360 @@
> +policy_module(hadoop, 1.0.0)
> +
> +########################################
> +#
> +# Hadoop declarations.
> +#
> +
> +attribute hadoop_domain;
> +
> +type hadoop_t;
> +type hadoop_exec_t;
> +application_domain(hadoop_t, hadoop_exec_t)
> +ubac_constrained(hadoop_t)
> +
> +type hadoop_etc_t;
> +files_config_file(hadoop_etc_t)
> +
> +type hadoop_var_lib_t;
> +files_type(hadoop_var_lib_t)
> +
> +type hadoop_log_t;
> +logging_log_file(hadoop_log_t)
> +
> +type hadoop_var_run_t;
> +files_pid_file(hadoop_var_run_t)
> +
> +type hadoop_tmp_t;
> +files_tmp_file(hadoop_tmp_t)
> +ubac_constrained(hadoop_tmp_t)
> +
> +hadoop_domain_template(datanode)
> +hadoop_domain_template(jobtracker)
> +hadoop_domain_template(namenode)
> +hadoop_domain_template(secondarynamenode)
> +hadoop_domain_template(tasktracker)
> +
> +########################################
> +#
> +# Hadoop zookeeper client declarations.
> +#
> +
> +type zookeeper_t;
> +type zookeeper_exec_t;
> +application_domain(zookeeper_t, zookeeper_exec_t)
> +ubac_constrained(zookeeper_t)
> +
> +type zookeeper_etc_t;
> +files_config_file(zookeeper_etc_t)
> +
> +type zookeeper_log_t;
> +logging_log_file(zookeeper_log_t)
> +
> +type zookeeper_tmp_t;
> +files_tmp_file(zookeeper_tmp_t)
> +ubac_constrained(zookeeper_tmp_t)
> +
> +########################################
> +#
> +# Hadoop zookeeper server declarations.
> +#
> +
> +type zookeeper_server_t;
> +type zookeeper_server_exec_t;
> +init_daemon_domain(zookeeper_server_t, zookeeper_server_exec_t)
> +
> +type zookeeper_server_initrc_exec_t;
> +init_script_file(zookeeper_server_initrc_exec_t)
> +
> +type zookeeper_server_var_t;
> +files_type(zookeeper_server_var_t)
> +
> +# This will need a file context specification.
> +type zookeeper_server_var_run_t;
> +files_pid_file(zookeeper_server_var_run_t)
> +
> +type zookeeper_server_tmp_t;
> +files_tmp_file(zookeeper_server_tmp_t)
> +
> +########################################
> +#
> +# Hadoop policy.
> +#
> +
> +allow hadoop_t self:capability sys_resource;
> +allow hadoop_t self:process { getsched setsched signal signull setrlimit execmem };
> +allow hadoop_t self:fifo_file rw_fifo_file_perms;
> +allow hadoop_t self:key write;
> +allow hadoop_t self:tcp_socket create_stream_socket_perms;
> +allow hadoop_t self:udp_socket create_socket_perms;
> +allow hadoop_t hadoop_domain:process signull;
> +
> +dontaudit hadoop_t self:netlink_route_socket rw_netlink_socket_perms;
> +
> +read_files_pattern(hadoop_t, hadoop_etc_t, hadoop_etc_t)
> +read_lnk_files_pattern(hadoop_t, hadoop_etc_t, hadoop_etc_t)
> +can_exec(hadoop_t, hadoop_etc_t)
> +
> +manage_dirs_pattern(hadoop_t, hadoop_var_lib_t, hadoop_var_lib_t)
> +manage_dirs_pattern(hadoop_t, hadoop_log_t, hadoop_log_t)
> +manage_dirs_pattern(hadoop_t, hadoop_tmp_t, hadoop_tmp_t)
> +manage_files_pattern(hadoop_t, hadoop_var_lib_t, hadoop_var_lib_t)
> +manage_files_pattern(hadoop_t, hadoop_tmp_t, hadoop_tmp_t)
> +
> +getattr_dirs_pattern(hadoop_t, hadoop_var_run_t, hadoop_var_run_t)
> +
> +files_tmp_filetrans(hadoop_t, hadoop_tmp_t, file)
> +files_manage_generic_tmp_dirs(hadoop_t)
> +
> +kernel_read_network_state(hadoop_t)
> +kernel_read_system_state(hadoop_t)
> +
> +corecmd_exec_bin(hadoop_t)
> +corecmd_exec_shell(hadoop_t)
> +
> +corenet_all_recvfrom_unlabeled(hadoop_t)
> +corenet_all_recvfrom_netlabel(hadoop_t)
> +corenet_sendrecv_portmap_client_packets(hadoop_t)
> +corenet_sendrecv_zope_client_packets(hadoop_t)
> +corenet_tcp_bind_all_nodes(hadoop_t)
> +corenet_tcp_connect_hadoop_namenode_port(hadoop_t)
> +corenet_tcp_connect_portmap_port(hadoop_t)
> +corenet_tcp_connect_zope_port(hadoop_t)
> +corenet_tcp_sendrecv_all_nodes(hadoop_t)
> +corenet_tcp_sendrecv_all_ports(hadoop_t)
> +corenet_tcp_sendrecv_generic_if(hadoop_t)
> +corenet_tcp_connect_generic_port(hadoop_t)
> +corenet_udp_bind_all_nodes(hadoop_t)
> +corenet_udp_sendrecv_all_nodes(hadoop_t)
> +corenet_udp_sendrecv_all_ports(hadoop_t)
> +corenet_udp_sendrecv_generic_if(hadoop_t)
> +
> +dev_read_rand(hadoop_t)
> +dev_read_sysfs(hadoop_t)
> +dev_read_urand(hadoop_t)
> +
> +files_dontaudit_search_spool(hadoop_t)
> +files_read_usr_files(hadoop_t)
> +files_read_all_files(hadoop_t)
> +
> +fs_getattr_xattr_fs(hadoop_t)
> +
> +java_exec(hadoop_t)
> +
> +# This can be removed on anything post-el5
> +libs_use_ld_so(hadoop_t)
> +libs_use_shared_libs(hadoop_t)
> +
> +miscfiles_read_localization(hadoop_t)
> +
> +userdom_dontaudit_search_user_home_dirs(hadoop_t)
> +userdom_use_user_terminals(hadoop_t)
> +
> +optional_policy(`
> + nis_use_ypbind(hadoop_t)
> +')
> +
> +optional_policy(`
> + nscd_socket_use(hadoop_t)
> +')
> +
> +########################################
> +#
> +# Hadoop datanode policy.
> +#
> +
> +corenet_tcp_connect_hadoop_namenode_port(hadoop_datanode_t)
> +fs_getattr_xattr_fs(hadoop_datanode_t)
> +manage_dirs_pattern(hadoop_datanode_t, hadoop_var_lib_t, hadoop_var_lib_t)
> +
> +########################################
> +#
> +# Hadoop jobtracker policy.
> +#
> +
> +corenet_tcp_bind_zope_port(hadoop_jobtracker_t)
> +corenet_tcp_connect_hadoop_namenode_port(hadoop_jobtracker_t)
> +create_dirs_pattern(hadoop_jobtracker_t, hadoop_jobtracker_log_t, hadoop_jobtracker_log_t)
> +manage_dirs_pattern(hadoop_jobtracker_t, hadoop_var_lib_t, hadoop_var_lib_t)
> +
> +########################################
> +#
> +# Hadoop namenode policy.
> +#
> +
> +corenet_tcp_bind_hadoop_namenode_port(hadoop_namenode_t)
> +corenet_tcp_connect_hadoop_namenode_port(hadoop_namenode_t)
> +manage_dirs_pattern(hadoop_namenode_t, hadoop_var_lib_t, hadoop_var_lib_t)
> +manage_files_pattern(hadoop_namenode_t, hadoop_var_lib_t, hadoop_var_lib_t)
> +
> +########################################
> +#
> +# Hadoop secondary namenode policy.
> +#
> +
> +corenet_tcp_connect_hadoop_namenode_port(hadoop_secondarynamenode_t)
> +manage_dirs_pattern(hadoop_secondarynamenode_t, hadoop_var_lib_t, hadoop_var_lib_t)
> +
> +########################################
> +#
> +# Hadoop tasktracker policy.
> +#
> +
> +allow hadoop_tasktracker_t self:process signal;
> +
> +corenet_tcp_connect_zope_port(hadoop_tasktracker_t)
> +corenet_tcp_connect_hadoop_namenode_port(hadoop_tasktracker_t)
> +
> +filetrans_pattern(hadoop_tasktracker_t, hadoop_log_t, hadoop_tasktracker_log_t, dir)
> +fs_getattr_xattr_fs(hadoop_tasktracker_t)
> +fs_associate(hadoop_tasktracker_t)
> +
> +manage_dirs_pattern(hadoop_tasktracker_t, hadoop_var_lib_t, hadoop_var_lib_t)
> +manage_dirs_pattern(hadoop_tasktracker_t, hadoop_tasktracker_log_t, hadoop_tasktracker_log_t);
> +
> +########################################
> +#
> +# Hadoop zookeeper client policy.
> +#
> +
> +allow zookeeper_t self:process { getsched sigkill signal signull execmem };
> +allow zookeeper_t self:fifo_file rw_fifo_file_perms;
> +allow zookeeper_t self:tcp_socket create_stream_socket_perms;
> +allow zookeeper_t self:udp_socket create_socket_perms;
> +allow zookeeper_t zookeeper_server_t:process signull;
> +
> +read_files_pattern(zookeeper_t, zookeeper_etc_t, zookeeper_etc_t)
> +read_lnk_files_pattern(zookeeper_t, zookeeper_etc_t, zookeeper_etc_t)
> +
> +setattr_dirs_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
> +append_files_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
> +create_files_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
> +read_files_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
> +setattr_files_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
> +logging_log_filetrans(zookeeper_t, zookeeper_log_t, file)
> +
> +manage_files_pattern(zookeeper_t, zookeeper_tmp_t, zookeeper_tmp_t)
> +files_tmp_filetrans(zookeeper_t, zookeeper_tmp_t, file)
> +files_manage_generic_tmp_dirs(zookeeper_t)
> +
> +can_exec(zookeeper_t, zookeeper_exec_t)
> +
> +kernel_read_network_state(zookeeper_t)
> +kernel_read_system_state(zookeeper_t)
> +
> +corecmd_exec_bin(zookeeper_t)
> +corecmd_exec_shell(zookeeper_t)
> +
> +dontaudit zookeeper_t self:netlink_route_socket rw_netlink_socket_perms;
> +
> +corenet_all_recvfrom_unlabeled(zookeeper_t)
> +corenet_all_recvfrom_netlabel(zookeeper_t)
> +corenet_tcp_bind_all_nodes(zookeeper_t)
> +corenet_tcp_connect_zookeeper_client_port(zookeeper_t)
> +corenet_tcp_sendrecv_all_nodes(zookeeper_t)
> +corenet_tcp_sendrecv_all_ports(zookeeper_t)
> +corenet_tcp_sendrecv_generic_if(zookeeper_t)
> +corenet_tcp_connect_generic_port(zookeeper_t)
> +corenet_udp_bind_all_nodes(zookeeper_t)
> +corenet_udp_sendrecv_all_nodes(zookeeper_t)
> +corenet_udp_sendrecv_all_ports(zookeeper_t)
> +corenet_udp_sendrecv_generic_if(zookeeper_t)
> +
> +dev_read_rand(zookeeper_t)
> +dev_read_sysfs(zookeeper_t)
> +dev_read_urand(zookeeper_t)
> +
> +files_read_etc_files(zookeeper_t)
> +files_read_usr_files(zookeeper_t)
> +
> +# This can be removed on anything post-el5
> +libs_use_ld_so(zookeeper_t)
> +libs_use_shared_libs(zookeeper_t)
> +
> +miscfiles_read_localization(zookeeper_t)
> +
> +sysnet_read_config(zookeeper_t)
> +
> +userdom_dontaudit_search_user_home_dirs(zookeeper_t)
> +userdom_use_user_terminals(zookeeper_t)
> +
> +java_exec(zookeeper_t)
> +
> +optional_policy(`
> + nscd_socket_use(zookeeper_t)
> +')
> +
> +########################################
> +#
> +# Hadoop zookeeper server policy.
> +#
> +
> +allow zookeeper_server_t self:capability kill;
> +allow zookeeper_server_t self:process { execmem getsched sigkill signal signull };
> +allow zookeeper_server_t self:fifo_file rw_fifo_file_perms;
> +allow zookeeper_server_t self:netlink_route_socket rw_netlink_socket_perms;
> +allow zookeeper_server_t self:tcp_socket create_stream_socket_perms;
> +
> +read_files_pattern(zookeeper_server_t, zookeeper_etc_t, zookeeper_etc_t)
> +read_lnk_files_pattern(zookeeper_server_t, zookeeper_etc_t, zookeeper_etc_t)
> +
> +manage_dirs_pattern(zookeeper_server_t, zookeeper_server_var_t, zookeeper_server_var_t)
> +manage_files_pattern(zookeeper_server_t, zookeeper_server_var_t, zookeeper_server_var_t)
> +files_var_lib_filetrans(zookeeper_server_t, zookeeper_server_var_t, { dir file })
> +
> +setattr_dirs_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
> +append_files_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
> +create_files_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
> +read_files_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
> +setattr_files_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
> +logging_log_filetrans(zookeeper_server_t, zookeeper_log_t, file)
> +
> +manage_files_pattern(zookeeper_server_t, zookeeper_server_var_run_t, zookeeper_server_var_run_t)
> +files_pid_filetrans(zookeeper_server_t, zookeeper_server_var_run_t, file)
> +
> +manage_files_pattern(zookeeper_server_t, zookeeper_server_tmp_t, zookeeper_server_tmp_t)
> +files_tmp_filetrans(zookeeper_server_t, zookeeper_server_tmp_t, file)
> +files_manage_generic_tmp_files(zookeeper_server_t)
> +files_manage_generic_tmp_dirs(zookeeper_server_t)
> +
> +can_exec(zookeeper_server_t, zookeeper_server_exec_t)
> +
> +kernel_read_network_state(zookeeper_server_t)
> +kernel_read_system_state(zookeeper_server_t)
> +
> +corecmd_exec_bin(zookeeper_server_t)
> +corecmd_exec_shell(zookeeper_server_t)
> +
> +corenet_all_recvfrom_unlabeled(zookeeper_server_t)
> +corenet_all_recvfrom_netlabel(zookeeper_server_t)
> +corenet_tcp_bind_all_nodes(zookeeper_server_t)
> +corenet_tcp_bind_zookeeper_client_port(zookeeper_server_t)
> +corenet_tcp_bind_zookeeper_election_port(zookeeper_server_t)
> +corenet_tcp_bind_zookeeper_leader_port(zookeeper_server_t)
> +corenet_tcp_connect_zookeeper_election_port(zookeeper_server_t)
> +corenet_tcp_connect_zookeeper_leader_port(zookeeper_server_t)
> +corenet_tcp_sendrecv_generic_if(zookeeper_server_t)
> +corenet_tcp_sendrecv_generic_node(zookeeper_server_t)
> +corenet_tcp_sendrecv_all_ports(zookeeper_server_t)
> +corenet_tcp_connect_generic_port(zookeeper_server_t)
> +
> +dev_read_rand(zookeeper_server_t)
> +dev_read_sysfs(zookeeper_server_t)
> +dev_read_urand(zookeeper_server_t)
> +
> +files_read_etc_files(zookeeper_server_t)
> +files_read_usr_files(zookeeper_server_t)
> +
> +fs_getattr_xattr_fs(zookeeper_server_t)
> +
> +# This can be removed on anything post-el5
> +libs_use_ld_so(zookeeper_server_t)
> +libs_use_shared_libs(zookeeper_server_t)
> +
> +logging_send_syslog_msg(zookeeper_server_t)
> +
> +miscfiles_read_localization(zookeeper_server_t)
> +
> +sysnet_read_config(zookeeper_server_t)
> +
> +java_exec(zookeeper_server_t)
> diff --git a/policy/modules/system/unconfined.te b/policy/modules/system/unconfined.te
> index f976344..ac27264 100644
> --- a/policy/modules/system/unconfined.te
> +++ b/policy/modules/system/unconfined.te
> @@ -118,6 +118,10 @@ optional_policy(`
> ')
>
> optional_policy(`
> + hadoop_run(unconfined_t, unconfined_r)
> +')
> +
> +optional_policy(`
> inn_domtrans(unconfined_t)
> ')
>
> @@ -210,6 +214,10 @@ optional_policy(`
> xserver_domtrans(unconfined_t)
> ')
>
> +optional_policy(`
> + zookeeper_run_client(unconfined_t, unconfined_r)
> +')
> +
> ########################################
> #
> # Unconfined Execmem Local policy
> _______________________________________________
> refpolicy mailing list
> refpolicy at oss.tresys.com
> http://oss.tresys.com/mailman/listinfo/refpolicy
-------------- next part --------------
A non-text attachment was scrubbed...
Name: not available
Type: application/pgp-signature
Size: 198 bytes
Desc: not available
Url : http://oss.tresys.com/pipermail/refpolicy/attachments/20101001/97dd83e6/attachment-0001.bin
On 10/01/2010 02:01 PM, Dominick Grift wrote:
> On Fri, Oct 01, 2010 at 11:17:27AM -0400, Paul Nuzzi wrote:
>> On 10/01/2010 08:02 AM, Dominick Grift wrote:
>>> On Thu, Sep 30, 2010 at 03:39:40PM -0400, Paul Nuzzi wrote:
>>>> I updated the patch based on recommendations from the mailing list.
>>>> All of hadoop's services are included in one module instead of
>>>> individual ones. Unconfined and sysadm roles are given access to
>>>> hadoop and zookeeper client domain transitions. The services are started
>>>> using run_init. Let me know what you think.
>>>
>>> Why do some hadoop domain need to manage generic tmp?
>>>
>>> files_manage_generic_tmp_dirs(zookeeper_t)
>>> files_manage_generic_tmp_dirs(hadoop_t)
>>> files_manage_generic_tmp_dirs(hadoop_$1_initrc_t)
>>> files_manage_generic_tmp_files(hadoop_$1_initrc_t)
>>> files_manage_generic_tmp_files(hadoop_$1_t)
>>> files_manage_generic_tmp_dirs(hadoop_$1_t)
>>
>> This has to be done for Java JMX to work. All of the files are written to
>> /tmp/hsperfdata_(hadoop/zookeeper). /tmp/hsperfdata_ is labeled tmp_t while
>> all the files for each service are labeled with hadoop_*_tmp_t. The first service
>> will end up owning the directory if it is not labeled tmp_t.
>>
>>> You probably need:
>>>
>>> files_search_pids() and files_search_locks() for hadoop_$1_initrc_t
>>> becuase it needs to traverse /var/run and /var/lock/subsys to be able to manage its objects there.
>>
>>> Can use rw_fifo_file_perms here:
>>>
>>> allow hadoop_$1_initrc_t self:fifo_file { read write getattr ioctl };
>>>
>>> Might want to split this into hadoop_read_config_files and hadoop_exec_config_files.
>>>
>>> hadoop_rx_etc(hadoop_$1_initrc_t)
>>>
>>> This seems wrong. Why does it need that? use files_search_var_lib() if possible:
>>>
>>> files_read_var_lib_files(hadoop_$1_t)
>>>
>>> This is not a declaration and might want to use filetrans_pattern() instead:
>>>
>>> type_transition hadoop_$1_initrc_t hadoop_var_run_t:file hadoop_$1_initrc_var_run_t;
>>
>> Changed. Thanks for the comments.
>>
>>> Other then the above, there are some style issues:
>>>
>>> http://oss.tresys.com/projects/refpolicy/wiki/StyleGuide
>>>
>>> But i can help clean that up once above issues are resolved.
>>>
>>
>> Is there a style checking script for refpolicy patches similar to the Linux kernel?
>
> Not that I am aware of.
> Are you sure that your entries in hadoop.fc work? You could check by intentionally mislabeling the paths and children with chcon and then seeing if restorecon restores everything properly
Based on testing the paths get labelled correctly with restorecon. I am having an issue with the
kernel not labelling files and directories correctly because of wildcards in /var/lib/hadoop.
I gave the services enough permission to relabel what they needed during runtime. I didn't want
to hard code the directory names because the policy would lose version independence.
>>
>>
>> Signed-off-by: Paul Nuzzi <[email protected]>
On 10/01/2010 01:56 PM, Christopher J. PeBenito wrote:
> On 10/01/10 11:17, Paul Nuzzi wrote:
>> On 10/01/2010 08:02 AM, Dominick Grift wrote:
>>> On Thu, Sep 30, 2010 at 03:39:40PM -0400, Paul Nuzzi wrote:
>>>> I updated the patch based on recommendations from the mailing list.
>>>> All of hadoop's services are included in one module instead of
>>>> individual ones. Unconfined and sysadm roles are given access to
>>>> hadoop and zookeeper client domain transitions. The services are started
>>>> using run_init. Let me know what you think.
>>>
>>> Why do some hadoop domain need to manage generic tmp?
>>>
>>> files_manage_generic_tmp_dirs(zookeeper_t)
>>> files_manage_generic_tmp_dirs(hadoop_t)
>>> files_manage_generic_tmp_dirs(hadoop_$1_initrc_t)
>>> files_manage_generic_tmp_files(hadoop_$1_initrc_t)
>>> files_manage_generic_tmp_files(hadoop_$1_t)
>>> files_manage_generic_tmp_dirs(hadoop_$1_t)
>>
>> This has to be done for Java JMX to work. All of the files are written to
>> /tmp/hsperfdata_(hadoop/zookeeper). /tmp/hsperfdata_ is labeled tmp_t while
>> all the files for each service are labeled with hadoop_*_tmp_t. The first service
>> will end up owning the directory if it is not labeled tmp_t.
>
> The hsperfdata dir in /tmp is certainly the bane of policy writers. Based on a quick look through the policy, it looks like the only dir they create in /tmp is this hsperfdata dir. I suggest you do something like
>
> files_tmp_filetrans(hadoop_t, hadoop_hsperfdata_t, dir)
> files_tmp_filetrans(zookeeper_t, hadoop_hsperfdata_t, dir)
>
> filetrans_pattern(hadoop_t, hadoop_hsperfdata_t, hadoop_tmp_t, file)
> filetrans_pattern(zookeeper_t, hadoop_hsperfdata_t, zookeeper_tmp_t, file)
>
That looks like a better way to handle the tmp_t problem.
I changed the patch with your comments. Hopefully this will be one of the last updates.
Tested on a CDH3 cluster as a module without any problems.
Signed-off-by: Paul Nuzzi <[email protected]>
---
policy/modules/kernel/corenetwork.te.in | 4
policy/modules/roles/sysadm.te | 8
policy/modules/services/hadoop.fc | 54 ++++
policy/modules/services/hadoop.if | 358 ++++++++++++++++++++++++++++++
policy/modules/services/hadoop.te | 380 ++++++++++++++++++++++++++++++++
policy/modules/system/unconfined.te | 8
6 files changed, 812 insertions(+)
diff --git a/policy/modules/kernel/corenetwork.te.in b/policy/modules/kernel/corenetwork.te.in
index 2ecdde8..7a1b5de 100644
--- a/policy/modules/kernel/corenetwork.te.in
+++ b/policy/modules/kernel/corenetwork.te.in
@@ -105,6 +105,7 @@ network_port(giftd, tcp,1213,s0)
network_port(git, tcp,9418,s0, udp,9418,s0)
network_port(gopher, tcp,70,s0, udp,70,s0)
network_port(gpsd, tcp,2947,s0)
+network_port(hadoop_namenode, tcp, 8020,s0)
network_port(hddtemp, tcp,7634,s0)
network_port(howl, tcp,5335,s0, udp,5353,s0)
network_port(hplip, tcp,1782,s0, tcp,2207,s0, tcp,2208,s0, tcp, 8290,s0, tcp,50000,s0, tcp,50002,s0, tcp,8292,s0, tcp,9100,s0, tcp,9101,s0, tcp,9102,s0, tcp,9220,s0, tcp,9221,s0, tcp,9222,s0, tcp,9280,s0, tcp,9281,s0, tcp,9282,s0, tcp,9290,s0, tcp,9291,s0, tcp,9292,s0)
@@ -211,6 +212,9 @@ network_port(xdmcp, udp,177,s0, tcp,177,s0)
network_port(xen, tcp,8002,s0)
network_port(xfs, tcp,7100,s0)
network_port(xserver, tcp,6000-6020,s0)
+network_port(zookeeper_client, tcp, 2181,s0)
+network_port(zookeeper_election, tcp, 3888,s0)
+network_port(zookeeper_leader, tcp, 2888,s0)
network_port(zebra, tcp,2600-2604,s0, tcp,2606,s0, udp,2600-2604,s0, udp,2606,s0)
network_port(zope, tcp,8021,s0)
diff --git a/policy/modules/roles/sysadm.te b/policy/modules/roles/sysadm.te
index cad05ff..b46b28b 100644
--- a/policy/modules/roles/sysadm.te
+++ b/policy/modules/roles/sysadm.te
@@ -152,6 +152,10 @@ optional_policy(`
')
optional_policy(`
+ hadoop_run(sysadm_t, sysadm_r)
+')
+
+optional_policy(`
# allow system administrator to use the ipsec script to look
# at things (e.g., ipsec auto --status)
# probably should create an ipsec_admin role for this kind of thing
@@ -392,6 +396,10 @@ optional_policy(`
yam_run(sysadm_t, sysadm_r)
')
+optional_policy(`
+ zookeeper_run_client(sysadm_t, sysadm_r)
+')
+
ifndef(`distro_redhat',`
optional_policy(`
auth_role(sysadm_r, sysadm_t)
diff --git a/policy/modules/services/hadoop.fc b/policy/modules/services/hadoop.fc
new file mode 100644
index 0000000..15c61ed
--- /dev/null
+++ b/policy/modules/services/hadoop.fc
@@ -0,0 +1,54 @@
+/etc/hadoop.*(/.*)? gen_context(system_u:object_r:hadoop_etc_t,s0)
+
+/etc/rc\.d/init\.d/hadoop-(.*)?-datanode -- gen_context(system_u:object_r:hadoop_datanode_initrc_exec_t,s0)
+/etc/rc\.d/init\.d/hadoop-(.*)?-jobtracker -- gen_context(system_u:object_r:hadoop_jobtracker_initrc_exec_t,s0)
+/etc/rc\.d/init\.d/hadoop-(.*)?-namenode -- gen_context(system_u:object_r:hadoop_namenode_initrc_exec_t,s0)
+/etc/rc\.d/init\.d/hadoop-(.*)?-secondarynamenode -- gen_context(system_u:object_r:hadoop_secondarynamenode_initrc_exec_t,s0)
+/etc/rc\.d/init\.d/hadoop-(.*)?-tasktracker -- gen_context(system_u:object_r:hadoop_tasktracker_initrc_exec_t,s0)
+/etc/rc\.d/init\.d/hadoop-zookeeper -- gen_context(system_u:object_r:zookeeper_server_initrc_exec_t,s0)
+/etc/init\.d/hadoop-datanode -- gen_context(system_u:object_r:hadoop_datanode_initrc_exec_t,s0)
+/etc/init\.d/hadoop-jobtracker -- gen_context(system_u:object_r:hadoop_jobtracker_initrc_exec_t,s0)
+/etc/init\.d/hadoop-namenode -- gen_context(system_u:object_r:hadoop_namenode_initrc_exec_t,s0)
+/etc/init\.d/hadoop-secondarynamenode -- gen_context(system_u:object_r:hadoop_secondarynamenode_initrc_exec_t,s0)
+/etc/init\.d/hadoop-tasktracker -- gen_context(system_u:object_r:hadoop_tasktracker_initrc_exec_t,s0)
+/etc/init\.d/zookeeper -- gen_context(system_u:object_r:zookeeper_server_initrc_exec_t,s0)
+
+/etc/zookeeper(/.*)? gen_context(system_u:object_r:zookeeper_etc_t,s0)
+/etc/zookeeper\.dist(/.*)? gen_context(system_u:object_r:zookeeper_etc_t,s0)
+
+/usr/lib/hadoop(.*)?/bin/hadoop -- gen_context(system_u:object_r:hadoop_exec_t,s0)
+
+/usr/bin/zookeeper-client -- gen_context(system_u:object_r:zookeeper_exec_t,s0)
+/usr/bin/zookeeper-server -- gen_context(system_u:object_r:zookeeper_server_exec_t,s0)
+
+/var/zookeeper(/.*)? gen_context(system_u:object_r:zookeeper_server_var_t,s0)
+/var/lib/zookeeper(/.*)? gen_context(system_u:object_r:zookeeper_server_var_t,s0)
+
+/var/lib/hadoop(.*)? gen_context(system_u:object_r:hadoop_var_lib_t,s0)
+/var/lib/hadoop(.*)?/cache/hadoop/dfs/data(/.*)? gen_context(system_u:object_r:hadoop_datanode_var_lib_t,s0)
+/var/lib/hadoop(.*)?/cache/hadoop/dfs/name(/.*)? gen_context(system_u:object_r:hadoop_namenode_var_lib_t,s0)
+/var/lib/hadoop(.*)?/cache/hadoop/dfs/namesecondary(/.*)? gen_context(system_u:object_r:hadoop_secondarynamenode_var_lib_t,s0)
+/var/lib/hadoop(.*)?/cache/hadoop/mapred/local/jobTracker(/.*)? gen_context(system_u:object_r:hadoop_jobtracker_var_lib_t,s0)
+/var/lib/hadoop(.*)?/cache/hadoop/mapred/local/taskTracker(/.*)? gen_context(system_u:object_r:hadoop_tasktracker_var_lib_t,s0)
+
+/var/lock/subsys/hadoop-datanode -- gen_context(system_u:object_r:hadoop_datanode_lock_t,s0)
+/var/lock/subsys/hadoop-namenode -- gen_context(system_u:object_r:hadoop_namenode_lock_t,s0)
+/var/lock/subsys/hadoop-jobtracker -- gen_context(system_u:object_r:hadoop_jobtracker_lock_t,s0)
+/var/lock/subsys/hadoop-tasktracker -- gen_context(system_u:object_r:hadoop_tasktracker_lock_t,s0)
+/var/lock/subsys/hadoop-secondarynamenode -- gen_context(system_u:object_r:hadoop_secondarynamenode_lock_t,s0)
+
+/var/log/hadoop(.*)? gen_context(system_u:object_r:hadoop_log_t,s0)
+/var/log/hadoop(.*)?/hadoop-hadoop-datanode-(.*)? gen_context(system_u:object_r:hadoop_datanode_log_t,s0)
+/var/log/hadoop(.*)?/hadoop-hadoop-jobtracker-(.*)? gen_context(system_u:object_r:hadoop_jobtracker_log_t,s0)
+/var/log/hadoop(.*)?/hadoop-hadoop-namenode-(.*)? gen_context(system_u:object_r:hadoop_namenode_log_t,s0)
+/var/log/hadoop(.*)?/hadoop-hadoop-secondarynamenode-(.*)? gen_context(system_u:object_r:hadoop_secondarynamenode_log_t,s0)
+/var/log/hadoop(.*)?/hadoop-hadoop-tasktracker-(.*)? gen_context(system_u:object_r:hadoop_tasktracker_log_t,s0)
+/var/log/hadoop(.*)?/history(/.*)? gen_context(system_u:object_r:hadoop_jobtracker_log_t,s0)
+/var/log/zookeeper(/.*)? gen_context(system_u:object_r:zookeeper_log_t,s0)
+
+/var/run/hadoop(.*)? -d gen_context(system_u:object_r:hadoop_var_run_t,s0)
+/var/run/hadoop(.*)?/hadoop-hadoop-datanode.pid -- gen_context(system_u:object_r:hadoop_datanode_initrc_var_run_t,s0)
+/var/run/hadoop(.*)?/hadoop-hadoop-namenode.pid -- gen_context(system_u:object_r:hadoop_namenode_initrc_var_run_t,s0)
+/var/run/hadoop(.*)?/hadoop-hadoop-jobtracker.pid -- gen_context(system_u:object_r:hadoop_jobtracker_initrc_var_run_t,s0)
+/var/run/hadoop(.*)?/hadoop-hadoop-tasktracker.pid -- gen_context(system_u:object_r:hadoop_tasktracker_initrc_var_run_t,s0)
+/var/run/hadoop(.*)?/hadoop-hadoop-secondarynamenode.pid -- gen_context(system_u:object_r:hadoop_secondarynamenode_initrc_var_run_t,s0)
diff --git a/policy/modules/services/hadoop.if b/policy/modules/services/hadoop.if
new file mode 100644
index 0000000..33108a3
--- /dev/null
+++ b/policy/modules/services/hadoop.if
@@ -0,0 +1,358 @@
+## <summary>Software for reliable, scalable, distributed computing.</summary>
+
+#######################################
+## <summary>
+## The template to define a hadoop domain.
+## </summary>
+## <param name="domain_prefix">
+## <summary>
+## Domain prefix to be used.
+## </summary>
+## </param>
+#
+template(`hadoop_domain_template',`
+ gen_require(`
+ attribute hadoop_domain;
+ type hadoop_log_t, hadoop_var_lib_t, hadoop_var_run_t;
+ type hadoop_exec_t, hadoop_hsperfdata_t;
+ ')
+
+ ########################################
+ #
+ # Shared declarations.
+ #
+
+ type hadoop_$1_t, hadoop_domain;
+ domain_type(hadoop_$1_t)
+ domain_entry_file(hadoop_$1_t, hadoop_exec_t)
+
+ type hadoop_$1_initrc_t;
+ type hadoop_$1_initrc_exec_t;
+ init_script_domain(hadoop_$1_initrc_t, hadoop_$1_initrc_exec_t)
+
+ role system_r types { hadoop_$1_initrc_t hadoop_$1_t };
+
+ type hadoop_$1_lock_t;
+ files_lock_file(hadoop_$1_lock_t)
+ files_lock_filetrans(hadoop_$1_initrc_t, hadoop_$1_lock_t, file)
+
+ type hadoop_$1_log_t;
+ logging_log_file(hadoop_$1_log_t)
+ filetrans_pattern(hadoop_$1_initrc_t, hadoop_log_t, hadoop_$1_log_t, {dir file})
+ filetrans_pattern(hadoop_$1_t, hadoop_log_t, hadoop_$1_log_t, {dir file})
+
+ type hadoop_$1_var_lib_t;
+ files_type(hadoop_$1_var_lib_t)
+ filetrans_pattern(hadoop_$1_t, hadoop_var_lib_t, hadoop_$1_var_lib_t, file)
+
+ type hadoop_$1_initrc_var_run_t;
+ files_pid_file(hadoop_$1_initrc_var_run_t)
+ filetrans_pattern(hadoop_$1_initrc_t, hadoop_var_run_t, hadoop_$1_initrc_var_run_t, file)
+
+ type hadoop_$1_tmp_t;
+ files_tmp_file(hadoop_$1_tmp_t)
+ files_tmp_filetrans(hadoop_$1_t, hadoop_hsperfdata_t, dir)
+ filetrans_pattern(hadoop_$1_t, hadoop_hsperfdata_t, hadoop_$1_tmp_t, file)
+
+ ####################################
+ #
+ # Shared hadoop_$1 initrc policy.
+ #
+
+ allow hadoop_$1_initrc_t hadoop_$1_t:process { signal signull };
+ allow hadoop_$1_initrc_t self:capability { setuid setgid };
+ allow hadoop_$1_initrc_t self:fifo_file rw_fifo_file_perms;
+ allow hadoop_$1_initrc_t self:process setsched;
+
+ consoletype_exec(hadoop_$1_initrc_t)
+ corecmd_exec_bin(hadoop_$1_initrc_t)
+ corecmd_exec_shell(hadoop_$1_initrc_t)
+
+ domtrans_pattern(hadoop_$1_initrc_t, hadoop_exec_t, hadoop_$1_t)
+ dontaudit hadoop_$1_initrc_t self:capability sys_tty_config;
+
+ files_read_etc_files(hadoop_$1_initrc_t)
+ files_read_usr_files(hadoop_$1_initrc_t)
+ files_search_pids(hadoop_$1_initrc_t)
+ files_search_locks(hadoop_$1_initrc_t)
+ fs_getattr_xattr_fs(hadoop_$1_initrc_t)
+
+ hadoop_exec_config_files(hadoop_$1_initrc_t)
+
+ init_rw_utmp(hadoop_$1_initrc_t)
+ init_use_script_ptys(hadoop_$1_initrc_t)
+
+ kernel_read_kernel_sysctls(hadoop_$1_initrc_t)
+ kernel_read_sysctl(hadoop_$1_initrc_t)
+ kernel_read_system_state(hadoop_$1_initrc_t)
+
+ logging_send_syslog_msg(hadoop_$1_initrc_t)
+ logging_send_audit_msgs(hadoop_$1_initrc_t)
+ logging_search_logs(hadoop_$1_initrc_t)
+
+ manage_files_pattern(hadoop_$1_initrc_t, hadoop_$1_lock_t, hadoop_$1_lock_t)
+ manage_files_pattern(hadoop_$1_initrc_t, hadoop_$1_initrc_var_run_t, hadoop_$1_initrc_var_run_t)
+ manage_files_pattern(hadoop_$1_initrc_t, hadoop_$1_log_t, hadoop_$1_log_t)
+ manage_dirs_pattern(hadoop_$1_initrc_t, hadoop_var_run_t, hadoop_var_run_t)
+ manage_files_pattern(hadoop_$1_initrc_t, hadoop_var_run_t, hadoop_var_run_t)
+
+ miscfiles_read_localization(hadoop_$1_initrc_t)
+
+ optional_policy(`
+ nscd_socket_use(hadoop_$1_initrc_t)
+ ')
+
+ term_use_generic_ptys(hadoop_$1_initrc_t)
+
+ userdom_dontaudit_search_user_home_dirs(hadoop_$1_initrc_t)
+
+ # This can be removed on anything post-el5
+ libs_use_ld_so(hadoop_$1_initrc_t)
+ libs_use_shared_libs(hadoop_$1_initrc_t)
+
+ ####################################
+ #
+ # Shared hadoop_$1 policy.
+ #
+
+ allow hadoop_$1_t hadoop_domain:process signull;
+ allow hadoop_$1_t self:fifo_file rw_fifo_file_perms;
+ allow hadoop_$1_t self:process execmem;
+ allow hadoop_$1_t hadoop_var_run_t:dir getattr;
+
+ corecmd_exec_bin(hadoop_$1_t)
+ corecmd_exec_shell(hadoop_$1_t)
+
+ dev_read_rand(hadoop_$1_t)
+ dev_read_urand(hadoop_$1_t)
+ dev_read_sysfs(hadoop_$1_t)
+ dontaudit hadoop_$1_t self:netlink_route_socket rw_netlink_socket_perms;
+
+ files_read_etc_files(hadoop_$1_t)
+ files_search_pids(hadoop_$1_t)
+ files_search_var_lib(hadoop_$1_t)
+
+ hadoop_exec_config_files(hadoop_$1_t)
+
+ java_exec(hadoop_$1_t)
+
+ kernel_read_network_state(hadoop_$1_t)
+ kernel_read_system_state(hadoop_$1_t)
+
+ logging_search_logs(hadoop_$1_t)
+
+ manage_dirs_pattern(hadoop_$1_t, hadoop_$1_var_lib_t, hadoop_$1_var_lib_t)
+ manage_dirs_pattern(hadoop_$1_t, hadoop_hsperfdata_t, hadoop_hsperfdata_t)
+ manage_files_pattern(hadoop_$1_t, hadoop_$1_log_t, hadoop_$1_log_t)
+ manage_files_pattern(hadoop_$1_t, hadoop_$1_var_lib_t, hadoop_$1_var_lib_t)
+ manage_files_pattern(hadoop_$1_t, hadoop_$1_tmp_t, hadoop_$1_tmp_t)
+ miscfiles_read_localization(hadoop_$1_t)
+
+ optional_policy(`
+ nscd_socket_use(hadoop_$1_t)
+ ')
+
+ sysnet_read_config(hadoop_$1_t)
+
+ allow hadoop_$1_t self:tcp_socket create_stream_socket_perms;
+ corenet_all_recvfrom_unlabeled(hadoop_$1_t)
+ corenet_all_recvfrom_netlabel(hadoop_$1_t)
+ corenet_tcp_bind_all_nodes(hadoop_$1_t)
+ corenet_tcp_sendrecv_generic_if(hadoop_$1_t)
+ corenet_tcp_sendrecv_generic_node(hadoop_$1_t)
+ corenet_tcp_sendrecv_all_ports(hadoop_$1_t)
+ corenet_tcp_connect_generic_port(hadoop_$1_t)
+
+ allow hadoop_$1_t self:udp_socket create_socket_perms;
+ corenet_udp_sendrecv_generic_if(hadoop_$1_t)
+ corenet_udp_sendrecv_all_nodes(hadoop_$1_t)
+ corenet_udp_bind_all_nodes(hadoop_$1_t)
+
+ # This can be removed on anything post-el5
+ libs_use_ld_so(hadoop_$1_t)
+ libs_use_shared_libs(hadoop_$1_t)
+')
+
+########################################
+## <summary>
+## Execute hadoop in the
+## hadoop domain.
+## </summary>
+## <param name="domain">
+## <summary>
+## Domain allowed to transition.
+## </summary>
+## </param>
+#
+interface(`hadoop_domtrans',`
+ gen_require(`
+ type hadoop_t, hadoop_exec_t;
+ ')
+
+ files_search_usr($1)
+ libs_search_lib($1)
+ domtrans_pattern($1, hadoop_exec_t, hadoop_t)
+')
+
+########################################
+## <summary>
+## Execute hadoop in the hadoop domain,
+## and allow the specified role the
+## hadoop domain.
+## </summary>
+## <param name="domain">
+## <summary>
+## Domain allowed to transition.
+## </summary>
+## </param>
+## <param name="role">
+## <summary>
+## Role allowed access.
+## </summary>
+## </param>
+## <rolecap/>
+#
+interface(`hadoop_run',`
+ gen_require(`
+ type hadoop_t;
+ ')
+
+ hadoop_domtrans($1)
+ role $2 types hadoop_t;
+
+ allow $1 hadoop_t:process { ptrace signal_perms };
+ ps_process_pattern($1, hadoop_t)
+')
+
+########################################
+## <summary>
+## Execute zookeeper client in the
+## zookeeper client domain.
+## </summary>
+## <param name="domain">
+## <summary>
+## Domain allowed to transition.
+## </summary>
+## </param>
+#
+interface(`zookeeper_domtrans_client',`
+ gen_require(`
+ type zookeeper_t, zookeeper_exec_t;
+ ')
+
+ corecmd_search_bin($1)
+ files_search_usr($1)
+ domtrans_pattern($1, zookeeper_exec_t, zookeeper_t)
+')
+
+########################################
+## <summary>
+## Execute zookeeper server in the
+## zookeeper server domain.
+## </summary>
+## <param name="domain">
+## <summary>
+## Domain allowed to transition.
+## </summary>
+## </param>
+#
+interface(`zookeeper_domtrans_server',`
+ gen_require(`
+ type zookeeper_server_t, zookeeper_server_exec_t;
+ ')
+
+ corecmd_search_bin($1)
+ files_search_usr($1)
+ domtrans_pattern($1, zookeeper_server_exec_t, zookeeper_server_t)
+')
+
+########################################
+## <summary>
+## Execute zookeeper server in the
+## zookeeper domain.
+## </summary>
+## <param name="domain">
+## <summary>
+## Domain allowed to transition.
+## </summary>
+## </param>
+#
+interface(`zookeeper_initrc_domtrans_server',`
+ gen_require(`
+ type zookeeper_server_initrc_exec_t;
+ ')
+
+ init_labeled_script_domtrans($1, zookeeper_server_initrc_exec_t)
+')
+
+########################################
+## <summary>
+## Execute zookeeper client in the
+## zookeeper client domain, and allow the
+## specified role the zookeeper client domain.
+## </summary>
+## <param name="domain">
+## <summary>
+## Domain allowed to transition.
+## </summary>
+## </param>
+## <param name="role">
+## <summary>
+## Role allowed access.
+## </summary>
+## </param>
+## <rolecap/>
+#
+interface(`zookeeper_run_client',`
+ gen_require(`
+ type zookeeper_t;
+ ')
+
+ zookeeper_domtrans_client($1)
+ role $2 types zookeeper_t;
+
+ allow $1 zookeeper_t:process { ptrace signal_perms };
+ ps_process_pattern($1, zookeeper_t)
+')
+
+########################################
+## <summary>
+## Give permission to a domain to read
+## hadoop_etc_t
+## </summary>
+## <param name="domain">
+## <summary>
+## Domain needing read permission
+## </summary>
+## </param>
+#
+interface(`hadoop_read_config_files', `
+ gen_require(`
+ type hadoop_etc_t;
+ ')
+
+ allow $1 hadoop_etc_t:dir search_dir_perms;
+ allow $1 hadoop_etc_t:lnk_file { read getattr };
+ allow $1 hadoop_etc_t:file read_file_perms;
+')
+
+########################################
+## <summary>
+## Give permission to a domain to
+## execute hadoop_etc_t
+## </summary>
+## <param name="domain">
+## <summary>
+## Domain needing read and execute
+## permission
+## </summary>
+## </param>
+#
+interface(`hadoop_exec_config_files', `
+ gen_require(`
+ type hadoop_etc_t;
+ ')
+
+ hadoop_read_config_files($1)
+ allow $1 hadoop_etc_t:file { execute execute_no_trans};
+')
diff --git a/policy/modules/services/hadoop.te b/policy/modules/services/hadoop.te
new file mode 100644
index 0000000..519aebb
--- /dev/null
+++ b/policy/modules/services/hadoop.te
@@ -0,0 +1,380 @@
+policy_module(hadoop, 1.0.0)
+
+########################################
+#
+# Hadoop declarations.
+#
+
+attribute hadoop_domain;
+
+type hadoop_t;
+type hadoop_exec_t;
+application_domain(hadoop_t, hadoop_exec_t)
+ubac_constrained(hadoop_t)
+
+type hadoop_etc_t;
+files_config_file(hadoop_etc_t)
+
+type hadoop_var_lib_t;
+files_type(hadoop_var_lib_t)
+
+type hadoop_log_t;
+logging_log_file(hadoop_log_t)
+
+type hadoop_var_run_t;
+files_pid_file(hadoop_var_run_t)
+
+type hadoop_tmp_t;
+files_tmp_file(hadoop_tmp_t)
+ubac_constrained(hadoop_tmp_t)
+
+type hadoop_hsperfdata_t;
+files_tmp_file(hadoop_hsperfdata_t)
+ubac_constrained(hadoop_hsperfdata_t)
+
+hadoop_domain_template(datanode)
+hadoop_domain_template(jobtracker)
+hadoop_domain_template(namenode)
+hadoop_domain_template(secondarynamenode)
+hadoop_domain_template(tasktracker)
+
+########################################
+#
+# Hadoop zookeeper client declarations.
+#
+
+type zookeeper_t;
+type zookeeper_exec_t;
+application_domain(zookeeper_t, zookeeper_exec_t)
+ubac_constrained(zookeeper_t)
+
+type zookeeper_etc_t;
+files_config_file(zookeeper_etc_t)
+
+type zookeeper_log_t;
+logging_log_file(zookeeper_log_t)
+
+type zookeeper_tmp_t;
+files_tmp_file(zookeeper_tmp_t)
+ubac_constrained(zookeeper_tmp_t)
+
+########################################
+#
+# Hadoop zookeeper server declarations.
+#
+
+type zookeeper_server_t;
+type zookeeper_server_exec_t;
+init_daemon_domain(zookeeper_server_t, zookeeper_server_exec_t)
+
+type zookeeper_server_initrc_exec_t;
+init_script_file(zookeeper_server_initrc_exec_t)
+
+type zookeeper_server_var_t;
+files_type(zookeeper_server_var_t)
+
+# This will need a file context specification.
+type zookeeper_server_var_run_t;
+files_pid_file(zookeeper_server_var_run_t)
+
+type zookeeper_server_tmp_t;
+files_tmp_file(zookeeper_server_tmp_t)
+
+########################################
+#
+# Hadoop policy.
+#
+
+allow hadoop_t self:capability sys_resource;
+allow hadoop_t self:process { getsched setsched signal signull setrlimit execmem };
+allow hadoop_t self:fifo_file rw_fifo_file_perms;
+allow hadoop_t self:key write;
+allow hadoop_t self:tcp_socket create_stream_socket_perms;
+allow hadoop_t self:udp_socket create_socket_perms;
+allow hadoop_t hadoop_domain:process signull;
+
+dontaudit hadoop_t self:netlink_route_socket rw_netlink_socket_perms;
+
+read_files_pattern(hadoop_t, hadoop_etc_t, hadoop_etc_t)
+read_lnk_files_pattern(hadoop_t, hadoop_etc_t, hadoop_etc_t)
+can_exec(hadoop_t, hadoop_etc_t)
+
+manage_dirs_pattern(hadoop_t, hadoop_var_lib_t, hadoop_var_lib_t)
+manage_dirs_pattern(hadoop_t, hadoop_log_t, hadoop_log_t)
+manage_dirs_pattern(hadoop_t, hadoop_tmp_t, hadoop_tmp_t)
+manage_dirs_pattern(hadoop_t, hadoop_hsperfdata_t, hadoop_hsperfdata_t)
+manage_files_pattern(hadoop_t, hadoop_var_lib_t, hadoop_var_lib_t)
+manage_files_pattern(hadoop_t, hadoop_tmp_t, hadoop_tmp_t)
+
+getattr_dirs_pattern(hadoop_t, hadoop_var_run_t, hadoop_var_run_t)
+
+files_tmp_filetrans(hadoop_t, hadoop_hsperfdata_t, dir)
+filetrans_pattern(hadoop_t, hadoop_hsperfdata_t, hadoop_tmp_t, file)
+
+kernel_read_network_state(hadoop_t)
+kernel_read_system_state(hadoop_t)
+
+corecmd_exec_bin(hadoop_t)
+corecmd_exec_shell(hadoop_t)
+
+corenet_all_recvfrom_unlabeled(hadoop_t)
+corenet_all_recvfrom_netlabel(hadoop_t)
+corenet_sendrecv_hadoop_namenode_client_packets(hadoop_t)
+corenet_sendrecv_portmap_client_packets(hadoop_t)
+corenet_sendrecv_zope_client_packets(hadoop_t)
+corenet_tcp_bind_all_nodes(hadoop_t)
+corenet_tcp_connect_hadoop_namenode_port(hadoop_t)
+corenet_tcp_connect_portmap_port(hadoop_t)
+corenet_tcp_connect_zope_port(hadoop_t)
+corenet_tcp_sendrecv_all_nodes(hadoop_t)
+corenet_tcp_sendrecv_all_ports(hadoop_t)
+corenet_tcp_sendrecv_generic_if(hadoop_t)
+corenet_tcp_connect_generic_port(hadoop_t)
+corenet_udp_bind_all_nodes(hadoop_t)
+corenet_udp_sendrecv_all_nodes(hadoop_t)
+corenet_udp_sendrecv_all_ports(hadoop_t)
+corenet_udp_sendrecv_generic_if(hadoop_t)
+
+dev_read_rand(hadoop_t)
+dev_read_sysfs(hadoop_t)
+dev_read_urand(hadoop_t)
+
+files_dontaudit_search_spool(hadoop_t)
+files_read_usr_files(hadoop_t)
+files_read_all_files(hadoop_t)
+
+fs_getattr_xattr_fs(hadoop_t)
+
+java_exec(hadoop_t)
+
+# This can be removed on anything post-el5
+libs_use_ld_so(hadoop_t)
+libs_use_shared_libs(hadoop_t)
+
+miscfiles_read_localization(hadoop_t)
+
+userdom_dontaudit_search_user_home_dirs(hadoop_t)
+userdom_use_user_terminals(hadoop_t)
+
+optional_policy(`
+ nis_use_ypbind(hadoop_t)
+')
+
+optional_policy(`
+ nscd_socket_use(hadoop_t)
+')
+
+########################################
+#
+# Hadoop datanode policy.
+#
+
+allow hadoop_datanode_t self:process signal;
+corenet_tcp_connect_hadoop_namenode_port(hadoop_datanode_t)
+fs_getattr_xattr_fs(hadoop_datanode_t)
+manage_dirs_pattern(hadoop_datanode_t, hadoop_var_lib_t, hadoop_var_lib_t)
+
+########################################
+#
+# Hadoop jobtracker policy.
+#
+
+corenet_tcp_bind_zope_port(hadoop_jobtracker_t)
+corenet_tcp_connect_hadoop_namenode_port(hadoop_jobtracker_t)
+create_dirs_pattern(hadoop_jobtracker_t, hadoop_jobtracker_log_t, hadoop_jobtracker_log_t)
+manage_dirs_pattern(hadoop_jobtracker_t, hadoop_var_lib_t, hadoop_var_lib_t)
+setattr_dirs_pattern(hadoop_jobtracker_t, hadoop_jobtracker_log_t, hadoop_jobtracker_log_t)
+
+########################################
+#
+# Hadoop namenode policy.
+#
+
+corenet_tcp_bind_hadoop_namenode_port(hadoop_namenode_t)
+corenet_tcp_connect_hadoop_namenode_port(hadoop_namenode_t)
+manage_dirs_pattern(hadoop_namenode_t, hadoop_var_lib_t, hadoop_var_lib_t)
+manage_files_pattern(hadoop_namenode_t, hadoop_var_lib_t, hadoop_var_lib_t)
+
+########################################
+#
+# Hadoop secondary namenode policy.
+#
+
+corenet_tcp_connect_hadoop_namenode_port(hadoop_secondarynamenode_t)
+manage_dirs_pattern(hadoop_secondarynamenode_t, hadoop_var_lib_t, hadoop_var_lib_t)
+
+########################################
+#
+# Hadoop tasktracker policy.
+#
+
+allow hadoop_tasktracker_t self:process signal;
+
+corenet_tcp_connect_zope_port(hadoop_tasktracker_t)
+corenet_tcp_connect_hadoop_namenode_port(hadoop_tasktracker_t)
+
+filetrans_pattern(hadoop_tasktracker_t, hadoop_log_t, hadoop_tasktracker_log_t, dir)
+fs_getattr_xattr_fs(hadoop_tasktracker_t)
+fs_associate(hadoop_tasktracker_t)
+
+manage_dirs_pattern(hadoop_tasktracker_t, hadoop_var_lib_t, hadoop_var_lib_t)
+manage_dirs_pattern(hadoop_tasktracker_t, hadoop_tasktracker_log_t, hadoop_tasktracker_log_t)
+
+########################################
+#
+# Hadoop zookeeper client policy.
+#
+
+allow zookeeper_t self:process { getsched sigkill signal signull execmem };
+allow zookeeper_t self:fifo_file rw_fifo_file_perms;
+allow zookeeper_t self:tcp_socket create_stream_socket_perms;
+allow zookeeper_t self:udp_socket create_socket_perms;
+allow zookeeper_t zookeeper_server_t:process signull;
+
+read_files_pattern(zookeeper_t, zookeeper_etc_t, zookeeper_etc_t)
+read_lnk_files_pattern(zookeeper_t, zookeeper_etc_t, zookeeper_etc_t)
+
+setattr_dirs_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
+append_files_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
+create_files_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
+read_files_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
+setattr_files_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
+logging_log_filetrans(zookeeper_t, zookeeper_log_t, file)
+
+manage_dirs_pattern(zookeeper_t, hadoop_hsperfdata_t, hadoop_hsperfdata_t)
+manage_files_pattern(zookeeper_t, zookeeper_tmp_t, zookeeper_tmp_t)
+files_tmp_filetrans(zookeeper_t, hadoop_hsperfdata_t, dir)
+filetrans_pattern(zookeeper_t, hadoop_hsperfdata_t, zookeeper_tmp_t, file)
+
+can_exec(zookeeper_t, zookeeper_exec_t)
+
+kernel_read_network_state(zookeeper_t)
+kernel_read_system_state(zookeeper_t)
+
+corecmd_exec_bin(zookeeper_t)
+corecmd_exec_shell(zookeeper_t)
+
+dontaudit zookeeper_t self:netlink_route_socket rw_netlink_socket_perms;
+
+corenet_all_recvfrom_unlabeled(zookeeper_t)
+corenet_all_recvfrom_netlabel(zookeeper_t)
+corenet_sendrecv_zookeeper_client_client_packets(zookeeper_t)
+corenet_tcp_bind_all_nodes(zookeeper_t)
+corenet_tcp_connect_zookeeper_client_port(zookeeper_t)
+corenet_tcp_sendrecv_all_nodes(zookeeper_t)
+corenet_tcp_sendrecv_all_ports(zookeeper_t)
+corenet_tcp_sendrecv_generic_if(zookeeper_t)
+corenet_tcp_connect_generic_port(zookeeper_t)
+corenet_udp_bind_all_nodes(zookeeper_t)
+corenet_udp_sendrecv_all_nodes(zookeeper_t)
+corenet_udp_sendrecv_all_ports(zookeeper_t)
+corenet_udp_sendrecv_generic_if(zookeeper_t)
+
+dev_read_rand(zookeeper_t)
+dev_read_sysfs(zookeeper_t)
+dev_read_urand(zookeeper_t)
+
+files_read_etc_files(zookeeper_t)
+files_read_usr_files(zookeeper_t)
+
+# This can be removed on anything post-el5
+libs_use_ld_so(zookeeper_t)
+libs_use_shared_libs(zookeeper_t)
+
+miscfiles_read_localization(zookeeper_t)
+
+sysnet_read_config(zookeeper_t)
+
+userdom_dontaudit_search_user_home_dirs(zookeeper_t)
+userdom_use_user_terminals(zookeeper_t)
+
+java_exec(zookeeper_t)
+
+optional_policy(`
+ nscd_socket_use(zookeeper_t)
+')
+
+########################################
+#
+# Hadoop zookeeper server policy.
+#
+
+allow zookeeper_server_t self:capability kill;
+allow zookeeper_server_t self:process { execmem getsched sigkill signal signull };
+allow zookeeper_server_t self:fifo_file rw_fifo_file_perms;
+allow zookeeper_server_t self:netlink_route_socket rw_netlink_socket_perms;
+allow zookeeper_server_t self:tcp_socket create_stream_socket_perms;
+allow zookeeper_server_t self:udp_socket create_socket_perms;
+
+read_files_pattern(zookeeper_server_t, zookeeper_etc_t, zookeeper_etc_t)
+read_lnk_files_pattern(zookeeper_server_t, zookeeper_etc_t, zookeeper_etc_t)
+
+manage_dirs_pattern(zookeeper_server_t, zookeeper_server_var_t, zookeeper_server_var_t)
+manage_files_pattern(zookeeper_server_t, zookeeper_server_var_t, zookeeper_server_var_t)
+files_var_lib_filetrans(zookeeper_server_t, zookeeper_server_var_t, { dir file })
+
+setattr_dirs_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
+append_files_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
+create_files_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
+read_files_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
+setattr_files_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
+logging_log_filetrans(zookeeper_server_t, zookeeper_log_t, file)
+
+manage_files_pattern(zookeeper_server_t, zookeeper_server_var_run_t, zookeeper_server_var_run_t)
+files_pid_filetrans(zookeeper_server_t, zookeeper_server_var_run_t, file)
+
+manage_dirs_pattern(zookeeper_server_t, hadoop_hsperfdata_t, hadoop_hsperfdata_t)
+manage_files_pattern(zookeeper_server_t, zookeeper_server_tmp_t, zookeeper_server_tmp_t)
+files_tmp_filetrans(zookeeper_server_t, hadoop_hsperfdata_t, dir)
+filetrans_pattern(zookeeper_server_t, hadoop_hsperfdata_t, zookeeper_server_tmp_t, file)
+
+can_exec(zookeeper_server_t, zookeeper_server_exec_t)
+
+kernel_read_network_state(zookeeper_server_t)
+kernel_read_system_state(zookeeper_server_t)
+
+corecmd_exec_bin(zookeeper_server_t)
+corecmd_exec_shell(zookeeper_server_t)
+
+corenet_all_recvfrom_unlabeled(zookeeper_server_t)
+corenet_all_recvfrom_netlabel(zookeeper_server_t)
+corenet_sendrecv_zookeeper_election_client_packets(zookeeper_server_t)
+corenet_sendrecv_zookeeper_leader_client_packets(zookeeper_server_t)
+corenet_sendrecv_zookeeper_client_server_packets(zookeeper_server_t)
+corenet_sendrecv_zookeeper_election_server_packets(zookeeper_server_t)
+corenet_sendrecv_zookeeper_leader_server_packets(zookeeper_server_t)
+corenet_tcp_bind_all_nodes(zookeeper_server_t)
+corenet_tcp_bind_zookeeper_client_port(zookeeper_server_t)
+corenet_tcp_bind_zookeeper_election_port(zookeeper_server_t)
+corenet_tcp_bind_zookeeper_leader_port(zookeeper_server_t)
+corenet_tcp_connect_zookeeper_election_port(zookeeper_server_t)
+corenet_tcp_connect_zookeeper_leader_port(zookeeper_server_t)
+corenet_tcp_sendrecv_generic_if(zookeeper_server_t)
+corenet_tcp_sendrecv_generic_node(zookeeper_server_t)
+corenet_tcp_sendrecv_all_ports(zookeeper_server_t)
+corenet_tcp_connect_generic_port(zookeeper_server_t)
+corenet_udp_sendrecv_generic_if(zookeeper_server_t)
+corenet_udp_sendrecv_all_nodes(zookeeper_server_t)
+corenet_udp_sendrecv_all_ports(zookeeper_server_t)
+corenet_udp_bind_all_nodes(zookeeper_server_t)
+
+dev_read_rand(zookeeper_server_t)
+dev_read_sysfs(zookeeper_server_t)
+dev_read_urand(zookeeper_server_t)
+
+files_read_etc_files(zookeeper_server_t)
+files_read_usr_files(zookeeper_server_t)
+
+fs_getattr_xattr_fs(zookeeper_server_t)
+
+# This can be removed on anything post-el5
+libs_use_ld_so(zookeeper_server_t)
+libs_use_shared_libs(zookeeper_server_t)
+
+logging_send_syslog_msg(zookeeper_server_t)
+
+miscfiles_read_localization(zookeeper_server_t)
+
+sysnet_read_config(zookeeper_server_t)
+
+java_exec(zookeeper_server_t)
diff --git a/policy/modules/system/unconfined.te b/policy/modules/system/unconfined.te
index f976344..ac27264 100644
--- a/policy/modules/system/unconfined.te
+++ b/policy/modules/system/unconfined.te
@@ -118,6 +118,10 @@ optional_policy(`
')
optional_policy(`
+ hadoop_run(unconfined_t, unconfined_r)
+')
+
+optional_policy(`
inn_domtrans(unconfined_t)
')
@@ -210,6 +214,10 @@ optional_policy(`
xserver_domtrans(unconfined_t)
')
+optional_policy(`
+ zookeeper_run_client(unconfined_t, unconfined_r)
+')
+
########################################
#
# Unconfined Execmem Local policy
On 10/04/10 13:15, Paul Nuzzi wrote:
> On 10/01/2010 01:56 PM, Christopher J. PeBenito wrote:
>> On 10/01/10 11:17, Paul Nuzzi wrote:
>>> On 10/01/2010 08:02 AM, Dominick Grift wrote:
>>>> On Thu, Sep 30, 2010 at 03:39:40PM -0400, Paul Nuzzi wrote:
>>>>> I updated the patch based on recommendations from the mailing list.
>>>>> All of hadoop's services are included in one module instead of
>>>>> individual ones. Unconfined and sysadm roles are given access to
>>>>> hadoop and zookeeper client domain transitions. The services are started
>>>>> using run_init. Let me know what you think.
>>>>
>>>> Why do some hadoop domain need to manage generic tmp?
>>>>
>>>> files_manage_generic_tmp_dirs(zookeeper_t)
>>>> files_manage_generic_tmp_dirs(hadoop_t)
>>>> files_manage_generic_tmp_dirs(hadoop_$1_initrc_t)
>>>> files_manage_generic_tmp_files(hadoop_$1_initrc_t)
>>>> files_manage_generic_tmp_files(hadoop_$1_t)
>>>> files_manage_generic_tmp_dirs(hadoop_$1_t)
>>>
>>> This has to be done for Java JMX to work. All of the files are written to
>>> /tmp/hsperfdata_(hadoop/zookeeper). /tmp/hsperfdata_ is labeled tmp_t while
>>> all the files for each service are labeled with hadoop_*_tmp_t. The first service
>>> will end up owning the directory if it is not labeled tmp_t.
>>
>> The hsperfdata dir in /tmp is certainly the bane of policy writers. Based on a quick look through the policy, it looks like the only dir they create in /tmp is this hsperfdata dir. I suggest you do something like
>>
>> files_tmp_filetrans(hadoop_t, hadoop_hsperfdata_t, dir)
>> files_tmp_filetrans(zookeeper_t, hadoop_hsperfdata_t, dir)
>>
>> filetrans_pattern(hadoop_t, hadoop_hsperfdata_t, hadoop_tmp_t, file)
>> filetrans_pattern(zookeeper_t, hadoop_hsperfdata_t, zookeeper_tmp_t, file)
>>
>
> That looks like a better way to handle the tmp_t problem.
>
> I changed the patch with your comments. Hopefully this will be one of the last updates.
> Tested on a CDH3 cluster as a module without any problems.
There are several little issues with style, but it'll be easier just to
fix them when it's committed.
Other comments inline.
> diff --git a/policy/modules/kernel/corenetwork.te.in b/policy/modules/kernel/corenetwork.te.in
> index 2ecdde8..7a1b5de 100644
> --- a/policy/modules/kernel/corenetwork.te.in
> +++ b/policy/modules/kernel/corenetwork.te.in
> @@ -105,6 +105,7 @@ network_port(giftd, tcp,1213,s0)
> network_port(git, tcp,9418,s0, udp,9418,s0)
> network_port(gopher, tcp,70,s0, udp,70,s0)
> network_port(gpsd, tcp,2947,s0)
> +network_port(hadoop_namenode, tcp, 8020,s0)
It seems like it would be sufficient to call it "hadoop".
> network_port(hddtemp, tcp,7634,s0)
> network_port(howl, tcp,5335,s0, udp,5353,s0)
> network_port(hplip, tcp,1782,s0, tcp,2207,s0, tcp,2208,s0, tcp, 8290,s0, tcp,50000,s0, tcp,50002,s0, tcp,8292,s0, tcp,9100,s0, tcp,9101,s0, tcp,9102,s0, tcp,9220,s0, tcp,9221,s0, tcp,9222,s0, tcp,9280,s0, tcp,9281,s0, tcp,9282,s0, tcp,9290,s0, tcp,9291,s0, tcp,9292,s0)
> @@ -211,6 +212,9 @@ network_port(xdmcp, udp,177,s0, tcp,177,s0)
> network_port(xen, tcp,8002,s0)
> network_port(xfs, tcp,7100,s0)
> network_port(xserver, tcp,6000-6020,s0)
> +network_port(zookeeper_client, tcp, 2181,s0)
> +network_port(zookeeper_election, tcp, 3888,s0)
> +network_port(zookeeper_leader, tcp, 2888,s0)
> network_port(zebra, tcp,2600-2604,s0, tcp,2606,s0, udp,2600-2604,s0, udp,2606,s0)
> network_port(zope, tcp,8021,s0)
>
> diff --git a/policy/modules/roles/sysadm.te b/policy/modules/roles/sysadm.te
> index cad05ff..b46b28b 100644
> --- a/policy/modules/roles/sysadm.te
> +++ b/policy/modules/roles/sysadm.te
> @@ -152,6 +152,10 @@ optional_policy(`
> ')
>
> optional_policy(`
> + hadoop_run(sysadm_t, sysadm_r)
> +')
> +optional_policy(`
> # allow system administrator to use the ipsec script to look
> # at things (e.g., ipsec auto --status)
> # probably should create an ipsec_admin role for this kind of thing
> @@ -392,6 +396,10 @@ optional_policy(`
> yam_run(sysadm_t, sysadm_r)
> ')
>
> +optional_policy(`
> + zookeeper_run_client(sysadm_t, sysadm_r)
> +')
> +
> ifndef(`distro_redhat',`
> optional_policy(`
> auth_role(sysadm_r, sysadm_t)
> diff --git a/policy/modules/services/hadoop.fc b/policy/modules/services/hadoop.fc
> new file mode 100644
> index 0000000..15c61ed
> --- /dev/null
> +++ b/policy/modules/services/hadoop.fc
> @@ -0,0 +1,54 @@
> +/etc/hadoop.*(/.*)? gen_context(system_u:object_r:hadoop_etc_t,s0)
> +
> +/etc/rc\.d/init\.d/hadoop-(.*)?-datanode -- gen_context(system_u:object_r:hadoop_datanode_initrc_exec_t,s0)
> +/etc/rc\.d/init\.d/hadoop-(.*)?-jobtracker -- gen_context(system_u:object_r:hadoop_jobtracker_initrc_exec_t,s0)
> +/etc/rc\.d/init\.d/hadoop-(.*)?-namenode -- gen_context(system_u:object_r:hadoop_namenode_initrc_exec_t,s0)
> +/etc/rc\.d/init\.d/hadoop-(.*)?-secondarynamenode -- gen_context(system_u:object_r:hadoop_secondarynamenode_initrc_exec_t,s0)
> +/etc/rc\.d/init\.d/hadoop-(.*)?-tasktracker -- gen_context(system_u:object_r:hadoop_tasktracker_initrc_exec_t,s0)
> +/etc/rc\.d/init\.d/hadoop-zookeeper -- gen_context(system_u:object_r:zookeeper_server_initrc_exec_t,s0)
> +/etc/init\.d/hadoop-datanode -- gen_context(system_u:object_r:hadoop_datanode_initrc_exec_t,s0)
> +/etc/init\.d/hadoop-jobtracker -- gen_context(system_u:object_r:hadoop_jobtracker_initrc_exec_t,s0)
> +/etc/init\.d/hadoop-namenode -- gen_context(system_u:object_r:hadoop_namenode_initrc_exec_t,s0)
> +/etc/init\.d/hadoop-secondarynamenode -- gen_context(system_u:object_r:hadoop_secondarynamenode_initrc_exec_t,s0)
> +/etc/init\.d/hadoop-tasktracker -- gen_context(system_u:object_r:hadoop_tasktracker_initrc_exec_t,s0)
> +/etc/init\.d/zookeeper -- gen_context(system_u:object_r:zookeeper_server_initrc_exec_t,s0)
> +
> +/etc/zookeeper(/.*)? gen_context(system_u:object_r:zookeeper_etc_t,s0)
> +/etc/zookeeper\.dist(/.*)? gen_context(system_u:object_r:zookeeper_etc_t,s0)
> +
> +/usr/lib/hadoop(.*)?/bin/hadoop -- gen_context(system_u:object_r:hadoop_exec_t,s0)
> +
> +/usr/bin/zookeeper-client -- gen_context(system_u:object_r:zookeeper_exec_t,s0)
> +/usr/bin/zookeeper-server -- gen_context(system_u:object_r:zookeeper_server_exec_t,s0)
> +
> +/var/zookeeper(/.*)? gen_context(system_u:object_r:zookeeper_server_var_t,s0)
> +/var/lib/zookeeper(/.*)? gen_context(system_u:object_r:zookeeper_server_var_t,s0)
> +
> +/var/lib/hadoop(.*)? gen_context(system_u:object_r:hadoop_var_lib_t,s0)
> +/var/lib/hadoop(.*)?/cache/hadoop/dfs/data(/.*)? gen_context(system_u:object_r:hadoop_datanode_var_lib_t,s0)
> +/var/lib/hadoop(.*)?/cache/hadoop/dfs/name(/.*)? gen_context(system_u:object_r:hadoop_namenode_var_lib_t,s0)
> +/var/lib/hadoop(.*)?/cache/hadoop/dfs/namesecondary(/.*)? gen_context(system_u:object_r:hadoop_secondarynamenode_var_lib_t,s0)
> +/var/lib/hadoop(.*)?/cache/hadoop/mapred/local/jobTracker(/.*)? gen_context(system_u:object_r:hadoop_jobtracker_var_lib_t,s0)
> +/var/lib/hadoop(.*)?/cache/hadoop/mapred/local/taskTracker(/.*)? gen_context(system_u:object_r:hadoop_tasktracker_var_lib_t,s0)
> +
> +/var/lock/subsys/hadoop-datanode -- gen_context(system_u:object_r:hadoop_datanode_lock_t,s0)
> +/var/lock/subsys/hadoop-namenode -- gen_context(system_u:object_r:hadoop_namenode_lock_t,s0)
> +/var/lock/subsys/hadoop-jobtracker -- gen_context(system_u:object_r:hadoop_jobtracker_lock_t,s0)
> +/var/lock/subsys/hadoop-tasktracker -- gen_context(system_u:object_r:hadoop_tasktracker_lock_t,s0)
> +/var/lock/subsys/hadoop-secondarynamenode -- gen_context(system_u:object_r:hadoop_secondarynamenode_lock_t,s0)
> +
> +/var/log/hadoop(.*)? gen_context(system_u:object_r:hadoop_log_t,s0)
> +/var/log/hadoop(.*)?/hadoop-hadoop-datanode-(.*)? gen_context(system_u:object_r:hadoop_datanode_log_t,s0)
> +/var/log/hadoop(.*)?/hadoop-hadoop-jobtracker-(.*)? gen_context(system_u:object_r:hadoop_jobtracker_log_t,s0)
> +/var/log/hadoop(.*)?/hadoop-hadoop-namenode-(.*)? gen_context(system_u:object_r:hadoop_namenode_log_t,s0)
> +/var/log/hadoop(.*)?/hadoop-hadoop-secondarynamenode-(.*)? gen_context(system_u:object_r:hadoop_secondarynamenode_log_t,s0)
> +/var/log/hadoop(.*)?/hadoop-hadoop-tasktracker-(.*)? gen_context(system_u:object_r:hadoop_tasktracker_log_t,s0)
> +/var/log/hadoop(.*)?/history(/.*)? gen_context(system_u:object_r:hadoop_jobtracker_log_t,s0)
> +/var/log/zookeeper(/.*)? gen_context(system_u:object_r:zookeeper_log_t,s0)
> +
> +/var/run/hadoop(.*)? -d gen_context(system_u:object_r:hadoop_var_run_t,s0)
> +/var/run/hadoop(.*)?/hadoop-hadoop-datanode.pid -- gen_context(system_u:object_r:hadoop_datanode_initrc_var_run_t,s0)
> +/var/run/hadoop(.*)?/hadoop-hadoop-namenode.pid -- gen_context(system_u:object_r:hadoop_namenode_initrc_var_run_t,s0)
> +/var/run/hadoop(.*)?/hadoop-hadoop-jobtracker.pid -- gen_context(system_u:object_r:hadoop_jobtracker_initrc_var_run_t,s0)
> +/var/run/hadoop(.*)?/hadoop-hadoop-tasktracker.pid -- gen_context(system_u:object_r:hadoop_tasktracker_initrc_var_run_t,s0)
> +/var/run/hadoop(.*)?/hadoop-hadoop-secondarynamenode.pid -- gen_context(system_u:object_r:hadoop_secondarynamenode_initrc_var_run_t,s0)
Missing some escaping on the periods: \.pid
> diff --git a/policy/modules/services/hadoop.if b/policy/modules/services/hadoop.if
> new file mode 100644
> index 0000000..33108a3
> --- /dev/null
> +++ b/policy/modules/services/hadoop.if
> @@ -0,0 +1,358 @@
> +##<summary>Software for reliable, scalable, distributed computing.</summary>
> +
> +#######################################
> +##<summary>
> +## The template to define a hadoop domain.
> +##</summary>
> +##<param name="domain_prefix">
> +## <summary>
> +## Domain prefix to be used.
> +## </summary>
> +##</param>
> +#
> +template(`hadoop_domain_template',`
> + gen_require(`
> + attribute hadoop_domain;
> + type hadoop_log_t, hadoop_var_lib_t, hadoop_var_run_t;
> + type hadoop_exec_t, hadoop_hsperfdata_t;
> + ')
> +
> + ########################################
> + #
> + # Shared declarations.
> + #
> +
> + type hadoop_$1_t, hadoop_domain;
> + domain_type(hadoop_$1_t)
> + domain_entry_file(hadoop_$1_t, hadoop_exec_t)
> +
> + type hadoop_$1_initrc_t;
> + type hadoop_$1_initrc_exec_t;
> + init_script_domain(hadoop_$1_initrc_t, hadoop_$1_initrc_exec_t)
> +
> + role system_r types { hadoop_$1_initrc_t hadoop_$1_t };
> +
> + type hadoop_$1_lock_t;
> + files_lock_file(hadoop_$1_lock_t)
> + files_lock_filetrans(hadoop_$1_initrc_t, hadoop_$1_lock_t, file)
> +
> + type hadoop_$1_log_t;
> + logging_log_file(hadoop_$1_log_t)
> + filetrans_pattern(hadoop_$1_initrc_t, hadoop_log_t, hadoop_$1_log_t, {dir file})
> + filetrans_pattern(hadoop_$1_t, hadoop_log_t, hadoop_$1_log_t, {dir file})
> +
> + type hadoop_$1_var_lib_t;
> + files_type(hadoop_$1_var_lib_t)
> + filetrans_pattern(hadoop_$1_t, hadoop_var_lib_t, hadoop_$1_var_lib_t, file)
> +
> + type hadoop_$1_initrc_var_run_t;
> + files_pid_file(hadoop_$1_initrc_var_run_t)
> + filetrans_pattern(hadoop_$1_initrc_t, hadoop_var_run_t, hadoop_$1_initrc_var_run_t, file)
> +
> + type hadoop_$1_tmp_t;
> + files_tmp_file(hadoop_$1_tmp_t)
> + files_tmp_filetrans(hadoop_$1_t, hadoop_hsperfdata_t, dir)
> + filetrans_pattern(hadoop_$1_t, hadoop_hsperfdata_t, hadoop_$1_tmp_t, file)
> +
> + ####################################
> + #
> + # Shared hadoop_$1 initrc policy.
> + #
> +
> + allow hadoop_$1_initrc_t hadoop_$1_t:process { signal signull };
> + allow hadoop_$1_initrc_t self:capability { setuid setgid };
> + allow hadoop_$1_initrc_t self:fifo_file rw_fifo_file_perms;
> + allow hadoop_$1_initrc_t self:process setsched;
> +
> + consoletype_exec(hadoop_$1_initrc_t)
> + corecmd_exec_bin(hadoop_$1_initrc_t)
> + corecmd_exec_shell(hadoop_$1_initrc_t)
> +
> + domtrans_pattern(hadoop_$1_initrc_t, hadoop_exec_t, hadoop_$1_t)
> + dontaudit hadoop_$1_initrc_t self:capability sys_tty_config;
> +
> + files_read_etc_files(hadoop_$1_initrc_t)
> + files_read_usr_files(hadoop_$1_initrc_t)
> + files_search_pids(hadoop_$1_initrc_t)
> + files_search_locks(hadoop_$1_initrc_t)
> + fs_getattr_xattr_fs(hadoop_$1_initrc_t)
> +
> + hadoop_exec_config_files(hadoop_$1_initrc_t)
> +
> + init_rw_utmp(hadoop_$1_initrc_t)
> + init_use_script_ptys(hadoop_$1_initrc_t)
> +
> + kernel_read_kernel_sysctls(hadoop_$1_initrc_t)
> + kernel_read_sysctl(hadoop_$1_initrc_t)
> + kernel_read_system_state(hadoop_$1_initrc_t)
> +
> + logging_send_syslog_msg(hadoop_$1_initrc_t)
> + logging_send_audit_msgs(hadoop_$1_initrc_t)
> + logging_search_logs(hadoop_$1_initrc_t)
> +
> + manage_files_pattern(hadoop_$1_initrc_t, hadoop_$1_lock_t, hadoop_$1_lock_t)
> + manage_files_pattern(hadoop_$1_initrc_t, hadoop_$1_initrc_var_run_t, hadoop_$1_initrc_var_run_t)
> + manage_files_pattern(hadoop_$1_initrc_t, hadoop_$1_log_t, hadoop_$1_log_t)
> + manage_dirs_pattern(hadoop_$1_initrc_t, hadoop_var_run_t, hadoop_var_run_t)
> + manage_files_pattern(hadoop_$1_initrc_t, hadoop_var_run_t, hadoop_var_run_t)
> +
> + miscfiles_read_localization(hadoop_$1_initrc_t)
> +
> + optional_policy(`
> + nscd_socket_use(hadoop_$1_initrc_t)
> + ')
> +
> + term_use_generic_ptys(hadoop_$1_initrc_t)
> +
> + userdom_dontaudit_search_user_home_dirs(hadoop_$1_initrc_t)
> +
> + # This can be removed on anything post-el5
> + libs_use_ld_so(hadoop_$1_initrc_t)
> + libs_use_shared_libs(hadoop_$1_initrc_t)
Upstream handles shared libs appropriately, so this should be removed.
There are other instances of this in the patch that can be removed too.
> + ####################################
> + #
> + # Shared hadoop_$1 policy.
> + #
> +
> + allow hadoop_$1_t hadoop_domain:process signull;
> + allow hadoop_$1_t self:fifo_file rw_fifo_file_perms;
> + allow hadoop_$1_t self:process execmem;
> + allow hadoop_$1_t hadoop_var_run_t:dir getattr;
> +
> + corecmd_exec_bin(hadoop_$1_t)
> + corecmd_exec_shell(hadoop_$1_t)
> +
> + dev_read_rand(hadoop_$1_t)
> + dev_read_urand(hadoop_$1_t)
> + dev_read_sysfs(hadoop_$1_t)
> + dontaudit hadoop_$1_t self:netlink_route_socket rw_netlink_socket_perms;
> +
> + files_read_etc_files(hadoop_$1_t)
> + files_search_pids(hadoop_$1_t)
> + files_search_var_lib(hadoop_$1_t)
> +
> + hadoop_exec_config_files(hadoop_$1_t)
> +
> + java_exec(hadoop_$1_t)
> +
> + kernel_read_network_state(hadoop_$1_t)
> + kernel_read_system_state(hadoop_$1_t)
> +
> + logging_search_logs(hadoop_$1_t)
> +
> + manage_dirs_pattern(hadoop_$1_t, hadoop_$1_var_lib_t, hadoop_$1_var_lib_t)
> + manage_dirs_pattern(hadoop_$1_t, hadoop_hsperfdata_t, hadoop_hsperfdata_t)
> + manage_files_pattern(hadoop_$1_t, hadoop_$1_log_t, hadoop_$1_log_t)
> + manage_files_pattern(hadoop_$1_t, hadoop_$1_var_lib_t, hadoop_$1_var_lib_t)
> + manage_files_pattern(hadoop_$1_t, hadoop_$1_tmp_t, hadoop_$1_tmp_t)
> + miscfiles_read_localization(hadoop_$1_t)
> +
> + optional_policy(`
> + nscd_socket_use(hadoop_$1_t)
> + ')
> +
> + sysnet_read_config(hadoop_$1_t)
> +
> + allow hadoop_$1_t self:tcp_socket create_stream_socket_perms;
> + corenet_all_recvfrom_unlabeled(hadoop_$1_t)
> + corenet_all_recvfrom_netlabel(hadoop_$1_t)
> + corenet_tcp_bind_all_nodes(hadoop_$1_t)
> + corenet_tcp_sendrecv_generic_if(hadoop_$1_t)
> + corenet_tcp_sendrecv_generic_node(hadoop_$1_t)
> + corenet_tcp_sendrecv_all_ports(hadoop_$1_t)
> + corenet_tcp_connect_generic_port(hadoop_$1_t)
This looks questionable. The port it connects to can't be identified?
> +
> + allow hadoop_$1_t self:udp_socket create_socket_perms;
> + corenet_udp_sendrecv_generic_if(hadoop_$1_t)
> + corenet_udp_sendrecv_all_nodes(hadoop_$1_t)
> + corenet_udp_bind_all_nodes(hadoop_$1_t)
> +
> + # This can be removed on anything post-el5
> + libs_use_ld_so(hadoop_$1_t)
> + libs_use_shared_libs(hadoop_$1_t)
> +')
> +
> +########################################
> +##<summary>
> +## Execute hadoop in the
> +## hadoop domain.
> +##</summary>
> +##<param name="domain">
> +## <summary>
> +## Domain allowed to transition.
> +## </summary>
> +##</param>
> +#
> +interface(`hadoop_domtrans',`
> + gen_require(`
> + type hadoop_t, hadoop_exec_t;
> + ')
> +
> + files_search_usr($1)
> + libs_search_lib($1)
> + domtrans_pattern($1, hadoop_exec_t, hadoop_t)
> +')
> +
> +########################################
> +##<summary>
> +## Execute hadoop in the hadoop domain,
> +## and allow the specified role the
> +## hadoop domain.
> +##</summary>
> +##<param name="domain">
> +## <summary>
> +## Domain allowed to transition.
> +## </summary>
> +##</param>
> +##<param name="role">
> +## <summary>
> +## Role allowed access.
> +## </summary>
> +##</param>
> +##<rolecap/>
> +#
> +interface(`hadoop_run',`
> + gen_require(`
> + type hadoop_t;
> + ')
> +
> + hadoop_domtrans($1)
> + role $2 types hadoop_t;
> +
> + allow $1 hadoop_t:process { ptrace signal_perms };
> + ps_process_pattern($1, hadoop_t)
> +')
> +
> +########################################
> +##<summary>
> +## Execute zookeeper client in the
> +## zookeeper client domain.
> +##</summary>
> +##<param name="domain">
> +## <summary>
> +## Domain allowed to transition.
> +## </summary>
> +##</param>
> +#
> +interface(`zookeeper_domtrans_client',`
The convention is to have the interface name first. So this should be
something like hadoop_domtrans_zookeeper_client.
> + gen_require(`
> + type zookeeper_t, zookeeper_exec_t;
> + ')
> +
> + corecmd_search_bin($1)
> + files_search_usr($1)
> + domtrans_pattern($1, zookeeper_exec_t, zookeeper_t)
> +')
> +
> +########################################
> +##<summary>
> +## Execute zookeeper server in the
> +## zookeeper server domain.
> +##</summary>
> +##<param name="domain">
> +## <summary>
> +## Domain allowed to transition.
> +## </summary>
> +##</param>
> +#
> +interface(`zookeeper_domtrans_server',`
> + gen_require(`
> + type zookeeper_server_t, zookeeper_server_exec_t;
> + ')
> +
> + corecmd_search_bin($1)
> + files_search_usr($1)
> + domtrans_pattern($1, zookeeper_server_exec_t, zookeeper_server_t)
> +')
> +
> +########################################
> +##<summary>
> +## Execute zookeeper server in the
> +## zookeeper domain.
> +##</summary>
> +##<param name="domain">
> +## <summary>
> +## Domain allowed to transition.
> +## </summary>
> +##</param>
> +#
> +interface(`zookeeper_initrc_domtrans_server',`
> + gen_require(`
> + type zookeeper_server_initrc_exec_t;
> + ')
> +
> + init_labeled_script_domtrans($1, zookeeper_server_initrc_exec_t)
> +')
> +
> +########################################
> +##<summary>
> +## Execute zookeeper client in the
> +## zookeeper client domain, and allow the
> +## specified role the zookeeper client domain.
> +##</summary>
> +##<param name="domain">
> +## <summary>
> +## Domain allowed to transition.
> +## </summary>
> +##</param>
> +##<param name="role">
> +## <summary>
> +## Role allowed access.
> +## </summary>
> +##</param>
> +##<rolecap/>
> +#
> +interface(`zookeeper_run_client',`
> + gen_require(`
> + type zookeeper_t;
> + ')
> +
> + zookeeper_domtrans_client($1)
> + role $2 types zookeeper_t;
> +
> + allow $1 zookeeper_t:process { ptrace signal_perms };
> + ps_process_pattern($1, zookeeper_t)
> +')
> +
> +########################################
> +##<summary>
> +## Give permission to a domain to read
> +## hadoop_etc_t
> +##</summary>
> +##<param name="domain">
> +##<summary>
> +## Domain needing read permission
> +##</summary>
> +##</param>
> +#
> +interface(`hadoop_read_config_files', `
> + gen_require(`
> + type hadoop_etc_t;
> + ')
> +
> + allow $1 hadoop_etc_t:dir search_dir_perms;
> + allow $1 hadoop_etc_t:lnk_file { read getattr };
> + allow $1 hadoop_etc_t:file read_file_perms;
> +')
> +
> +########################################
> +##<summary>
> +## Give permission to a domain to
> +## execute hadoop_etc_t
> +##</summary>
> +##<param name="domain">
> +##<summary>
> +## Domain needing read and execute
> +## permission
> +##</summary>
> +##</param>
> +#
> +interface(`hadoop_exec_config_files', `
> + gen_require(`
> + type hadoop_etc_t;
> + ')
> +
> + hadoop_read_config_files($1)
> + allow $1 hadoop_etc_t:file { execute execute_no_trans};
> +')
> diff --git a/policy/modules/services/hadoop.te b/policy/modules/services/hadoop.te
> new file mode 100644
> index 0000000..519aebb
> --- /dev/null
> +++ b/policy/modules/services/hadoop.te
> @@ -0,0 +1,380 @@
> +policy_module(hadoop, 1.0.0)
> +
> +########################################
> +#
> +# Hadoop declarations.
> +#
> +
> +attribute hadoop_domain;
> +
> +type hadoop_t;
> +type hadoop_exec_t;
> +application_domain(hadoop_t, hadoop_exec_t)
> +ubac_constrained(hadoop_t)
> +
> +type hadoop_etc_t;
> +files_config_file(hadoop_etc_t)
> +
> +type hadoop_var_lib_t;
> +files_type(hadoop_var_lib_t)
> +
> +type hadoop_log_t;
> +logging_log_file(hadoop_log_t)
> +
> +type hadoop_var_run_t;
> +files_pid_file(hadoop_var_run_t)
> +
> +type hadoop_tmp_t;
> +files_tmp_file(hadoop_tmp_t)
> +ubac_constrained(hadoop_tmp_t)
> +
> +type hadoop_hsperfdata_t;
> +files_tmp_file(hadoop_hsperfdata_t)
> +ubac_constrained(hadoop_hsperfdata_t)
> +
> +hadoop_domain_template(datanode)
> +hadoop_domain_template(jobtracker)
> +hadoop_domain_template(namenode)
> +hadoop_domain_template(secondarynamenode)
> +hadoop_domain_template(tasktracker)
> +
> +########################################
> +#
> +# Hadoop zookeeper client declarations.
> +#
> +
> +type zookeeper_t;
> +type zookeeper_exec_t;
> +application_domain(zookeeper_t, zookeeper_exec_t)
> +ubac_constrained(zookeeper_t)
> +
> +type zookeeper_etc_t;
> +files_config_file(zookeeper_etc_t)
> +
> +type zookeeper_log_t;
> +logging_log_file(zookeeper_log_t)
> +
> +type zookeeper_tmp_t;
> +files_tmp_file(zookeeper_tmp_t)
> +ubac_constrained(zookeeper_tmp_t)
> +
> +########################################
> +#
> +# Hadoop zookeeper server declarations.
> +#
> +
> +type zookeeper_server_t;
> +type zookeeper_server_exec_t;
> +init_daemon_domain(zookeeper_server_t, zookeeper_server_exec_t)
> +
> +type zookeeper_server_initrc_exec_t;
> +init_script_file(zookeeper_server_initrc_exec_t)
> +
> +type zookeeper_server_var_t;
> +files_type(zookeeper_server_var_t)
> +
> +# This will need a file context specification.
> +type zookeeper_server_var_run_t;
> +files_pid_file(zookeeper_server_var_run_t)
> +
> +type zookeeper_server_tmp_t;
> +files_tmp_file(zookeeper_server_tmp_t)
> +
> +########################################
> +#
> +# Hadoop policy.
> +#
> +
> +allow hadoop_t self:capability sys_resource;
> +allow hadoop_t self:process { getsched setsched signal signull setrlimit execmem };
> +allow hadoop_t self:fifo_file rw_fifo_file_perms;
> +allow hadoop_t self:key write;
> +allow hadoop_t self:tcp_socket create_stream_socket_perms;
> +allow hadoop_t self:udp_socket create_socket_perms;
> +allow hadoop_t hadoop_domain:process signull;
> +
> +dontaudit hadoop_t self:netlink_route_socket rw_netlink_socket_perms;
> +
> +read_files_pattern(hadoop_t, hadoop_etc_t, hadoop_etc_t)
> +read_lnk_files_pattern(hadoop_t, hadoop_etc_t, hadoop_etc_t)
> +can_exec(hadoop_t, hadoop_etc_t)
> +
> +manage_dirs_pattern(hadoop_t, hadoop_var_lib_t, hadoop_var_lib_t)
> +manage_dirs_pattern(hadoop_t, hadoop_log_t, hadoop_log_t)
> +manage_dirs_pattern(hadoop_t, hadoop_tmp_t, hadoop_tmp_t)
> +manage_dirs_pattern(hadoop_t, hadoop_hsperfdata_t, hadoop_hsperfdata_t)
> +manage_files_pattern(hadoop_t, hadoop_var_lib_t, hadoop_var_lib_t)
> +manage_files_pattern(hadoop_t, hadoop_tmp_t, hadoop_tmp_t)
> +
> +getattr_dirs_pattern(hadoop_t, hadoop_var_run_t, hadoop_var_run_t)
> +
> +files_tmp_filetrans(hadoop_t, hadoop_hsperfdata_t, dir)
> +filetrans_pattern(hadoop_t, hadoop_hsperfdata_t, hadoop_tmp_t, file)
> +
> +kernel_read_network_state(hadoop_t)
> +kernel_read_system_state(hadoop_t)
> +
> +corecmd_exec_bin(hadoop_t)
> +corecmd_exec_shell(hadoop_t)
> +
> +corenet_all_recvfrom_unlabeled(hadoop_t)
> +corenet_all_recvfrom_netlabel(hadoop_t)
> +corenet_sendrecv_hadoop_namenode_client_packets(hadoop_t)
> +corenet_sendrecv_portmap_client_packets(hadoop_t)
> +corenet_sendrecv_zope_client_packets(hadoop_t)
> +corenet_tcp_bind_all_nodes(hadoop_t)
> +corenet_tcp_connect_hadoop_namenode_port(hadoop_t)
> +corenet_tcp_connect_portmap_port(hadoop_t)
> +corenet_tcp_connect_zope_port(hadoop_t)
> +corenet_tcp_sendrecv_all_nodes(hadoop_t)
> +corenet_tcp_sendrecv_all_ports(hadoop_t)
> +corenet_tcp_sendrecv_generic_if(hadoop_t)
> +corenet_tcp_connect_generic_port(hadoop_t)
This port can't be identified?
> +corenet_udp_bind_all_nodes(hadoop_t)
> +corenet_udp_sendrecv_all_nodes(hadoop_t)
> +corenet_udp_sendrecv_all_ports(hadoop_t)
> +corenet_udp_sendrecv_generic_if(hadoop_t)
> +
> +dev_read_rand(hadoop_t)
> +dev_read_sysfs(hadoop_t)
> +dev_read_urand(hadoop_t)
> +
> +files_dontaudit_search_spool(hadoop_t)
> +files_read_usr_files(hadoop_t)
> +files_read_all_files(hadoop_t)
> +
> +fs_getattr_xattr_fs(hadoop_t)
> +
> +java_exec(hadoop_t)
> +
> +# This can be removed on anything post-el5
> +libs_use_ld_so(hadoop_t)
> +libs_use_shared_libs(hadoop_t)
> +
> +miscfiles_read_localization(hadoop_t)
> +
> +userdom_dontaudit_search_user_home_dirs(hadoop_t)
> +userdom_use_user_terminals(hadoop_t)
> +
> +optional_policy(`
> + nis_use_ypbind(hadoop_t)
> +')
> +
> +optional_policy(`
> + nscd_socket_use(hadoop_t)
> +')
> +
> +########################################
> +#
> +# Hadoop datanode policy.
> +#
> +
> +allow hadoop_datanode_t self:process signal;
> +corenet_tcp_connect_hadoop_namenode_port(hadoop_datanode_t)
> +fs_getattr_xattr_fs(hadoop_datanode_t)
> +manage_dirs_pattern(hadoop_datanode_t, hadoop_var_lib_t, hadoop_var_lib_t)
> +
> +########################################
> +#
> +# Hadoop jobtracker policy.
> +#
> +
> +corenet_tcp_bind_zope_port(hadoop_jobtracker_t)
> +corenet_tcp_connect_hadoop_namenode_port(hadoop_jobtracker_t)
> +create_dirs_pattern(hadoop_jobtracker_t, hadoop_jobtracker_log_t, hadoop_jobtracker_log_t)
> +manage_dirs_pattern(hadoop_jobtracker_t, hadoop_var_lib_t, hadoop_var_lib_t)
> +setattr_dirs_pattern(hadoop_jobtracker_t, hadoop_jobtracker_log_t, hadoop_jobtracker_log_t)
> +
> +########################################
> +#
> +# Hadoop namenode policy.
> +#
> +
> +corenet_tcp_bind_hadoop_namenode_port(hadoop_namenode_t)
> +corenet_tcp_connect_hadoop_namenode_port(hadoop_namenode_t)
> +manage_dirs_pattern(hadoop_namenode_t, hadoop_var_lib_t, hadoop_var_lib_t)
> +manage_files_pattern(hadoop_namenode_t, hadoop_var_lib_t, hadoop_var_lib_t)
> +
> +########################################
> +#
> +# Hadoop secondary namenode policy.
> +#
> +
> +corenet_tcp_connect_hadoop_namenode_port(hadoop_secondarynamenode_t)
> +manage_dirs_pattern(hadoop_secondarynamenode_t, hadoop_var_lib_t, hadoop_var_lib_t)
> +
> +########################################
> +#
> +# Hadoop tasktracker policy.
> +#
> +
> +allow hadoop_tasktracker_t self:process signal;
> +
> +corenet_tcp_connect_zope_port(hadoop_tasktracker_t)
> +corenet_tcp_connect_hadoop_namenode_port(hadoop_tasktracker_t)
> +
> +filetrans_pattern(hadoop_tasktracker_t, hadoop_log_t, hadoop_tasktracker_log_t, dir)
> +fs_getattr_xattr_fs(hadoop_tasktracker_t)
> +fs_associate(hadoop_tasktracker_t)
> +
> +manage_dirs_pattern(hadoop_tasktracker_t, hadoop_var_lib_t, hadoop_var_lib_t)
> +manage_dirs_pattern(hadoop_tasktracker_t, hadoop_tasktracker_log_t, hadoop_tasktracker_log_t);
> +
> +########################################
> +#
> +# Hadoop zookeeper client policy.
> +#
> +
> +allow zookeeper_t self:process { getsched sigkill signal signull execmem };
> +allow zookeeper_t self:fifo_file rw_fifo_file_perms;
> +allow zookeeper_t self:tcp_socket create_stream_socket_perms;
> +allow zookeeper_t self:udp_socket create_socket_perms;
> +allow zookeeper_t zookeeper_server_t:process signull;
> +
> +read_files_pattern(zookeeper_t, zookeeper_etc_t, zookeeper_etc_t)
> +read_lnk_files_pattern(zookeeper_t, zookeeper_etc_t, zookeeper_etc_t)
> +
> +setattr_dirs_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
> +append_files_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
> +create_files_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
> +read_files_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
> +setattr_files_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
> +logging_log_filetrans(zookeeper_t, zookeeper_log_t, file)
> +
> +manage_dirs_pattern(zookeeper_t, hadoop_hsperfdata_t, hadoop_hsperfdata_t)
> +manage_files_pattern(zookeeper_t, zookeeper_tmp_t, zookeeper_tmp_t)
> +files_tmp_filetrans(zookeeper_t, hadoop_hsperfdata_t, dir)
> +filetrans_pattern(zookeeper_t, hadoop_hsperfdata_t, zookeeper_tmp_t, file)
> +
> +can_exec(zookeeper_t, zookeeper_exec_t)
> +
> +kernel_read_network_state(zookeeper_t)
> +kernel_read_system_state(zookeeper_t)
> +
> +corecmd_exec_bin(zookeeper_t)
> +corecmd_exec_shell(zookeeper_t)
> +
> +dontaudit zookeeper_t self:netlink_route_socket rw_netlink_socket_perms;
> +
> +corenet_all_recvfrom_unlabeled(zookeeper_t)
> +corenet_all_recvfrom_netlabel(zookeeper_t)
> +corenet_sendrecv_zookeeper_client_client_packets(zookeeper_t)
> +corenet_tcp_bind_all_nodes(zookeeper_t)
> +corenet_tcp_connect_zookeeper_client_port(zookeeper_t)
> +corenet_tcp_sendrecv_all_nodes(zookeeper_t)
> +corenet_tcp_sendrecv_all_ports(zookeeper_t)
> +corenet_tcp_sendrecv_generic_if(zookeeper_t)
> +corenet_tcp_connect_generic_port(zookeeper_t)
Another port to lock down if possible. Please recheck the port usage
across the board.
> +corenet_udp_bind_all_nodes(zookeeper_t)
> +corenet_udp_sendrecv_all_nodes(zookeeper_t)
> +corenet_udp_sendrecv_all_ports(zookeeper_t)
> +corenet_udp_sendrecv_generic_if(zookeeper_t)
> +
> +dev_read_rand(zookeeper_t)
> +dev_read_sysfs(zookeeper_t)
> +dev_read_urand(zookeeper_t)
> +
> +files_read_etc_files(zookeeper_t)
> +files_read_usr_files(zookeeper_t)
> +
> +# This can be removed on anything post-el5
> +libs_use_ld_so(zookeeper_t)
> +libs_use_shared_libs(zookeeper_t)
> +
> +miscfiles_read_localization(zookeeper_t)
> +
> +sysnet_read_config(zookeeper_t)
> +
> +userdom_dontaudit_search_user_home_dirs(zookeeper_t)
> +userdom_use_user_terminals(zookeeper_t)
> +
> +java_exec(zookeeper_t)
> +
> +optional_policy(`
> + nscd_socket_use(zookeeper_t)
> +')
> +
> +########################################
> +#
> +# Hadoop zookeeper server policy.
> +#
> +
> +allow zookeeper_server_t self:capability kill;
> +allow zookeeper_server_t self:process { execmem getsched sigkill signal signull };
> +allow zookeeper_server_t self:fifo_file rw_fifo_file_perms;
> +allow zookeeper_server_t self:netlink_route_socket rw_netlink_socket_perms;
> +allow zookeeper_server_t self:tcp_socket create_stream_socket_perms;
> +allow zookeeper_server_t self:udp_socket create_socket_perms;
> +
> +read_files_pattern(zookeeper_server_t, zookeeper_etc_t, zookeeper_etc_t)
> +read_lnk_files_pattern(zookeeper_server_t, zookeeper_etc_t, zookeeper_etc_t)
> +
> +manage_dirs_pattern(zookeeper_server_t, zookeeper_server_var_t, zookeeper_server_var_t)
> +manage_files_pattern(zookeeper_server_t, zookeeper_server_var_t, zookeeper_server_var_t)
> +files_var_lib_filetrans(zookeeper_server_t, zookeeper_server_var_t, { dir file })
> +
> +setattr_dirs_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
> +append_files_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
> +create_files_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
> +read_files_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
> +setattr_files_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
> +logging_log_filetrans(zookeeper_server_t, zookeeper_log_t, file)
> +
> +manage_files_pattern(zookeeper_server_t, zookeeper_server_var_run_t, zookeeper_server_var_run_t)
> +files_pid_filetrans(zookeeper_server_t, zookeeper_server_var_run_t, file)
> +
> +manage_dirs_pattern(zookeeper_server_t, hadoop_hsperfdata_t, hadoop_hsperfdata_t)
> +manage_files_pattern(zookeeper_server_t, zookeeper_server_tmp_t, zookeeper_server_tmp_t)
> +files_tmp_filetrans(zookeeper_server_t, hadoop_hsperfdata_t, dir)
> +filetrans_pattern(zookeeper_server_t, hadoop_hsperfdata_t, zookeeper_server_tmp_t, file)
> +
> +can_exec(zookeeper_server_t, zookeeper_server_exec_t)
> +
> +kernel_read_network_state(zookeeper_server_t)
> +kernel_read_system_state(zookeeper_server_t)
> +
> +corecmd_exec_bin(zookeeper_server_t)
> +corecmd_exec_shell(zookeeper_server_t)
> +
> +corenet_all_recvfrom_unlabeled(zookeeper_server_t)
> +corenet_all_recvfrom_netlabel(zookeeper_server_t)
> +corenet_sendrecv_zookeeper_election_client_packets(zookeeper_server_t)
> +corenet_sendrecv_zookeeper_leader_client_packets(zookeeper_server_t)
> +corenet_sendrecv_zookeeper_client_server_packets(zookeeper_server_t)
> +corenet_sendrecv_zookeeper_election_server_packets(zookeeper_server_t)
> +corenet_sendrecv_zookeeper_leader_server_packets(zookeeper_server_t)
> +corenet_tcp_bind_all_nodes(zookeeper_server_t)
> +corenet_tcp_bind_zookeeper_client_port(zookeeper_server_t)
> +corenet_tcp_bind_zookeeper_election_port(zookeeper_server_t)
> +corenet_tcp_bind_zookeeper_leader_port(zookeeper_server_t)
> +corenet_tcp_connect_zookeeper_election_port(zookeeper_server_t)
> +corenet_tcp_connect_zookeeper_leader_port(zookeeper_server_t)
> +corenet_tcp_sendrecv_generic_if(zookeeper_server_t)
> +corenet_tcp_sendrecv_generic_node(zookeeper_server_t)
> +corenet_tcp_sendrecv_all_ports(zookeeper_server_t)
> +corenet_tcp_connect_generic_port(zookeeper_server_t)
> +corenet_udp_sendrecv_generic_if(zookeeper_server_t)
> +corenet_udp_sendrecv_all_nodes(zookeeper_server_t)
> +corenet_udp_sendrecv_all_ports(zookeeper_server_t)
> +corenet_udp_bind_all_nodes(zookeeper_server_t)
> +
> +dev_read_rand(zookeeper_server_t)
> +dev_read_sysfs(zookeeper_server_t)
> +dev_read_urand(zookeeper_server_t)
> +
> +files_read_etc_files(zookeeper_server_t)
> +files_read_usr_files(zookeeper_server_t)
> +
> +fs_getattr_xattr_fs(zookeeper_server_t)
> +
> +# This can be removed on anything post-el5
> +libs_use_ld_so(zookeeper_server_t)
> +libs_use_shared_libs(zookeeper_server_t)
> +
> +logging_send_syslog_msg(zookeeper_server_t)
> +
> +miscfiles_read_localization(zookeeper_server_t)
> +
> +sysnet_read_config(zookeeper_server_t)
> +
> +java_exec(zookeeper_server_t)
> diff --git a/policy/modules/system/unconfined.te b/policy/modules/system/unconfined.te
> index f976344..ac27264 100644
> --- a/policy/modules/system/unconfined.te
> +++ b/policy/modules/system/unconfined.te
> @@ -118,6 +118,10 @@ optional_policy(`
> ')
>
> optional_policy(`
> + hadoop_run(unconfined_t, unconfined_r)
> +')
> +
> +optional_policy(`
> inn_domtrans(unconfined_t)
> ')
>
> @@ -210,6 +214,10 @@ optional_policy(`
> xserver_domtrans(unconfined_t)
> ')
>
> +optional_policy(`
> + zookeeper_run_client(unconfined_t, unconfined_r)
> +')
> +
> ########################################
> #
> # Unconfined Execmem Local policy
--
Chris PeBenito
Tresys Technology, LLC
http://www.tresys.com | oss.tresys.com
On 10/04/2010 02:18 PM, Christopher J. PeBenito wrote:
> On 10/04/10 13:15, Paul Nuzzi wrote:
>> On 10/01/2010 01:56 PM, Christopher J. PeBenito wrote:
>>> On 10/01/10 11:17, Paul Nuzzi wrote:
>>>> On 10/01/2010 08:02 AM, Dominick Grift wrote:
>>>>> On Thu, Sep 30, 2010 at 03:39:40PM -0400, Paul Nuzzi wrote:
>>>>>> I updated the patch based on recommendations from the mailing list.
>>>>>> All of hadoop's services are included in one module instead of
>>>>>> individual ones. Unconfined and sysadm roles are given access to
>>>>>> hadoop and zookeeper client domain transitions. The services are started
>>>>>> using run_init. Let me know what you think.
>>>>>
>>>>> Why do some hadoop domain need to manage generic tmp?
>>>>>
>>>>> files_manage_generic_tmp_dirs(zookeeper_t)
>>>>> files_manage_generic_tmp_dirs(hadoop_t)
>>>>> files_manage_generic_tmp_dirs(hadoop_$1_initrc_t)
>>>>> files_manage_generic_tmp_files(hadoop_$1_initrc_t)
>>>>> files_manage_generic_tmp_files(hadoop_$1_t)
>>>>> files_manage_generic_tmp_dirs(hadoop_$1_t)
>>>>
>>>> This has to be done for Java JMX to work. All of the files are written to
>>>> /tmp/hsperfdata_(hadoop/zookeeper). /tmp/hsperfdata_ is labeled tmp_t while
>>>> all the files for each service are labeled with hadoop_*_tmp_t. The first service
>>>> will end up owning the directory if it is not labeled tmp_t.
>>>
>>> The hsperfdata dir in /tmp is certainly the bane of policy writers. Based on a quick look through the policy, it looks like the only dir they create in /tmp is this hsperfdata dir. I suggest you do something like
>>>
>>> files_tmp_filetrans(hadoop_t, hadoop_hsperfdata_t, dir)
>>> files_tmp_filetrans(zookeeper_t, hadoop_hsperfdata_t, dir)
>>>
>>> filetrans_pattern(hadoop_t, hadoop_hsperfdata_t, hadoop_tmp_t, file)
>>> filetrans_pattern(zookeeper_t, hadoop_hsperfdata_t, zookeeper_tmp_t, file)
>>>
>>
>> That looks like a better way to handle the tmp_t problem.
>>
>> I changed the patch with your comments. Hopefully this will be one of the last updates.
>> Tested on a CDH3 cluster as a module without any problems.
>
> There are several little issues with style, but it'll be easier just to fix them when it's committed.
>
> Other comments inline.
>
I did my best locking down the ports hadoop uses. Unfortunately, the services use high, randomized ports, making
tcp_connect_generic_port a must-have. Hopefully one day hadoop will settle on static ports. I added the hadoop_datanode port 50010 since it is important to lock down that service. I changed the patch based on the rest of the comments.
Signed-off-by: Paul Nuzzi <[email protected]>
---
policy/modules/kernel/corenetwork.te.in | 5
policy/modules/roles/sysadm.te | 8
policy/modules/services/hadoop.fc | 54 ++++
policy/modules/services/hadoop.if | 352 +++++++++++++++++++++++++++++
policy/modules/services/hadoop.te | 379 ++++++++++++++++++++++++++++++++
policy/modules/system/unconfined.te | 8
6 files changed, 806 insertions(+)
diff --git a/policy/modules/kernel/corenetwork.te.in b/policy/modules/kernel/corenetwork.te.in
index 2ecdde8..73163db 100644
--- a/policy/modules/kernel/corenetwork.te.in
+++ b/policy/modules/kernel/corenetwork.te.in
@@ -105,6 +105,8 @@ network_port(giftd, tcp,1213,s0)
network_port(git, tcp,9418,s0, udp,9418,s0)
network_port(gopher, tcp,70,s0, udp,70,s0)
network_port(gpsd, tcp,2947,s0)
+network_port(hadoop_datanode, tcp, 50010,s0)
+network_port(hadoop_namenode, tcp, 8020,s0)
network_port(hddtemp, tcp,7634,s0)
network_port(howl, tcp,5335,s0, udp,5353,s0)
network_port(hplip, tcp,1782,s0, tcp,2207,s0, tcp,2208,s0, tcp, 8290,s0, tcp,50000,s0, tcp,50002,s0, tcp,8292,s0, tcp,9100,s0, tcp,9101,s0, tcp,9102,s0, tcp,9220,s0, tcp,9221,s0, tcp,9222,s0, tcp,9280,s0, tcp,9281,s0, tcp,9282,s0, tcp,9290,s0, tcp,9291,s0, tcp,9292,s0)
@@ -211,6 +213,9 @@ network_port(xdmcp, udp,177,s0, tcp,177,s0)
network_port(xen, tcp,8002,s0)
network_port(xfs, tcp,7100,s0)
network_port(xserver, tcp,6000-6020,s0)
+network_port(zookeeper_client, tcp, 2181,s0)
+network_port(zookeeper_election, tcp, 3888,s0)
+network_port(zookeeper_leader, tcp, 2888,s0)
network_port(zebra, tcp,2600-2604,s0, tcp,2606,s0, udp,2600-2604,s0, udp,2606,s0)
network_port(zope, tcp,8021,s0)
diff --git a/policy/modules/roles/sysadm.te b/policy/modules/roles/sysadm.te
index cad05ff..d2bc2b1 100644
--- a/policy/modules/roles/sysadm.te
+++ b/policy/modules/roles/sysadm.te
@@ -152,6 +152,10 @@ optional_policy(`
')
optional_policy(`
+ hadoop_run(sysadm_t, sysadm_r)
+')
+
+optional_policy(`
# allow system administrator to use the ipsec script to look
# at things (e.g., ipsec auto --status)
# probably should create an ipsec_admin role for this kind of thing
@@ -392,6 +396,10 @@ optional_policy(`
yam_run(sysadm_t, sysadm_r)
')
+optional_policy(`
+ hadoop_zookeeper_run_client(sysadm_t, sysadm_r)
+')
+
ifndef(`distro_redhat',`
optional_policy(`
auth_role(sysadm_r, sysadm_t)
diff --git a/policy/modules/services/hadoop.fc b/policy/modules/services/hadoop.fc
new file mode 100644
index 0000000..a09275d
--- /dev/null
+++ b/policy/modules/services/hadoop.fc
@@ -0,0 +1,54 @@
+/etc/hadoop.*(/.*)? gen_context(system_u:object_r:hadoop_etc_t,s0)
+
+/etc/rc\.d/init\.d/hadoop-(.*)?-datanode -- gen_context(system_u:object_r:hadoop_datanode_initrc_exec_t,s0)
+/etc/rc\.d/init\.d/hadoop-(.*)?-jobtracker -- gen_context(system_u:object_r:hadoop_jobtracker_initrc_exec_t,s0)
+/etc/rc\.d/init\.d/hadoop-(.*)?-namenode -- gen_context(system_u:object_r:hadoop_namenode_initrc_exec_t,s0)
+/etc/rc\.d/init\.d/hadoop-(.*)?-secondarynamenode -- gen_context(system_u:object_r:hadoop_secondarynamenode_initrc_exec_t,s0)
+/etc/rc\.d/init\.d/hadoop-(.*)?-tasktracker -- gen_context(system_u:object_r:hadoop_tasktracker_initrc_exec_t,s0)
+/etc/rc\.d/init\.d/hadoop-zookeeper -- gen_context(system_u:object_r:zookeeper_server_initrc_exec_t,s0)
+/etc/init\.d/hadoop-datanode -- gen_context(system_u:object_r:hadoop_datanode_initrc_exec_t,s0)
+/etc/init\.d/hadoop-jobtracker -- gen_context(system_u:object_r:hadoop_jobtracker_initrc_exec_t,s0)
+/etc/init\.d/hadoop-namenode -- gen_context(system_u:object_r:hadoop_namenode_initrc_exec_t,s0)
+/etc/init\.d/hadoop-secondarynamenode -- gen_context(system_u:object_r:hadoop_secondarynamenode_initrc_exec_t,s0)
+/etc/init\.d/hadoop-tasktracker -- gen_context(system_u:object_r:hadoop_tasktracker_initrc_exec_t,s0)
+/etc/init\.d/zookeeper -- gen_context(system_u:object_r:zookeeper_server_initrc_exec_t,s0)
+
+/etc/zookeeper(/.*)? gen_context(system_u:object_r:zookeeper_etc_t,s0)
+/etc/zookeeper\.dist(/.*)? gen_context(system_u:object_r:zookeeper_etc_t,s0)
+
+/usr/lib/hadoop(.*)?/bin/hadoop -- gen_context(system_u:object_r:hadoop_exec_t,s0)
+
+/usr/bin/zookeeper-client -- gen_context(system_u:object_r:zookeeper_exec_t,s0)
+/usr/bin/zookeeper-server -- gen_context(system_u:object_r:zookeeper_server_exec_t,s0)
+
+/var/zookeeper(/.*)? gen_context(system_u:object_r:zookeeper_server_var_t,s0)
+/var/lib/zookeeper(/.*)? gen_context(system_u:object_r:zookeeper_server_var_t,s0)
+
+/var/lib/hadoop(.*)? gen_context(system_u:object_r:hadoop_var_lib_t,s0)
+/var/lib/hadoop(.*)?/cache/hadoop/dfs/data(/.*)? gen_context(system_u:object_r:hadoop_datanode_var_lib_t,s0)
+/var/lib/hadoop(.*)?/cache/hadoop/dfs/name(/.*)? gen_context(system_u:object_r:hadoop_namenode_var_lib_t,s0)
+/var/lib/hadoop(.*)?/cache/hadoop/dfs/namesecondary(/.*)? gen_context(system_u:object_r:hadoop_secondarynamenode_var_lib_t,s0)
+/var/lib/hadoop(.*)?/cache/hadoop/mapred/local/jobTracker(/.*)? gen_context(system_u:object_r:hadoop_jobtracker_var_lib_t,s0)
+/var/lib/hadoop(.*)?/cache/hadoop/mapred/local/taskTracker(/.*)? gen_context(system_u:object_r:hadoop_tasktracker_var_lib_t,s0)
+
+/var/lock/subsys/hadoop-datanode -- gen_context(system_u:object_r:hadoop_datanode_lock_t,s0)
+/var/lock/subsys/hadoop-namenode -- gen_context(system_u:object_r:hadoop_namenode_lock_t,s0)
+/var/lock/subsys/hadoop-jobtracker -- gen_context(system_u:object_r:hadoop_jobtracker_lock_t,s0)
+/var/lock/subsys/hadoop-tasktracker -- gen_context(system_u:object_r:hadoop_tasktracker_lock_t,s0)
+/var/lock/subsys/hadoop-secondarynamenode -- gen_context(system_u:object_r:hadoop_secondarynamenode_lock_t,s0)
+
+/var/log/hadoop(.*)? gen_context(system_u:object_r:hadoop_log_t,s0)
+/var/log/hadoop(.*)?/hadoop-hadoop-datanode-(.*)? gen_context(system_u:object_r:hadoop_datanode_log_t,s0)
+/var/log/hadoop(.*)?/hadoop-hadoop-jobtracker-(.*)? gen_context(system_u:object_r:hadoop_jobtracker_log_t,s0)
+/var/log/hadoop(.*)?/hadoop-hadoop-namenode-(.*)? gen_context(system_u:object_r:hadoop_namenode_log_t,s0)
+/var/log/hadoop(.*)?/hadoop-hadoop-secondarynamenode-(.*)? gen_context(system_u:object_r:hadoop_secondarynamenode_log_t,s0)
+/var/log/hadoop(.*)?/hadoop-hadoop-tasktracker-(.*)? gen_context(system_u:object_r:hadoop_tasktracker_log_t,s0)
+/var/log/hadoop(.*)?/history(/.*)? gen_context(system_u:object_r:hadoop_jobtracker_log_t,s0)
+/var/log/zookeeper(/.*)? gen_context(system_u:object_r:zookeeper_log_t,s0)
+
+/var/run/hadoop(.*)? -d gen_context(system_u:object_r:hadoop_var_run_t,s0)
+/var/run/hadoop(.*)?/hadoop-hadoop-datanode\.pid -- gen_context(system_u:object_r:hadoop_datanode_initrc_var_run_t,s0)
+/var/run/hadoop(.*)?/hadoop-hadoop-namenode\.pid -- gen_context(system_u:object_r:hadoop_namenode_initrc_var_run_t,s0)
+/var/run/hadoop(.*)?/hadoop-hadoop-jobtracker\.pid -- gen_context(system_u:object_r:hadoop_jobtracker_initrc_var_run_t,s0)
+/var/run/hadoop(.*)?/hadoop-hadoop-tasktracker\.pid -- gen_context(system_u:object_r:hadoop_tasktracker_initrc_var_run_t,s0)
+/var/run/hadoop(.*)?/hadoop-hadoop-secondarynamenode\.pid -- gen_context(system_u:object_r:hadoop_secondarynamenode_initrc_var_run_t,s0)
diff --git a/policy/modules/services/hadoop.if b/policy/modules/services/hadoop.if
new file mode 100644
index 0000000..e919bcb
--- /dev/null
+++ b/policy/modules/services/hadoop.if
@@ -0,0 +1,352 @@
+## <summary>Software for reliable, scalable, distributed computing.</summary>
+
+#######################################
+## <summary>
+## The template to define a hadoop domain.
+## </summary>
+## <param name="domain_prefix">
+## <summary>
+## Domain prefix to be used.
+## </summary>
+## </param>
+#
+template(`hadoop_domain_template',`
+ gen_require(`
+ attribute hadoop_domain;
+ type hadoop_log_t, hadoop_var_lib_t, hadoop_var_run_t;
+ type hadoop_exec_t, hadoop_hsperfdata_t;
+ ')
+
+ ########################################
+ #
+ # Shared declarations.
+ #
+
+ type hadoop_$1_t, hadoop_domain;
+ domain_type(hadoop_$1_t)
+ domain_entry_file(hadoop_$1_t, hadoop_exec_t)
+
+ type hadoop_$1_initrc_t;
+ type hadoop_$1_initrc_exec_t;
+ init_script_domain(hadoop_$1_initrc_t, hadoop_$1_initrc_exec_t)
+
+ role system_r types { hadoop_$1_initrc_t hadoop_$1_t };
+
+ type hadoop_$1_lock_t;
+ files_lock_file(hadoop_$1_lock_t)
+ files_lock_filetrans(hadoop_$1_initrc_t, hadoop_$1_lock_t, file)
+
+ type hadoop_$1_log_t;
+ logging_log_file(hadoop_$1_log_t)
+ filetrans_pattern(hadoop_$1_initrc_t, hadoop_log_t, hadoop_$1_log_t, {dir file})
+ filetrans_pattern(hadoop_$1_t, hadoop_log_t, hadoop_$1_log_t, {dir file})
+
+ type hadoop_$1_var_lib_t;
+ files_type(hadoop_$1_var_lib_t)
+ filetrans_pattern(hadoop_$1_t, hadoop_var_lib_t, hadoop_$1_var_lib_t, file)
+
+ type hadoop_$1_initrc_var_run_t;
+ files_pid_file(hadoop_$1_initrc_var_run_t)
+ filetrans_pattern(hadoop_$1_initrc_t, hadoop_var_run_t, hadoop_$1_initrc_var_run_t, file)
+
+ type hadoop_$1_tmp_t;
+ files_tmp_file(hadoop_$1_tmp_t)
+ files_tmp_filetrans(hadoop_$1_t, hadoop_hsperfdata_t, dir)
+ filetrans_pattern(hadoop_$1_t, hadoop_hsperfdata_t, hadoop_$1_tmp_t, file)
+
+ ####################################
+ #
+ # Shared hadoop_$1 initrc policy.
+ #
+
+ allow hadoop_$1_initrc_t hadoop_$1_t:process { signal signull };
+ allow hadoop_$1_initrc_t self:capability { setuid setgid };
+ allow hadoop_$1_initrc_t self:fifo_file rw_fifo_file_perms;
+ allow hadoop_$1_initrc_t self:process setsched;
+
+ consoletype_exec(hadoop_$1_initrc_t)
+ corecmd_exec_bin(hadoop_$1_initrc_t)
+ corecmd_exec_shell(hadoop_$1_initrc_t)
+
+ domtrans_pattern(hadoop_$1_initrc_t, hadoop_exec_t, hadoop_$1_t)
+ dontaudit hadoop_$1_initrc_t self:capability sys_tty_config;
+
+ files_read_etc_files(hadoop_$1_initrc_t)
+ files_read_usr_files(hadoop_$1_initrc_t)
+ files_search_pids(hadoop_$1_initrc_t)
+ files_search_locks(hadoop_$1_initrc_t)
+ fs_getattr_xattr_fs(hadoop_$1_initrc_t)
+
+ hadoop_exec_config_files(hadoop_$1_initrc_t)
+
+ init_rw_utmp(hadoop_$1_initrc_t)
+ init_use_script_ptys(hadoop_$1_initrc_t)
+
+ kernel_read_kernel_sysctls(hadoop_$1_initrc_t)
+ kernel_read_sysctl(hadoop_$1_initrc_t)
+ kernel_read_system_state(hadoop_$1_initrc_t)
+
+ logging_send_syslog_msg(hadoop_$1_initrc_t)
+ logging_send_audit_msgs(hadoop_$1_initrc_t)
+ logging_search_logs(hadoop_$1_initrc_t)
+
+ manage_files_pattern(hadoop_$1_initrc_t, hadoop_$1_lock_t, hadoop_$1_lock_t)
+ manage_files_pattern(hadoop_$1_initrc_t, hadoop_$1_initrc_var_run_t, hadoop_$1_initrc_var_run_t)
+ manage_files_pattern(hadoop_$1_initrc_t, hadoop_$1_log_t, hadoop_$1_log_t)
+ manage_dirs_pattern(hadoop_$1_initrc_t, hadoop_var_run_t, hadoop_var_run_t)
+ manage_files_pattern(hadoop_$1_initrc_t, hadoop_var_run_t, hadoop_var_run_t)
+
+ miscfiles_read_localization(hadoop_$1_initrc_t)
+
+ optional_policy(`
+ nscd_socket_use(hadoop_$1_initrc_t)
+ ')
+
+ term_use_generic_ptys(hadoop_$1_initrc_t)
+
+ userdom_dontaudit_search_user_home_dirs(hadoop_$1_initrc_t)
+
+ ####################################
+ #
+ # Shared hadoop_$1 policy.
+ #
+
+ allow hadoop_$1_t hadoop_domain:process signull;
+ allow hadoop_$1_t self:fifo_file rw_fifo_file_perms;
+ allow hadoop_$1_t self:process execmem;
+ allow hadoop_$1_t hadoop_var_run_t:dir getattr;
+
+ corecmd_exec_bin(hadoop_$1_t)
+ corecmd_exec_shell(hadoop_$1_t)
+
+ dev_read_rand(hadoop_$1_t)
+ dev_read_urand(hadoop_$1_t)
+ dev_read_sysfs(hadoop_$1_t)
+ dontaudit hadoop_$1_t self:netlink_route_socket rw_netlink_socket_perms;
+
+ files_read_etc_files(hadoop_$1_t)
+ files_search_pids(hadoop_$1_t)
+ files_search_var_lib(hadoop_$1_t)
+
+ hadoop_exec_config_files(hadoop_$1_t)
+
+ java_exec(hadoop_$1_t)
+
+ kernel_read_network_state(hadoop_$1_t)
+ kernel_read_system_state(hadoop_$1_t)
+
+ logging_search_logs(hadoop_$1_t)
+
+ manage_dirs_pattern(hadoop_$1_t, hadoop_$1_var_lib_t, hadoop_$1_var_lib_t)
+ manage_dirs_pattern(hadoop_$1_t, hadoop_hsperfdata_t, hadoop_hsperfdata_t)
+ manage_files_pattern(hadoop_$1_t, hadoop_$1_log_t, hadoop_$1_log_t)
+ manage_files_pattern(hadoop_$1_t, hadoop_$1_var_lib_t, hadoop_$1_var_lib_t)
+ manage_files_pattern(hadoop_$1_t, hadoop_$1_tmp_t, hadoop_$1_tmp_t)
+ miscfiles_read_localization(hadoop_$1_t)
+
+ optional_policy(`
+ nscd_socket_use(hadoop_$1_t)
+ ')
+
+ sysnet_read_config(hadoop_$1_t)
+
+ allow hadoop_$1_t self:tcp_socket create_stream_socket_perms;
+ corenet_all_recvfrom_unlabeled(hadoop_$1_t)
+ corenet_all_recvfrom_netlabel(hadoop_$1_t)
+ corenet_tcp_bind_all_nodes(hadoop_$1_t)
+ corenet_tcp_sendrecv_generic_if(hadoop_$1_t)
+ corenet_tcp_sendrecv_generic_node(hadoop_$1_t)
+ corenet_tcp_sendrecv_all_ports(hadoop_$1_t)
+ # Hadoop uses high ordered random ports for services
+ # If permanent ports are chosen, remove line below and lock down
+ corenet_tcp_connect_generic_port(hadoop_$1_t)
+
+ allow hadoop_$1_t self:udp_socket create_socket_perms;
+ corenet_udp_sendrecv_generic_if(hadoop_$1_t)
+ corenet_udp_sendrecv_all_nodes(hadoop_$1_t)
+ corenet_udp_bind_all_nodes(hadoop_$1_t)
+')
+
+########################################
+## <summary>
+## Execute hadoop in the
+## hadoop domain.
+## </summary>
+## <param name="domain">
+## <summary>
+## Domain allowed to transition.
+## </summary>
+## </param>
+#
+interface(`hadoop_domtrans',`
+ gen_require(`
+ type hadoop_t, hadoop_exec_t;
+ ')
+
+ files_search_usr($1)
+ libs_search_lib($1)
+ domtrans_pattern($1, hadoop_exec_t, hadoop_t)
+')
+
+########################################
+## <summary>
+## Execute hadoop in the hadoop domain,
+## and allow the specified role the
+## hadoop domain.
+## </summary>
+## <param name="domain">
+## <summary>
+## Domain allowed to transition.
+## </summary>
+## </param>
+## <param name="role">
+## <summary>
+## Role allowed access.
+## </summary>
+## </param>
+## <rolecap/>
+#
+interface(`hadoop_run',`
+ gen_require(`
+ type hadoop_t;
+ ')
+
+ hadoop_domtrans($1)
+ role $2 types hadoop_t;
+
+ allow $1 hadoop_t:process { ptrace signal_perms };
+ ps_process_pattern($1, hadoop_t)
+')
+
+########################################
+## <summary>
+## Execute zookeeper client in the
+## zookeeper client domain.
+## </summary>
+## <param name="domain">
+## <summary>
+## Domain allowed to transition.
+## </summary>
+## </param>
+#
+interface(`hadoop_domtrans_zookeeper_client',`
+ gen_require(`
+ type zookeeper_t, zookeeper_exec_t;
+ ')
+
+ corecmd_search_bin($1)
+ files_search_usr($1)
+ domtrans_pattern($1, zookeeper_exec_t, zookeeper_t)
+')
+
+########################################
+## <summary>
+## Execute zookeeper server in the
+## zookeeper server domain.
+## </summary>
+## <param name="domain">
+## <summary>
+## Domain allowed to transition.
+## </summary>
+## </param>
+#
+interface(`hadoop_domtrans_zookeeper_server',`
+ gen_require(`
+ type zookeeper_server_t, zookeeper_server_exec_t;
+ ')
+
+ corecmd_search_bin($1)
+ files_search_usr($1)
+ domtrans_pattern($1, zookeeper_server_exec_t, zookeeper_server_t)
+')
+
+########################################
+## <summary>
+## Execute zookeeper server in the
+## zookeeper domain.
+## </summary>
+## <param name="domain">
+## <summary>
+## Domain allowed to transition.
+## </summary>
+## </param>
+#
+interface(`hadoop_zookeeper_initrc_domtrans_server',`
+ gen_require(`
+ type zookeeper_server_initrc_exec_t;
+ ')
+
+ init_labeled_script_domtrans($1, zookeeper_server_initrc_exec_t)
+')
+
+########################################
+## <summary>
+## Execute zookeeper client in the
+## zookeeper client domain, and allow the
+## specified role the zookeeper client domain.
+## </summary>
+## <param name="domain">
+## <summary>
+## Domain allowed to transition.
+## </summary>
+## </param>
+## <param name="role">
+## <summary>
+## Role allowed access.
+## </summary>
+## </param>
+## <rolecap/>
+#
+interface(`hadoop_zookeeper_run_client',`
+ gen_require(`
+ type zookeeper_t;
+ ')
+
+ hadoop_domtrans_zookeeper_client($1)
+ role $2 types zookeeper_t;
+
+ allow $1 zookeeper_t:process { ptrace signal_perms };
+ ps_process_pattern($1, zookeeper_t)
+')
+
+########################################
+## <summary>
+## Give permission to a domain to read
+## hadoop_etc_t
+## </summary>
+## <param name="domain">
+## <summary>
+## Domain needing read permission
+## </summary>
+## </param>
+#
+interface(`hadoop_read_config_files', `
+ gen_require(`
+ type hadoop_etc_t;
+ ')
+
+ allow $1 hadoop_etc_t:dir search_dir_perms;
+ allow $1 hadoop_etc_t:lnk_file { read getattr };
+ allow $1 hadoop_etc_t:file read_file_perms;
+')
+
+########################################
+## <summary>
+## Give permission to a domain to
+## execute hadoop_etc_t
+## </summary>
+## <param name="domain">
+## <summary>
+## Domain needing read and execute
+## permission
+## </summary>
+## </param>
+#
+interface(`hadoop_exec_config_files', `
+ gen_require(`
+ type hadoop_etc_t;
+ ')
+
+ hadoop_read_config_files($1)
+ allow $1 hadoop_etc_t:file { execute execute_no_trans};
+')
diff --git a/policy/modules/services/hadoop.te b/policy/modules/services/hadoop.te
new file mode 100644
index 0000000..587c393
--- /dev/null
+++ b/policy/modules/services/hadoop.te
@@ -0,0 +1,379 @@
+policy_module(hadoop, 1.0.0)
+
+########################################
+#
+# Hadoop declarations.
+#
+
+# Attribute grouping the per-service hadoop daemon domains
+# declared by hadoop_domain_template() below.
+attribute hadoop_domain;
+
+type hadoop_t;
+type hadoop_exec_t;
+application_domain(hadoop_t, hadoop_exec_t)
+ubac_constrained(hadoop_t)
+
+type hadoop_etc_t;
+files_config_file(hadoop_etc_t)
+
+type hadoop_var_lib_t;
+files_type(hadoop_var_lib_t)
+
+type hadoop_log_t;
+logging_log_file(hadoop_log_t)
+
+type hadoop_var_run_t;
+files_pid_file(hadoop_var_run_t)
+
+type hadoop_tmp_t;
+files_tmp_file(hadoop_tmp_t)
+ubac_constrained(hadoop_tmp_t)
+
+# Shared label for the Java JMX /tmp/hsperfdata_* directory;
+# hadoop and zookeeper domains all create their per-process
+# files under it (see the files_tmp_filetrans rules below).
+type hadoop_hsperfdata_t;
+files_tmp_file(hadoop_hsperfdata_t)
+ubac_constrained(hadoop_hsperfdata_t)
+
+# One hadoop_<svc>_t domain (plus initrc domain and private
+# log/lib/pid/tmp types) per hadoop service.
+hadoop_domain_template(datanode)
+hadoop_domain_template(jobtracker)
+hadoop_domain_template(namenode)
+hadoop_domain_template(secondarynamenode)
+hadoop_domain_template(tasktracker)
+
+########################################
+#
+# Hadoop zookeeper client declarations.
+#
+
+type zookeeper_t;
+type zookeeper_exec_t;
+application_domain(zookeeper_t, zookeeper_exec_t)
+ubac_constrained(zookeeper_t)
+
+type zookeeper_etc_t;
+files_config_file(zookeeper_etc_t)
+
+type zookeeper_log_t;
+logging_log_file(zookeeper_log_t)
+
+type zookeeper_tmp_t;
+files_tmp_file(zookeeper_tmp_t)
+ubac_constrained(zookeeper_tmp_t)
+
+########################################
+#
+# Hadoop zookeeper server declarations.
+#
+
+type zookeeper_server_t;
+type zookeeper_server_exec_t;
+init_daemon_domain(zookeeper_server_t, zookeeper_server_exec_t)
+
+type zookeeper_server_initrc_exec_t;
+init_script_file(zookeeper_server_initrc_exec_t)
+
+type zookeeper_server_var_t;
+files_type(zookeeper_server_var_t)
+
+# This will need a file context specification.
+type zookeeper_server_var_run_t;
+files_pid_file(zookeeper_server_var_run_t)
+
+type zookeeper_server_tmp_t;
+files_tmp_file(zookeeper_server_tmp_t)
+
+########################################
+#
+# Hadoop policy.
+#
+
+allow hadoop_t self:capability sys_resource;
+allow hadoop_t self:process { getsched setsched signal signull setrlimit execmem };
+allow hadoop_t self:fifo_file rw_fifo_file_perms;
+allow hadoop_t self:key write;
+allow hadoop_t self:tcp_socket create_stream_socket_perms;
+allow hadoop_t self:udp_socket create_socket_perms;
+allow hadoop_t hadoop_domain:process signull;
+
+dontaudit hadoop_t self:netlink_route_socket rw_netlink_socket_perms;
+
+read_files_pattern(hadoop_t, hadoop_etc_t, hadoop_etc_t)
+read_lnk_files_pattern(hadoop_t, hadoop_etc_t, hadoop_etc_t)
+can_exec(hadoop_t, hadoop_etc_t)
+
+manage_dirs_pattern(hadoop_t, hadoop_var_lib_t, hadoop_var_lib_t)
+manage_dirs_pattern(hadoop_t, hadoop_log_t, hadoop_log_t)
+manage_dirs_pattern(hadoop_t, hadoop_tmp_t, hadoop_tmp_t)
+manage_dirs_pattern(hadoop_t, hadoop_hsperfdata_t, hadoop_hsperfdata_t)
+manage_files_pattern(hadoop_t, hadoop_var_lib_t, hadoop_var_lib_t)
+manage_files_pattern(hadoop_t, hadoop_tmp_t, hadoop_tmp_t)
+
+getattr_dirs_pattern(hadoop_t, hadoop_var_run_t, hadoop_var_run_t)
+
+files_tmp_filetrans(hadoop_t, hadoop_hsperfdata_t, dir)
+filetrans_pattern(hadoop_t, hadoop_hsperfdata_t, hadoop_tmp_t, file)
+
+kernel_read_network_state(hadoop_t)
+kernel_read_system_state(hadoop_t)
+
+corecmd_exec_bin(hadoop_t)
+corecmd_exec_shell(hadoop_t)
+
+corenet_all_recvfrom_unlabeled(hadoop_t)
+corenet_all_recvfrom_netlabel(hadoop_t)
+corenet_sendrecv_hadoop_namenode_client_packets(hadoop_t)
+corenet_sendrecv_portmap_client_packets(hadoop_t)
+corenet_sendrecv_zope_client_packets(hadoop_t)
+corenet_tcp_bind_all_nodes(hadoop_t)
+corenet_tcp_connect_hadoop_namenode_port(hadoop_t)
+corenet_tcp_connect_hadoop_datanode_port(hadoop_t)
+corenet_tcp_connect_portmap_port(hadoop_t)
+corenet_tcp_connect_zope_port(hadoop_t)
+corenet_tcp_sendrecv_all_nodes(hadoop_t)
+corenet_tcp_sendrecv_all_ports(hadoop_t)
+corenet_tcp_sendrecv_generic_if(hadoop_t)
+# Hadoop uses high ordered random ports for services
+# If permanent ports are chosen, remove line below and lock down
+corenet_tcp_connect_generic_port(hadoop_t)
+corenet_udp_bind_all_nodes(hadoop_t)
+corenet_udp_sendrecv_all_nodes(hadoop_t)
+corenet_udp_sendrecv_all_ports(hadoop_t)
+corenet_udp_sendrecv_generic_if(hadoop_t)
+
+dev_read_rand(hadoop_t)
+dev_read_sysfs(hadoop_t)
+dev_read_urand(hadoop_t)
+
+files_dontaudit_search_spool(hadoop_t)
+files_read_usr_files(hadoop_t)
+files_read_all_files(hadoop_t)
+
+fs_getattr_xattr_fs(hadoop_t)
+
+java_exec(hadoop_t)
+
+miscfiles_read_localization(hadoop_t)
+
+userdom_dontaudit_search_user_home_dirs(hadoop_t)
+userdom_use_user_terminals(hadoop_t)
+
+optional_policy(`
+ nis_use_ypbind(hadoop_t)
+')
+
+optional_policy(`
+ nscd_socket_use(hadoop_t)
+')
+
+########################################
+#
+# Hadoop datanode policy.
+#
+
+allow hadoop_datanode_t self:process signal;
+corenet_tcp_bind_hadoop_datanode_port(hadoop_datanode_t)
+corenet_tcp_connect_hadoop_datanode_port(hadoop_datanode_t)
+corenet_tcp_connect_hadoop_namenode_port(hadoop_datanode_t)
+fs_getattr_xattr_fs(hadoop_datanode_t)
+manage_dirs_pattern(hadoop_datanode_t, hadoop_var_lib_t, hadoop_var_lib_t)
+
+########################################
+#
+# Hadoop jobtracker policy.
+#
+
+corenet_tcp_bind_zope_port(hadoop_jobtracker_t)
+corenet_tcp_connect_hadoop_datanode_port(hadoop_jobtracker_t)
+corenet_tcp_connect_hadoop_namenode_port(hadoop_jobtracker_t)
+create_dirs_pattern(hadoop_jobtracker_t, hadoop_jobtracker_log_t, hadoop_jobtracker_log_t)
+manage_dirs_pattern(hadoop_jobtracker_t, hadoop_var_lib_t, hadoop_var_lib_t)
+setattr_dirs_pattern(hadoop_jobtracker_t, hadoop_jobtracker_log_t, hadoop_jobtracker_log_t)
+
+########################################
+#
+# Hadoop namenode policy.
+#
+
+corenet_tcp_bind_hadoop_namenode_port(hadoop_namenode_t)
+corenet_tcp_connect_hadoop_namenode_port(hadoop_namenode_t)
+manage_dirs_pattern(hadoop_namenode_t, hadoop_var_lib_t, hadoop_var_lib_t)
+manage_files_pattern(hadoop_namenode_t, hadoop_var_lib_t, hadoop_var_lib_t)
+
+########################################
+#
+# Hadoop secondary namenode policy.
+#
+
+corenet_tcp_connect_hadoop_namenode_port(hadoop_secondarynamenode_t)
+manage_dirs_pattern(hadoop_secondarynamenode_t, hadoop_var_lib_t, hadoop_var_lib_t)
+
+########################################
+#
+# Hadoop tasktracker policy.
+#
+
+allow hadoop_tasktracker_t self:process signal;
+
+corenet_tcp_connect_hadoop_datanode_port(hadoop_tasktracker_t)
+corenet_tcp_connect_hadoop_namenode_port(hadoop_tasktracker_t)
+corenet_tcp_connect_zope_port(hadoop_tasktracker_t)
+
+filetrans_pattern(hadoop_tasktracker_t, hadoop_log_t, hadoop_tasktracker_log_t, dir)
+fs_associate(hadoop_tasktracker_t)
+fs_getattr_xattr_fs(hadoop_tasktracker_t)
+
+manage_dirs_pattern(hadoop_tasktracker_t, hadoop_var_lib_t, hadoop_var_lib_t)
+manage_dirs_pattern(hadoop_tasktracker_t, hadoop_tasktracker_log_t, hadoop_tasktracker_log_t);
+
+########################################
+#
+# Hadoop zookeeper client policy.
+#
+
+allow zookeeper_t self:process { getsched sigkill signal signull execmem };
+allow zookeeper_t self:fifo_file rw_fifo_file_perms;
+allow zookeeper_t self:tcp_socket create_stream_socket_perms;
+allow zookeeper_t self:udp_socket create_socket_perms;
+allow zookeeper_t zookeeper_server_t:process signull;
+
+read_files_pattern(zookeeper_t, zookeeper_etc_t, zookeeper_etc_t)
+read_lnk_files_pattern(zookeeper_t, zookeeper_etc_t, zookeeper_etc_t)
+
+setattr_dirs_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
+append_files_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
+create_files_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
+read_files_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
+setattr_files_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
+logging_log_filetrans(zookeeper_t, zookeeper_log_t, file)
+
+manage_dirs_pattern(zookeeper_t, hadoop_hsperfdata_t, hadoop_hsperfdata_t)
+manage_files_pattern(zookeeper_t, zookeeper_tmp_t, zookeeper_tmp_t)
+files_tmp_filetrans(zookeeper_t, hadoop_hsperfdata_t, dir)
+filetrans_pattern(zookeeper_t, hadoop_hsperfdata_t, zookeeper_tmp_t, file)
+
+can_exec(zookeeper_t, zookeeper_exec_t)
+
+kernel_read_network_state(zookeeper_t)
+kernel_read_system_state(zookeeper_t)
+
+corecmd_exec_bin(zookeeper_t)
+corecmd_exec_shell(zookeeper_t)
+
+dontaudit zookeeper_t self:netlink_route_socket rw_netlink_socket_perms;
+
+corenet_all_recvfrom_unlabeled(zookeeper_t)
+corenet_all_recvfrom_netlabel(zookeeper_t)
+corenet_sendrecv_zookeeper_client_client_packets(zookeeper_t)
+corenet_tcp_bind_all_nodes(zookeeper_t)
+corenet_tcp_connect_zookeeper_client_port(zookeeper_t)
+corenet_tcp_sendrecv_all_nodes(zookeeper_t)
+corenet_tcp_sendrecv_all_ports(zookeeper_t)
+corenet_tcp_sendrecv_generic_if(zookeeper_t)
+# Hadoop uses high ordered random ports for services
+# If permanent ports are chosen, remove line below and lock down
+corenet_tcp_connect_generic_port(zookeeper_t)
+corenet_udp_bind_all_nodes(zookeeper_t)
+corenet_udp_sendrecv_all_nodes(zookeeper_t)
+corenet_udp_sendrecv_all_ports(zookeeper_t)
+corenet_udp_sendrecv_generic_if(zookeeper_t)
+
+dev_read_rand(zookeeper_t)
+dev_read_sysfs(zookeeper_t)
+dev_read_urand(zookeeper_t)
+
+files_read_etc_files(zookeeper_t)
+files_read_usr_files(zookeeper_t)
+
+miscfiles_read_localization(zookeeper_t)
+
+sysnet_read_config(zookeeper_t)
+
+userdom_dontaudit_search_user_home_dirs(zookeeper_t)
+userdom_use_user_terminals(zookeeper_t)
+
+java_exec(zookeeper_t)
+
+optional_policy(`
+ nscd_socket_use(zookeeper_t)
+')
+
+########################################
+#
+# Hadoop zookeeper server policy.
+#
+
+allow zookeeper_server_t self:capability kill;
+allow zookeeper_server_t self:process { execmem getsched sigkill signal signull };
+allow zookeeper_server_t self:fifo_file rw_fifo_file_perms;
+allow zookeeper_server_t self:netlink_route_socket rw_netlink_socket_perms;
+allow zookeeper_server_t self:tcp_socket create_stream_socket_perms;
+allow zookeeper_server_t self:udp_socket create_socket_perms;
+
+read_files_pattern(zookeeper_server_t, zookeeper_etc_t, zookeeper_etc_t)
+read_lnk_files_pattern(zookeeper_server_t, zookeeper_etc_t, zookeeper_etc_t)
+
+manage_dirs_pattern(zookeeper_server_t, zookeeper_server_var_t, zookeeper_server_var_t)
+manage_files_pattern(zookeeper_server_t, zookeeper_server_var_t, zookeeper_server_var_t)
+files_var_lib_filetrans(zookeeper_server_t, zookeeper_server_var_t, { dir file })
+
+setattr_dirs_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
+append_files_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
+create_files_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
+read_files_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
+setattr_files_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
+logging_log_filetrans(zookeeper_server_t, zookeeper_log_t, file)
+
+manage_files_pattern(zookeeper_server_t, zookeeper_server_var_run_t, zookeeper_server_var_run_t)
+files_pid_filetrans(zookeeper_server_t, zookeeper_server_var_run_t, file)
+
+manage_dirs_pattern(zookeeper_server_t, hadoop_hsperfdata_t, hadoop_hsperfdata_t)
+manage_files_pattern(zookeeper_server_t, zookeeper_server_tmp_t, zookeeper_server_tmp_t)
+files_tmp_filetrans(zookeeper_server_t, hadoop_hsperfdata_t, dir)
+filetrans_pattern(zookeeper_server_t, hadoop_hsperfdata_t, zookeeper_server_tmp_t, file)
+
+can_exec(zookeeper_server_t, zookeeper_server_exec_t)
+
+kernel_read_network_state(zookeeper_server_t)
+kernel_read_system_state(zookeeper_server_t)
+
+corecmd_exec_bin(zookeeper_server_t)
+corecmd_exec_shell(zookeeper_server_t)
+
+corenet_all_recvfrom_unlabeled(zookeeper_server_t)
+corenet_all_recvfrom_netlabel(zookeeper_server_t)
+corenet_sendrecv_zookeeper_election_client_packets(zookeeper_server_t)
+corenet_sendrecv_zookeeper_leader_client_packets(zookeeper_server_t)
+corenet_sendrecv_zookeeper_client_server_packets(zookeeper_server_t)
+corenet_sendrecv_zookeeper_election_server_packets(zookeeper_server_t)
+corenet_sendrecv_zookeeper_leader_server_packets(zookeeper_server_t)
+corenet_tcp_bind_all_nodes(zookeeper_server_t)
+corenet_tcp_bind_zookeeper_client_port(zookeeper_server_t)
+corenet_tcp_bind_zookeeper_election_port(zookeeper_server_t)
+corenet_tcp_bind_zookeeper_leader_port(zookeeper_server_t)
+corenet_tcp_connect_zookeeper_election_port(zookeeper_server_t)
+corenet_tcp_connect_zookeeper_leader_port(zookeeper_server_t)
+corenet_tcp_sendrecv_generic_if(zookeeper_server_t)
+corenet_tcp_sendrecv_generic_node(zookeeper_server_t)
+corenet_tcp_sendrecv_all_ports(zookeeper_server_t)
+# Hadoop uses high ordered random ports for services
+# If permanent ports are chosen, remove line below and lock down
+corenet_tcp_connect_generic_port(zookeeper_server_t)
+corenet_udp_sendrecv_generic_if(zookeeper_server_t)
+corenet_udp_sendrecv_all_nodes(zookeeper_server_t)
+corenet_udp_sendrecv_all_ports(zookeeper_server_t)
+corenet_udp_bind_all_nodes(zookeeper_server_t)
+
+dev_read_rand(zookeeper_server_t)
+dev_read_sysfs(zookeeper_server_t)
+dev_read_urand(zookeeper_server_t)
+
+files_read_etc_files(zookeeper_server_t)
+files_read_usr_files(zookeeper_server_t)
+
+fs_getattr_xattr_fs(zookeeper_server_t)
+
+logging_send_syslog_msg(zookeeper_server_t)
+
+miscfiles_read_localization(zookeeper_server_t)
+
+sysnet_read_config(zookeeper_server_t)
+
+java_exec(zookeeper_server_t)
diff --git a/policy/modules/system/unconfined.te b/policy/modules/system/unconfined.te
index f976344..f1e6c9f 100644
--- a/policy/modules/system/unconfined.te
+++ b/policy/modules/system/unconfined.te
@@ -118,6 +118,10 @@ optional_policy(`
')
optional_policy(`
+ hadoop_run(unconfined_t, unconfined_r)
+')
+
+optional_policy(`
inn_domtrans(unconfined_t)
')
@@ -210,6 +214,10 @@ optional_policy(`
xserver_domtrans(unconfined_t)
')
+optional_policy(`
+ hadoop_zookeeper_run_client(unconfined_t, unconfined_r)
+')
+
########################################
#
# Unconfined Execmem Local policy
On 10/05/10 15:59, Paul Nuzzi wrote:
> On 10/04/2010 02:18 PM, Christopher J. PeBenito wrote:
>> On 10/04/10 13:15, Paul Nuzzi wrote:
>>> On 10/01/2010 01:56 PM, Christopher J. PeBenito wrote:
>>>> On 10/01/10 11:17, Paul Nuzzi wrote:
>>>>> On 10/01/2010 08:02 AM, Dominick Grift wrote:
>>>>>> On Thu, Sep 30, 2010 at 03:39:40PM -0400, Paul Nuzzi wrote:
>>>>>>> I updated the patch based on recommendations from the mailing list.
>>>>>>> All of hadoop's services are included in one module instead of
>>>>>>> individual ones. Unconfined and sysadm roles are given access to
>>>>>>> hadoop and zookeeper client domain transitions. The services are started
>>>>>>> using run_init. Let me know what you think.
>>>>>>
>>>>>> Why do some hadoop domain need to manage generic tmp?
>>>>>>
>>>>>> files_manage_generic_tmp_dirs(zookeeper_t)
>>>>>> files_manage_generic_tmp_dirs(hadoop_t)
>>>>>> files_manage_generic_tmp_dirs(hadoop_$1_initrc_t)
>>>>>> files_manage_generic_tmp_files(hadoop_$1_initrc_t)
>>>>>> files_manage_generic_tmp_files(hadoop_$1_t)
>>>>>> files_manage_generic_tmp_dirs(hadoop_$1_t)
>>>>>
>>>>> This has to be done for Java JMX to work. All of the files are written to
>>>>> /tmp/hsperfdata_(hadoop/zookeeper). /tmp/hsperfdata_ is labeled tmp_t while
>>>>> all the files for each service are labeled with hadoop_*_tmp_t. The first service
>>>>> will end up owning the directory if it is not labeled tmp_t.
>>>>
>>>> The hsperfdata dir in /tmp certainly the bane of policy writers. Based on a quick look through the policy, it looks like the only dir they create in /tmp is this hsperfdata dir. I suggest you do something like
>>>>
>>>> files_tmp_filetrans(hadoop_t, hadoop_hsperfdata_t, dir)
>>>> files_tmp_filetrans(zookeeper_t, hadoop_hsperfdata_t, dir)
>>>>
>>>> filetrans_pattern(hadoop_t, hadoop_hsperfdata_t, hadoop_tmp_t, file)
>>>> filetrans_pattern(zookeeper_t, hadoop_hsperfdata_t, zookeeper_tmp_t, file)
>>>>
>>>
>>> That looks like a better way to handle the tmp_t problem.
>>>
>>> I changed the patch with your comments. Hopefully this will be one of the last updates.
>>> Tested on a CDH3 cluster as a module without any problems.
>>
>> There are several little issues with style, but it'll be easier just to fix them when its committed.
>>
>> Other comments inline.
>>
>
> I did my best locking down the ports hadoop uses. Unfortunately the services use high, randomized ports making
> tcp_connect_generic_port a must have. Hopefully one day hadoop will settle on static ports. I added hadoop_datanode port 50010 since it is important to lock down that service. I changed the patch based on the rest of the comments.
Merged. I've made several changes:
* a pass cleaning up the style.
* adjusted some regular expressions in the file contexts: `.*` is
equivalent to `(.*)?`, since `*` already matches zero or more characters.
* renamed a few interfaces
* two rules that I dropped as they require further explanation
> +files_read_all_files(hadoop_t)
A very big privilege.
and
> +fs_associate(hadoop_tasktracker_t)
This is a domain, so the only files with this type should be the
/proc/pid ones, which don't require associate permissions.
> ---
> policy/modules/kernel/corenetwork.te.in | 5
> policy/modules/roles/sysadm.te | 8
> policy/modules/services/hadoop.fc | 54 ++++
> policy/modules/services/hadoop.if | 352 +++++++++++++++++++++++++++++
> policy/modules/services/hadoop.te | 379 ++++++++++++++++++++++++++++++++
> policy/modules/system/unconfined.te | 8
> 6 files changed, 806 insertions(+)
>
> diff --git a/policy/modules/kernel/corenetwork.te.in b/policy/modules/kernel/corenetwork.te.in
> index 2ecdde8..73163db 100644
> --- a/policy/modules/kernel/corenetwork.te.in
> +++ b/policy/modules/kernel/corenetwork.te.in
> @@ -105,6 +105,8 @@ network_port(giftd, tcp,1213,s0)
> network_port(git, tcp,9418,s0, udp,9418,s0)
> network_port(gopher, tcp,70,s0, udp,70,s0)
> network_port(gpsd, tcp,2947,s0)
> +network_port(hadoop_datanode, tcp, 50010,s0)
> +network_port(hadoop_namenode, tcp, 8020,s0)
> network_port(hddtemp, tcp,7634,s0)
> network_port(howl, tcp,5335,s0, udp,5353,s0)
> network_port(hplip, tcp,1782,s0, tcp,2207,s0, tcp,2208,s0, tcp, 8290,s0, tcp,50000,s0, tcp,50002,s0, tcp,8292,s0, tcp,9100,s0, tcp,9101,s0, tcp,9102,s0, tcp,9220,s0, tcp,9221,s0, tcp,9222,s0, tcp,9280,s0, tcp,9281,s0, tcp,9282,s0, tcp,9290,s0, tcp,9291,s0, tcp,9292,s0)
> @@ -211,6 +213,9 @@ network_port(xdmcp, udp,177,s0, tcp,177,s0)
> network_port(xen, tcp,8002,s0)
> network_port(xfs, tcp,7100,s0)
> network_port(xserver, tcp,6000-6020,s0)
> +network_port(zookeeper_client, tcp, 2181,s0)
> +network_port(zookeeper_election, tcp, 3888,s0)
> +network_port(zookeeper_leader, tcp, 2888,s0)
> network_port(zebra, tcp,2600-2604,s0, tcp,2606,s0, udp,2600-2604,s0, udp,2606,s0)
> network_port(zope, tcp,8021,s0)
>
> diff --git a/policy/modules/roles/sysadm.te b/policy/modules/roles/sysadm.te
> index cad05ff..d2bc2b1 100644
> --- a/policy/modules/roles/sysadm.te
> +++ b/policy/modules/roles/sysadm.te
> @@ -152,6 +152,10 @@ optional_policy(`
> ')
>
> optional_policy(`
> + hadoop_run(sysadm_t, sysadm_r)
> +')
> +
> +optional_policy(`
> # allow system administrator to use the ipsec script to look
> # at things (e.g., ipsec auto --status)
> # probably should create an ipsec_admin role for this kind of thing
> @@ -392,6 +396,10 @@ optional_policy(`
> yam_run(sysadm_t, sysadm_r)
> ')
>
> +optional_policy(`
> + hadoop_zookeeper_run_client(sysadm_t, sysadm_r)
> +')
> +
> ifndef(`distro_redhat',`
> optional_policy(`
> auth_role(sysadm_r, sysadm_t)
> diff --git a/policy/modules/services/hadoop.fc b/policy/modules/services/hadoop.fc
> new file mode 100644
> index 0000000..a09275d
> --- /dev/null
> +++ b/policy/modules/services/hadoop.fc
> @@ -0,0 +1,54 @@
> +/etc/hadoop.*(/.*)? gen_context(system_u:object_r:hadoop_etc_t,s0)
> +
> +/etc/rc\.d/init\.d/hadoop-(.*)?-datanode -- gen_context(system_u:object_r:hadoop_datanode_initrc_exec_t,s0)
> +/etc/rc\.d/init\.d/hadoop-(.*)?-jobtracker -- gen_context(system_u:object_r:hadoop_jobtracker_initrc_exec_t,s0)
> +/etc/rc\.d/init\.d/hadoop-(.*)?-namenode -- gen_context(system_u:object_r:hadoop_namenode_initrc_exec_t,s0)
> +/etc/rc\.d/init\.d/hadoop-(.*)?-secondarynamenode -- gen_context(system_u:object_r:hadoop_secondarynamenode_initrc_exec_t,s0)
> +/etc/rc\.d/init\.d/hadoop-(.*)?-tasktracker -- gen_context(system_u:object_r:hadoop_tasktracker_initrc_exec_t,s0)
> +/etc/rc\.d/init\.d/hadoop-zookeeper -- gen_context(system_u:object_r:zookeeper_server_initrc_exec_t,s0)
> +/etc/init\.d/hadoop-datanode -- gen_context(system_u:object_r:hadoop_datanode_initrc_exec_t,s0)
> +/etc/init\.d/hadoop-jobtracker -- gen_context(system_u:object_r:hadoop_jobtracker_initrc_exec_t,s0)
> +/etc/init\.d/hadoop-namenode -- gen_context(system_u:object_r:hadoop_namenode_initrc_exec_t,s0)
> +/etc/init\.d/hadoop-secondarynamenode -- gen_context(system_u:object_r:hadoop_secondarynamenode_initrc_exec_t,s0)
> +/etc/init\.d/hadoop-tasktracker -- gen_context(system_u:object_r:hadoop_tasktracker_initrc_exec_t,s0)
> +/etc/init\.d/zookeeper -- gen_context(system_u:object_r:zookeeper_server_initrc_exec_t,s0)
> +
> +/etc/zookeeper(/.*)? gen_context(system_u:object_r:zookeeper_etc_t,s0)
> +/etc/zookeeper\.dist(/.*)? gen_context(system_u:object_r:zookeeper_etc_t,s0)
> +
> +/usr/lib/hadoop(.*)?/bin/hadoop -- gen_context(system_u:object_r:hadoop_exec_t,s0)
> +
> +/usr/bin/zookeeper-client -- gen_context(system_u:object_r:zookeeper_exec_t,s0)
> +/usr/bin/zookeeper-server -- gen_context(system_u:object_r:zookeeper_server_exec_t,s0)
> +
> +/var/zookeeper(/.*)? gen_context(system_u:object_r:zookeeper_server_var_t,s0)
> +/var/lib/zookeeper(/.*)? gen_context(system_u:object_r:zookeeper_server_var_t,s0)
> +
> +/var/lib/hadoop(.*)? gen_context(system_u:object_r:hadoop_var_lib_t,s0)
> +/var/lib/hadoop(.*)?/cache/hadoop/dfs/data(/.*)? gen_context(system_u:object_r:hadoop_datanode_var_lib_t,s0)
> +/var/lib/hadoop(.*)?/cache/hadoop/dfs/name(/.*)? gen_context(system_u:object_r:hadoop_namenode_var_lib_t,s0)
> +/var/lib/hadoop(.*)?/cache/hadoop/dfs/namesecondary(/.*)? gen_context(system_u:object_r:hadoop_secondarynamenode_var_lib_t,s0)
> +/var/lib/hadoop(.*)?/cache/hadoop/mapred/local/jobTracker(/.*)? gen_context(system_u:object_r:hadoop_jobtracker_var_lib_t,s0)
> +/var/lib/hadoop(.*)?/cache/hadoop/mapred/local/taskTracker(/.*)? gen_context(system_u:object_r:hadoop_tasktracker_var_lib_t,s0)
> +
> +/var/lock/subsys/hadoop-datanode -- gen_context(system_u:object_r:hadoop_datanode_lock_t,s0)
> +/var/lock/subsys/hadoop-namenode -- gen_context(system_u:object_r:hadoop_namenode_lock_t,s0)
> +/var/lock/subsys/hadoop-jobtracker -- gen_context(system_u:object_r:hadoop_jobtracker_lock_t,s0)
> +/var/lock/subsys/hadoop-tasktracker -- gen_context(system_u:object_r:hadoop_tasktracker_lock_t,s0)
> +/var/lock/subsys/hadoop-secondarynamenode -- gen_context(system_u:object_r:hadoop_secondarynamenode_lock_t,s0)
> +
> +/var/log/hadoop(.*)? gen_context(system_u:object_r:hadoop_log_t,s0)
> +/var/log/hadoop(.*)?/hadoop-hadoop-datanode-(.*)? gen_context(system_u:object_r:hadoop_datanode_log_t,s0)
> +/var/log/hadoop(.*)?/hadoop-hadoop-jobtracker-(.*)? gen_context(system_u:object_r:hadoop_jobtracker_log_t,s0)
> +/var/log/hadoop(.*)?/hadoop-hadoop-namenode-(.*)? gen_context(system_u:object_r:hadoop_namenode_log_t,s0)
> +/var/log/hadoop(.*)?/hadoop-hadoop-secondarynamenode-(.*)? gen_context(system_u:object_r:hadoop_secondarynamenode_log_t,s0)
> +/var/log/hadoop(.*)?/hadoop-hadoop-tasktracker-(.*)? gen_context(system_u:object_r:hadoop_tasktracker_log_t,s0)
> +/var/log/hadoop(.*)?/history(/.*)? gen_context(system_u:object_r:hadoop_jobtracker_log_t,s0)
> +/var/log/zookeeper(/.*)? gen_context(system_u:object_r:zookeeper_log_t,s0)
> +
> +/var/run/hadoop(.*)? -d gen_context(system_u:object_r:hadoop_var_run_t,s0)
> +/var/run/hadoop(.*)?/hadoop-hadoop-datanode\.pid -- gen_context(system_u:object_r:hadoop_datanode_initrc_var_run_t,s0)
> +/var/run/hadoop(.*)?/hadoop-hadoop-namenode\.pid -- gen_context(system_u:object_r:hadoop_namenode_initrc_var_run_t,s0)
> +/var/run/hadoop(.*)?/hadoop-hadoop-jobtracker\.pid -- gen_context(system_u:object_r:hadoop_jobtracker_initrc_var_run_t,s0)
> +/var/run/hadoop(.*)?/hadoop-hadoop-tasktracker\.pid -- gen_context(system_u:object_r:hadoop_tasktracker_initrc_var_run_t,s0)
> +/var/run/hadoop(.*)?/hadoop-hadoop-secondarynamenode\.pid -- gen_context(system_u:object_r:hadoop_secondarynamenode_initrc_var_run_t,s0)
> diff --git a/policy/modules/services/hadoop.if b/policy/modules/services/hadoop.if
> new file mode 100644
> index 0000000..e919bcb
> --- /dev/null
> +++ b/policy/modules/services/hadoop.if
> @@ -0,0 +1,352 @@
> +##<summary>Software for reliable, scalable, distributed computing.</summary>
> +
> +#######################################
> +##<summary>
> +## The template to define a hadoop domain.
> +##</summary>
> +##<param name="domain_prefix">
> +## <summary>
> +## Domain prefix to be used.
> +## </summary>
> +##</param>
> +#
> +template(`hadoop_domain_template',`
> + gen_require(`
> + attribute hadoop_domain;
> + type hadoop_log_t, hadoop_var_lib_t, hadoop_var_run_t;
> + type hadoop_exec_t, hadoop_hsperfdata_t;
> + ')
> +
> + ########################################
> + #
> + # Shared declarations.
> + #
> +
> + type hadoop_$1_t, hadoop_domain;
> + domain_type(hadoop_$1_t)
> + domain_entry_file(hadoop_$1_t, hadoop_exec_t)
> +
> + type hadoop_$1_initrc_t;
> + type hadoop_$1_initrc_exec_t;
> + init_script_domain(hadoop_$1_initrc_t, hadoop_$1_initrc_exec_t)
> +
> + role system_r types { hadoop_$1_initrc_t hadoop_$1_t };
> +
> + type hadoop_$1_lock_t;
> + files_lock_file(hadoop_$1_lock_t)
> + files_lock_filetrans(hadoop_$1_initrc_t, hadoop_$1_lock_t, file)
> +
> + type hadoop_$1_log_t;
> + logging_log_file(hadoop_$1_log_t)
> + filetrans_pattern(hadoop_$1_initrc_t, hadoop_log_t, hadoop_$1_log_t, {dir file})
> + filetrans_pattern(hadoop_$1_t, hadoop_log_t, hadoop_$1_log_t, {dir file})
> +
> + type hadoop_$1_var_lib_t;
> + files_type(hadoop_$1_var_lib_t)
> + filetrans_pattern(hadoop_$1_t, hadoop_var_lib_t, hadoop_$1_var_lib_t, file)
> +
> + type hadoop_$1_initrc_var_run_t;
> + files_pid_file(hadoop_$1_initrc_var_run_t)
> + filetrans_pattern(hadoop_$1_initrc_t, hadoop_var_run_t, hadoop_$1_initrc_var_run_t, file)
> +
> + type hadoop_$1_tmp_t;
> + files_tmp_file(hadoop_$1_tmp_t)
> + files_tmp_filetrans(hadoop_$1_t, hadoop_hsperfdata_t, dir)
> + filetrans_pattern(hadoop_$1_t, hadoop_hsperfdata_t, hadoop_$1_tmp_t, file)
> +
> + ####################################
> + #
> + # Shared hadoop_$1 initrc policy.
> + #
> +
> + allow hadoop_$1_initrc_t hadoop_$1_t:process { signal signull };
> + allow hadoop_$1_initrc_t self:capability { setuid setgid };
> + allow hadoop_$1_initrc_t self:fifo_file rw_fifo_file_perms;
> + allow hadoop_$1_initrc_t self:process setsched;
> +
> + consoletype_exec(hadoop_$1_initrc_t)
> + corecmd_exec_bin(hadoop_$1_initrc_t)
> + corecmd_exec_shell(hadoop_$1_initrc_t)
> +
> + domtrans_pattern(hadoop_$1_initrc_t, hadoop_exec_t, hadoop_$1_t)
> + dontaudit hadoop_$1_initrc_t self:capability sys_tty_config;
> +
> + files_read_etc_files(hadoop_$1_initrc_t)
> + files_read_usr_files(hadoop_$1_initrc_t)
> + files_search_pids(hadoop_$1_initrc_t)
> + files_search_locks(hadoop_$1_initrc_t)
> + fs_getattr_xattr_fs(hadoop_$1_initrc_t)
> +
> + hadoop_exec_config_files(hadoop_$1_initrc_t)
> +
> + init_rw_utmp(hadoop_$1_initrc_t)
> + init_use_script_ptys(hadoop_$1_initrc_t)
> +
> + kernel_read_kernel_sysctls(hadoop_$1_initrc_t)
> + kernel_read_sysctl(hadoop_$1_initrc_t)
> + kernel_read_system_state(hadoop_$1_initrc_t)
> +
> + logging_send_syslog_msg(hadoop_$1_initrc_t)
> + logging_send_audit_msgs(hadoop_$1_initrc_t)
> + logging_search_logs(hadoop_$1_initrc_t)
> +
> + manage_files_pattern(hadoop_$1_initrc_t, hadoop_$1_lock_t, hadoop_$1_lock_t)
> + manage_files_pattern(hadoop_$1_initrc_t, hadoop_$1_initrc_var_run_t, hadoop_$1_initrc_var_run_t)
> + manage_files_pattern(hadoop_$1_initrc_t, hadoop_$1_log_t, hadoop_$1_log_t)
> + manage_dirs_pattern(hadoop_$1_initrc_t, hadoop_var_run_t, hadoop_var_run_t)
> + manage_files_pattern(hadoop_$1_initrc_t, hadoop_var_run_t, hadoop_var_run_t)
> +
> + miscfiles_read_localization(hadoop_$1_initrc_t)
> +
> + optional_policy(`
> + nscd_socket_use(hadoop_$1_initrc_t)
> + ')
> +
> + term_use_generic_ptys(hadoop_$1_initrc_t)
> +
> + userdom_dontaudit_search_user_home_dirs(hadoop_$1_initrc_t)
> +
> + ####################################
> + #
> + # Shared hadoop_$1 policy.
> + #
> +
> + allow hadoop_$1_t hadoop_domain:process signull;
> + allow hadoop_$1_t self:fifo_file rw_fifo_file_perms;
> + allow hadoop_$1_t self:process execmem;
> + allow hadoop_$1_t hadoop_var_run_t:dir getattr;
> +
> + corecmd_exec_bin(hadoop_$1_t)
> + corecmd_exec_shell(hadoop_$1_t)
> +
> + dev_read_rand(hadoop_$1_t)
> + dev_read_urand(hadoop_$1_t)
> + dev_read_sysfs(hadoop_$1_t)
> + dontaudit hadoop_$1_t self:netlink_route_socket rw_netlink_socket_perms;
> +
> + files_read_etc_files(hadoop_$1_t)
> + files_search_pids(hadoop_$1_t)
> + files_search_var_lib(hadoop_$1_t)
> +
> + hadoop_exec_config_files(hadoop_$1_t)
> +
> + java_exec(hadoop_$1_t)
> +
> + kernel_read_network_state(hadoop_$1_t)
> + kernel_read_system_state(hadoop_$1_t)
> +
> + logging_search_logs(hadoop_$1_t)
> +
> + manage_dirs_pattern(hadoop_$1_t, hadoop_$1_var_lib_t, hadoop_$1_var_lib_t)
> + manage_dirs_pattern(hadoop_$1_t, hadoop_hsperfdata_t, hadoop_hsperfdata_t)
> + manage_files_pattern(hadoop_$1_t, hadoop_$1_log_t, hadoop_$1_log_t)
> + manage_files_pattern(hadoop_$1_t, hadoop_$1_var_lib_t, hadoop_$1_var_lib_t)
> + manage_files_pattern(hadoop_$1_t, hadoop_$1_tmp_t, hadoop_$1_tmp_t)
> + miscfiles_read_localization(hadoop_$1_t)
> +
> + optional_policy(`
> + nscd_socket_use(hadoop_$1_t)
> + ')
> +
> + sysnet_read_config(hadoop_$1_t)
> +
> + allow hadoop_$1_t self:tcp_socket create_stream_socket_perms;
> + corenet_all_recvfrom_unlabeled(hadoop_$1_t)
> + corenet_all_recvfrom_netlabel(hadoop_$1_t)
> + corenet_tcp_bind_all_nodes(hadoop_$1_t)
> + corenet_tcp_sendrecv_generic_if(hadoop_$1_t)
> + corenet_tcp_sendrecv_generic_node(hadoop_$1_t)
> + corenet_tcp_sendrecv_all_ports(hadoop_$1_t)
> + # Hadoop uses high ordered random ports for services
> + # If permanent ports are chosen, remove line below and lock down
> + corenet_tcp_connect_generic_port(hadoop_$1_t)
> +
> + allow hadoop_$1_t self:udp_socket create_socket_perms;
> + corenet_udp_sendrecv_generic_if(hadoop_$1_t)
> + corenet_udp_sendrecv_all_nodes(hadoop_$1_t)
> + corenet_udp_bind_all_nodes(hadoop_$1_t)
> +')
> +
> +########################################
> +##<summary>
> +## Execute hadoop in the
> +## hadoop domain.
> +##</summary>
> +##<param name="domain">
> +## <summary>
> +## Domain allowed to transition.
> +## </summary>
> +##</param>
> +#
> +interface(`hadoop_domtrans',`
> + gen_require(`
> + type hadoop_t, hadoop_exec_t;
> + ')
> +
> + files_search_usr($1)
> + libs_search_lib($1)
> + domtrans_pattern($1, hadoop_exec_t, hadoop_t)
> +')
> +
> +########################################
> +##<summary>
> +## Execute hadoop in the hadoop domain,
> +## and allow the specified role the
> +## hadoop domain.
> +##</summary>
> +##<param name="domain">
> +## <summary>
> +## Domain allowed to transition.
> +## </summary>
> +##</param>
> +##<param name="role">
> +## <summary>
> +## Role allowed access.
> +## </summary>
> +##</param>
> +##<rolecap/>
> +#
> +interface(`hadoop_run',`
> + gen_require(`
> + type hadoop_t;
> + ')
> +
> + hadoop_domtrans($1)
> + role $2 types hadoop_t;
> +
> + allow $1 hadoop_t:process { ptrace signal_perms };
> + ps_process_pattern($1, hadoop_t)
> +')
> +
> +########################################
> +##<summary>
> +## Execute zookeeper client in the
> +## zookeeper client domain.
> +##</summary>
> +##<param name="domain">
> +## <summary>
> +## Domain allowed to transition.
> +## </summary>
> +##</param>
> +#
> +interface(`hadoop_domtrans_zookeeper_client',`
> + gen_require(`
> + type zookeeper_t, zookeeper_exec_t;
> + ')
> +
> + corecmd_search_bin($1)
> + files_search_usr($1)
> + domtrans_pattern($1, zookeeper_exec_t, zookeeper_t)
> +')
> +
> +########################################
> +##<summary>
> +## Execute zookeeper server in the
> +## zookeeper server domain.
> +##</summary>
> +##<param name="domain">
> +## <summary>
> +## Domain allowed to transition.
> +## </summary>
> +##</param>
> +#
> +interface(`hadoop_domtrans_zookeeper_server',`
> + gen_require(`
> + type zookeeper_server_t, zookeeper_server_exec_t;
> + ')
> +
> + corecmd_search_bin($1)
> + files_search_usr($1)
> + domtrans_pattern($1, zookeeper_server_exec_t, zookeeper_server_t)
> +')
> +
> +########################################
> +##<summary>
> +## Execute zookeeper server in the
> +## zookeeper domain.
> +##</summary>
> +##<param name="domain">
> +## <summary>
> +## Domain allowed to transition.
> +## </summary>
> +##</param>
> +#
> +interface(`hadoop_zookeeper_initrc_domtrans_server',`
> + gen_require(`
> + type zookeeper_server_initrc_exec_t;
> + ')
> +
> + init_labeled_script_domtrans($1, zookeeper_server_initrc_exec_t)
> +')
> +
> +########################################
> +##<summary>
> +## Execute zookeeper client in the
> +## zookeeper client domain, and allow the
> +## specified role the zookeeper client domain.
> +##</summary>
> +##<param name="domain">
> +## <summary>
> +## Domain allowed to transition.
> +## </summary>
> +##</param>
> +##<param name="role">
> +## <summary>
> +## Role allowed access.
> +## </summary>
> +##</param>
> +##<rolecap/>
> +#
> +interface(`hadoop_zookeeper_run_client',`
> + gen_require(`
> + type zookeeper_t;
> + ')
> +
> + hadoop_domtrans_zookeeper_client($1)
> + role $2 types zookeeper_t;
> +
> + allow $1 zookeeper_t:process { ptrace signal_perms };
> + ps_process_pattern($1, zookeeper_t)
> +')
> +
> +########################################
> +##<summary>
> +## Give permission to a domain to read
> +## hadoop_etc_t
> +##</summary>
> +##<param name="domain">
> +##<summary>
> +## Domain needing read permission
> +##</summary>
> +##</param>
> +#
> +interface(`hadoop_read_config_files', `
> + gen_require(`
> + type hadoop_etc_t;
> + ')
> +
> + allow $1 hadoop_etc_t:dir search_dir_perms;
> + allow $1 hadoop_etc_t:lnk_file { read getattr };
> + allow $1 hadoop_etc_t:file read_file_perms;
> +')
> +
> +########################################
> +##<summary>
> +## Give permission to a domain to
> +## execute hadoop_etc_t
> +##</summary>
> +##<param name="domain">
> +##<summary>
> +## Domain needing read and execute
> +## permission
> +##</summary>
> +##</param>
> +#
> +interface(`hadoop_exec_config_files', `
> + gen_require(`
> + type hadoop_etc_t;
> + ')
> +
> + hadoop_read_config_files($1)
> + allow $1 hadoop_etc_t:file { execute execute_no_trans};
> +')
> diff --git a/policy/modules/services/hadoop.te b/policy/modules/services/hadoop.te
> new file mode 100644
> index 0000000..587c393
> --- /dev/null
> +++ b/policy/modules/services/hadoop.te
> @@ -0,0 +1,379 @@
> +policy_module(hadoop, 1.0.0)
> +
> +########################################
> +#
> +# Hadoop declarations.
> +#
> +
> +attribute hadoop_domain;
> +
> +type hadoop_t;
> +type hadoop_exec_t;
> +application_domain(hadoop_t, hadoop_exec_t)
> +ubac_constrained(hadoop_t)
> +
> +type hadoop_etc_t;
> +files_config_file(hadoop_etc_t)
> +
> +type hadoop_var_lib_t;
> +files_type(hadoop_var_lib_t)
> +
> +type hadoop_log_t;
> +logging_log_file(hadoop_log_t)
> +
> +type hadoop_var_run_t;
> +files_pid_file(hadoop_var_run_t)
> +
> +type hadoop_tmp_t;
> +files_tmp_file(hadoop_tmp_t)
> +ubac_constrained(hadoop_tmp_t)
> +
> +type hadoop_hsperfdata_t;
> +files_tmp_file(hadoop_hsperfdata_t)
> +ubac_constrained(hadoop_hsperfdata_t)
> +
> +hadoop_domain_template(datanode)
> +hadoop_domain_template(jobtracker)
> +hadoop_domain_template(namenode)
> +hadoop_domain_template(secondarynamenode)
> +hadoop_domain_template(tasktracker)
> +
> +########################################
> +#
> +# Hadoop zookeeper client declarations.
> +#
> +
> +type zookeeper_t;
> +type zookeeper_exec_t;
> +application_domain(zookeeper_t, zookeeper_exec_t)
> +ubac_constrained(zookeeper_t)
> +
> +type zookeeper_etc_t;
> +files_config_file(zookeeper_etc_t)
> +
> +type zookeeper_log_t;
> +logging_log_file(zookeeper_log_t)
> +
> +type zookeeper_tmp_t;
> +files_tmp_file(zookeeper_tmp_t)
> +ubac_constrained(zookeeper_tmp_t)
> +
> +########################################
> +#
> +# Hadoop zookeeper server declarations.
> +#
> +
> +type zookeeper_server_t;
> +type zookeeper_server_exec_t;
> +init_daemon_domain(zookeeper_server_t, zookeeper_server_exec_t)
> +
> +type zookeeper_server_initrc_exec_t;
> +init_script_file(zookeeper_server_initrc_exec_t)
> +
> +type zookeeper_server_var_t;
> +files_type(zookeeper_server_var_t)
> +
> +# This will need a file context specification.
> +type zookeeper_server_var_run_t;
> +files_pid_file(zookeeper_server_var_run_t)
> +
> +type zookeeper_server_tmp_t;
> +files_tmp_file(zookeeper_server_tmp_t)
> +
> +########################################
> +#
> +# Hadoop policy.
> +#
> +
> +allow hadoop_t self:capability sys_resource;
> +allow hadoop_t self:process { getsched setsched signal signull setrlimit execmem };
> +allow hadoop_t self:fifo_file rw_fifo_file_perms;
> +allow hadoop_t self:key write;
> +allow hadoop_t self:tcp_socket create_stream_socket_perms;
> +allow hadoop_t self:udp_socket create_socket_perms;
> +allow hadoop_t hadoop_domain:process signull;
> +
> +dontaudit hadoop_t self:netlink_route_socket rw_netlink_socket_perms;
> +
> +read_files_pattern(hadoop_t, hadoop_etc_t, hadoop_etc_t)
> +read_lnk_files_pattern(hadoop_t, hadoop_etc_t, hadoop_etc_t)
> +can_exec(hadoop_t, hadoop_etc_t)
> +
> +manage_dirs_pattern(hadoop_t, hadoop_var_lib_t, hadoop_var_lib_t)
> +manage_dirs_pattern(hadoop_t, hadoop_log_t, hadoop_log_t)
> +manage_dirs_pattern(hadoop_t, hadoop_tmp_t, hadoop_tmp_t)
> +manage_dirs_pattern(hadoop_t, hadoop_hsperfdata_t, hadoop_hsperfdata_t)
> +manage_files_pattern(hadoop_t, hadoop_var_lib_t, hadoop_var_lib_t)
> +manage_files_pattern(hadoop_t, hadoop_tmp_t, hadoop_tmp_t)
> +
> +getattr_dirs_pattern(hadoop_t, hadoop_var_run_t, hadoop_var_run_t)
> +
> +files_tmp_filetrans(hadoop_t, hadoop_hsperfdata_t, dir)
> +filetrans_pattern(hadoop_t, hadoop_hsperfdata_t, hadoop_tmp_t, file)
> +
> +kernel_read_network_state(hadoop_t)
> +kernel_read_system_state(hadoop_t)
> +
> +corecmd_exec_bin(hadoop_t)
> +corecmd_exec_shell(hadoop_t)
> +
> +corenet_all_recvfrom_unlabeled(hadoop_t)
> +corenet_all_recvfrom_netlabel(hadoop_t)
> +corenet_sendrecv_hadoop_namenode_client_packets(hadoop_t)
> +corenet_sendrecv_portmap_client_packets(hadoop_t)
> +corenet_sendrecv_zope_client_packets(hadoop_t)
> +corenet_tcp_bind_all_nodes(hadoop_t)
> +corenet_tcp_connect_hadoop_namenode_port(hadoop_t)
> +corenet_tcp_connect_hadoop_datanode_port(hadoop_t)
> +corenet_tcp_connect_portmap_port(hadoop_t)
> +corenet_tcp_connect_zope_port(hadoop_t)
> +corenet_tcp_sendrecv_all_nodes(hadoop_t)
> +corenet_tcp_sendrecv_all_ports(hadoop_t)
> +corenet_tcp_sendrecv_generic_if(hadoop_t)
> +# Hadoop uses high ordered random ports for services
> +# If permanent ports are chosen, remove line below and lock down
> +corenet_tcp_connect_generic_port(hadoop_t)
> +corenet_udp_bind_all_nodes(hadoop_t)
> +corenet_udp_sendrecv_all_nodes(hadoop_t)
> +corenet_udp_sendrecv_all_ports(hadoop_t)
> +corenet_udp_sendrecv_generic_if(hadoop_t)
> +
> +dev_read_rand(hadoop_t)
> +dev_read_sysfs(hadoop_t)
> +dev_read_urand(hadoop_t)
> +
> +files_dontaudit_search_spool(hadoop_t)
> +files_read_usr_files(hadoop_t)
> +files_read_all_files(hadoop_t)
> +
> +fs_getattr_xattr_fs(hadoop_t)
> +
> +java_exec(hadoop_t)
> +
> +miscfiles_read_localization(hadoop_t)
> +
> +userdom_dontaudit_search_user_home_dirs(hadoop_t)
> +userdom_use_user_terminals(hadoop_t)
> +
> +optional_policy(`
> + nis_use_ypbind(hadoop_t)
> +')
> +
> +optional_policy(`
> + nscd_socket_use(hadoop_t)
> +')
> +
> +########################################
> +#
> +# Hadoop datanode policy.
> +#
> +
> +allow hadoop_datanode_t self:process signal;
> +corenet_tcp_bind_hadoop_datanode_port(hadoop_datanode_t)
> +corenet_tcp_connect_hadoop_datanode_port(hadoop_datanode_t)
> +corenet_tcp_connect_hadoop_namenode_port(hadoop_datanode_t)
> +fs_getattr_xattr_fs(hadoop_datanode_t)
> +manage_dirs_pattern(hadoop_datanode_t, hadoop_var_lib_t, hadoop_var_lib_t)
> +
> +########################################
> +#
> +# Hadoop jobtracker policy.
> +#
> +
> +corenet_tcp_bind_zope_port(hadoop_jobtracker_t)
> +corenet_tcp_connect_hadoop_datanode_port(hadoop_jobtracker_t)
> +corenet_tcp_connect_hadoop_namenode_port(hadoop_jobtracker_t)
> +create_dirs_pattern(hadoop_jobtracker_t, hadoop_jobtracker_log_t, hadoop_jobtracker_log_t)
> +manage_dirs_pattern(hadoop_jobtracker_t, hadoop_var_lib_t, hadoop_var_lib_t)
> +setattr_dirs_pattern(hadoop_jobtracker_t, hadoop_jobtracker_log_t, hadoop_jobtracker_log_t)
> +
> +########################################
> +#
> +# Hadoop namenode policy.
> +#
> +
> +corenet_tcp_bind_hadoop_namenode_port(hadoop_namenode_t)
> +corenet_tcp_connect_hadoop_namenode_port(hadoop_namenode_t)
> +manage_dirs_pattern(hadoop_namenode_t, hadoop_var_lib_t, hadoop_var_lib_t)
> +manage_files_pattern(hadoop_namenode_t, hadoop_var_lib_t, hadoop_var_lib_t)
> +
> +########################################
> +#
> +# Hadoop secondary namenode policy.
> +#
> +
> +corenet_tcp_connect_hadoop_namenode_port(hadoop_secondarynamenode_t)
> +manage_dirs_pattern(hadoop_secondarynamenode_t, hadoop_var_lib_t, hadoop_var_lib_t)
> +
> +########################################
> +#
> +# Hadoop tasktracker policy.
> +#
> +
> +allow hadoop_tasktracker_t self:process signal;
> +
> +corenet_tcp_connect_hadoop_datanode_port(hadoop_tasktracker_t)
> +corenet_tcp_connect_hadoop_namenode_port(hadoop_tasktracker_t)
> +corenet_tcp_connect_zope_port(hadoop_tasktracker_t)
> +
> +filetrans_pattern(hadoop_tasktracker_t, hadoop_log_t, hadoop_tasktracker_log_t, dir)
> +fs_associate(hadoop_tasktracker_t)
> +fs_getattr_xattr_fs(hadoop_tasktracker_t)
> +
> +manage_dirs_pattern(hadoop_tasktracker_t, hadoop_var_lib_t, hadoop_var_lib_t)
> +manage_dirs_pattern(hadoop_tasktracker_t, hadoop_tasktracker_log_t, hadoop_tasktracker_log_t);
> +
> +########################################
> +#
> +# Hadoop zookeeper client policy.
> +#
> +
> +allow zookeeper_t self:process { getsched sigkill signal signull execmem };
> +allow zookeeper_t self:fifo_file rw_fifo_file_perms;
> +allow zookeeper_t self:tcp_socket create_stream_socket_perms;
> +allow zookeeper_t self:udp_socket create_socket_perms;
> +allow zookeeper_t zookeeper_server_t:process signull;
> +
> +read_files_pattern(zookeeper_t, zookeeper_etc_t, zookeeper_etc_t)
> +read_lnk_files_pattern(zookeeper_t, zookeeper_etc_t, zookeeper_etc_t)
> +
> +setattr_dirs_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
> +append_files_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
> +create_files_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
> +read_files_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
> +setattr_files_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
> +logging_log_filetrans(zookeeper_t, zookeeper_log_t, file)
> +
> +manage_dirs_pattern(zookeeper_t, hadoop_hsperfdata_t, hadoop_hsperfdata_t)
> +manage_files_pattern(zookeeper_t, zookeeper_tmp_t, zookeeper_tmp_t)
> +files_tmp_filetrans(zookeeper_t, hadoop_hsperfdata_t, dir)
> +filetrans_pattern(zookeeper_t, hadoop_hsperfdata_t, zookeeper_tmp_t, file)
> +
> +can_exec(zookeeper_t, zookeeper_exec_t)
> +
> +kernel_read_network_state(zookeeper_t)
> +kernel_read_system_state(zookeeper_t)
> +
> +corecmd_exec_bin(zookeeper_t)
> +corecmd_exec_shell(zookeeper_t)
> +
> +dontaudit zookeeper_t self:netlink_route_socket rw_netlink_socket_perms;
> +
> +corenet_all_recvfrom_unlabeled(zookeeper_t)
> +corenet_all_recvfrom_netlabel(zookeeper_t)
> +corenet_sendrecv_zookeeper_client_client_packets(zookeeper_t)
> +corenet_tcp_bind_all_nodes(zookeeper_t)
> +corenet_tcp_connect_zookeeper_client_port(zookeeper_t)
> +corenet_tcp_sendrecv_all_nodes(zookeeper_t)
> +corenet_tcp_sendrecv_all_ports(zookeeper_t)
> +corenet_tcp_sendrecv_generic_if(zookeeper_t)
> +# Hadoop uses high ordered random ports for services
> +# If permanent ports are chosen, remove line below and lock down
> +corenet_tcp_connect_generic_port(zookeeper_t)
> +corenet_udp_bind_all_nodes(zookeeper_t)
> +corenet_udp_sendrecv_all_nodes(zookeeper_t)
> +corenet_udp_sendrecv_all_ports(zookeeper_t)
> +corenet_udp_sendrecv_generic_if(zookeeper_t)
> +
> +dev_read_rand(zookeeper_t)
> +dev_read_sysfs(zookeeper_t)
> +dev_read_urand(zookeeper_t)
> +
> +files_read_etc_files(zookeeper_t)
> +files_read_usr_files(zookeeper_t)
> +
> +miscfiles_read_localization(zookeeper_t)
> +
> +sysnet_read_config(zookeeper_t)
> +
> +userdom_dontaudit_search_user_home_dirs(zookeeper_t)
> +userdom_use_user_terminals(zookeeper_t)
> +
> +java_exec(zookeeper_t)
> +
> +optional_policy(`
> + nscd_socket_use(zookeeper_t)
> +')
> +
> +########################################
> +#
> +# Hadoop zookeeper server policy.
> +#
> +
> +allow zookeeper_server_t self:capability kill;
> +allow zookeeper_server_t self:process { execmem getsched sigkill signal signull };
> +allow zookeeper_server_t self:fifo_file rw_fifo_file_perms;
> +allow zookeeper_server_t self:netlink_route_socket rw_netlink_socket_perms;
> +allow zookeeper_server_t self:tcp_socket create_stream_socket_perms;
> +allow zookeeper_server_t self:udp_socket create_socket_perms;
> +
> +read_files_pattern(zookeeper_server_t, zookeeper_etc_t, zookeeper_etc_t)
> +read_lnk_files_pattern(zookeeper_server_t, zookeeper_etc_t, zookeeper_etc_t)
> +
> +manage_dirs_pattern(zookeeper_server_t, zookeeper_server_var_t, zookeeper_server_var_t)
> +manage_files_pattern(zookeeper_server_t, zookeeper_server_var_t, zookeeper_server_var_t)
> +files_var_lib_filetrans(zookeeper_server_t, zookeeper_server_var_t, { dir file })
> +
> +setattr_dirs_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
> +append_files_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
> +create_files_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
> +read_files_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
> +setattr_files_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
> +logging_log_filetrans(zookeeper_server_t, zookeeper_log_t, file)
> +
> +manage_files_pattern(zookeeper_server_t, zookeeper_server_var_run_t, zookeeper_server_var_run_t)
> +files_pid_filetrans(zookeeper_server_t, zookeeper_server_var_run_t, file)
> +
> +manage_dirs_pattern(zookeeper_server_t, hadoop_hsperfdata_t, hadoop_hsperfdata_t)
> +manage_files_pattern(zookeeper_server_t, zookeeper_server_tmp_t, zookeeper_server_tmp_t)
> +files_tmp_filetrans(zookeeper_server_t, hadoop_hsperfdata_t, dir)
> +filetrans_pattern(zookeeper_server_t, hadoop_hsperfdata_t, zookeeper_server_tmp_t, file)
> +
> +can_exec(zookeeper_server_t, zookeeper_server_exec_t)
> +
> +kernel_read_network_state(zookeeper_server_t)
> +kernel_read_system_state(zookeeper_server_t)
> +
> +corecmd_exec_bin(zookeeper_server_t)
> +corecmd_exec_shell(zookeeper_server_t)
> +
> +corenet_all_recvfrom_unlabeled(zookeeper_server_t)
> +corenet_all_recvfrom_netlabel(zookeeper_server_t)
> +corenet_sendrecv_zookeeper_election_client_packets(zookeeper_server_t)
> +corenet_sendrecv_zookeeper_leader_client_packets(zookeeper_server_t)
> +corenet_sendrecv_zookeeper_client_server_packets(zookeeper_server_t)
> +corenet_sendrecv_zookeeper_election_server_packets(zookeeper_server_t)
> +corenet_sendrecv_zookeeper_leader_server_packets(zookeeper_server_t)
> +corenet_tcp_bind_all_nodes(zookeeper_server_t)
> +corenet_tcp_bind_zookeeper_client_port(zookeeper_server_t)
> +corenet_tcp_bind_zookeeper_election_port(zookeeper_server_t)
> +corenet_tcp_bind_zookeeper_leader_port(zookeeper_server_t)
> +corenet_tcp_connect_zookeeper_election_port(zookeeper_server_t)
> +corenet_tcp_connect_zookeeper_leader_port(zookeeper_server_t)
> +corenet_tcp_sendrecv_generic_if(zookeeper_server_t)
> +corenet_tcp_sendrecv_generic_node(zookeeper_server_t)
> +corenet_tcp_sendrecv_all_ports(zookeeper_server_t)
> +# Hadoop uses high ordered random ports for services
> +# If permanent ports are chosen, remove line below and lock down
> +corenet_tcp_connect_generic_port(zookeeper_server_t)
> +corenet_udp_sendrecv_generic_if(zookeeper_server_t)
> +corenet_udp_sendrecv_all_nodes(zookeeper_server_t)
> +corenet_udp_sendrecv_all_ports(zookeeper_server_t)
> +corenet_udp_bind_all_nodes(zookeeper_server_t)
> +
> +dev_read_rand(zookeeper_server_t)
> +dev_read_sysfs(zookeeper_server_t)
> +dev_read_urand(zookeeper_server_t)
> +
> +files_read_etc_files(zookeeper_server_t)
> +files_read_usr_files(zookeeper_server_t)
> +
> +fs_getattr_xattr_fs(zookeeper_server_t)
> +
> +logging_send_syslog_msg(zookeeper_server_t)
> +
> +miscfiles_read_localization(zookeeper_server_t)
> +
> +sysnet_read_config(zookeeper_server_t)
> +
> +java_exec(zookeeper_server_t)
> diff --git a/policy/modules/system/unconfined.te b/policy/modules/system/unconfined.te
> index f976344..f1e6c9f 100644
> --- a/policy/modules/system/unconfined.te
> +++ b/policy/modules/system/unconfined.te
> @@ -118,6 +118,10 @@ optional_policy(`
> ')
>
> optional_policy(`
> + hadoop_run(unconfined_t, unconfined_r)
> +')
> +
> +optional_policy(`
> inn_domtrans(unconfined_t)
> ')
>
> @@ -210,6 +214,10 @@ optional_policy(`
> xserver_domtrans(unconfined_t)
> ')
>
> +optional_policy(`
> + hadoop_zookeeper_run_client(unconfined_t, unconfined_r)
> +')
> +
> ########################################
> #
> # Unconfined Execmem Local policy
--
Chris PeBenito
<[email protected]>
Developer,
Hardened Gentoo Linux
On 10/07/2010 10:41 AM, Chris PeBenito wrote:
> On 10/05/10 15:59, Paul Nuzzi wrote:
>> On 10/04/2010 02:18 PM, Christopher J. PeBenito wrote:
>>> On 10/04/10 13:15, Paul Nuzzi wrote:
>>>> On 10/01/2010 01:56 PM, Christopher J. PeBenito wrote:
>>>>> On 10/01/10 11:17, Paul Nuzzi wrote:
>>>>>> On 10/01/2010 08:02 AM, Dominick Grift wrote:
>>>>>>> On Thu, Sep 30, 2010 at 03:39:40PM -0400, Paul Nuzzi wrote:
>>>>>>>> I updated the patch based on recommendations from the mailing list.
>>>>>>>> All of hadoop's services are included in one module instead of
>>>>>>>> individual ones. Unconfined and sysadm roles are given access to
>>>>>>>> hadoop and zookeeper client domain transitions. The services are started
>>>>>>>> using run_init. Let me know what you think.
>>>>>>>
>>>>>>> Why do some hadoop domains need to manage generic tmp?
>>>>>>>
>>>>>>> files_manage_generic_tmp_dirs(zookeeper_t)
>>>>>>> files_manage_generic_tmp_dirs(hadoop_t)
>>>>>>> files_manage_generic_tmp_dirs(hadoop_$1_initrc_t)
>>>>>>> files_manage_generic_tmp_files(hadoop_$1_initrc_t)
>>>>>>> files_manage_generic_tmp_files(hadoop_$1_t)
>>>>>>> files_manage_generic_tmp_dirs(hadoop_$1_t)
>>>>>>
>>>>>> This has to be done for Java JMX to work. All of the files are written to
>>>>>> /tmp/hsperfdata_(hadoop/zookeeper). /tmp/hsperfdata_ is labeled tmp_t while
>>>>>> all the files for each service are labeled with hadoop_*_tmp_t. The first service
>>>>>> will end up owning the directory if it is not labeled tmp_t.
>>>>>
>>>>> The hsperfdata dir in /tmp is certainly the bane of policy writers. Based on a quick look through the policy, it looks like the only dir they create in /tmp is this hsperfdata dir. I suggest you do something like
>>>>>
>>>>> files_tmp_filetrans(hadoop_t, hadoop_hsperfdata_t, dir)
>>>>> files_tmp_filetrans(zookeeper_t, hadoop_hsperfdata_t, dir)
>>>>>
>>>>> filetrans_pattern(hadoop_t, hadoop_hsperfdata_t, hadoop_tmp_t, file)
>>>>> filetrans_pattern(zookeeper_t, hadoop_hsperfdata_t, zookeeper_tmp_t, file)
>>>>>
>>>>
>>>> That looks like a better way to handle the tmp_t problem.
>>>>
>>>> I changed the patch with your comments. Hopefully this will be one of the last updates.
>>>> Tested on a CDH3 cluster as a module without any problems.
>>>
>>> There are several little issues with style, but it'll be easier just to fix them when it's committed.
>>>
>>> Other comments inline.
>>>
>>
>> I did my best locking down the ports hadoop uses. Unfortunately the services use high, randomized ports making
>> tcp_connect_generic_port a must have. Hopefully one day hadoop will settle on static ports. I added hadoop_datanode port 50010 since it is important to lock down that service. I changed the patch based on the rest of the comments.
>
> Merged. I've made several changes:
Thanks to everyone who helped get this merged.
>
> * a pass cleaning up the style.
> * adjusted some regular expressions in the file contexts: .* is the same as (.*)? since * means 0 or more matches.
> * renamed a few interfaces
> * two rules that I dropped as they require further explanation
>
>> +files_read_all_files(hadoop_t)
>
> A very big privilege.
"hadoop fs -put" takes any file you are allowed to access and puts it into the distributed file system.
> and
>
>> +fs_associate(hadoop_tasktracker_t)
This might not be needed.
> This is a domain, so the only files with this type should be the /proc/pid ones, which don't require associate permissions.
>
>
>> ---
>> policy/modules/kernel/corenetwork.te.in | 5
>> policy/modules/roles/sysadm.te | 8
>> policy/modules/services/hadoop.fc | 54 ++++
>> policy/modules/services/hadoop.if | 352 +++++++++++++++++++++++++++++
>> policy/modules/services/hadoop.te | 379 ++++++++++++++++++++++++++++++++
>> policy/modules/system/unconfined.te | 8
>> 6 files changed, 806 insertions(+)
>>
>> diff --git a/policy/modules/kernel/corenetwork.te.in b/policy/modules/kernel/corenetwork.te.in
>> index 2ecdde8..73163db 100644
>> --- a/policy/modules/kernel/corenetwork.te.in
>> +++ b/policy/modules/kernel/corenetwork.te.in
>> @@ -105,6 +105,8 @@ network_port(giftd, tcp,1213,s0)
>> network_port(git, tcp,9418,s0, udp,9418,s0)
>> network_port(gopher, tcp,70,s0, udp,70,s0)
>> network_port(gpsd, tcp,2947,s0)
>> +network_port(hadoop_datanode, tcp, 50010,s0)
>> +network_port(hadoop_namenode, tcp, 8020,s0)
>> network_port(hddtemp, tcp,7634,s0)
>> network_port(howl, tcp,5335,s0, udp,5353,s0)
>> network_port(hplip, tcp,1782,s0, tcp,2207,s0, tcp,2208,s0, tcp, 8290,s0, tcp,50000,s0, tcp,50002,s0, tcp,8292,s0, tcp,9100,s0, tcp,9101,s0, tcp,9102,s0, tcp,9220,s0, tcp,9221,s0, tcp,9222,s0, tcp,9280,s0, tcp,9281,s0, tcp,9282,s0, tcp,9290,s0, tcp,9291,s0, tcp,9292,s0)
>> @@ -211,6 +213,9 @@ network_port(xdmcp, udp,177,s0, tcp,177,s0)
>> network_port(xen, tcp,8002,s0)
>> network_port(xfs, tcp,7100,s0)
>> network_port(xserver, tcp,6000-6020,s0)
>> +network_port(zookeeper_client, tcp, 2181,s0)
>> +network_port(zookeeper_election, tcp, 3888,s0)
>> +network_port(zookeeper_leader, tcp, 2888,s0)
>> network_port(zebra, tcp,2600-2604,s0, tcp,2606,s0, udp,2600-2604,s0, udp,2606,s0)
>> network_port(zope, tcp,8021,s0)
>>
>> diff --git a/policy/modules/roles/sysadm.te b/policy/modules/roles/sysadm.te
>> index cad05ff..d2bc2b1 100644
>> --- a/policy/modules/roles/sysadm.te
>> +++ b/policy/modules/roles/sysadm.te
>> @@ -152,6 +152,10 @@ optional_policy(`
>> ')
>>
>> optional_policy(`
>> + hadoop_run(sysadm_t, sysadm_r)
>> +')
>> +
>> +optional_policy(`
>> # allow system administrator to use the ipsec script to look
>> # at things (e.g., ipsec auto --status)
>> # probably should create an ipsec_admin role for this kind of thing
>> @@ -392,6 +396,10 @@ optional_policy(`
>> yam_run(sysadm_t, sysadm_r)
>> ')
>>
>> +optional_policy(`
>> + hadoop_zookeeper_run_client(sysadm_t, sysadm_r)
>> +')
>> +
>> ifndef(`distro_redhat',`
>> optional_policy(`
>> auth_role(sysadm_r, sysadm_t)
>> diff --git a/policy/modules/services/hadoop.fc b/policy/modules/services/hadoop.fc
>> new file mode 100644
>> index 0000000..a09275d
>> --- /dev/null
>> +++ b/policy/modules/services/hadoop.fc
>> @@ -0,0 +1,54 @@
>> +/etc/hadoop.*(/.*)? gen_context(system_u:object_r:hadoop_etc_t,s0)
>> +
>> +/etc/rc\.d/init\.d/hadoop-(.*)?-datanode -- gen_context(system_u:object_r:hadoop_datanode_initrc_exec_t,s0)
>> +/etc/rc\.d/init\.d/hadoop-(.*)?-jobtracker -- gen_context(system_u:object_r:hadoop_jobtracker_initrc_exec_t,s0)
>> +/etc/rc\.d/init\.d/hadoop-(.*)?-namenode -- gen_context(system_u:object_r:hadoop_namenode_initrc_exec_t,s0)
>> +/etc/rc\.d/init\.d/hadoop-(.*)?-secondarynamenode -- gen_context(system_u:object_r:hadoop_secondarynamenode_initrc_exec_t,s0)
>> +/etc/rc\.d/init\.d/hadoop-(.*)?-tasktracker -- gen_context(system_u:object_r:hadoop_tasktracker_initrc_exec_t,s0)
>> +/etc/rc\.d/init\.d/hadoop-zookeeper -- gen_context(system_u:object_r:zookeeper_server_initrc_exec_t,s0)
>> +/etc/init\.d/hadoop-datanode -- gen_context(system_u:object_r:hadoop_datanode_initrc_exec_t,s0)
>> +/etc/init\.d/hadoop-jobtracker -- gen_context(system_u:object_r:hadoop_jobtracker_initrc_exec_t,s0)
>> +/etc/init\.d/hadoop-namenode -- gen_context(system_u:object_r:hadoop_namenode_initrc_exec_t,s0)
>> +/etc/init\.d/hadoop-secondarynamenode -- gen_context(system_u:object_r:hadoop_secondarynamenode_initrc_exec_t,s0)
>> +/etc/init\.d/hadoop-tasktracker -- gen_context(system_u:object_r:hadoop_tasktracker_initrc_exec_t,s0)
>> +/etc/init\.d/zookeeper -- gen_context(system_u:object_r:zookeeper_server_initrc_exec_t,s0)
>> +
>> +/etc/zookeeper(/.*)? gen_context(system_u:object_r:zookeeper_etc_t,s0)
>> +/etc/zookeeper\.dist(/.*)? gen_context(system_u:object_r:zookeeper_etc_t,s0)
>> +
>> +/usr/lib/hadoop(.*)?/bin/hadoop -- gen_context(system_u:object_r:hadoop_exec_t,s0)
>> +
>> +/usr/bin/zookeeper-client -- gen_context(system_u:object_r:zookeeper_exec_t,s0)
>> +/usr/bin/zookeeper-server -- gen_context(system_u:object_r:zookeeper_server_exec_t,s0)
>> +
>> +/var/zookeeper(/.*)? gen_context(system_u:object_r:zookeeper_server_var_t,s0)
>> +/var/lib/zookeeper(/.*)? gen_context(system_u:object_r:zookeeper_server_var_t,s0)
>> +
>> +/var/lib/hadoop(.*)? gen_context(system_u:object_r:hadoop_var_lib_t,s0)
>> +/var/lib/hadoop(.*)?/cache/hadoop/dfs/data(/.*)? gen_context(system_u:object_r:hadoop_datanode_var_lib_t,s0)
>> +/var/lib/hadoop(.*)?/cache/hadoop/dfs/name(/.*)? gen_context(system_u:object_r:hadoop_namenode_var_lib_t,s0)
>> +/var/lib/hadoop(.*)?/cache/hadoop/dfs/namesecondary(/.*)? gen_context(system_u:object_r:hadoop_secondarynamenode_var_lib_t,s0)
>> +/var/lib/hadoop(.*)?/cache/hadoop/mapred/local/jobTracker(/.*)? gen_context(system_u:object_r:hadoop_jobtracker_var_lib_t,s0)
>> +/var/lib/hadoop(.*)?/cache/hadoop/mapred/local/taskTracker(/.*)? gen_context(system_u:object_r:hadoop_tasktracker_var_lib_t,s0)
>> +
>> +/var/lock/subsys/hadoop-datanode -- gen_context(system_u:object_r:hadoop_datanode_lock_t,s0)
>> +/var/lock/subsys/hadoop-namenode -- gen_context(system_u:object_r:hadoop_namenode_lock_t,s0)
>> +/var/lock/subsys/hadoop-jobtracker -- gen_context(system_u:object_r:hadoop_jobtracker_lock_t,s0)
>> +/var/lock/subsys/hadoop-tasktracker -- gen_context(system_u:object_r:hadoop_tasktracker_lock_t,s0)
>> +/var/lock/subsys/hadoop-secondarynamenode -- gen_context(system_u:object_r:hadoop_secondarynamenode_lock_t,s0)
>> +
>> +/var/log/hadoop(.*)? gen_context(system_u:object_r:hadoop_log_t,s0)
>> +/var/log/hadoop(.*)?/hadoop-hadoop-datanode-(.*)? gen_context(system_u:object_r:hadoop_datanode_log_t,s0)
>> +/var/log/hadoop(.*)?/hadoop-hadoop-jobtracker-(.*)? gen_context(system_u:object_r:hadoop_jobtracker_log_t,s0)
>> +/var/log/hadoop(.*)?/hadoop-hadoop-namenode-(.*)? gen_context(system_u:object_r:hadoop_namenode_log_t,s0)
>> +/var/log/hadoop(.*)?/hadoop-hadoop-secondarynamenode-(.*)? gen_context(system_u:object_r:hadoop_secondarynamenode_log_t,s0)
>> +/var/log/hadoop(.*)?/hadoop-hadoop-tasktracker-(.*)? gen_context(system_u:object_r:hadoop_tasktracker_log_t,s0)
>> +/var/log/hadoop(.*)?/history(/.*)? gen_context(system_u:object_r:hadoop_jobtracker_log_t,s0)
>> +/var/log/zookeeper(/.*)? gen_context(system_u:object_r:zookeeper_log_t,s0)
>> +
>> +/var/run/hadoop(.*)? -d gen_context(system_u:object_r:hadoop_var_run_t,s0)
>> +/var/run/hadoop(.*)?/hadoop-hadoop-datanode\.pid -- gen_context(system_u:object_r:hadoop_datanode_initrc_var_run_t,s0)
>> +/var/run/hadoop(.*)?/hadoop-hadoop-namenode\.pid -- gen_context(system_u:object_r:hadoop_namenode_initrc_var_run_t,s0)
>> +/var/run/hadoop(.*)?/hadoop-hadoop-jobtracker\.pid -- gen_context(system_u:object_r:hadoop_jobtracker_initrc_var_run_t,s0)
>> +/var/run/hadoop(.*)?/hadoop-hadoop-tasktracker\.pid -- gen_context(system_u:object_r:hadoop_tasktracker_initrc_var_run_t,s0)
>> +/var/run/hadoop(.*)?/hadoop-hadoop-secondarynamenode\.pid -- gen_context(system_u:object_r:hadoop_secondarynamenode_initrc_var_run_t,s0)
>> diff --git a/policy/modules/services/hadoop.if b/policy/modules/services/hadoop.if
>> new file mode 100644
>> index 0000000..e919bcb
>> --- /dev/null
>> +++ b/policy/modules/services/hadoop.if
>> @@ -0,0 +1,352 @@
>> +##<summary>Software for reliable, scalable, distributed computing.</summary>
>> +
>> +#######################################
>> +##<summary>
>> +## The template to define a hadoop domain.
>> +##</summary>
>> +##<param name="domain_prefix">
>> +## <summary>
>> +## Domain prefix to be used.
>> +## </summary>
>> +##</param>
>> +#
>> +template(`hadoop_domain_template',`
>> + gen_require(`
>> + attribute hadoop_domain;
>> + type hadoop_log_t, hadoop_var_lib_t, hadoop_var_run_t;
>> + type hadoop_exec_t, hadoop_hsperfdata_t;
>> + ')
>> +
>> + ########################################
>> + #
>> + # Shared declarations.
>> + #
>> +
>> + type hadoop_$1_t, hadoop_domain;
>> + domain_type(hadoop_$1_t)
>> + domain_entry_file(hadoop_$1_t, hadoop_exec_t)
>> +
>> + type hadoop_$1_initrc_t;
>> + type hadoop_$1_initrc_exec_t;
>> + init_script_domain(hadoop_$1_initrc_t, hadoop_$1_initrc_exec_t)
>> +
>> + role system_r types { hadoop_$1_initrc_t hadoop_$1_t };
>> +
>> + type hadoop_$1_lock_t;
>> + files_lock_file(hadoop_$1_lock_t)
>> + files_lock_filetrans(hadoop_$1_initrc_t, hadoop_$1_lock_t, file)
>> +
>> + type hadoop_$1_log_t;
>> + logging_log_file(hadoop_$1_log_t)
>> + filetrans_pattern(hadoop_$1_initrc_t, hadoop_log_t, hadoop_$1_log_t, {dir file})
>> + filetrans_pattern(hadoop_$1_t, hadoop_log_t, hadoop_$1_log_t, {dir file})
>> +
>> + type hadoop_$1_var_lib_t;
>> + files_type(hadoop_$1_var_lib_t)
>> + filetrans_pattern(hadoop_$1_t, hadoop_var_lib_t, hadoop_$1_var_lib_t, file)
>> +
>> + type hadoop_$1_initrc_var_run_t;
>> + files_pid_file(hadoop_$1_initrc_var_run_t)
>> + filetrans_pattern(hadoop_$1_initrc_t, hadoop_var_run_t, hadoop_$1_initrc_var_run_t, file)
>> +
>> + type hadoop_$1_tmp_t;
>> + files_tmp_file(hadoop_$1_tmp_t)
>> + files_tmp_filetrans(hadoop_$1_t, hadoop_hsperfdata_t, dir)
>> + filetrans_pattern(hadoop_$1_t, hadoop_hsperfdata_t, hadoop_$1_tmp_t, file)
>> +
>> + ####################################
>> + #
>> + # Shared hadoop_$1 initrc policy.
>> + #
>> +
>> + allow hadoop_$1_initrc_t hadoop_$1_t:process { signal signull };
>> + allow hadoop_$1_initrc_t self:capability { setuid setgid };
>> + allow hadoop_$1_initrc_t self:fifo_file rw_fifo_file_perms;
>> + allow hadoop_$1_initrc_t self:process setsched;
>> +
>> + consoletype_exec(hadoop_$1_initrc_t)
>> + corecmd_exec_bin(hadoop_$1_initrc_t)
>> + corecmd_exec_shell(hadoop_$1_initrc_t)
>> +
>> + domtrans_pattern(hadoop_$1_initrc_t, hadoop_exec_t, hadoop_$1_t)
>> + dontaudit hadoop_$1_initrc_t self:capability sys_tty_config;
>> +
>> + files_read_etc_files(hadoop_$1_initrc_t)
>> + files_read_usr_files(hadoop_$1_initrc_t)
>> + files_search_pids(hadoop_$1_initrc_t)
>> + files_search_locks(hadoop_$1_initrc_t)
>> + fs_getattr_xattr_fs(hadoop_$1_initrc_t)
>> +
>> + hadoop_exec_config_files(hadoop_$1_initrc_t)
>> +
>> + init_rw_utmp(hadoop_$1_initrc_t)
>> + init_use_script_ptys(hadoop_$1_initrc_t)
>> +
>> + kernel_read_kernel_sysctls(hadoop_$1_initrc_t)
>> + kernel_read_sysctl(hadoop_$1_initrc_t)
>> + kernel_read_system_state(hadoop_$1_initrc_t)
>> +
>> + logging_send_syslog_msg(hadoop_$1_initrc_t)
>> + logging_send_audit_msgs(hadoop_$1_initrc_t)
>> + logging_search_logs(hadoop_$1_initrc_t)
>> +
>> + manage_files_pattern(hadoop_$1_initrc_t, hadoop_$1_lock_t, hadoop_$1_lock_t)
>> + manage_files_pattern(hadoop_$1_initrc_t, hadoop_$1_initrc_var_run_t, hadoop_$1_initrc_var_run_t)
>> + manage_files_pattern(hadoop_$1_initrc_t, hadoop_$1_log_t, hadoop_$1_log_t)
>> + manage_dirs_pattern(hadoop_$1_initrc_t, hadoop_var_run_t, hadoop_var_run_t)
>> + manage_files_pattern(hadoop_$1_initrc_t, hadoop_var_run_t, hadoop_var_run_t)
>> +
>> + miscfiles_read_localization(hadoop_$1_initrc_t)
>> +
>> + optional_policy(`
>> + nscd_socket_use(hadoop_$1_initrc_t)
>> + ')
>> +
>> + term_use_generic_ptys(hadoop_$1_initrc_t)
>> +
>> + userdom_dontaudit_search_user_home_dirs(hadoop_$1_initrc_t)
>> +
>> + ####################################
>> + #
>> + # Shared hadoop_$1 policy.
>> + #
>> +
>> + allow hadoop_$1_t hadoop_domain:process signull;
>> + allow hadoop_$1_t self:fifo_file rw_fifo_file_perms;
>> + allow hadoop_$1_t self:process execmem;
>> + allow hadoop_$1_t hadoop_var_run_t:dir getattr;
>> +
>> + corecmd_exec_bin(hadoop_$1_t)
>> + corecmd_exec_shell(hadoop_$1_t)
>> +
>> + dev_read_rand(hadoop_$1_t)
>> + dev_read_urand(hadoop_$1_t)
>> + dev_read_sysfs(hadoop_$1_t)
>> + dontaudit hadoop_$1_t self:netlink_route_socket rw_netlink_socket_perms;
>> +
>> + files_read_etc_files(hadoop_$1_t)
>> + files_search_pids(hadoop_$1_t)
>> + files_search_var_lib(hadoop_$1_t)
>> +
>> + hadoop_exec_config_files(hadoop_$1_t)
>> +
>> + java_exec(hadoop_$1_t)
>> +
>> + kernel_read_network_state(hadoop_$1_t)
>> + kernel_read_system_state(hadoop_$1_t)
>> +
>> + logging_search_logs(hadoop_$1_t)
>> +
>> + manage_dirs_pattern(hadoop_$1_t, hadoop_$1_var_lib_t, hadoop_$1_var_lib_t)
>> + manage_dirs_pattern(hadoop_$1_t, hadoop_hsperfdata_t, hadoop_hsperfdata_t)
>> + manage_files_pattern(hadoop_$1_t, hadoop_$1_log_t, hadoop_$1_log_t)
>> + manage_files_pattern(hadoop_$1_t, hadoop_$1_var_lib_t, hadoop_$1_var_lib_t)
>> + manage_files_pattern(hadoop_$1_t, hadoop_$1_tmp_t, hadoop_$1_tmp_t)
>> + miscfiles_read_localization(hadoop_$1_t)
>> +
>> + optional_policy(`
>> + nscd_socket_use(hadoop_$1_t)
>> + ')
>> +
>> + sysnet_read_config(hadoop_$1_t)
>> +
>> + allow hadoop_$1_t self:tcp_socket create_stream_socket_perms;
>> + corenet_all_recvfrom_unlabeled(hadoop_$1_t)
>> + corenet_all_recvfrom_netlabel(hadoop_$1_t)
>> + corenet_tcp_bind_all_nodes(hadoop_$1_t)
>> + corenet_tcp_sendrecv_generic_if(hadoop_$1_t)
>> + corenet_tcp_sendrecv_generic_node(hadoop_$1_t)
>> + corenet_tcp_sendrecv_all_ports(hadoop_$1_t)
>> + # Hadoop uses high ordered random ports for services
>> + # If permanent ports are chosen, remove line below and lock down
>> + corenet_tcp_connect_generic_port(hadoop_$1_t)
>> +
>> + allow hadoop_$1_t self:udp_socket create_socket_perms;
>> + corenet_udp_sendrecv_generic_if(hadoop_$1_t)
>> + corenet_udp_sendrecv_all_nodes(hadoop_$1_t)
>> + corenet_udp_bind_all_nodes(hadoop_$1_t)
>> +')
>> +
>> +########################################
>> +## <summary>
>> +##	Execute hadoop in the
>> +##	hadoop domain.
>> +## </summary>
>> +## <param name="domain">
>> +##	<summary>
>> +##	Domain allowed to transition.
>> +##	</summary>
>> +## </param>
>> +#
>> +interface(`hadoop_domtrans',`
>> + gen_require(`
>> + type hadoop_t, hadoop_exec_t;
>> + ')
>> +
>> + files_search_usr($1)
>> + libs_search_lib($1)
>> + domtrans_pattern($1, hadoop_exec_t, hadoop_t)
>> +')
>> +
>> +########################################
>> +## <summary>
>> +##	Execute hadoop in the hadoop domain,
>> +##	and allow the specified role the
>> +##	hadoop domain.
>> +## </summary>
>> +## <param name="domain">
>> +##	<summary>
>> +##	Domain allowed to transition.
>> +##	</summary>
>> +## </param>
>> +## <param name="role">
>> +##	<summary>
>> +##	Role allowed access.
>> +##	</summary>
>> +## </param>
>> +## <rolecap/>
>> +#
>> +interface(`hadoop_run',`
>> + gen_require(`
>> + type hadoop_t;
>> + ')
>> +
>> + hadoop_domtrans($1)
>> + role $2 types hadoop_t;
>> +
>> + allow $1 hadoop_t:process { ptrace signal_perms };
>> + ps_process_pattern($1, hadoop_t)
>> +')
>> +
>> +########################################
>> +## <summary>
>> +##	Execute zookeeper client in the
>> +##	zookeeper client domain.
>> +## </summary>
>> +## <param name="domain">
>> +##	<summary>
>> +##	Domain allowed to transition.
>> +##	</summary>
>> +## </param>
>> +#
>> +interface(`hadoop_domtrans_zookeeper_client',`
>> + gen_require(`
>> + type zookeeper_t, zookeeper_exec_t;
>> + ')
>> +
>> + corecmd_search_bin($1)
>> + files_search_usr($1)
>> + domtrans_pattern($1, zookeeper_exec_t, zookeeper_t)
>> +')
>> +
>> +########################################
>> +## <summary>
>> +##	Execute zookeeper server in the
>> +##	zookeeper server domain.
>> +## </summary>
>> +## <param name="domain">
>> +##	<summary>
>> +##	Domain allowed to transition.
>> +##	</summary>
>> +## </param>
>> +#
>> +interface(`hadoop_domtrans_zookeeper_server',`
>> + gen_require(`
>> + type zookeeper_server_t, zookeeper_server_exec_t;
>> + ')
>> +
>> + corecmd_search_bin($1)
>> + files_search_usr($1)
>> + domtrans_pattern($1, zookeeper_server_exec_t, zookeeper_server_t)
>> +')
>> +
>> +########################################
>> +## <summary>
>> +##	Execute zookeeper server in the
>> +##	zookeeper server domain via init script.
>> +## </summary>
>> +## <param name="domain">
>> +##	<summary>
>> +##	Domain allowed to transition.
>> +##	</summary>
>> +## </param>
>> +#
>> +interface(`hadoop_zookeeper_initrc_domtrans_server',`
>> + gen_require(`
>> + type zookeeper_server_initrc_exec_t;
>> + ')
>> +
>> + init_labeled_script_domtrans($1, zookeeper_server_initrc_exec_t)
>> +')
>> +
>> +########################################
>> +## <summary>
>> +##	Execute zookeeper client in the
>> +##	zookeeper client domain, and allow the
>> +##	specified role the zookeeper client domain.
>> +## </summary>
>> +## <param name="domain">
>> +##	<summary>
>> +##	Domain allowed to transition.
>> +##	</summary>
>> +## </param>
>> +## <param name="role">
>> +##	<summary>
>> +##	Role allowed access.
>> +##	</summary>
>> +## </param>
>> +## <rolecap/>
>> +#
>> +interface(`hadoop_zookeeper_run_client',`
>> + gen_require(`
>> + type zookeeper_t;
>> + ')
>> +
>> + hadoop_domtrans_zookeeper_client($1)
>> + role $2 types zookeeper_t;
>> +
>> + allow $1 zookeeper_t:process { ptrace signal_perms };
>> + ps_process_pattern($1, zookeeper_t)
>> +')
>> +
>> +########################################
>> +## <summary>
>> +##	Read hadoop configuration files.
>> +## </summary>
>> +## <param name="domain">
>> +##	<summary>
>> +##	Domain allowed access.
>> +##	</summary>
>> +## </param>
>> +#
>> +interface(`hadoop_read_config_files', `
>> + gen_require(`
>> + type hadoop_etc_t;
>> + ')
>> +
>> + # Use the support patterns rather than hand-rolled permission
>> + # sets; these also grant search on the hadoop_etc_t directory.
>> + read_lnk_files_pattern($1, hadoop_etc_t, hadoop_etc_t)
>> + read_files_pattern($1, hadoop_etc_t, hadoop_etc_t)
>> +')
>> +
>> +########################################
>> +## <summary>
>> +##	Read and execute hadoop configuration
>> +##	files.
>> +## </summary>
>> +## <param name="domain">
>> +##	<summary>
>> +##	Domain allowed access.
>> +##	</summary>
>> +## </param>
>> +#
>> +interface(`hadoop_exec_config_files', `
>> + gen_require(`
>> + type hadoop_etc_t;
>> + ')
>> +
>> + hadoop_read_config_files($1)
>> + # can_exec() is the refpolicy idiom for execute + execute_no_trans
>> + # (same macro this patch already uses for hadoop_t itself).
>> + can_exec($1, hadoop_etc_t)
>> +')
>> diff --git a/policy/modules/services/hadoop.te b/policy/modules/services/hadoop.te
>> new file mode 100644
>> index 0000000..587c393
>> --- /dev/null
>> +++ b/policy/modules/services/hadoop.te
>> @@ -0,0 +1,379 @@
>> +policy_module(hadoop, 1.0.0)
>> +
>> +########################################
>> +#
>> +# Hadoop declarations.
>> +#
>> +
>> +# Attribute grouping the hadoop daemon domains
>> +# (presumably assigned inside hadoop_domain_template -- confirm).
>> +attribute hadoop_domain;
>> +
>> +type hadoop_t;
>> +type hadoop_exec_t;
>> +application_domain(hadoop_t, hadoop_exec_t)
>> +ubac_constrained(hadoop_t)
>> +
>> +type hadoop_etc_t;
>> +files_config_file(hadoop_etc_t)
>> +
>> +type hadoop_var_lib_t;
>> +files_type(hadoop_var_lib_t)
>> +
>> +type hadoop_log_t;
>> +logging_log_file(hadoop_log_t)
>> +
>> +type hadoop_var_run_t;
>> +files_pid_file(hadoop_var_run_t)
>> +
>> +type hadoop_tmp_t;
>> +files_tmp_file(hadoop_tmp_t)
>> +ubac_constrained(hadoop_tmp_t)
>> +
>> +# Shared JVM hsperfdata tmp directory; per-domain tmp files are
>> +# created inside it via filetrans_pattern below.
>> +type hadoop_hsperfdata_t;
>> +files_tmp_file(hadoop_hsperfdata_t)
>> +ubac_constrained(hadoop_hsperfdata_t)
>> +
>> +# Instantiate the per-daemon domains from the shared template.
>> +hadoop_domain_template(datanode)
>> +hadoop_domain_template(jobtracker)
>> +hadoop_domain_template(namenode)
>> +hadoop_domain_template(secondarynamenode)
>> +hadoop_domain_template(tasktracker)
>> +
>> +########################################
>> +#
>> +# Hadoop zookeeper client declarations.
>> +#
>> +
>> +type zookeeper_t;
>> +type zookeeper_exec_t;
>> +application_domain(zookeeper_t, zookeeper_exec_t)
>> +ubac_constrained(zookeeper_t)
>> +
>> +type zookeeper_etc_t;
>> +files_config_file(zookeeper_etc_t)
>> +
>> +# Log type shared by the client and server domains (both append
>> +# and create files of this type below).
>> +type zookeeper_log_t;
>> +logging_log_file(zookeeper_log_t)
>> +
>> +type zookeeper_tmp_t;
>> +files_tmp_file(zookeeper_tmp_t)
>> +ubac_constrained(zookeeper_tmp_t)
>> +
>> +########################################
>> +#
>> +# Hadoop zookeeper server declarations.
>> +#
>> +
>> +type zookeeper_server_t;
>> +type zookeeper_server_exec_t;
>> +init_daemon_domain(zookeeper_server_t, zookeeper_server_exec_t)
>> +
>> +type zookeeper_server_initrc_exec_t;
>> +init_script_file(zookeeper_server_initrc_exec_t)
>> +
>> +type zookeeper_server_var_t;
>> +files_type(zookeeper_server_var_t)
>> +
>> +# TODO: this will need a file context specification in hadoop.fc.
>> +type zookeeper_server_var_run_t;
>> +files_pid_file(zookeeper_server_var_run_t)
>> +
>> +type zookeeper_server_tmp_t;
>> +files_tmp_file(zookeeper_server_tmp_t)
>> +
>> +########################################
>> +#
>> +# Hadoop policy.
>> +#
>> +
>> +allow hadoop_t self:capability sys_resource;
>> +allow hadoop_t self:process { getsched setsched signal signull setrlimit execmem };
>> +allow hadoop_t self:fifo_file rw_fifo_file_perms;
>> +allow hadoop_t self:key write;
>> +allow hadoop_t self:tcp_socket create_stream_socket_perms;
>> +allow hadoop_t self:udp_socket create_socket_perms;
>> +allow hadoop_t hadoop_domain:process signull;
>> +
>> +dontaudit hadoop_t self:netlink_route_socket rw_netlink_socket_perms;
>> +
>> +read_files_pattern(hadoop_t, hadoop_etc_t, hadoop_etc_t)
>> +read_lnk_files_pattern(hadoop_t, hadoop_etc_t, hadoop_etc_t)
>> +can_exec(hadoop_t, hadoop_etc_t)
>> +
>> +manage_dirs_pattern(hadoop_t, hadoop_var_lib_t, hadoop_var_lib_t)
>> +manage_dirs_pattern(hadoop_t, hadoop_log_t, hadoop_log_t)
>> +manage_dirs_pattern(hadoop_t, hadoop_tmp_t, hadoop_tmp_t)
>> +manage_dirs_pattern(hadoop_t, hadoop_hsperfdata_t, hadoop_hsperfdata_t)
>> +manage_files_pattern(hadoop_t, hadoop_var_lib_t, hadoop_var_lib_t)
>> +manage_files_pattern(hadoop_t, hadoop_tmp_t, hadoop_tmp_t)
>> +
>> +getattr_dirs_pattern(hadoop_t, hadoop_var_run_t, hadoop_var_run_t)
>> +
>> +# JVM hsperfdata dir in /tmp; per-domain tmp files live inside it.
>> +files_tmp_filetrans(hadoop_t, hadoop_hsperfdata_t, dir)
>> +filetrans_pattern(hadoop_t, hadoop_hsperfdata_t, hadoop_tmp_t, file)
>> +
>> +kernel_read_network_state(hadoop_t)
>> +kernel_read_system_state(hadoop_t)
>> +
>> +corecmd_exec_bin(hadoop_t)
>> +corecmd_exec_shell(hadoop_t)
>> +
>> +# NOTE(review): no hadoop_datanode port type is declared by this
>> +# patch's corenetwork.te.in hunk (only hadoop_namenode and the
>> +# zookeeper ports), so corenet_tcp_connect_hadoop_datanode_port
>> +# will not build as-is -- confirm a datanode port is added.
>> +corenet_all_recvfrom_unlabeled(hadoop_t)
>> +corenet_all_recvfrom_netlabel(hadoop_t)
>> +corenet_sendrecv_hadoop_namenode_client_packets(hadoop_t)
>> +corenet_sendrecv_portmap_client_packets(hadoop_t)
>> +corenet_sendrecv_zope_client_packets(hadoop_t)
>> +corenet_tcp_bind_all_nodes(hadoop_t)
>> +corenet_tcp_connect_hadoop_namenode_port(hadoop_t)
>> +corenet_tcp_connect_hadoop_datanode_port(hadoop_t)
>> +corenet_tcp_connect_portmap_port(hadoop_t)
>> +corenet_tcp_connect_zope_port(hadoop_t)
>> +corenet_tcp_sendrecv_all_nodes(hadoop_t)
>> +corenet_tcp_sendrecv_all_ports(hadoop_t)
>> +corenet_tcp_sendrecv_generic_if(hadoop_t)
>> +# Hadoop uses high ordered random ports for services
>> +# If permanent ports are chosen, remove line below and lock down
>> +corenet_tcp_connect_generic_port(hadoop_t)
>> +corenet_udp_bind_all_nodes(hadoop_t)
>> +corenet_udp_sendrecv_all_nodes(hadoop_t)
>> +corenet_udp_sendrecv_all_ports(hadoop_t)
>> +corenet_udp_sendrecv_generic_if(hadoop_t)
>> +
>> +dev_read_rand(hadoop_t)
>> +dev_read_sysfs(hadoop_t)
>> +dev_read_urand(hadoop_t)
>> +
>> +files_dontaudit_search_spool(hadoop_t)
>> +files_read_usr_files(hadoop_t)
>> +# NOTE(review): files_read_all_files() is a very broad privilege;
>> +# consider narrowing this to the specific types hadoop must read.
>> +files_read_all_files(hadoop_t)
>> +
>> +fs_getattr_xattr_fs(hadoop_t)
>> +
>> +java_exec(hadoop_t)
>> +
>> +miscfiles_read_localization(hadoop_t)
>> +
>> +userdom_dontaudit_search_user_home_dirs(hadoop_t)
>> +userdom_use_user_terminals(hadoop_t)
>> +
>> +optional_policy(`
>> + nis_use_ypbind(hadoop_t)
>> +')
>> +
>> +optional_policy(`
>> + nscd_socket_use(hadoop_t)
>> +')
>> +
>> +########################################
>> +#
>> +# Hadoop datanode policy.
>> +#
>> +
>> +allow hadoop_datanode_t self:process signal;
>> +# NOTE(review): corenet_tcp_bind_hadoop_datanode_port requires a
>> +# hadoop_datanode port declaration in corenetwork.te.in, which this
>> +# patch does not add (only hadoop_namenode) -- confirm.
>> +corenet_tcp_bind_hadoop_datanode_port(hadoop_datanode_t)
>> +corenet_tcp_connect_hadoop_datanode_port(hadoop_datanode_t)
>> +corenet_tcp_connect_hadoop_namenode_port(hadoop_datanode_t)
>> +fs_getattr_xattr_fs(hadoop_datanode_t)
>> +manage_dirs_pattern(hadoop_datanode_t, hadoop_var_lib_t, hadoop_var_lib_t)
>> +
>> +########################################
>> +#
>> +# Hadoop jobtracker policy.
>> +#
>> +
>> +# NOTE(review): the jobtracker reuses the zope port label (tcp 8021)
>> +# for its listener; consider a dedicated port type instead.
>> +corenet_tcp_bind_zope_port(hadoop_jobtracker_t)
>> +corenet_tcp_connect_hadoop_datanode_port(hadoop_jobtracker_t)
>> +corenet_tcp_connect_hadoop_namenode_port(hadoop_jobtracker_t)
>> +create_dirs_pattern(hadoop_jobtracker_t, hadoop_jobtracker_log_t, hadoop_jobtracker_log_t)
>> +manage_dirs_pattern(hadoop_jobtracker_t, hadoop_var_lib_t, hadoop_var_lib_t)
>> +setattr_dirs_pattern(hadoop_jobtracker_t, hadoop_jobtracker_log_t, hadoop_jobtracker_log_t)
>> +
>> +########################################
>> +#
>> +# Hadoop namenode policy.
>> +#
>> +
>> +# Namenode listens on, and connects to, the hadoop_namenode port
>> +# (tcp 8020, declared in corenetwork.te.in by this patch).
>> +corenet_tcp_bind_hadoop_namenode_port(hadoop_namenode_t)
>> +corenet_tcp_connect_hadoop_namenode_port(hadoop_namenode_t)
>> +manage_dirs_pattern(hadoop_namenode_t, hadoop_var_lib_t, hadoop_var_lib_t)
>> +manage_files_pattern(hadoop_namenode_t, hadoop_var_lib_t, hadoop_var_lib_t)
>> +
>> +########################################
>> +#
>> +# Hadoop secondary namenode policy.
>> +#
>> +
>> +# Secondary namenode only connects out to the primary namenode port.
>> +corenet_tcp_connect_hadoop_namenode_port(hadoop_secondarynamenode_t)
>> +manage_dirs_pattern(hadoop_secondarynamenode_t, hadoop_var_lib_t, hadoop_var_lib_t)
>> +
>> +########################################
>> +#
>> +# Hadoop tasktracker policy.
>> +#
>> +
>> +allow hadoop_tasktracker_t self:process signal;
>> +
>> +corenet_tcp_connect_hadoop_datanode_port(hadoop_tasktracker_t)
>> +corenet_tcp_connect_hadoop_namenode_port(hadoop_tasktracker_t)
>> +corenet_tcp_connect_zope_port(hadoop_tasktracker_t)
>> +
>> +# Tasktracker creates its per-daemon log dir under hadoop_log_t.
>> +filetrans_pattern(hadoop_tasktracker_t, hadoop_log_t, hadoop_tasktracker_log_t, dir)
>> +# NOTE(review): fs_associate() is normally applied to file types,
>> +# not process domains -- confirm this is intended.
>> +fs_associate(hadoop_tasktracker_t)
>> +fs_getattr_xattr_fs(hadoop_tasktracker_t)
>> +
>> +manage_dirs_pattern(hadoop_tasktracker_t, hadoop_var_lib_t, hadoop_var_lib_t)
>> +manage_dirs_pattern(hadoop_tasktracker_t, hadoop_tasktracker_log_t, hadoop_tasktracker_log_t)
>> +
>> +########################################
>> +#
>> +# Hadoop zookeeper client policy.
>> +#
>> +
>> +allow zookeeper_t self:process { getsched sigkill signal signull execmem };
>> +allow zookeeper_t self:fifo_file rw_fifo_file_perms;
>> +allow zookeeper_t self:tcp_socket create_stream_socket_perms;
>> +allow zookeeper_t self:udp_socket create_socket_perms;
>> +allow zookeeper_t zookeeper_server_t:process signull;
>> +
>> +read_files_pattern(zookeeper_t, zookeeper_etc_t, zookeeper_etc_t)
>> +read_lnk_files_pattern(zookeeper_t, zookeeper_etc_t, zookeeper_etc_t)
>> +
>> +setattr_dirs_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
>> +append_files_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
>> +create_files_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
>> +read_files_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
>> +setattr_files_pattern(zookeeper_t, zookeeper_log_t, zookeeper_log_t)
>> +logging_log_filetrans(zookeeper_t, zookeeper_log_t, file)
>> +
>> +# Client tmp files are created inside the shared JVM hsperfdata dir.
>> +manage_dirs_pattern(zookeeper_t, hadoop_hsperfdata_t, hadoop_hsperfdata_t)
>> +manage_files_pattern(zookeeper_t, zookeeper_tmp_t, zookeeper_tmp_t)
>> +files_tmp_filetrans(zookeeper_t, hadoop_hsperfdata_t, dir)
>> +filetrans_pattern(zookeeper_t, hadoop_hsperfdata_t, zookeeper_tmp_t, file)
>> +
>> +can_exec(zookeeper_t, zookeeper_exec_t)
>> +
>> +kernel_read_network_state(zookeeper_t)
>> +kernel_read_system_state(zookeeper_t)
>> +
>> +corecmd_exec_bin(zookeeper_t)
>> +corecmd_exec_shell(zookeeper_t)
>> +
>> +dontaudit zookeeper_t self:netlink_route_socket rw_netlink_socket_perms;
>> +
>> +corenet_all_recvfrom_unlabeled(zookeeper_t)
>> +corenet_all_recvfrom_netlabel(zookeeper_t)
>> +corenet_sendrecv_zookeeper_client_client_packets(zookeeper_t)
>> +corenet_tcp_bind_all_nodes(zookeeper_t)
>> +corenet_tcp_connect_zookeeper_client_port(zookeeper_t)
>> +corenet_tcp_sendrecv_all_nodes(zookeeper_t)
>> +corenet_tcp_sendrecv_all_ports(zookeeper_t)
>> +corenet_tcp_sendrecv_generic_if(zookeeper_t)
>> +# Hadoop uses high ordered random ports for services
>> +# If permanent ports are chosen, remove line below and lock down
>> +corenet_tcp_connect_generic_port(zookeeper_t)
>> +corenet_udp_bind_all_nodes(zookeeper_t)
>> +corenet_udp_sendrecv_all_nodes(zookeeper_t)
>> +corenet_udp_sendrecv_all_ports(zookeeper_t)
>> +corenet_udp_sendrecv_generic_if(zookeeper_t)
>> +
>> +dev_read_rand(zookeeper_t)
>> +dev_read_sysfs(zookeeper_t)
>> +dev_read_urand(zookeeper_t)
>> +
>> +files_read_etc_files(zookeeper_t)
>> +files_read_usr_files(zookeeper_t)
>> +
>> +miscfiles_read_localization(zookeeper_t)
>> +
>> +sysnet_read_config(zookeeper_t)
>> +
>> +userdom_dontaudit_search_user_home_dirs(zookeeper_t)
>> +userdom_use_user_terminals(zookeeper_t)
>> +
>> +java_exec(zookeeper_t)
>> +
>> +optional_policy(`
>> + nscd_socket_use(zookeeper_t)
>> +')
>> +
>> +########################################
>> +#
>> +# Hadoop zookeeper server policy.
>> +#
>> +
>> +allow zookeeper_server_t self:capability kill;
>> +allow zookeeper_server_t self:process { execmem getsched sigkill signal signull };
>> +allow zookeeper_server_t self:fifo_file rw_fifo_file_perms;
>> +# NOTE(review): the client domain dontaudits netlink_route_socket but
>> +# the server allows it -- confirm the server really needs this.
>> +allow zookeeper_server_t self:netlink_route_socket rw_netlink_socket_perms;
>> +allow zookeeper_server_t self:tcp_socket create_stream_socket_perms;
>> +allow zookeeper_server_t self:udp_socket create_socket_perms;
>> +
>> +read_files_pattern(zookeeper_server_t, zookeeper_etc_t, zookeeper_etc_t)
>> +read_lnk_files_pattern(zookeeper_server_t, zookeeper_etc_t, zookeeper_etc_t)
>> +
>> +manage_dirs_pattern(zookeeper_server_t, zookeeper_server_var_t, zookeeper_server_var_t)
>> +manage_files_pattern(zookeeper_server_t, zookeeper_server_var_t, zookeeper_server_var_t)
>> +files_var_lib_filetrans(zookeeper_server_t, zookeeper_server_var_t, { dir file })
>> +
>> +setattr_dirs_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
>> +append_files_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
>> +create_files_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
>> +read_files_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
>> +setattr_files_pattern(zookeeper_server_t, zookeeper_log_t, zookeeper_log_t)
>> +logging_log_filetrans(zookeeper_server_t, zookeeper_log_t, file)
>> +
>> +manage_files_pattern(zookeeper_server_t, zookeeper_server_var_run_t, zookeeper_server_var_run_t)
>> +files_pid_filetrans(zookeeper_server_t, zookeeper_server_var_run_t, file)
>> +
>> +# Server tmp files are created inside the shared JVM hsperfdata dir.
>> +manage_dirs_pattern(zookeeper_server_t, hadoop_hsperfdata_t, hadoop_hsperfdata_t)
>> +manage_files_pattern(zookeeper_server_t, zookeeper_server_tmp_t, zookeeper_server_tmp_t)
>> +files_tmp_filetrans(zookeeper_server_t, hadoop_hsperfdata_t, dir)
>> +filetrans_pattern(zookeeper_server_t, hadoop_hsperfdata_t, zookeeper_server_tmp_t, file)
>> +
>> +can_exec(zookeeper_server_t, zookeeper_server_exec_t)
>> +
>> +kernel_read_network_state(zookeeper_server_t)
>> +kernel_read_system_state(zookeeper_server_t)
>> +
>> +corecmd_exec_bin(zookeeper_server_t)
>> +corecmd_exec_shell(zookeeper_server_t)
>> +
>> +corenet_all_recvfrom_unlabeled(zookeeper_server_t)
>> +corenet_all_recvfrom_netlabel(zookeeper_server_t)
>> +corenet_sendrecv_zookeeper_election_client_packets(zookeeper_server_t)
>> +corenet_sendrecv_zookeeper_leader_client_packets(zookeeper_server_t)
>> +corenet_sendrecv_zookeeper_client_server_packets(zookeeper_server_t)
>> +corenet_sendrecv_zookeeper_election_server_packets(zookeeper_server_t)
>> +corenet_sendrecv_zookeeper_leader_server_packets(zookeeper_server_t)
>> +corenet_tcp_bind_all_nodes(zookeeper_server_t)
>> +corenet_tcp_bind_zookeeper_client_port(zookeeper_server_t)
>> +corenet_tcp_bind_zookeeper_election_port(zookeeper_server_t)
>> +corenet_tcp_bind_zookeeper_leader_port(zookeeper_server_t)
>> +corenet_tcp_connect_zookeeper_election_port(zookeeper_server_t)
>> +corenet_tcp_connect_zookeeper_leader_port(zookeeper_server_t)
>> +corenet_tcp_sendrecv_generic_if(zookeeper_server_t)
>> +corenet_tcp_sendrecv_generic_node(zookeeper_server_t)
>> +corenet_tcp_sendrecv_all_ports(zookeeper_server_t)
>> +# Hadoop uses high ordered random ports for services
>> +# If permanent ports are chosen, remove line below and lock down
>> +corenet_tcp_connect_generic_port(zookeeper_server_t)
>> +corenet_udp_sendrecv_generic_if(zookeeper_server_t)
>> +corenet_udp_sendrecv_all_nodes(zookeeper_server_t)
>> +corenet_udp_sendrecv_all_ports(zookeeper_server_t)
>> +corenet_udp_bind_all_nodes(zookeeper_server_t)
>> +
>> +dev_read_rand(zookeeper_server_t)
>> +dev_read_sysfs(zookeeper_server_t)
>> +dev_read_urand(zookeeper_server_t)
>> +
>> +files_read_etc_files(zookeeper_server_t)
>> +files_read_usr_files(zookeeper_server_t)
>> +
>> +fs_getattr_xattr_fs(zookeeper_server_t)
>> +
>> +logging_send_syslog_msg(zookeeper_server_t)
>> +
>> +miscfiles_read_localization(zookeeper_server_t)
>> +
>> +sysnet_read_config(zookeeper_server_t)
>> +
>> +java_exec(zookeeper_server_t)
>> diff --git a/policy/modules/system/unconfined.te b/policy/modules/system/unconfined.te
>> index f976344..f1e6c9f 100644
>> --- a/policy/modules/system/unconfined.te
>> +++ b/policy/modules/system/unconfined.te
>> @@ -118,6 +118,10 @@ optional_policy(`
>> ')
>>
>> optional_policy(`
>> + hadoop_run(unconfined_t, unconfined_r)
>> +')
>> +
>> +optional_policy(`
>> inn_domtrans(unconfined_t)
>> ')
>>
>> @@ -210,6 +214,10 @@ optional_policy(`
>> xserver_domtrans(unconfined_t)
>> ')
>>
>> +optional_policy(`
>> + hadoop_zookeeper_run_client(unconfined_t, unconfined_r)
>> +')
>> +
>> ########################################
>> #
>> # Unconfined Execmem Local policy
>
>