Index: openacs-4/packages/acs-tcl/tcl/cluster-init.tcl =================================================================== RCS file: /usr/local/cvsroot/openacs-4/packages/acs-tcl/tcl/cluster-init.tcl,v diff -u -r1.1.2.8 -r1.1.2.9 --- openacs-4/packages/acs-tcl/tcl/cluster-init.tcl 29 Dec 2022 13:02:48 -0000 1.1.2.8 +++ openacs-4/packages/acs-tcl/tcl/cluster-init.tcl 7 Feb 2023 17:50:32 -0000 1.1.2.9 @@ -1,32 +1,40 @@ # # Check if cluster is enabled, and if, set up the custer objects # +ns_log notice "server_cluster_enabled_p: [server_cluster_enabled_p]" if {[server_cluster_enabled_p]} { # - # Register the nodes, which are available at startup time. + # Check whether the secret for intra-cluster communication is + # properly defined. If not, then do not activate cluster mode. # - ::acs::Cluster register_nodes + if {![::acs::cluster secret_configured]} { + ns_log error "cluster setup aborted:" \ + "the cluster secret is not properly defined." \ + "Deactivated cluster mode." + proc server_cluster_enabled_p {} { return 0 } + return + } + # - # Update the blueprint every 60s in case the cluster configuration - # has changed, or cluster nodes become available or unavailable. + # Perform setup only once (not in every object creation in new + # threads). # - ad_schedule_proc -all_servers t 20 ::acs::Cluster refresh_blueprint + ns_log notice "performing cluster setup" + ::acs::cluster setup - foreach ip [parameter::get -package_id $::acs::kernel_id -parameter ClusterAuthorizedIP] { - if {[string first * $ip] > -1} { - ::acs::Cluster eval [subst { - lappend :allowed_host_patterns $ip - }] - } else { - ::acs::Cluster eval [subst { - set :allowed_host($ip) 1 - }] - } - } + # + # Update the cluster info every 20s to detect changed cluster + # configurations, or cluster nodes becoming available or + # unavailable. 
+ # + ad_schedule_proc -all_servers t 20s ::acs::cluster update_node_info - set url [::acs::Cluster eval {set :url}] + # + # Setup of the listening URL + # + set url [::acs::cluster cget -url] # Check, if the filter URL mirrors a site node. If so, # the cluster mechanism will not work, if the site node @@ -35,23 +43,33 @@ set node_info [site_node::get -url $url] if {[dict get $node_info url] ne "/"} { - ns_log notice "***\n*** WARNING: there appears a package mounted on" \ + ns_log warning "***\n*** WARNING: there appears a package mounted on" \ "$url\n***Cluster configuration will not work" \ "since there is a conflict with the filter with the same name! (n)" - } + } else { - #ns_register_filter trace GET $url ::acs::Cluster - ns_register_filter preauth GET $url ::acs::Cluster - #ns_register_filter postauth GET $url ::acs::Cluster - #ad_register_filter -priority 900 preauth GET $url ::acs::Cluster + #ns_register_filter trace GET $url ::acs::cluster + ns_register_filter preauth GET $url ::acs::cluster + #ns_register_filter postauth GET $url ::acs::cluster + #ad_register_filter -priority 900 preauth GET $url ::acs::cluster - ns_register_proc GET $url ::acs::Cluster incoming_request + ns_register_proc GET $url ::acs::cluster incoming_request + } + # + # Register the nodes, which are reachable at startup time. + # + ::acs::cluster register_nodes -startup + ns_atstartup { + # + # We could add some code for actively testing keep-alive + # status. + # ns_log notice "CHECK ::throttle '[::info commands ::throttle]'" if {0 && [::info commands ::throttle] ne ""} { - ns_log notice "CHECK calling ::acs::Cluster check_nodes" - throttle do ::acs::Cluster check_nodes + ns_log notice "CHECK calling ::acs::cluster check_nodes" + throttle do ::acs::cluster check_nodes } } }