
NODEBB: Nginx error performance & High CPU

Solved Performance
  • @DownPW It’s more straightforward than it sounds, although can be confusing if you look at it for the first time. I’ve just implemented it here. Can you provide your nginx config and your config.json (remove password obviously)

    Thanks

  • @phenomlab said in NODEBB: Nginx error performance & High CPU:

    @DownPW yes, that was a command line to get you the immediate information you needed. Did you try adding the necessary config to nginx so that it bypasses the reverse proxy?

    @phenomlab

    Nope because I don’t know what it will be used for.

    I access the report just fine without it.

    I would just like to use webmin to generate it every day automatically.
    I would just like to settle this permissions thing.

  • @phenomlab said in NODEBB: Nginx error performance & High CPU:

    @DownPW It’s more straightforward than it sounds, although can be confusing if you look at it for the first time. I’ve just implemented it here. Can you provide your nginx config and your config.json (remove password obviously)

    Thanks

    here is my nginx.conf

    user www-data;
    worker_processes auto;
    pid /run/nginx.pid;
    worker_rlimit_nofile 70000; 
    include /etc/nginx/modules-enabled/*.conf;
    
    events {
    	# worker_connections 768;
    	worker_connections 4000;
    	#multi_accept on;
    }
    
    http {
    
    	##
    	# Basic Settings
    	##
    
    	#client_max_body_size 10M; 
    
    	# Maximum requests per IP
    	limit_req_zone $binary_remote_addr zone=flood:10m rate=100r/s; 
    
    	# Maximum connections per IP
    	limit_conn_zone $binary_remote_addr zone=ddos:1m; 
    
    	sendfile on;
    	tcp_nopush on;
    	tcp_nodelay on;
    	keepalive_timeout 65;
    	types_hash_max_size 2048;
    	# server_tokens off;
    
    	# server_names_hash_bucket_size 64;
    	# server_name_in_redirect off;
    
    	include /etc/nginx/mime.types;
    	default_type application/octet-stream;
    
    	##
    	# SSL Settings
    	##
    
    	ssl_protocols TLSv1 TLSv1.1 TLSv1.2 TLSv1.3; # Dropping SSLv3, ref: POODLE
    	ssl_prefer_server_ciphers on;
    
    	##
    	# Logging Settings
    	##
    
    	access_log /var/log/nginx/access.log;
    	error_log /var/log/nginx/error.log;
    
    	##
    	# Gzip Settings
    	##
    
    	gzip on;
    
    	gzip_min_length 1000; #test Violence
    	gzip_proxied off; #test Violence
    	gzip_types text/plain application/xml text/javascript application/javascript application/x-javascript text/css application/json;
    
    	# gzip_vary on;
    	# gzip_proxied any;
    	# gzip_comp_level 6;
    	# gzip_buffers 16 8k;
    	# gzip_http_version 1.1;
    	# gzip_types text/plain text/css application/json application/javascript text/xml application/xml application/xml+rss text/javascript;
    
    	##
    	# Virtual Host Configs
    	##
    
    	include /etc/nginx/conf.d/*.conf;
    	include /etc/nginx/sites-enabled/*;
    	server_names_hash_bucket_size 128;
    }
    
    
    #mail {
    #	# See sample authentication script at:
    #	# http://wiki.nginx.org/ImapAuthenticateWithApachePhpScript
    # 
    #	# auth_http localhost/auth.php;
    #	# pop3_capabilities "TOP" "USER";
    #	# imap_capabilities "IMAP4rev1" "UIDPLUS";
    # 
    #	server {
    #		listen     localhost:110;
    #		protocol   pop3;
    #		proxy      on;
    #	}
    # 
    #	server {
    #		listen     localhost:143;
    #		protocol   imap;
    #		proxy      on;
    #	}
    #}
    

    And my NodebB website.conf :

    server {
    	server_name XX-XX.net www.XX-XX.net;
    	listen XX.XX.XX.X;
    	listen [XX:XX:XX:XX::];
    	root /home/XX-XX/nodebb;
    	index index.php index.htm index.html;
    	access_log /var/log/virtualmin/XX-XX.net_access_log;
    	error_log /var/log/virtualmin/XX-XX.net_error_log;
    
            location / {
    				
    		limit_req zone=flood burst=100 nodelay; 
    		limit_conn ddos 10; 
    		proxy_read_timeout 180; 
    
                    proxy_set_header X-Real-IP $remote_addr;
                    proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
                    proxy_set_header Host $http_host;
                    proxy_set_header X-NginX-Proxy true;
    
                    proxy_pass http://127.0.0.1:4567/;
                    proxy_redirect off;
    
                    # Socket.IO Support
                    proxy_http_version 1.1;
                    proxy_set_header Upgrade $http_upgrade;
                    proxy_set_header Connection "upgrade";
            }
    
    	listen XX.XX.XX.XX:XX ssl http2;
    	listen [XX:XX:XX:XX::]:443 ssl http2;
    	ssl_certificate /etc/ssl/virtualmin/166195366750642/ssl.cert;
    	ssl_certificate_key /etc/ssl/virtualmin/166195366750642/ssl.key;
    	if ($scheme = http) {
    		rewrite ^/(?!.well-known)(.*) "https://XX-XX.net/$1" break;
    	}
    }
    

    and my nodebb config.json :

    {
        "url": "https://XX-XX.net",
        "secret": "XXXXXXXXXXXXXXXX",
        "database": "mongo",
        "mongo": {
            "host": "127.0.0.1",
            "port": "27017",
            "username": "XXXXXXXXXXX",
            "password": "XXXXXXXXXXX",
            "database": "nodebb",
            "uri": ""
        }
    }
    
  • @DownPW Can you check to ensure that redis-server is installed on your server before we proceed?

  • @phenomlab said in NODEBB: Nginx error performance & High CPU:

    @DownPW Can you check to ensure that redis-server is installed on your server before we proceed?

    just this command?

    sudo apt install redis
    

    And what about the impact on performance?

  • @DownPW You should use sudo apt install redis-server

    In terms of performance, your server should have enough resources for this. At any rate, only the session information is stored in redis and nothing else, so it’s essentially only valid for the length of the session and has no impact on the overall site in terms of speed.
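
    As a quick sanity check once it’s installed (a sketch, assuming the default Debian/Ubuntu packaging), you can confirm the service is up and responding:

    sudo systemctl status redis-server --no-pager   # should report active (running)
    redis-cli ping                                  # expected reply: PONG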

  • @DownPW Change your config.json so that it looks like the below (the port array will start three NodeBB processes)

    {
        "url": "https://XX-XX.net",
        "secret": "XXXXXXXXXXXXXXXX",
        "database": "mongo",
        "mongo": {
            "host": "127.0.0.1",
            "port": "27017",
            "username": "XXXXXXXXXXX",
            "password": "XXXXXXXXXXX",
            "database": "nodebb",
            "uri": ""
        },
        "redis": {
            "host":"127.0.0.1",
            "port":"6379",
            "database": 5
        },
        "port": ["4567", "4568", "4569"]  // will start three processes
    }
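
    Since config.json must be strict JSON (no comments allowed), it’s worth confirming the edited file still parses before restarting. A minimal check, run from the NodeBB directory and assuming python3 is available:

    python3 -m json.tool config.json > /dev/null && echo "config.json is valid JSON"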
    

    Your nginx.conf also needs modification (see commented steps for changes etc)

    # add the below block for nodeBB clustering
    upstream io_nodes {
        ip_hash;
        server 127.0.0.1:4567;
        server 127.0.0.1:4568;
        server 127.0.0.1:4569;
    }
    
    server {
    	server_name XX-XX.net www.XX-XX.net;
    	listen XX.XX.XX.X;
    	listen [XX:XX:XX:XX::];
    	root /home/XX-XX/nodebb;
    	index index.php index.htm index.html;
    	access_log /var/log/virtualmin/XX-XX.net_access_log;
    	error_log /var/log/virtualmin/XX-XX.net_error_log;
    
    # add the below block which will force all traffic into the cluster when referenced with @nodebb
        
    location @nodebb {
         proxy_pass http://io_nodes;
        }
    
            location / {
    				
    		limit_req zone=flood burst=100 nodelay; 
    		limit_conn ddos 10; 
    		proxy_read_timeout 180; 
    
                    proxy_set_header X-Real-IP $remote_addr;
                    proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
                    proxy_set_header Host $http_host;
                    proxy_set_header X-NginX-Proxy true;
                   # It's necessary to set @nodebb here so that the clustering works
                    proxy_pass @nodebb;
                    proxy_redirect off;
    
                    # Socket.IO Support
                    proxy_http_version 1.1;
                    proxy_set_header Upgrade $http_upgrade;
                    proxy_set_header Connection "upgrade";
            }
    
    	listen XX.XX.XX.XX:XX ssl http2;
    	listen [XX:XX:XX:XX::]:443 ssl http2;
    	ssl_certificate /etc/ssl/virtualmin/166195366750642/ssl.cert;
    	ssl_certificate_key /etc/ssl/virtualmin/166195366750642/ssl.key;
    	if ($scheme = http) {
    		rewrite ^/(?!.well-known)(.*) "https://XX-XX.net/$1" break;
    	}
    }
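
    Once both files are updated, a quick way to validate and apply the nginx side of the change (a sketch, assuming a systemd-based install):

    sudo nginx -t                    # test the new configuration for syntax errors
    sudo systemctl reload nginx      # apply it without dropping existing connections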
    
  • @phenomlab

    ok, I have to talk to the staff members first before I do anything.

    So there is nothing to put in the vhost XX-XX.net? Everything is in the nginx.conf and the config.json, if I understand correctly.

    In terms of disk space, memory and CPU, what is the impact?

    After all of that, we need to restart NodeBB, I imagine?

    These new ports (4567, 4568, 4569) must also be opened in the Hetzner interface?

    I’d like all the details if possible.

  • @DownPW said in NODEBB: Nginx error performance & High CPU:

    Everything is in the nginx.conf and the config.json, if I understand correctly

    Yes, that’s correct

    @DownPW said in NODEBB: Nginx error performance & High CPU:

    In terms of disk space, memory and CPU, what is the impact?

    There should be little change in terms of usage. What clustering essentially does is provide multiple processes to carry out the same tasks, far faster than a single process ever could.

    @DownPW said in NODEBB: Nginx error performance & High CPU:

    After all of that, we need to restart NodeBB, I imagine?

    Correct

    @DownPW said in NODEBB: Nginx error performance & High CPU:

    These new ports (4567, 4568, 4569) must also be opened in the Hetzner interface?

    That won’t be necessary, as they are not exposed publicly; they are only reachable by the reverse proxy on 127.0.0.1.
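
    If you want to confirm that once the cluster is running, a quick check (a sketch, assuming ss and curl are installed) is:

    sudo ss -tlnp | grep -E ':(4567|4568|4569)'   # shows which addresses the NodeBB processes are listening on
    curl -I http://127.0.0.1:4567/                # the first instance should answer locally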

  • OK.

    @phenomlab

    To summarise the steps in detail:

    1- Stop nodebb
    2- Stop iframely
    3- Stop nginx
    4- Install redis server : sudo apt install redis-server
    5- Change the NodeBB config.json file (can you verify this syntax please? The NodeBB documentation says “database”: 0 and not “database”: 5 - but maybe it’s just a name and I can use the same one as mongo, like “database”: nodebb. I also moved the port directive):

    {
        "url": "https://XX-XX.net",
        "secret": "XXXXXXXXXXXXXXXX",
        "database": "mongo",
       "port": [4567, 4568,4569],
        "mongo": {
            "host": "127.0.0.1",
            "port": "27017",
            "username": "XXXXXXXXXXX",
            "password": "XXXXXXXXXXX",
            "database": "nodebb",
            "uri": ""
        },
        "redis": {
            "host":"127.0.0.1",
            "port":"6379",
            "database": 5
        }
    }
    

    6- Change nginx.conf :

    # add the below block for nodeBB clustering
    upstream io_nodes {
        ip_hash;
        server 127.0.0.1:4567;
        server 127.0.0.1:4568;
        server 127.0.0.1:4569;
    }
    
    server {
    	server_name XX-XX.net www.XX-XX.net;
    	listen XX.XX.XX.X;
    	listen [XX:XX:XX:XX::];
    	root /home/XX-XX/nodebb;
    	index index.php index.htm index.html;
    	access_log /var/log/virtualmin/XX-XX.net_access_log;
    	error_log /var/log/virtualmin/XX-XX.net_error_log;
    
    # add the below block which will force all traffic into the cluster when referenced with @nodebb
        
    location @nodebb {
         proxy_pass http://io_nodes;
        }
    
            location / {
    				
    		limit_req zone=flood burst=100 nodelay; 
    		limit_conn ddos 10; 
    		proxy_read_timeout 180; 
    
                    proxy_set_header X-Real-IP $remote_addr;
                    proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
                    proxy_set_header Host $http_host;
                    proxy_set_header X-NginX-Proxy true;
                   # It's necessary to set @nodebb here so that the clustering works
                    proxy_pass @nodebb;
                    proxy_redirect off;
    
                    # Socket.IO Support
                    proxy_http_version 1.1;
                    proxy_set_header Upgrade $http_upgrade;
                    proxy_set_header Connection "upgrade";
            }
    
    	listen XX.XX.XX.XX:XX ssl http2;
    	listen [XX:XX:XX:XX::]:443 ssl http2;
    	ssl_certificate /etc/ssl/virtualmin/166195366750642/ssl.cert;
    	ssl_certificate_key /etc/ssl/virtualmin/166195366750642/ssl.key;
    	if ($scheme = http) {
    		rewrite ^/(?!.well-known)(.*) "https://XX-XX.net/$1" break;
    	}
    }
    

    7- Restart the redis server: systemctl restart redis-server.service
    8- Restart nginx
    9- Restart iframely
    10- Restart NodeBB
    11- Test the configuration (see the command sketch below)
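
    For steps 7, 8 and 10, the commands would look roughly like this (a sketch, assuming systemd manages redis and nginx, and that NodeBB lives in /home/XX-XX/nodebb; how iframely is restarted depends on how it was installed, e.g. its own systemd unit or pm2):

    sudo systemctl restart redis-server               # step 7
    sudo nginx -t && sudo systemctl restart nginx     # step 8: validate, then restart
    cd /home/XX-XX/nodebb && ./nodebb restart         # step 10: restart all NodeBB processes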

  • @DownPW said in NODEBB: Nginx error performance & High CPU:

    5- Change the NodeBB config.json file (can you verify this syntax please? The NodeBB documentation says “database”: 0 and not “database”: 5,

    All fine from my perspective - no real need to stop iFramely, but ok. The database number doesn’t matter as long as it’s not being used. You can use 0 if you wish - it’s in use on my side, hence 5.

  • @DownPW I’d add another set of steps here. When you move the sessions away from mongo to redis, you are going to encounter issues logging in, as the session tables will no longer match, meaning none of your users will be able to log in.

    To resolve this issue

    Review https://sudonix.com/topic/249/invalid-csrf-on-dev-install and implement all of the steps - note that you will also need the below connection string when connecting to mongodb

    mongo -u admin -p <password> --authenticationDatabase=admin
    

    Obviously, substitute <password> with the actual value. So in summary:

    1. Open the mongodb console
    2. Select your database - in my case use sudonix;
    3. Issue this command db.objects.update({_key: "config"}, {$set: {cookieDomain: ""}});
    4. Press enter and then type quit() into the mongodb shell
    5. Restart NodeBB
    6. Clear your browser cache
    7. Try logging in again
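
    Put together, the mongo shell part (steps 2-4) looks roughly like this - sudonix is just the example database name from above, so substitute your own NodeBB database:

    // select your NodeBB database
    use sudonix
    // reset the stored cookieDomain value in the NodeBB config object
    db.objects.update({_key: "config"}, {$set: {cookieDomain: ""}});
    // leave the mongodb shell
    quit()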
  • @phenomlab

    Hmm, OK. When should I perform these steps?

  • @DownPW After you’ve set up the cluster and restarted NodeBB

  • @phenomlab said in NODEBB: Nginx error performance & High CPU:

    @crazycells said in NODEBB: Nginx error performance & High CPU:

    4567, 4568, and 4569… Is your NodeBB set up this way?

    It’s not (I set their server up). Sudonix is not configured this way either, but from memory, this also requires redis to handle the session data. I may configure this site to do exactly that.

    yes, you might be right about the necessity. We have redis installed.

  • @DownPW since you pointed it out, I just remembered. Since we know when this crowd will come online on our forum, on that particular day we switch off iframely and all preview plugins. That also helps the pages open faster.

  • @phenomlab a general but related question. Since opening three ports helps, is it possible to increase this number? For example, can we run NodeBB on 5 ports at the same time to smooth out the web page experience, or is 3 the Goldilocks number for maximum efficiency?

  • @crazycells It’s not necessarily the “Goldilocks” standard - it really depends on the system resources you have available. You could easily extend it, as long as you also allow for the additional port(s) in the nginx.conf file.

    Personally, I don’t see the need for more than 3 though.
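
    For example, extending to a fourth process would just mean adding one more entry in each file, following the same pattern (a sketch; 4570 is an arbitrary unused local port). In config.json:

    "port": ["4567", "4568", "4569", "4570"]

    and in the upstream block in nginx.conf:

    upstream io_nodes {
        ip_hash;
        server 127.0.0.1:4567;
        server 127.0.0.1:4568;
        server 127.0.0.1:4569;
        server 127.0.0.1:4570;
    }

    followed by an nginx config test and reload, and a NodeBB restart.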

  • @phenomlab

    Ok, redis is working now. Thanks for your help 🙂

    I would now like to obtain the connecting clients’ real IP in the Nginx logs.

    I read that I need the ngx_http_realip_module for nginx, which is not active by default, but I don’t know whether Virtualmin has this module enabled.
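
    A quick way to check whether the nginx binary shipped with the distribution/Virtualmin was built with that module (a sketch, assuming shell access on the server):

    nginx -V 2>&1 | grep -o with-http_realip_module   # prints the flag if the module is compiled in

    If it is present, the module’s set_real_ip_from and real_ip_header directives control which proxy address is trusted and which header carries the original client IP, e.g. (hypothetical values):

    set_real_ip_from 127.0.0.1;        # address of the trusted upstream proxy
    real_ip_header X-Forwarded-For;    # header containing the original client IP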


