Protecting PeerTube: fail2ban, limit_req, my settings

After cancelling the Cloudflare services, I proceeded as follows in order to fully protect PeerTube on my own.
I installed fail2ban and added filters for Nginx; each filter goes into the filter.d directory.
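
If fail2ban is not installed yet, it comes straight from the distribution packages (Debian/Ubuntu shown here, which matches the log paths used below; adapt the command to your own package manager):

sudo apt update && sudo apt install fail2ban

The first filter: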

nano /etc/fail2ban/filter.d/nginx-noconnect.conf

Contents:

[Definition]

failregex = ^<HOST> -.*CONNECT

ignoreregex =

and

nano /etc/fail2ban/filter.d/nginx-noproxy.conf

Contents:

[Definition]

failregex = ^<HOST> -.*GET http.*

ignoreregex =

nano /etc/fail2ban/filter.d/http1.conf

[Definition]

failregex = ^<HOST> -.*HTTP/1\.0.*$
            ^<HOST> -.*HTTP/1\.1.*$

ignoreregex =

nano /etc/fail2ban/filter.d/nginx-badbots.conf

[Definition]

badbotscustom = EmailCollector|WebEMailExtrac|TrackBack/1\.02|sogou music spider|(?:Mozilla/\d+\.\d+ )?Jorgee
badbots = Atomic_Email_Hunter/4\.0|atSpider/1\.0|autoemailspider|bwh3_user_agent|China Local Browse 2\.6|ContactBot/0\.2|ContentSmartz|DataCha0s/2\.0|DBrowse 1\.4b|DBrowse 1\.4d|Demo Bot DOT 16b|Demo Bot Z 16b|DSurf15a 01|DSurf15a 71|DSurf15a 81|DSurf15a VA|EBrowse 1\.4b|Educate Search VxB|EmailSiphon|EmailSpider|EmailWolf 1\.00|ESurf15a 15|ExtractorPro|Franklin Locator 1\.8|FSurf15a 01|Full Web Bot 0416B|Full Web Bot 0516B|Full Web Bot 2816B|Guestbook Auto Submitter|Industry Program 1\.0\.x|ISC Systems iRc Search 2\.1|IUPUI Research Bot v 1\.9a|LARBIN-EXPERIMENTAL \(efp@gmx\.net\)|LetsCrawl\.com/1\.0 \+http\://letscrawl\.com/|Lincoln State Web Browser|LMQueueBot/0\.2|LWP\:\:Simple/5\.803|Mac Finder 1\.0\.xx|MFC Foundation Class Library 4\.0|Microsoft URL Control - 6\.00\.8xxx|Missauga Locate 1\.0\.0|Missigua Locator 1\.9|Missouri College Browse|Mizzu Labs 2\.2|Mo College 1\.9|MVAClient|Mozilla/2\.0 \(compatible; NEWT ActiveX; Win32\)|Mozilla/3\.0 \(compatible; Indy Library\)|Mozilla/3\.0 \(compatible; scan4mail \(advanced version\) http\://www\.peterspages\.net/?scan4mail\)|Mozilla/4\.0 \(compatible; Advanced Email Extractor v2\.xx\)|Mozilla/4\.0 \(compatible; Iplexx Spider/1\.0 http\://www\.iplexx\.at\)|Mozilla/4\.0 \(compatible; MSIE 5\.0; Windows NT; DigExt; DTS Agent|Mozilla/4\.0 efp@gmx\.net|Mozilla/5\.0 \(Version\: xxxx Type\:xx\)|NameOfAgent \(CMS Spider\)|NASA Search 1\.0|Nsauditor/1\.x|PBrowse 1\.4b|PEval 1\.4b|Poirot|Port Huron Labs|Production Bot 0116B|Production Bot 2016B|Production Bot DOT 3016B|Program Shareware 1\.0\.2|PSurf15a 11|PSurf15a 51|PSurf15a VA|psycheclone|RSurf15a 41|RSurf15a 51|RSurf15a 81|searchbot admin@google\.com|ShablastBot 1\.0|snap\.com beta crawler v0|Snapbot/1\.0|Snapbot/1\.0 \(Snap Shots&#44; \+http\://www\.snap\.com\)|sogou develop spider|Sogou Orion spider/3\.0\(\+http\://www\.sogou\.com/docs/help/webmasters\.htm#07\)|sogou spider|Sogou web spider/3\.0\(\+http\://www\.sogou\.com/docs/help/webmasters\.htm#07\)|sohu agent|SSurf15a 11 |TSurf15a 11|Under the Rainbow 2\.2|User-Agent\: Mozilla/4\.0 \(compatible; MSIE 6\.0; Windows NT 5\.1\)|VadixBot|WebVulnCrawl\.unknown/1\.0 libwww-perl/5\.803|Wells Search II|WEP Search 00

failregex = ^<HOST> -.*"(GET|POST|HEAD).*HTTP.*"(?:%(badbots)s|%(badbotscustom)s)"$

ignoreregex =

datepattern = ^[^\[]*\[({DATE})
              {^LN-BEG}

I will keep looking for ways to complete this list with current bots, but I haven't found anything yet. There is a ready-made product for this, but I haven't used it so far.
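
Before wiring the filters into jails, each one can be tested against the real access log with fail2ban-regex, which reports how many log lines the failregex matches, for example:

fail2ban-regex /var/log/nginx/access.log /etc/fail2ban/filter.d/nginx-badbots.conf
fail2ban-regex /var/log/nginx/access.log /etc/fail2ban/filter.d/http1.conf
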
Next, we create jail.local:

nano /etc/fail2ban/jail.local

Fill it in:

[http1]
enabled = true
port = http,https
filter = http1
logpath = /var/log/nginx/access.log
maxretry = 5
bantime = 10d

[ssh-iptables]

enabled  = true
filter   = sshd
# sshd failures are logged to auth.log, not to the nginx access log
logpath  = /var/log/auth.log
maxretry = 2
bantime  = 100d

[recidive]

enabled  = true
filter   = recidive
logpath  = /var/log/fail2ban.log
maxretry = 5
findtime = 1h
bantime  = 20d

[nginx-http-auth]

enabled = true
port = http,https
logpath = /var/log/nginx/*error.log
findtime = 10
bantime = 100d
maxretry = 2

[nginx-noconnect]

enabled  = true
port     = http,https
filter   = nginx-noconnect
logpath  = /var/log/nginx/access.log
maxretry = 2
bantime = 100d

[nginx-noscript]

enabled  = true
port     = http,https
filter   = nginx-noscript
logpath  = /var/log/nginx/access.log
maxretry = 2
bantime = 100d

[nginx-badbots]

enabled  = true
port     = http,https
filter   = nginx-badbots
logpath  = /var/log/nginx/access.log
maxretry = 2
bantime = 100d

[nginx-noproxy]

enabled  = true
port     = http,https
filter   = nginx-noproxy
logpath  = /var/log/nginx/access.log
maxretry = 2
bantime = 100d

[nginx-botsearch]

enabled  = true
port     = http,https
filter   = nginx-botsearch
logpath  = /var/log/nginx/access.log
maxretry = 2
bantime = 100d

[nginx-bad-request]

enabled  = true
port     = http,https
filter   = nginx-bad-request
logpath  = /var/log/nginx/access.log
maxretry = 2
bantime = 100d
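
On top of these jails I would also add an ignoreip line in a [DEFAULT] section (a small sketch; the addresses are examples, add your own admin IP) so that an over-eager filter can never ban localhost or yourself:

[DEFAULT]
# addresses that are never banned
ignoreip = 127.0.0.1/8 ::1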

Restart and check the status:

sudo systemctl restart fail2ban && sleep 2 && sudo systemctl status fail2ban
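
Individual jails and the addresses they have banned can then be inspected with fail2ban-client, and an IP banned by mistake can be released again (the IP below is just an example):

sudo fail2ban-client status
sudo fail2ban-client status nginx-badbots
sudo fail2ban-client set nginx-badbots unbanip 203.0.113.10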

I also configured limit_req in /etc/nginx/sites-available/peertube.
This is still a debatable point: the values I set block loading when, for example, the admin panel lists 100 videos per page, because that triggers a burst of API requests all at once. You don't do that every day, but I'm still thinking about how to tune it differently; one possible variant is sketched after the configuration below.

limit_req_zone $binary_remote_addr zone=peertube:10m rate=30r/s;

server {
  listen 80;
  listen [::]:80;
  server_name angeltales.angellive.ru;

  location /.well-known/acme-challenge/ {
    default_type "text/plain";
    root /var/www/certbot;
  }
  location / { return 301 https://$host$request_uri; }
}

upstream backend {
  server 127.0.0.1:9000;
}

server {
  listen 443 ssl http2;
  listen [::]:443 ssl http2;
  server_name angeltales.angellive.ru;

  access_log /var/log/nginx/peertube.access.log; # reduce I/O with buffer=10m flush=5m
  error_log  /var/log/nginx/peertube.error.log;

  ##
  # Certificates
  # you need a certificate to run in production. see https://letsencrypt.org/
  ##
  ssl_certificate     /etc/letsencrypt/live/angeltales.angellive.ru/fullchain.pem;
  ssl_certificate_key /etc/letsencrypt/live/angeltales.angellive.ru/privkey.pem;

  location ^~ '/.well-known/acme-challenge' {
    default_type "text/plain";
    root /var/www/certbot;
  }

  ##
  # Security hardening (as of Nov 15, 2020)
  # based on Mozilla Guideline v5.6
  ##

  ssl_protocols             TLSv1.2 TLSv1.3;
  ssl_prefer_server_ciphers on;
  ssl_ciphers               ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256; # add ECDHE-RSA-AES256-SHA if you want compatibility with Android 4
  ssl_session_timeout       1d; # defaults to 5m
  ssl_session_cache         shared:SSL:10m; # estimated to 40k sessions
  ssl_session_tickets       off;
  ssl_stapling              on;
  ssl_stapling_verify       on;
  # HSTS (https://hstspreload.org), requires to be copied in 'location' sections that have add_header directives
  #add_header Strict-Transport-Security "max-age=63072000; includeSubDomains";

  ##
  # Application
  ##

  location @api {
    proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
    proxy_set_header Host            $host;
    proxy_set_header X-Real-IP       $remote_addr;

    client_max_body_size  100k; # default is 1M

    proxy_connect_timeout 10m;
    proxy_send_timeout    10m;
    proxy_read_timeout    10m;
    send_timeout          10m;

    proxy_pass http://backend;
  }

  location / {
    limit_req zone=peertube burst=10 nodelay; # Apply rate limiting to the entire server
    try_files /dev/null @api;
  }

  location ~ ^/api/v1/videos/(upload-resumable|([^/]+/source/replace-resumable))$ {
    client_max_body_size    0;
    proxy_request_buffering off;

    try_files /dev/null @api;
  }

  location ~ ^/api/v1/users/[^/]+/imports/import-resumable$ {
    client_max_body_size    0;
    proxy_request_buffering off;

    try_files /dev/null @api;
  }

  location ~ ^/api/v1/videos/(upload|([^/]+/studio/edit))$ {
    limit_except POST HEAD { deny all; }

    # This is the maximum upload size, which roughly matches the maximum size of a video file.
    # Note that temporary space is needed equal to the total size of all concurrent uploads.
    # This data gets stored in /var/lib/nginx by default, so you may want to put this directory
    # on a dedicated filesystem.
    client_max_body_size                      12G; # default is 1M
    add_header            X-File-Maximum-Size 8G always; # inform backend of the set value in bytes before mime-encoding (x * 1.4 >= client_max_body_size)

    try_files /dev/null @api;
  }

  location ~ ^/api/v1/runners/jobs/[^/]+/(update|success)$ {
    client_max_body_size                      12G; # default is 1M
    add_header            X-File-Maximum-Size 8G always; # inform backend of the set value in bytes before mime-encoding (x * 1.4 >= client_max_body_size)

    try_files /dev/null @api;
  }

  location ~ ^/api/v1/(videos|video-playlists|video-channels|users/me) {
    client_max_body_size                      6M; # default is 1M
    add_header            X-File-Maximum-Size 4M always; # inform backend of the set value in bytes before mime-encoding (x * 1.4 >= client_max_body_size)

    try_files /dev/null @api;
  }

  ##
  # Websocket
  ##

  location @api_websocket {
    proxy_http_version 1.1;
    proxy_set_header   X-Forwarded-For $proxy_add_x_forwarded_for;
    proxy_set_header   Host            $host;
    proxy_set_header   X-Real-IP       $remote_addr;
    proxy_set_header   Upgrade         $http_upgrade;
    proxy_set_header   Connection      "upgrade";

    proxy_pass http://backend;
  }

  location /socket.io {
    try_files /dev/null @api_websocket;
  }

  location /tracker/socket {
    # Peers send a message to the tracker every 15 minutes
    # Don't close the websocket before then
    proxy_read_timeout 15m; # default is 60s

    try_files /dev/null @api_websocket;
  }

  # Plugin websocket routes
  location ~ ^/plugins/[^/]+(/[^/]+)?/ws/ {
    try_files /dev/null @api_websocket;
  }

  ##
  # Performance optimizations
  # For extra performance please refer to https://github.com/denji/nginx-tuning
  ##

  root /var/www/peertube/storage;

  # Enable compression for JS/CSS/HTML, for improved client load times.
  # It might be nice to compress JSON/XML as returned by the API, but
  # leaving that out to protect against potential BREACH attack.
  gzip              on;
  gzip_vary         on;
  gzip_types        # text/html is always compressed by HttpGzipModule
                    text/css
                    application/javascript
                    font/truetype
                    font/opentype
                    application/vnd.ms-fontobject
                    image/svg+xml;
  gzip_min_length   1000; # default is 20 bytes
  gzip_buffers      16 8k;
  gzip_comp_level   2; # default is 1

  client_body_timeout       30s; # default is 60
  client_header_timeout     10s; # default is 60
  send_timeout              10s; # default is 60
  keepalive_timeout         10s; # default is 75
  resolver_timeout          10s; # default is 30
  reset_timedout_connection on;
  proxy_ignore_client_abort on;

  tcp_nopush                on; # send headers in one piece
  tcp_nodelay               on; # don't buffer data sent, good for small data bursts in real time

  # If you have a small /var/lib partition, it could be interesting to store temp nginx uploads in a different place
  # See https://nginx.org/en/docs/http/ngx_http_core_module.html#client_body_temp_path
  #client_body_temp_path /var/www/peertube/storage/nginx/;

  # Bypass PeerTube for performance reasons. Optional.
  # Should be consistent with client-overrides assets list in client.ts server controller
  location ~ ^/client/(assets/images/(icons/icon-36x36\.png|icons/icon-48x48\.png|icons/icon-72x72\.png|icons/icon-96x96\.png|icons/icon-144x144\.png|icons/icon-192x192\.png|icons/icon-512x512\.png|logo\.svg|favicon\.png|default-playlist\.jpg|default-avatar-account\.png|default-avatar-account-48x48\.png|default-avatar-video-channel\.png|default-avatar-video-channel-48x48\.png))$ {
    add_header Cache-Control "public, max-age=31536000, immutable"; # Cache 1 year

    root /var/www/peertube;

    try_files /storage/client-overrides/$1 /peertube-latest/client/dist/$1 @api;
  }

  # Bypass PeerTube for performance reasons. Optional.
  location ~ ^/client/(.*\.(js|css|png|svg|woff2|otf|ttf|woff|eot))$ {
    add_header Cache-Control "public, max-age=31536000, immutable"; # Cache 1 year

    alias /var/www/peertube/peertube-latest/client/dist/$1;
  }

  location ~ ^(/static/(webseed|web-videos|streaming-playlists/hls)/private/)|^/download {
    # We can't rate limit a try_files directive, so we need to duplicate @api

    proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
    proxy_set_header Host            $host;
    proxy_set_header X-Real-IP       $remote_addr;

    proxy_limit_rate 5M;

    proxy_pass http://backend;
  }

  # Bypass PeerTube for performance reasons. Optional.
  location ~ ^/static/(webseed|web-videos|redundancy|streaming-playlists)/ {
    limit_rate_after            5M;

    set $peertube_limit_rate  5M;

    # Use this line with nginx >= 1.17.0
    limit_rate $peertube_limit_rate;
    # Or this line with nginx < 1.17.0
    # set $limit_rate $peertube_limit_rate;

    if ($request_method = 'OPTIONS') {
      add_header Access-Control-Allow-Origin  '*';
      add_header Access-Control-Allow-Methods 'GET, OPTIONS';
      add_header Access-Control-Allow-Headers 'Range,DNT,X-CustomHeader,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type';
      add_header Access-Control-Max-Age       1728000; # Preflight request can be cached 20 days
      add_header Content-Type                 'text/plain charset=UTF-8';
      add_header Content-Length               0;
      return 204;
    }

    if ($request_method = 'GET') {
      add_header Access-Control-Allow-Origin  '*';
      add_header Access-Control-Allow-Methods 'GET, OPTIONS';
      add_header Access-Control-Allow-Headers 'Range,DNT,X-CustomHeader,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type';

      # Don't spam access log file with byte range requests
      access_log off;
    }

    # Enabling the sendfile directive eliminates the step of copying the data into the buffer
    # and enables direct copying data from one file descriptor to another.
    sendfile on;
    sendfile_max_chunk 1M; # prevent one fast connection from entirely occupying the worker process. should be > 800k.
    aio threads;

    # web-videos is the name of the directory mapped to the `storage.web_videos` key in your PeerTube configuration
    rewrite ^/static/webseed/(.*)$ /web-videos/$1 break;
    rewrite ^/static/(.*)$         /$1        break;

    try_files $uri @api;
  }
}
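
As for the admin panel issue mentioned above, the variant I'm considering (an untested sketch; the burst and delay values are only examples, and delay= needs nginx >= 1.15.7) is to keep the same zone but queue short bursts with delay= instead of rejecting them outright with nodelay, so that a page listing many videos gets slowed down rather than hit with errors:

  location / {
    # let a burst of up to 100 requests through, delaying everything beyond the first 30
    limit_req zone=peertube burst=100 delay=30;
    limit_req_status 429; # answer "Too Many Requests" instead of the default 503
    try_files /dev/null @api;
  }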

If you have anything to add or change, I would be glad to hear from you. Thank you.