Increase or control the limit of FBOs per process

It looks like there is a hard limit on the number of framebuffer objects that can be created per process, and this limit is not bound by VRAM.

Here is an example app that takes the number of FBOs to create as a command-line argument:

#include <EGL/egl.h>
#include <GLES2/gl2.h>
#include <cstdio>
#include <thread>
#include <chrono>
#include <vector>
#include <atomic>
#include <cstdlib>

using std::printf;
std::atomic_int nComplete;

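// Each worker thread creates its own 1x1 pbuffer surface and a context in the
// shared group, then creates a single FBO in it and checks its completeness.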
void run_test(int i, EGLDisplay d, EGLConfig cfg, EGLContext share_ctx) {
  const EGLint pbuf_attrs[] = {
      EGL_WIDTH, 1, EGL_HEIGHT, 1,
      EGL_NONE
  };
  EGLSurface sfc = eglCreatePbufferSurface(d, cfg, pbuf_attrs);
  const EGLint ctx_attrs[] = {
      EGL_CONTEXT_CLIENT_VERSION, 2,
      EGL_NONE
  };
  if(!sfc) {
    printf("eglCreatePbufferSurface failed: 0x%X\n", eglGetError());
    return;
  }
  EGLContext ctx = eglCreateContext(d, cfg, share_ctx, ctx_attrs);
  if(!ctx) {
    printf("eglCreateContext failed!\n");
    return;
  }
  EGLBoolean b = eglMakeCurrent(d, sfc, sfc, ctx);
  if(!b) {
    printf("eglMakeCurrent failed!\n");
    return;
  }

  GLuint fb1, tex;
  GLint fmt=GL_RGBA, mWidth=1, mHeight=1, type=GL_UNSIGNED_BYTE;
  // set up texture
  glGenTextures(1, &tex);
  glBindTexture(GL_TEXTURE_2D, tex);
  glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
  glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
  glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
  glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
  glTexImage2D(GL_TEXTURE_2D, 0, fmt, mWidth, mHeight, 0, fmt, type, NULL);
  glBindTexture(GL_TEXTURE_2D, 0);

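  // Create an FBO in this context and attach the 1x1 texture as its color buffer.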
  glGenFramebuffers(1, &fb1);
  printf("ctx[%d] fb: %d\n", i, fb1);
  glBindFramebuffer(GL_FRAMEBUFFER, fb1);
  glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, tex, 0);
  GLenum status = glCheckFramebufferStatus(GL_FRAMEBUFFER);
  if (status != GL_FRAMEBUFFER_COMPLETE) {
    printf("fb[%d] incomplete: 0x%X\n",i , status);
    return;
  } else {
    nComplete++;
  }
//  printf("ctx[%d] is fb: %d\n", i, glIsFramebuffer(fb1));
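  // Keep the context (and thus the FBO) alive while the remaining threads run.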
  std::this_thread::sleep_for(std::chrono::seconds(10));
}

int main(int argc, char** argv) {
  if(argc < 2) {
    printf("usage: %s <num_fbos>\n", argv[0]);
    return 1;
  }
  int N = std::atoi(argv[1]);

  EGLDisplay d = eglGetDisplay(EGL_DEFAULT_DISPLAY);
  if(!d) {
    printf("eglGetDisplay failed!\n");
    return 1;
  }
  EGLBoolean b = eglInitialize(d, NULL, NULL);
  if(!b) {
    printf("eglInitialize failed!\n");
    return 1;
  }
  const EGLint attrs[] = {
      EGL_RENDERABLE_TYPE, EGL_OPENGL_ES2_BIT,
      EGL_SURFACE_TYPE, EGL_PBUFFER_BIT,
      EGL_NONE
  };
  EGLint num_cfg;
  EGLConfig cfg;
  if(!eglChooseConfig(d, attrs, &cfg, 1, &num_cfg) || !num_cfg) {
    printf("eglChooseConfig failed\n!");
    return 1;
  }
  const EGLint ctx_attrs[] = {
      EGL_CONTEXT_CLIENT_VERSION, 2,
      EGL_NONE
  };
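  // Parent context of the share group; every worker context is created against it.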
  EGLContext share_ctx = eglCreateContext(d, cfg, NULL, ctx_attrs);
  EGLContext ctx = eglCreateContext(d, cfg, share_ctx, ctx_attrs);
  if(!ctx) {
    printf("eglCreateContext failed: 0x%X\n", eglGetError());
    return 1;
  }
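  // Note: making a context current without a draw/read surface requires
  // EGL_KHR_surfaceless_context support.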
  b = eglMakeCurrent(d, EGL_NO_SURFACE, EGL_NO_SURFACE, ctx);
  if(!b) {
    printf("eglMakeCurrent failed!\n");
    return 1;
  }
  
  printf("VENDOR: %s\nVERSION: %s\nEXTENSIONS: %s\n", glGetString(GL_VENDOR), glGetString(GL_VERSION), glGetString(GL_EXTENSIONS));

  std::vector<std::thread> threads;
  for(int i=0; i<N; i++) {
    threads.emplace_back([i, d, cfg, share_ctx]() {
      run_test(i, d, cfg, share_ctx);
    });
  }
  for(auto& t: threads) {
    t.join();
  }
  printf("complete fbs: %d/%d\n", nComplete.load(), N);

  return 0;
}

Linked against /usr/lib/nvidia-381/libEGL.so and /usr/lib/nvidia-381/libGLESv2.so, plus -lpthread. Requires C++11 to be enabled.
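For reference, a build command along these lines should work (fbo_limit.cpp is just a placeholder name for the source above):

g++ -std=c++11 fbo_limit.cpp -o fbo_limit -L/usr/lib/nvidia-381 -lEGL -lGLESv2 -lpthread

The program takes the number of threads/FBOs to create as its single argument, e.g. ./fbo_limit 300.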

On Ubuntu 16.04 with the nvidia-381 driver on an AWS G3 instance (Tesla M60), the maximum number of complete FBOs per process was 255. VRAM usage did not exceed 15%.
Can this limit be controlled or increased somehow?

Has this problem been solved? I have the same problem. Is it a bug in the graphics driver?