This repository has been archived by the owner on Jan 26, 2022. It is now read-only.

Commit

add file descriptor and process limits
Change-Id: I9fe74de30ea1fc9a6de896c87e316239bcab6fb7
Signed-off-by: Pin Xie <pxie@vmware.com>
pxie committed Aug 9, 2012
1 parent 1440be2 commit f972c84
Showing 3 changed files with 97 additions and 1 deletion.
1 change: 1 addition & 0 deletions sinatra/app_quota_app/Gemfile
@@ -1,3 +1,4 @@
source "http://rubygems.org"

gem 'sinatra'
gem 'json'
2 changes: 2 additions & 0 deletions sinatra/app_quota_app/Gemfile.lock
@@ -1,6 +1,7 @@
GEM
  remote: http://rubygems.org/
  specs:
    json (1.7.3)
    rack (1.4.1)
    rack-protection (1.2.0)
      rack
@@ -14,4 +15,5 @@ PLATFORMS
  ruby

DEPENDENCIES
  json
  sinatra
95 changes: 94 additions & 1 deletion sinatra/app_quota_app/app.rb
@@ -1,4 +1,5 @@
require 'sinatra'
require 'json'

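# NOTE: despite the name, MILLION is 1 MiB (1 * 1024 * 1024 = 1,048,576)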
MILLION = 1 * 1024 * 1024
LARGE_INTEGER = 2 ** 64
@@ -7,10 +8,27 @@
MAX_WORKERS = 10
MAX_DIRECTORIES = 10

SIXTY_SECONDES = 60

$data = {}

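# GET /data: dump the hash of tracked allocations as JSON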
get '/data' do
  $data.to_json
end

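# DELETE /data?n=<count>: drop n tracked entries, closing any File handles first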
delete '/data' do
  n = params['n'].to_i
  n = 1 if n < 1
  n.times do
    key = $data.keys.first
    case $data[key]
    when File
      $data[key].close
    else
      # do nothing
    end
    $data.delete(key)
  end
end

get '/eat/ram' do
@@ -50,6 +68,81 @@
  puts "generate #{n}MB files"
end

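# GET /fds: shell out to lsof for this process and return the open file
# descriptors as JSON (the count includes lsof's header line)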
get '/fds' do
  data = {}
  results = `lsof -p #{Process.pid}`.split("\n")
  data[:count] = results.length
  data[:details] = results
  data.each do |k, v|
    puts k
    if v.class == Array
      v.each { |item| puts item }
    else
      puts v
    end
  end
  data.to_json
end

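# GET /allocate/fds?n=<count>: write a 512-byte temp file with dd, copy it n
# times across MAX_DIRECTORIES subdirectories using MAX_WORKERS threads, and
# keep every copy open in $data so the file descriptors stay allocated.
# NOTE: relies on FileUtils; a require 'fileutils' is not visible in this diff.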
get '/allocate/fds' do
  n = params['n'].to_i
  n = 1 if n < 1
  src_file = File.join(Dir.pwd, "tempfile")
  puts `dd if=/dev/urandom of=#{src_file} bs=1b count=1`

  queue = Queue.new
  n.times do |job|
    queue << job
  end

  MAX_DIRECTORIES.times do |index|
    Dir.mkdir(index.to_s) unless Dir.exist?(index.to_s)
  end

  # MAX_WORKERS concurrent workers allocate file descriptors
  threads = []
  lock = Mutex.new
  MAX_WORKERS.times do
    threads << Thread.new do
      until queue.empty?
        job = queue.pop
        dirname = (job % MAX_DIRECTORIES).to_s
        filename = rand_str
        filepath = File.join(Dir.pwd, dirname, rand_str)
        begin
          FileUtils.cp(src_file, filepath)
          lock.synchronize do
            $data[filename] = open(filepath)
          end
        rescue Exception => e
          puts e.to_s
          break
        end
      end
    end
  end
  threads.each { |t| t.join }
  FileUtils.rm(src_file)
  puts "allocate #{n} file descriptors"
end

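# GET /allocate/process?n=<count>: spawn n sleeping threads (each thread counts
# toward the operating system's process/task limit) and track them in $data[:theads]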
get '/allocate/process' do
  n = params['n'].to_i
  n = 1 if n < 1
  $data[:theads] = Queue.new
  n.times do
    begin
      $data[:theads] << Thread.new do
        sleep(60)
      end
    rescue Exception => e
      puts e.to_s
      break
    end
  end
  "allocate #{n} threads"
end

def generate_data(size)
  size = 1 if size < 1
  (1..size).to_a.each do |index|
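For orientation, here is a minimal sketch of how the endpoints added in this commit might be exercised from a Ruby client. The base URL is a placeholder and this helper script is not part of the repository.

require 'net/http'
require 'json'
require 'uri'

# Placeholder route; substitute the URL the app is actually bound to.
BASE = URI('http://app-quota-app.example.com')

def fetch(path)
  Net::HTTP.get(URI.join(BASE.to_s, path))
end

fetch('/allocate/fds?n=100')                  # open 100 file descriptors
fds = JSON.parse(fetch('/fds'))               # inspect what lsof reports
puts "descriptors reported: #{fds['count']}"
fetch('/allocate/process?n=50')               # spawn 50 sleeping threads

# /data is registered with delete, so release tracked entries via DELETE
Net::HTTP.start(BASE.host, BASE.port) do |http|
  http.send_request('DELETE', '/data?n=100')
end

The /fds response mirrors the data hash built in the handler above, so fds['count'] is the number of lsof output lines (open descriptors plus the header line).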

