Commit

version 1.0-0
Anatol Sargin authored and gaborcsardi committed Feb 26, 2009
0 parents commit e6415db
Showing 33 changed files with 2,757 additions and 0 deletions.
22 changes: 22 additions & 0 deletions DESCRIPTION
@@ -0,0 +1,22 @@
Package: DAKS
Type: Package
Title: Data Analysis and Knowledge Spaces
Version: 1.0-0
Date: 2009-02-26
Author: Anatol Sargin <anatol.sargin@math.uni-augsburg.de>,
Ali Uenlue <ali.uenlue@math.uni-augsburg.de>
Maintainer: Anatol Sargin <anatol.sargin@math.uni-augsburg.de>,
Ali Uenlue <ali.uenlue@math.uni-augsburg.de>
Description: Functions and example datasets for the psychometric
theory of knowledge spaces. This package implements data
analysis methods and procedures for simulating data and
transforming different formulations in knowledge space
theory.
Depends: R (>= 2.8.1), relations, sets
Suggests: Rgraphviz
LazyLoad: yes
LazyData: yes
License: GPL (>= 2)
URL: http://stats.math.uni-augsburg.de/mitarbeiter/sargin/,
http://www.math.uni-augsburg.de/~uenlueal/
Packaged: Thu Feb 26 14:42:35 2009; uenlueal
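
Not part of the commit: a minimal sketch of installing and loading the package, assuming it is available from CRAN. The Depends field pulls in relations and sets automatically; the suggested Rgraphviz package (used when plotting Hasse diagrams) has to be installed separately from Bioconductor.

install.packages("DAKS")   # assumes the package is on CRAN
library(DAKS)              # attaches the relations and sets packages via Depends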
8 changes: 8 additions & 0 deletions NAMESPACE
@@ -0,0 +1,8 @@
export(corr_iita, hasse, iita, imp2state, ind_gen, mini_iita,
ob_counter, orig_iita, pattern, pop_iita, pop_variance, simu,
state2imp, variance)

# internal functions; not intended to be visible to users
# export()

import(graphics, relations, sets)
48 changes: 48 additions & 0 deletions R/corr_iita.r
@@ -0,0 +1,48 @@
##################
# corrected IITA #
##################

##############################################
# #
# This function performs the corrected #
# inductive item tree analysis procedure #
# and returns the corresponding diff values. #
# #
##############################################

corr_iita<-function(dataset, A){
  b<-ob_counter(dataset)
  m<-ncol(dataset)
  n<-nrow(dataset)

  bs_neu<-list()
  for(i in 1:length(A)){
    bs_neu[[i]]<-matrix(0,ncol = m, nrow = m)
  }
  diff_value_neu<-rep(0,length(A))

  error_neu<-rep(0,length(A))

  #computation of error rate
  for(k in 1:length(A)){
    for(i in A[[k]]){
      error_neu[k]<-error_neu[k] + ((b[as.integer(i[1]), as.integer(i[2])]) / sum(dataset[,as.integer(i[2])]))
    }
    if(set_is_empty(A[[k]])){error_neu[k]<-NA}
    if(set_is_empty(A[[k]]) == FALSE) {error_neu[k]<-error_neu[k] / length(A[[k]])}
  }

  #computation of diff values
  for(k in 1:length(A)){
    if(set_is_empty(A[[k]])){diff_value_neu[k]<-NA}
    else{
      for(i in A[[length(A)]]){
        if(is.element(set(i), A[[k]])) {bs_neu[[k]][as.integer(i[1]),as.integer(i[2])]<-error_neu[k] * sum(dataset[,as.integer(i[2])])}
        if(is.element(set(i), A[[k]]) == FALSE && is.element(set(tuple(as.integer(i[2]),as.integer(i[1]))), A[[k]]) == FALSE){bs_neu[[k]][as.integer(i[1]),as.integer(i[2])]<-(1- sum(dataset[,as.integer(i[1])]) / n) * sum(dataset[,as.integer(i[2])])}
        if(is.element(set(i), A[[k]]) == FALSE && is.element(set(tuple(as.integer(i[2]),as.integer(i[1]))), A[[k]]) == TRUE){bs_neu[[k]][as.integer(i[1]),as.integer(i[2])]<-sum(dataset[,as.integer(i[2])]) - sum(dataset[,as.integer(i[1])]) + sum(dataset[,as.integer(i[1])]) * error_neu[k]}
      }
      diff_value_neu[k]<-sum((b - bs_neu[[k]])^2) / (m^2 - m)
    }
  }
  return(diff_value_neu)
}
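
For orientation only (not part of the commit), a usage sketch of corr_iita on a small artificial binary response matrix; the list of competing quasi orders A is obtained with ind_gen(ob_counter(dataset)), exactly as iita() does below.

library(DAKS)
set.seed(1)
dataset <- matrix(rbinom(5 * 200, 1, 0.5), ncol = 5)   # hypothetical 200 x 5 0/1 data
A <- ind_gen(ob_counter(dataset))                       # competing quasi orders
corr_iita(dataset, A)                                   # one diff value per quasi order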
44 changes: 44 additions & 0 deletions R/hasse.r
@@ -0,0 +1,44 @@
############
# Plotting #
############

################################################################
# #
# This function plots the Hasse diagram of a surmise relation. #
# #
################################################################

hasse<-function(imp, items){
  struct<-relation(domain = list(1:items,1:items),graph = imp)

  #computation of parallel items
  parallel<-list()
  k<-1
  for(i in 1:items){
    for(j in i:items){
      if(relation_incidence(struct)[i,j] ==1 && relation_incidence(struct)[j,i] ==1){
        parallel[[k]]<-tuple(i,j)
        k<-k+1
      }
    }
  }

  #collapsing of parallel items
  if(length(parallel) > 0){
    pardrop<-vector(length = length(parallel))
    for(i in 1:length(parallel)){
      pardrop[i]<-as.integer(parallel[[i]][2])
    }
    pardrop<-pardrop[!duplicated(pardrop)]

    nparitems<-1:items
    nparitems<-nparitems[-pardrop]
    struct<-relation(domain = list(nparitems, nparitems), incidence = relation_incidence(struct)[-pardrop, -pardrop])
  }

  #plotting
  plot(struct)

  #returning a list of parallel items
  return(parallel)
}
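
A small illustrative call (not from the commit): the surmise relation below is invented, with tuple elements given as integers to match how the package constructs them elsewhere; the drawing goes through plot() for relation objects and therefore needs the suggested Rgraphviz package to be installed.

library(DAKS)
# hypothetical surmise relation on 4 items
imp <- set(tuple(1L, 2L), tuple(1L, 3L), tuple(1L, 4L), tuple(2L, 4L))
hasse(imp, items = 4)   # plots the Hasse diagram and returns the list of parallel item pairs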
44 changes: 44 additions & 0 deletions R/iita.r
@@ -0,0 +1,44 @@
###########################################
# Inductive item tree analysis algorithms #
###########################################

#############################################
# #
# This function can be used to perform one #
# of the three inductive item tree analysis #
# algorithms (original, corrected, and #
# minimized corrected) selectively. #
# #
#############################################

iita<-function(dataset, v){
  if ((!is.data.frame(dataset) & !is.matrix(dataset)) || ncol(dataset) == 1){
    stop("data must be either a numeric matrix or a data.frame, with at least two columns.\n")
  }

  if(sum(!(dataset == 0 | dataset == 1) | is.na(dataset)) != 0){
    stop("data must contain only 0 and 1")
  }

  if(v != 1 && v != 2 && v !=3){
    stop("IITA version must be specified")
  }

  # call to the chosen algorithm
  if(v == 3){
    i<-ind_gen(ob_counter(dataset))
    ii<-orig_iita(dataset, ind_gen(ob_counter(dataset)))
  }

  if(v == 2){
    i<-ind_gen(ob_counter(dataset))
    ii<-corr_iita(dataset, ind_gen(ob_counter(dataset)))
  }

  if(v == 1){
    i<-ind_gen(ob_counter(dataset))
    ii<-mini_iita(dataset, ind_gen(ob_counter(dataset)))
  }

  return(list(diff = ii, implications = i[which(ii == min(ii))][[1]]))
}
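
A usage sketch (not part of the commit), on artificial data; following the dispatch above, v = 3 selects the original, v = 2 the corrected, and v = 1 the minimized corrected algorithm.

library(DAKS)
set.seed(2)
dataset <- matrix(rbinom(4 * 150, 1, 0.6), ncol = 4)   # hypothetical 150 x 4 0/1 data
res <- iita(dataset, v = 2)   # corrected IITA
res$diff                      # diff values of all competing quasi orders
res$implications              # quasi order attaining the minimal diff value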
78 changes: 78 additions & 0 deletions R/imp2state.r
@@ -0,0 +1,78 @@
###################
# transformations #
###################

##################################################
# #
# This function transforms a set of implications #
# to the corresponding set of knowledge states. #
# #
##################################################

imp2state<-function(imp, items){
  R_2<-matrix(1, ncol = items, nrow = items)
  for(i in 1:items){
    for(j in 1:items){
      if(!is.element(set(tuple(i,j)), imp) && i != j){R_2[j,i]<-0}
    }
  }

  #Base
  base<-list()

  for(i in 1:items){
    tmp<-vector()
    for(j in 1:items){
      if(R_2[i,j] == 1){tmp<-c(tmp, j)}
    }
    base[[i]]<-sort(tmp)
  }

  base_list<-list()
  for(i in 1:items){
    base_list[[i]]<-set()
    for(j in 1:length(base[[i]]))
      base_list[[i]]<-set_union(base_list[[i]], set(base[[i]][j]))
  }

  #span of base
  G<-list()
  G[[1]]<-set(set())
  G[[2]]<-set()
  for(i in 1:length(base[[1]])){G[[2]]<-set_union(G[[2]], base[[1]][i])}
  G[[2]]<-set(set(), G[[2]])

  for(i in 2:items){
    H<-set(set())
    for(j in G[[i]]){
      check<-0
      if(set_is_subset(base_list[[i]], j) == FALSE){
        for(d in 1:i){
          if(set_is_subset(base_list[[d]], set(j, base_list[[i]])) == TRUE){
            if(set_is_subset(base_list[[d]], j)){
              H<-set_union(H,set(set_union(j,base_list[[i]])))
            }
          }
          if(set_is_subset(base_list[[d]], set(j, base_list[[i]])) == FALSE){
            H<-set_union(H,set(set_union(j,base_list[[i]])))
          }
        }
      }
    }
    G[[i+1]]<-set_union(G[[i]], H)
  }

  #Patterns

  P<-matrix(0, ncol = items, nrow = length(G[[items+1]]))
  i<-1

  for(k in (G[[items+1]])){
    for(j in 1:items){
      if(is.element(j, k)){P[i,j]<-1}
    }
    i<-i+1
  }

  return(P)
}
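
A brief sketch (not in the commit): the implication set is invented, and the tuples are built from integers so that the membership tests against set(tuple(i,j)) with integer loop indices succeed.

library(DAKS)
# hypothetical implications among 3 items
imp <- set(tuple(1L, 2L), tuple(1L, 3L), tuple(2L, 3L))
imp2state(imp, items = 3)   # binary matrix with one row per knowledge state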
71 changes: 71 additions & 0 deletions R/ind_gen.r
@@ -0,0 +1,71 @@
########################
# inductive generation #
########################

#######################################
# #
# This function generates inductively #
# a set of competing quasi orders. #
# #
#######################################

ind_gen<-function(b){
  m<-nrow(b)
  #set of all pairs with a maximum of k-1 counterexamples
  S<-list()

  #constructed relation for a maximum of k-1 counterexamples
  A<-list()

  #set of non-transitive triples
  M<-list()
  M[[1]]<-set()
  S[[1]]<-set()
  for(i in 1:m){
    for(j in 1:m){
      if(b[i,j] == min(b) & i != j) {S[[1]]<-set_union(S[[1]],set(tuple(i,j)))}
    }
  }

  A[[1]]<-S[[1]]

  #inductive generation process
  elements<-sort(b)[!duplicated(sort(b))]
  if(is.element(0,elements)){elements<-elements[2:(length(elements))]}

  k<-2

  for(elem in elements){
    S[[k]]<-set()
    A[[k]]<-set()
    M[[k]]<-set()
    #building of S
    for(i in 1:m){
      for(j in 1:m){
        if(b[i,j] <= elem && i !=j && is.element(set(tuple(i,j)), A[[k-1]]) == FALSE){S[[k]]<-set_union(S[[k]], set(tuple(i,j)))}
      }
    }
    #transitivity test
    if(set_is_empty(S[[k]]) == FALSE){
      M[[k]]<-S[[k]]
      brake_test<-1
      while(brake_test != 0){
        brake<-M[[k]]
        for(i in M[[k]]){
          for(h in 1:m){
            if(h != as.integer(i)[1] && h != as.integer(i)[2] && is.element(set(tuple(as.integer(i)[2],h)), set_union(A[[k-1]], M[[k]])) == TRUE && is.element(set(tuple(as.integer(i)[1],h)), set_union(A[[k-1]], M[[k]])) == FALSE){M[[k]]<-M[[k]] - set(i)}
            if(h != as.integer(i)[1] && h != as.integer(i)[2] && is.element(set(tuple(h,as.integer(i)[1])), set_union(A[[k-1]], M[[k]])) == TRUE && is.element(set(tuple(h,as.integer(i)[2])), set_union(A[[k-1]], M[[k]])) == FALSE){M[[k]]<-M[[k]] - set(i)}
          }
        }
        if(brake == M[[k]]){brake_test<-0}
      }
      A[[k]]<-set_union(A[[k-1]], (M[[k]]))
    }
    k<-k+1
  }

  #deletion of empty and duplicated quasi orders
  A<-A[(!duplicated(A))]
  A<-A[!set_is_empty(A)]
  return(A)
}
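
A small sketch on artificial data (not part of the commit); ob_counter() supplies the matrix of counterexample counts that ind_gen() expects as its argument b.

library(DAKS)
set.seed(3)
dataset <- matrix(rbinom(4 * 100, 1, 0.5), ncol = 4)   # hypothetical 100 x 4 0/1 data
b <- ob_counter(dataset)   # counterexample counts for all item pairs
A <- ind_gen(b)            # inductively generated competing quasi orders
length(A)                  # number of candidate quasi orders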
61 changes: 61 additions & 0 deletions R/mini_iita.r
@@ -0,0 +1,61 @@
############################
# minimized corrected IITA #
############################

##################################################
# #
# This function performs the minimized corrected #
# inductive item tree analysis procedure and #
# returns the corresponding diff values. #
# #
##################################################

mini_iita<-function(dataset, A){
  b<-ob_counter(dataset)
  m<-ncol(dataset)
  n<-nrow(dataset)

  bs_num<-list()
  for(i in 1:length(A)){
    bs_num[[i]]<-matrix(0,ncol = m, nrow = m)
  }

  p<-rep(0,m)
  for(i in 1:m){p[i]<-sum(dataset[,i])}

  error_num<-rep(0,length(A))
  diff_value_num<-rep(0,length(A))

  #computation of error rate
  for(k in 1:length(A)){
    x<-rep(0,4)
    for(i in 1:m){
      for(j in 1:m){
        if(is.element(set(tuple(i,j)), A[[k]]) == TRUE && i != j){
          x[2]<-x[2]-2*b[i,j] * p[j]
          x[4]<-x[4]+2 * p[j]^2
        }
        if(is.element(set(tuple(i,j)), A[[k]]) == FALSE && is.element(set(tuple(j,i)), A[[k]]) == TRUE && i != j){
          x[1]<-x[1]-2*b[i,j]*p[i] + 2 * p[i] * p[j] - 2 * p[i]^2
          x[3]<-x[3]+2*p[i]^2
        }
      }
    }
    error_num[k]<- -(x[1] + x[2]) / (x[3] + x[4])
  }

  #computation of diff values
  for(k in 1:length(A)){
    if(set_is_empty(A[[k]])){diff_value_num[k]<-NA}
    else{
      for(i in A[[length(A)]]){
        if(is.element(set(i), A[[k]])) {bs_num[[k]][as.integer(i[1]),as.integer(i[2])]<-error_num[k] * sum(dataset[,as.integer(i[2])])}
        if(is.element(set(i), A[[k]]) == FALSE && is.element(set(tuple(as.integer(i[2]),as.integer(i[1]))), A[[k]]) == FALSE){bs_num[[k]][as.integer(i[1]),as.integer(i[2])]<-(1- sum(dataset[,as.integer(i[1])]) / n) * sum(dataset[,as.integer(i[2])])}
        if(is.element(set(i), A[[k]]) == FALSE && is.element(set(tuple(as.integer(i[2]),as.integer(i[1]))), A[[k]]) == TRUE){bs_num[[k]][as.integer(i[1]),as.integer(i[2])]<-sum(dataset[,as.integer(i[2])]) - sum(dataset[,as.integer(i[1])]) + sum(dataset[,as.integer(i[1])]) * error_num[k]}
      }
      diff_value_num[k]<-sum((b - bs_num[[k]])^2) / (m^2 - m)
    }
  }

  return(diff_value_num)
}
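
As with corr_iita() above, a hedged usage sketch on artificial data (not from the commit); the quasi orders are again generated inductively from the counterexample counts.

library(DAKS)
set.seed(4)
dataset <- matrix(rbinom(5 * 120, 1, 0.5), ncol = 5)   # hypothetical 120 x 5 0/1 data
A <- ind_gen(ob_counter(dataset))
mini_iita(dataset, A)   # diff values under the minimized corrected algorithm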
