
added models overview doc

1 parent 753fba4 · commit 8be5776cfaa279342ae49250667c1d7ee8e9299e · pierrejdlf committed Feb 24, 2012
8 media/css/reanalyse.css
@@ -5,7 +5,7 @@ body {
font-size: 11px;
color: #606060;
}
-h1, h2, h3 {
+h1, h2, h3, h4 {
color: #585858;
}
@@ -287,6 +287,12 @@ hr {
/* =================================================================================== HOME / INTRO / (project/methodology/access/....) HTML CONTENT TEXTS */
+/* admin / upload page */
+.logconsole {
+ font-size: 0.8em;
+ color:gray;
+}
+
.introDiv {
margin-right:10%;
}
9 media/d3vizus/d3_TexteStreamTimeline.js
@@ -116,11 +116,11 @@ function buildD3_TexteStreamTimeline(thedata,theId) {
/////////////// DEFAULT ECHANTILLONNAGE on load
// CASE A : default = best resolution !
- //var periodWantedInt = 1; // we take one i over "periodWantedInt"
- //var periodWantedScaled = 1;
+ var periodWantedInt = 1; // we take one i over "periodWantedInt"
+ var periodWantedScaled = 1;
// CASE B : default = ~ 50 steps for the whole window
- var periodWantedInt = 1 + parseInt(nEchantill/30.0);
- var periodWantedScaled = periodWantedInt;
+ //var periodWantedInt = 1 + parseInt(nEchantill/30.0);
+ //var periodWantedScaled = periodWantedInt;
///////////////////////// PARAVERBAL SCALES
// deprecated (same max for every prvbal)
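For orientation, the two defaults toggled above come down to a single downsampling factor: Case A keeps every sample, Case B keeps roughly a few dozen steps for the whole window. A minimal Python restatement (the helper name is illustrative, not from the repo):

    def default_period(n_echantill, best_resolution=True):
        if best_resolution:
            return 1                       # Case A: one point per sample
        return 1 + n_echantill // 30       # Case B: e.g. 900 samples -> period 31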
@@ -158,6 +158,7 @@ function buildD3_TexteStreamTimeline(thedata,theId) {
.text(parseInt(periodWantedScaled*periodStep)+" sentences");
var scaleEchantill = d3.scale.pow()
+ .exponent(4)
.domain([0,100])
.range([1,(nEchantill/2.0)-1]);
$("#slider_"+theId).slider({
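The added .exponent(4) makes the resolution slider non-linear: small values barely change the sampling period, while the upper end grows fast. With this domain and range, d3.scale.pow() amounts to the mapping below (a Python restatement for illustration only; n_echantill stands for the JS nEchantill):

    def slider_to_period(x, n_echantill, exponent=4):
        # domain [0, 100] -> range [1, n_echantill/2 - 1]
        r0, r1 = 1.0, n_echantill / 2.0 - 1
        return r0 + (r1 - r0) * (x / 100.0) ** exponent

    # slider_to_period(50, 200) ~= 7.1 ; slider_to_period(100, 200) == 99.0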
BIN media/images/content_models.png
7 reanalyseapp/globalvars.py
@@ -67,7 +67,12 @@
SENTENCE_UTT_SYMBOLS['not_classified']=' ' # and other keys
########## CODES ACTIVATED (every code need to be declared in DEFINITIONS below anyway)
-# ACTIVATED CODES (displayed IN edShow view to toggle show/hide) OK to do categories (aka Transcription/Verbatim)
+# ACTIVATED CODES = those shown in the TexteStreamTimeline viz (in order) - nb: if a code never appears in the texte, it will not show on the viz!
+STREAMVIZCODES={}
+STREAMVIZCODES['codes'] = ['question','silence','hesitation','laugh','inaudible','break','comment','time']
+STREAMVIZCODES['colors'] = ['#66CCFF','#BFBD9F','#EC993B','#D9FF00','#ED5300','#ED5300','#517368','#66CCFF']
+
+# ACTIVATED CODES = displayed IN edShow to show/hide
PARVBCODES={}
PARVBCODES['Transcription'] = ['break','comment','inaudible','question','time']
PARVBCODES['Verbatim'] = ['body','directed','hesitation','interruption','laugh','silence']
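STREAMVIZCODES stores codes and colors as two parallel lists, so any consumer has to keep the indices aligned. A minimal sketch of reading the pairing (the color_of helper is hypothetical, not part of the codebase):

    STREAMVIZCODES = {
        'codes':  ['question','silence','hesitation','laugh','inaudible','break','comment','time'],
        'colors': ['#66CCFF','#BFBD9F','#EC993B','#D9FF00','#ED5300','#ED5300','#517368','#66CCFF'],
    }

    def color_of(code):
        # zip() keeps the declared order; unknown codes fall back to grey
        return dict(zip(STREAMVIZCODES['codes'], STREAMVIZCODES['colors'])).get(code, '#888888')

A single ordered mapping of code to color would avoid the two lists drifting out of sync, at the cost of a slightly more verbose declaration.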
2 reanalyseapp/imexport.py
@@ -68,7 +68,7 @@ def importEnqueteUsingMeta(folderPath):
# create permission for this enquete
content_type,isnew = ContentType.objects.get_or_create(app_label='reanalyseapp', model='Enquete')
- permname = 'EXPLORE e_'+str(newEnquete.id) + ' '+newEnquete.name
+ permname = 'EXPLORe_'+str(newEnquete.id) + ' '+newEnquete.name
p,isnew = Permission.objects.get_or_create(codename='can_explore_'+str(newEnquete.id),name=permname,content_type=content_type)
if os.path.exists(docPath):
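The per-study permission created here is the one staff later grants by hand from the Django admin (see the workflow described in templates/admin.html below). What that manual step amounts to, as a hedged Django-shell sketch (the username and the enquete id 42 are made up; the codename pattern is the one used above):

    from django.contrib.auth.models import User, Permission

    user = User.objects.get(username='some_researcher')       # hypothetical account
    perm = Permission.objects.get(codename='can_explore_42')  # 42 = enquete id
    user.user_permissions.add(perm)                           # grants Explore on that study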
24 reanalyseapp/views.py
@@ -113,8 +113,9 @@ def init_mylogging():
logging.basicConfig(level=logging.INFO,format='%(asctime)s %(levelname)s %(message)s', filename=settings.REANALYSELOGPATH+'reanalyse.log', filemode='a+')
logInitDone=False
if not logInitDone:
- logInitDone = True
- init_mylogging()
+ logging.info("you'll have to do it another way. (LOG)")
+ logInitDone = True
+ init_mylogging()
###########################################################################
def init_users():
nothing=1
@@ -152,7 +153,7 @@ def init_users():
try:
init_users()
except:
- logging.info("you'll have to to it another way.")
+ logging.info("you'll have to do it another way. (USERS)")
###########################################################################
@@ -329,6 +330,16 @@ def eAdmin(request):
else:
ctx.update({'solrstatus':'running'})
ctx.update({'staffemail':settings.STAFF_EMAIL})
+
+ # log file
+ logging.info("Looking at ADMIN page")
+ logLines = int(request.GET.get('log','20'))
+ try:
+ logFile = open(settings.REANALYSELOGPATH+'reanalyse.log','r')
+ ctx.update({'logs':reversed(logFile.readlines()[-logLines:])})
+ logFile.close()
+ except:
+ ctx.update({'logs':['no log file found']})
return render_to_response('admin.html', ctx , context_instance=RequestContext(request))
################################################################################
@login_required
@@ -442,7 +453,7 @@ def eParse(request):
e.save()
# let's make stream timeline viz for each text
- logging.info("make streamtimeline viz")
+ #logging.info("make streamtimeline viz")
try:
makeViz(e,"TexteStreamTimeline",textes=[t])
except:
@@ -456,6 +467,11 @@ def eParse(request):
makeAllTfidf(e)
logging.info("tfidf successfully updated")
+ makeViz(e,'Cloud_SolrSpeakerTagCloud')
+ makeViz(e,'Graph_SpeakersSpeakers')
+ makeViz(e,'Graph_SpeakersWords')
+ makeViz(e,'Graph_SpeakersAttributes')
+
e.status='0'
e.save()
logging.info("IMPORT ENQUETE DONE !")
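The log tail added to eAdmin reads the whole reanalyse.log into memory before slicing the last N lines, which is fine while the file stays small. If it grows, a bounded read is an easy swap, as in this sketch (not what the view currently does):

    from collections import deque

    def tail(path, n=20):
        # deque with maxlen streams the file and keeps only the last n lines
        try:
            with open(path) as f:
                return list(deque(f, maxlen=n))[::-1]   # newest first, like reversed() in eAdmin
        except IOError:
            return ['no log file found']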
162 reanalyseapp/visualization.py
@@ -51,16 +51,21 @@ def makeViz(e,typ,speakers=[],textes=[],attributetypes=[],count=0):
#descr = VIZTYPESDESCR[typ] # we used to set a different one for each
descr = VIZTYPESDESCR # now just invite user to update it (see globalvars.py)
- logging.info("makeViz:"+typ)
+ logging.info("making viz: "+typ)
if typ in ['Graph_SpeakersAttributes','Graph_SpeakersWords','Graph_SpeakersSpeakers']:
newVizu = makeVisualizationObject(e,typ,descr)
- if speakers!=[]:
- for s in speakers:
- newVizu.speakers.add(s)
- if textes!=[]:
- for t in textes:
- newVizu.textes.add(t)
+ if speakers==[]:
+ if textes==[]:
+ speakers = e.speaker_set.all()
+ else:
+ speakers=[]
+ for t in textes:
+ for s in t.speaker_set.all():
+ if s not in speakers:
+ speakers.append(s)
+ for s in speakers:
+ newVizu.speakers.add(s)
newVizu.save()
# todo: launch threads, to avoid blocking ?
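The defaulting logic moved into makeViz reads as: no speakers and no textes means every speaker of the enquete; textes only means the union of their speakers. A standalone restatement (hypothetical helper; a set of ids avoids the 'if s not in speakers' linear scans):

    def resolve_speakers(enquete, speakers, textes):
        if speakers:
            return speakers
        if not textes:
            return list(enquete.speaker_set.all())
        seen, resolved = set(), []
        for t in textes:
            for s in t.speaker_set.all():
                if s.id not in seen:        # the same speaker may appear in several textes
                    seen.add(s.id)
                    resolved.append(s)
        return resolved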
@@ -242,7 +247,7 @@ def visMakeSpeakersSpeakersGraph(e,viz,param):
method = param['method']
if method=='pattern':
- #################### USING PATTERN (deprecated ? only english !)
+ #################### USING PATTERN (deprecated ? only english!!)
g=networkx.Graph()
corpus=Corpus()
for s in e.speaker_set.all():
@@ -293,9 +298,6 @@ def visMakeTermVectorGraph(e,viz,param):
g=networkx.DiGraph()
termVectorDic={}
-
- if speakers==[]:
- speakers = e.speaker_set.all()
# make spk nodes in any case
for s in speakers:
@@ -405,18 +407,7 @@ def visMakeTermVectorGraph(e,viz,param):
###########################################################################
def visMakeSpeakersAttributesGraph(enquete,viz,param):
speakers = param['who']
- textes = param['where']
attributetypes = param['whoatt']
-
- if speakers==[]:
- if textes==[]:
- speakers = enquete.speaker_set.all()
- else:
- speakers=[]
- for t in textes:
- for s in t.speaker_set.all():
- if s not in speakers:
- speakers.append(s)
if attributetypes==[]:
attributetypes = enquete.attributetype_set.all()
@@ -625,7 +616,7 @@ def visMakeAttributes(e,param):
#
def visMakeStreamTimeline(e,param):
# step = window width in which we count paraverbal/sentences occurences
- step = 3 # ie. counting spk & paraverbal in a "window" of width "step" (3 sentences at a time)
+ step = 2 # ie. counting spk & paraverbal in a "window" of width "step" (x sentences at a time)
#factorParaverbal = 10*step # height is managed in d3 (deprecated:y division factor for paraverbal (outvalue should be normalized in [0,1])
res={}
t = param['where']
@@ -641,16 +632,24 @@ def visMakeStreamTimeline(e,param):
maxPeriods = 0
nSteps = 1+int(maxPeriods/step) # one more if nSentences undivisible by step
- paravList=['silence','laugh','hesitation','interruption','break']
- par_colors=['#BFBD9F','#D9FF00','#517368','#ED5300','#EC993B']
+ allpar_names = STREAMVIZCODES['codes']
+ allpar_colors= STREAMVIZCODES['colors']
+
+ # only keep non-null paraverbal from that list
+ par_names = []
+ par_colors = []
+ for i,pname in enumerate(allpar_names):
+ if Sentence.objects.filter(texte=t,word__wordentityspeaker__wordentity__code__name=pname).count()!=0:
+ par_names.append(pname)
+ par_colors.append(allpar_colors[i])
speakers = t.speaker_set.order_by('-ddi_type')
############# init
for s in speakers:
spk_layers.append([ {'x':k, 'y':0,'info':s.name} for k in range(nSteps) ])
spk_ids.append([s.id,s.name])
- for i,p in enumerate(paravList):
+ for i,p in enumerate(par_names):
par_layers.append([ 0 for k in range(nSteps) ])
par_ids.append([i,p])
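The loop above keeps a code (and, by index, its color) only when the texte actually contains it, so empty layers never reach the JS side. The same idea with the code/color pairs zipped together (a compact restatement, not a drop-in replacement):

    par_names, par_colors = [], []
    for pname, pcolor in zip(STREAMVIZCODES['codes'], STREAMVIZCODES['colors']):
        n = Sentence.objects.filter(texte=t,
                word__wordentityspeaker__wordentity__code__name=pname).count()
        if n:                               # drop codes that never occur in this texte
            par_names.append(pname)
            par_colors.append(pcolor)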
@@ -669,19 +668,21 @@ def visMakeStreamTimeline(e,param):
# paraverbal
for w in sent.word_set.all(): # assuming there is only paraverbal in words
par = w.wordentityspeaker.wordentity.code.name
- if par in paravList:
- par_layers[ paravList.index(par) ][k] += 1/float(step)
- maxParavbCount = max(maxParavbCount,par_layers[ paravList.index(par) ][k])
+ if par in par_names:
+ par_layers[ par_names.index(par) ][k] += 1/float(step)
+ maxParavbCount = max(maxParavbCount,par_layers[ par_names.index(par) ][k])
res['spk_layers']=spk_layers
res['spk_ids']=spk_ids
-
+
+ # now we filter the parvb to keep only non null values, based on maxParvb
res['par_layers']=par_layers
res['par_ids']=par_ids
res['par_colors']=par_colors
# we can send maximum prvb value(s) or let js do it...
res['maxParavbCount']=maxParavbCount # maximum y-value for parvb
+ # nb: we send every prvb value, but js will not display the null ones!
res['period']=step # one value for each 'step' value of i ex. 3
res['maxPeriods']=maxPeriods # maximum i for all the i-o of sentences ex. 15
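For orientation, the dict handed to the D3 timeline roughly has the shape below (toy values: two speakers, one kept paraverbal code, a window of 2 sentences):

    res = {
        'spk_layers': [[{'x': 0, 'y': 1.0, 'info': 'SPK1'}, {'x': 1, 'y': 0.5, 'info': 'SPK1'}],
                       [{'x': 0, 'y': 0.0, 'info': 'SPK2'}, {'x': 1, 'y': 1.5, 'info': 'SPK2'}]],
        'spk_ids': [[12, 'SPK1'], [13, 'SPK2']],
        'par_layers': [[0.5, 0.0]],     # one list of per-step counts for each kept code
        'par_ids': [[0, 'silence']],
        'par_colors': ['#BFBD9F'],
        'maxParavbCount': 0.5,
        'period': 2,                    # window width, in sentences per step
        'maxPeriods': 2,
    }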
@@ -769,10 +770,11 @@ def getSolrTermVectorsDict(speakers,field,count,mintn): # field = 'text'/'ngrams
q=None
for s in speakers:
- if q==None:
- q='(speakerid:'+str(s.id)
- else:
- q=q+' OR speakerid:'+str(s.id)
+ if s.ngramspeaker_set.count()>1:
+ if q==None:
+ q='(speakerid:'+str(s.id)
+ else:
+ q=q+' OR speakerid:'+str(s.id)
q=q+')'
conn = pythonsolr.Solr( settings.HAYSTACK_SOLR_URL )
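The query built here ORs the speaker ids into one Solr filter, now skipping speakers with at most one stored ngram. An equivalent formulation with a join (not the repo's code; note the empty case still needs guarding, since q stays None when every speaker is skipped):

    included = [s for s in speakers if s.ngramspeaker_set.count() > 1]
    q = '(' + ' OR '.join('speakerid:%s' % s.id for s in included) + ')' if included else None
    # e.g. '(speakerid:3 OR speakerid:7 OR speakerid:12)'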
@@ -802,57 +804,57 @@ def getSolrTermVectorsDict(speakers,field,count,mintn): # field = 'text'/'ngrams
if t.contenttxt!="":
totalDocuments+=1
- try:
- totalTerms = len(tv[field])
- res = list2dict(tv[field])
-
- # first transform all data in dict
- alldic={}
- for k,v in res.iteritems():
- d = list2dict(v)
- alldic.update({k:d})
-
- # then keep words wanted
- out={}
- for w,d in alldic.items():
- keepw = len(w)>2
- ###### RULE 1 : dont keep ngrams which appear only 1 time for that speaker and never else (df=tf=1)
- keepw = keepw and d['df']+d['tf']!=2
- ###### RULE 1 bis: keep ngrams that appears at least mintn
- keepw = keepw and d['tf']>=mintn
- ###### RULE 2 : dont keep ngrams included in other-longer-word (if same df/tf)
- keepw = keepw and not True in [(w in otherw and w!=otherw and d['df']==alldic[otherw]['df'] and d['tf']==alldic[otherw]['tf']) for otherw in alldic.keys()]
-
- if keepw:
- df = d['df']/float(totalDocuments)
- tf = d['tf']/float(totalTerms)
- tfidf = 1000*tf/df
- newd = {'dn':d['df'],'tn':d['tf'], 'df':df, 'tf':tf, 'tfidf':tfidf}
- out[w] = newd
- if len(out)==0:
- return {'nongramsfoundwith_mintn='+str(mintn)+"_spk="+"_".join([str(s.id) for s in speakers]):{'df':0,'tf':0,'dn':0,'tn':0,'tfidf':0}}
- # todo: only keep 'n' top wanted based on tfidf (can we do it in solr query rather than python ?)
- # we can do it here, or later when making graph/tagcloud
-
- # 1. get all words with tfidf and sort
- # wtfs = [ [v['tfidf'],k] for k,v in out.items() ]
- # wtfs = sorted(wtfs, key=lambda a: -a[0])
- #
- # # 2.
- # outF={}
- # for w in wtfs[:maxcount]:
- # outF[w[1]]=out[w[1]]
+ #try:
+ totalTerms = len(tv[field])
+ res = list2dict(tv[field])
+
+ # first transform all data in dict
+ alldic={}
+ for k,v in res.iteritems():
+ d = list2dict(v)
+ alldic.update({k:d})
+
+ # then keep words wanted
+ out={}
+ for w,d in alldic.items():
+ keepw = len(w)>2
+ ###### RULE 1 : dont keep ngrams which appear only 1 time for that speaker and never else (df=tf=1)
+ keepw = keepw and d['df']+d['tf']!=2
+ ###### RULE 1 bis: keep ngrams that appears at least mintn
+ keepw = keepw and d['tf']>=mintn
+ ###### RULE 2 : dont keep ngrams included in other-longer-word (if same df/tf)
+ keepw = keepw and not True in [(w in otherw and w!=otherw and d['df']==alldic[otherw]['df'] and d['tf']==alldic[otherw]['tf']) for otherw in alldic.keys()]
- return out
- # 'text': {'df': 24, 'tf': 4, 'tf-idf': 0.16666666666666666 }
- # 'tout': {'df': 464, 'tf': 1, 'tf-idf': 0.0021551724137931034 }
- except:
- return {'nosolrtermvectordict_spk='+"_".join([str(s.id) for s in speakers]):{'df':0,'tf':0,'dn':0,'tn':0,'tfidf':0}}
+ if keepw:
+ df = d['df']/float(totalDocuments)
+ tf = d['tf']/float(totalTerms)
+ tfidf = 1000*tf/df
+ newd = {'dn':d['df'],'tn':d['tf'], 'df':df, 'tf':tf, 'tfidf':tfidf}
+ out[w] = newd
+ if len(out)==0:
+ return {'nongramsfoundwith_mintn='+str(mintn)+"_spk="+"_".join([str(s.id) for s in speakers]):{'df':0,'tf':0,'dn':0,'tn':0,'tfidf':0}}
+ # todo: only keep 'n' top wanted based on tfidf (can we do it in solr query rather than python ?)
+ # we can do it here, or later when making graph/tagcloud
+
+ # 1. get all words with tfidf and sort
+# wtfs = [ [v['tfidf'],k] for k,v in out.items() ]
+# wtfs = sorted(wtfs, key=lambda a: -a[0])
+#
+# # 2.
+# outF={}
+# for w in wtfs[:maxcount]:
+# outF[w[1]]=out[w[1]]
+
+ return out
+ # 'text': {'df': 24, 'tf': 4, 'tf-idf': 0.16666666666666666 }
+ # 'tout': {'df': 464, 'tf': 1, 'tf-idf': 0.0021551724137931034 }
+ #except:
+ # return {'nosolrtermvectordict_spk='+"_".join([str(s.id) for s in speakers]):{'df':0,'tf':0,'dn':0,'tn':0,'tfidf':0}}
####################################################################
# to avoid querying solr everyday, we store ngrams in DB
def makeAllTfidf(e):
for s in e.speaker_set.all():
- logging.info("now reseting tfidf ngrams for speaker:"+str(s.id))
+ #logging.info("now reseting tfidf ngrams for speaker:"+str(s.id))
s.ngramspeaker_set.all().delete()
termd = getSolrTermVectorsDict([s],'ngrams',count=0,mintn=3)
for w in termd.keys():
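The score stored for each ngram follows the normalization in getSolrTermVectorsDict above: term count over the speaker's total terms, divided by document count over all non-empty textes, scaled by 1000. A worked example using the dn/tn figures from the inline comment (totalDocuments and totalTerms are made-up toy values):

    dn, tn = 24, 4                            # raw document / term counts from Solr
    totalDocuments, totalTerms = 120, 2000    # assumed corpus sizes for the example
    df = dn / float(totalDocuments)           # 0.2
    tf = tn / float(totalTerms)               # 0.002
    tfidf = 1000 * tf / df                    # 10.0: high when a term is frequent for the
                                              # speaker but rare across documents

One interaction worth double-checking: makeAllTfidf deletes a speaker's ngramspeaker_set just before calling getSolrTermVectorsDict, while the new ngramspeaker_set.count()>1 guard skips speakers without stored ngrams, which would leave q as None for that call.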
24 templates/admin.html
@@ -13,18 +13,16 @@
{% block content %}
<! ================================================= -->
<div class="introHtml">
- <h1>Admin & Users</h1>
- As described (for the public) in the <a href="/reanalyse/?p=access">access</a> page, here is how it works:
+ <h1>Access & Users</h1>
+ As described (for the public) in the <a href="/reanalyse/?p=access">access</a> page, here is how it works on the admin side:
<ol>
<li>Public users (without login) can only access Overview pages for every study</li>
- <li>They can ask for a Discover status [using online form]</li>
- <li>An unactivated user is created, and a mail is sent to: <i>{{staffemail}}</i></li>
+ <li>When they ask for the Discover status [online form], an unactivated user is created, and a mail is sent to the staff team</li>
<li>Staff can then activate the user in the <a href="/reanalyse/admin">admin</a> panel (and send a mail to congratulate the guy).</li>
- <li>With this Discover status, the user can also access to the Research & Research page for every study.</li>
- <li>She/he can ask (from a study page) for an Explore status [using online form]</li>
- <li>A mail is also sent to the staff, including the 'motivation' field</li>
- <li>Staff can then add the permission for this enquete ('EXPLORE e_[id]') to this user on the <a href="/reanalyse/admin">admin</a> panel (and send a mail to the the so happy guy).</li>
+ <li>If she/he asks (on a study page) for an Explore status [online form], another mail is sent to the staff, including the 'motivation' field</li>
+ <li>Staff can then add the permission for this enquete ('EXPLORE e_[id]') to this user on the <a href="/reanalyse/admin">admin</a> panel (and kindly reply to the guy).</li>
</ol>
+ <p>Current staff email address is: <i>{{staffemail}}</i></p>
<h1>Add a new study</h1>
<div>
@@ -44,7 +42,7 @@
<li>if you don't understand a thing, please read the <a href="/reanalyse/?p=method&q=3">normalization</a> page carefully</li>
</ul>
</div>
- <h1>If you feel ready, go on</h1>
+ <p>
<div id="eBrowseNewEnquete">
<div id="file-uploader">
<noscript>
@@ -53,7 +51,15 @@
</noscript>
</div>
</div>
+ </p>
+
+ <h1>Logs</h1>
+ <ul class="logconsole">
+ {% for l in logs %}<li>{{l}}</li>{% endfor %}
+ </ul>
</div>
+
+
<! ================================================= -->
<!--
25 templates/content/method_content_fr.html
@@ -24,6 +24,13 @@
<div class="content_norm">
+<h2>Go to</h2>
+<ul>
+ <li><a href="#meta_study.csv"><span class="content_norm_file">meta_study.csv</span></a></li>
+ <li><a href="#meta_documents.csv"><span class="content_norm_file">meta_documents.csv</span></a></li>
+ <li><a href="#meta_speakers.csv"><span class="content_norm_file">meta_speakers.csv</span></a></li>
+ <li><a href="#meta_codes.csv"><span class="content_norm_file">meta_codes.csv</span></a></li>
+</ul>
<h2>Overview</h2>
<img src="/reanalyse/media/images/content_overview.png" alt="normalisation">
<h2>Description complète de la normalisation d'une enquête</h2>
@@ -76,7 +83,7 @@
</ul>
<ol>
- <li><h4>Table des métadonnées de l'enquête <span class="content_norm_file">meta_study.csv</span></h4>
+ <li><a name="meta_study.csv"><h4>Table des métadonnées de l'enquête <span class="content_norm_file">meta_study.csv</span></h4></a>
Cette table contient les métadonnées générales concernant une enquête.<br/>
Elles seront affichées sur la page Overview de chaque enquête.
<table class="content_norm_ex">
@@ -139,7 +146,7 @@
</table>
</li>
- <li><h4>Table des documents <span class="content_norm_file">meta_documents.csv</span></h4>
+ <li><a name="meta_documents.csv"><h4>Table des documents <span class="content_norm_file">meta_documents.csv</span></h4></a>
Décrit les documents constitutifs d'une enquête.<br/>
Notez les deux dernière colonnes permettant de conserver l'historique des modifications liées à l'archivage.<br/>
S'il existe une enquête sur enquête (ESE), les documents associés sont également listés.
@@ -194,7 +201,7 @@
</table>
</li>
- <li><h4>Tables des intervenants <span class="content_norm_file">meta_speakers.csv</span></h4>
+ <li><a name="meta_speakers.csv"><h4>Tables des intervenants <span class="content_norm_file">meta_speakers.csv</span></h4></a>
Contient les métadonnées relatives aux personnes impliquées dans l'enquête.<br/>
Les colonnes dont le nom commence par _ seront masquées dans la liste des intervenants du site-enquête.<br/>
La visualisation des <a href="/reanalyse/?p=method&q=4#viz_ex_Graph_SpeakersAttributes">attributs</a> ne pourra se faire que sur cette liste, tandis que la création d'un <a href="/reanalyse/?p=method&q=4#viz_ex_Graph_SpeakersAttributes">graphe</a> incluera tous les types d'attributs.
@@ -223,7 +230,7 @@
</li>
- <li><h4>Table des codages <span class="content_norm_file">meta_codes.csv</span></h4>
+ <li><a name="meta_codes.csv"><h4>Table des codages <span class="content_norm_file">meta_codes.csv</span></h4></a>
Décrit les <a href="/reanalyse/?p=method&q=5#gloss_code">codes</a> utilisés dans les verbatims.<br/>
Ce fichier permet de tracer les adaptations successives des verbatims (différentes syntaxes pour le codage).<br/>
La liste des codes supportés est (pour l'instant) fixée à celle donnée ci-dessous.
@@ -491,7 +498,7 @@
<li><a name="viz_ex_view_sBrowse"><h2>Liste des intervenants</h2></a>
<img src="/reanalyse/media/images/viz_ex_view_sBrowse.png" width="100%">
-<br/>Les icônes de couleurs permettent d'accéder aux visualisations liés à chaque intervenant (par ex. <a href="#viz_ex_Cloud_SolrSpeakerTagCloud">nuage de mots</a> utilisés). Les types d'attribut affichées sont ceux de la table meta_speakers.csv (voir la page <a href="/reanalyse/?p=method&q=3">Normalisation</a>).
+<br/>Les icônes de couleurs permettent d'accéder aux visualisations liées à chaque intervenant (par ex. <a href="#viz_ex_Cloud_SolrSpeakerTagCloud">nuage de mots</a> utilisés). Les types d'attributs affichés sont ceux de la table <span class="content_norm_file">meta_speakers.csv</span> (voir la page <a href="/reanalyse/?p=method&q=3#meta_speakers.csv">Normalisation</a>).
</li>
<li><a name="viz_ex_view_dShow"><h2>Exploration d'un entretien</h2></a>
@@ -620,7 +627,7 @@
<li><b>outline_content</b>: les codes hors texte; ex. comment:"tout le monde se regarde", break:"changement de cassette", time:"65min"</li>
<li><b>inword</b>: code s'appliquant au texte; ex. uncertain:"or|alors", strong:"vraiment"</li>
</ul>-->
- Veuillez vous référer au tableau <span class="content_norm_file">meta_codes.csv</span> de la page <a href="/reanalyse/?p=method&q=3">Normalisation</a> pour consulter la liste des codes actuellement actifs dans les sites-enquêtes.
+ Veuillez vous référer au tableau <span class="content_norm_file">meta_codes.csv</span> de la page <a href="/reanalyse/?p=method&q=3#meta_codes.csv">Normalisation</a> pour consulter la liste des codes actuellement actifs dans les sites-enquêtes.
</li>
<!-- ================================ -->
<li><a name="gloss_ngram"><h2>Indexation et Ngrams</h2></a>
@@ -675,4 +682,8 @@
</ul>
</div>
-
+<h1>Django Models & Views</h1>
+<div>
+<p>Please switch to fullscreen if your eyes hurt.</p>
+<img src="/reanalyse/media/images/content_models.png" width="100%" alt="models">
+</div>
7 templates/e_selectviz.html
@@ -68,14 +68,17 @@
</li>
<li><a class="imHelp" target="_new" href="/reanalyse/?p=method&q=4#viz_ex_Cloud_SolrSpeakerTagCloud"></a>
- <a href="" onclick='createVisualization({{enquete.id}},"Cloud_SolrSpeakerTagCloud",{"speakers":selectedSpeakerIds,"textes":selectedTexteIds});return false;'>Tag cloud</a> (for each text)
+ <a href="" onclick='createVisualization({{enquete.id}},"Cloud_SolrSpeakerTagCloud",{"speakers":selectedSpeakerIds,"textes":selectedTexteIds});return false;'>Tag cloud</a>
+ {% if source == 'Documents' %}(for each text){% endif %}
<span class="vizLoadingSpinner Cloud_SolrSpeakerTagCloud" style="display:none"></span>
</li>
-
+
+ {% if source == 'Documents' %}
<li><a class="imHelp" target="_new" href="/reanalyse/?p=method&q=4#viz_ex_TexteStreamTimeline"></a>
<a href="" onclick='createVisualization({{enquete.id}},"TexteStreamTimeline",{"speakers":selectedSpeakerIds,"textes":selectedTexteIds});return false;'>Interaction dynamics</a> (for each text)
<span class="vizLoadingSpinner TexteStreamTimeline" style="display:none"></span>
</li>
+ {% endif %}
<li><a class="imHelp" target="_new" href="/reanalyse/?p=method&q=4#viz_ex_Attributes"></a>
<a href="" onclick='createVisualization({{enquete.id}},"Attributes",{"speakers":selectedSpeakerIds,"textes":selectedTexteIds});return false;'>Attributes</a>
