<rqdef version="2">
  <!-- defines an RQ review quality grading scheme.
       I. Aspects:
       There are exactly four aspects:
         1. timeliness,
         2. has graded co-reviews,
         3. helpful for decision,
         4. helpful for authors,
       whose weights must sum to 100 and
       each of which will receive 0 to 100 points during the grading.
       Aspects 1 and 2 are graded automatically;
       3 and 4 are graded by the co-reviewers.
       
       II. Levels:
       For the human-graded aspects, each possible number of points within
       a facet is defined by a level, which carries a verbal description.
       
       III. Facets:
       Facets partition each of the human-graded aspects (and its 100 points)
       into parts, each of which has its own levels.
       
       IV. Weights:
       The overall RQ score is the percent-weighted sum of points
       of the aspects.
       The weights must sum to 100.
       The score for each of the human-graded aspects
       is the local percent-weighted sum of points 
       of its facets.
       The local weights must sum to 100 per aspect.
       (A worked example follows right after this comment.)
       
       V. Overall ranking:
       To give reviewers who did more work better chances,
       only the best N reviews of each reviewer are used
       for the overall ranking, and
       reviewers with fewer than N reviews are excluded from the ranking.
       
       Below is a vanilla RQ grading scheme; modify it as fits your conference.
  -->
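
  <!-- Worked example for the weighting rules (IV) above, using the vanilla
       weights defined below; the per-aspect points are made up for
       illustration only:
         timeliness          100 points * 10% = 10
         hasGradedCoreviews  100 points * 50% = 50
         helpfulForDecision   70 points * 40% = 28
         helpfulForAuthors     0 points *  0% =  0
         overall RQ score                     = 88
       The points of each human-graded aspect are in turn the local
       percent-weighted sum over its facets.
  -->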

  <gradedByMachine>
    <timeliness weight="10">
      <!-- Aspect: Was the review submitted in due time so as to 
         not obstruct the process?
         The entries below say that when a review
         is late by no more than  h1; h2; h3; ... hours,
         it should get            p1; p2; p3; ... points.
         The times are relative to the official reviewing deadline,
         so h1 and p1 should normally be 0 and 100.
         Subsequent entries should reflect how much late reviews obstruct
         the decision-making for the conference.
         If the effect is strong for your conference, also make sure to give
         this aspect enough weight (considering that every review can and
         should get 100 points here).
      -->
      <hoursLate>  0; 12; 24; 36 </hoursLate>
      <points>   100; 50; 25; 12 </points>
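      <!-- Example with the values above (illustration only): a review that is
           13 hours late misses the 12-hour bracket but is no more than 24 hours
           late, so it receives 25 points; a review submitted on time receives
           100 points. Reviews later than the last entry (36 hours) presumably
           receive 0 points.
      -->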
    </timeliness>

    <hasGradedCoreviews weight="50">
      <!-- A reviewer who has fully graded all/some/none of the co-reviews
         for the given article will receive 100/X/0 points, respectively.
         This entry defines X, which should usually be either 0
         or a low-to-medium number (e.g. 20 or 50).

         Everybody can have 100 points here, but missing grades obstruct
         the system massively, so make sure, somewhat counter-intuitively,
         to give this aspect a lot of weight (e.g. 50).
      -->
      <pointsHasGradedSome> 25 </pointsHasGradedSome>
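      <!-- Example with the value above (illustration only): a reviewer who
           graded all co-reviews gets 100 points, one who graded only some
           gets 25, one who graded none gets 0. -->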
    </hasGradedCoreviews>
    
    <overallRanking>
      <!-- Reviewers with fewer than N graded reviews are not ranked;
           reviewers with N or more have only their best N reviews ranked,
           where N is specified by rankedReviews.
      -->
      <rankedReviews> 3 </rankedReviews>
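      <!-- Example with the value above (illustration only): a reviewer with
           5 graded reviews is ranked on the best 3 of them; a reviewer with
           only 2 graded reviews is excluded from the ranking. -->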
    </overallRanking>
  </gradedByMachine>

  
  <gradedByHuman>
    <helpfulForDecision weight="40">
      <!-- Aspect: Was the review helpful for a sound acceptance decision?   
      -->

      <facet name="summary" localweight="12">
        <level points="100">
          The review starts with a summary of the article's contents whose
          perspective is geared towards what follows in the review text.
        </level>
        <level points="60">
          The review starts with a generic summary of the article's contents
        </level>
        <level points="25">
          The review starts with a minimal token summary of the article's contents
        </level>
        <level points="0" otherwise>
          The review appears partially dubious
        </level>      
      </facet> 

      <facet name="trustworthiness" localweight="40">
        <level points="100">
          The review appears fully sound and trustworthy
        </level>
        <level points="60">
          The review appears almost fully sound and trustworthy
        </level>
        <level points="40">
          The review appears by-and-large sound and trustworthy
        </level>
        <level points="0" otherwise>
          The review appears partially dubious
        </level>      
      </facet> 

      <facet name="completeness" localweight="16">
        <level points="100">
          The review covers all important issues
        </level>
        <level points="60">
          The review leaves one important issue undiscussed
        </level>
        <level points="0">
          The review leaves several important issues undiscussed
        </level>
      </facet>

      <facet name="weighting" localweight="14">
        <level points="100">
          For any strength or issue mentioned, the review provides clear indication how important it is
        </level>
        <level points="75">
          For most strengths or issues mentioned, the review provides clear indication how important it is
        </level>
        <level points="33">
          For at least some strengths or issues mentioned, the review provides clear indication how important it is
        </level>
        <level points="0">
          Otherwise
        </level>
      </facet>

      <facet name="balancedness" localweight="18">
        <level points="100">
          The review explicitly and clearly weighs strengths against weaknesses to arrive at its recommendation
        </level>
        <level points="55">
          The review vaguely weighs strengths against weaknesses to arrive at its recommendation
        </level>
        <level points="0">
          No weighing is apparent; the recommendation might surprise someone
          who has read the review text
        </level>
      </facet>
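
      <!-- Worked example for this aspect (illustration only; the facet points
           are made up): summary=60, trustworthiness=100, completeness=60,
           weighting=75, balancedness=55 gives
             0.12*60 + 0.40*100 + 0.16*60 + 0.14*75 + 0.18*55 = 77.2 points
           for helpfulForDecision.
      -->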
    </helpfulForDecision>


    <helpfulForAuthors weight="0">
      <!-- Aspect: Does the review guide the authors well on how to improve their article?
           In this vanilla scheme the aspect carries weight 0 and defines no facets;
           add facets and raise the weight if your conference wants to grade it.
      -->
    </helpfulForAuthors>
  </gradedByHuman>

</rqdef>