@article{33704756_33929230_2011,
  author   = {Hermelin, Eran and Lievens, Filip and Robertson, Ivan},
  title    = {The Validity of Assessment Centres for the Prediction of Supervisory Performance Ratings: A Meta-Analysis},
  journal  = {Organizational Psychology},
  year     = {2011},
  volume   = {1},
  number   = {1},
  pages    = {39--49},
  url      = {https://orgpsyjournal.hse.ru/en/2011-1-1/33929230.html},
  keywords = {validity, personnel assessment, meta-analysis, overall evaluation rating, performance rating},
  abstract = {Purpose. The main aim of the present study is to update previous research data on the Assessment Center (AC) Validity, obtained in Gaugler study in 1987. This meta-analytic study focused on Assessment Center Validity examination. Study design. This meta-analysis examined works carried out from 1985 to 2005, 26 studies and 27 validity coefficients (N = 5850), linking the Overall Assessment Rating (OAR) to the supervisory performance ratings (PR) were analyzed. The same methodology as Gaugler employed to analyze data. AC has seen in terms of the ability to select the best candidates for a position, Overall Assessment Rating has used as a criterion measure Assessment Center Effectiveness. The validity of the individual competencies measurement has not considered. Findings. The average value of the correlation between the OAR and PR is 0.28 (95% confidence interval 0.24 ≤ ρ ≤ 0.32). Number of assessed competencies, number of original selection methods and type of procedure were taken into account. As might be expected, none of the variables are affected. Research implications for practice. Focus future research predictive validity AC deserves the following issues. Firstly, it is the need to expand and refine the criteria by which the validity of the AC is measured. Secondly, it is important to examine the validity of the AC competitive compared with simulation exercises with low ecological validity, such as case-tests. It has been suggested that this validity estimate is likely to be conservative given that assessment centre validities tend to be affected by indirect range restriction.},
  annote   = {Purpose. The main aim of the present study is to update previous research data on the Assessment Center (AC) Validity, obtained in Gaugler study in 1987. This meta-analytic study focused on Assessment Center Validity examination. Study design. This meta-analysis examined works carried out from 1985 to 2005, 26 studies and 27 validity coefficients (N = 5850), linking the Overall Assessment Rating (OAR) to the supervisory performance ratings (PR) were analyzed. The same methodology as Gaugler employed to analyze data. AC has seen in terms of the ability to select the best candidates for a position, Overall Assessment Rating has used as a criterion measure Assessment Center Effectiveness. The validity of the individual competencies measurement has not considered. Findings. The average value of the correlation between the OAR and PR is 0.28 (95% confidence interval 0.24 ≤ ρ ≤ 0.32). Number of assessed competencies, number of original selection methods and type of procedure were taken into account. As might be expected, none of the variables are affected. Research implications for practice. Focus future research predictive validity AC deserves the following issues. Firstly, it is the need to expand and refine the criteria by which the validity of the AC is measured. Secondly, it is important to examine the validity of the AC competitive compared with simulation exercises with low ecological validity, such as case-tests. It has been suggested that this validity estimate is likely to be conservative given that assessment centre validities tend to be affected by indirect range restriction.},
  internal-note = {Note(review): abstract contains Unicode (≤, ρ) — fine under biblatex/Biber; if compiling with classic BibTeX, replace with $\leq$ and $\rho$.},
}